karthikeya1212 committed
Commit c2fa6a9 · verified · 1 Parent(s): fb2e003

Update app.py

Files changed (1)
  1. app.py +50 -125
app.py CHANGED
@@ -3,32 +3,23 @@ from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import torch
import torch.nn.functional as F
- import numpy as np

print("\n" + "="*80)
- print("🏆 SENTRY-IMAGE NEURIPS 2023 - BEST FREE AI DETECTOR")
+ print("🏆 SSP-AI-GENERATED-IMAGE DETECTOR - 2025 STATE-OF-THE-ART")
print("="*80)
- print("\nBased on NeurIPS 2023 Paper: 'Seeing is not always believing'")
- print("✓ 87% Accuracy (13% failure rate) - State-of-the-art FREE model")
- print("✓ Trained on Fake2M: 2M+ real + AI images (Midjourney v5, DALL-E 3, Stable Diffusion)")
- print("✓ Top-performing model from MPBench benchmark")
+ print("\nBased on SSP 2025 Benchmark")
+ print("✓ High precision on latest AI generators (Midjourney v6+, DALL-E 3, Stable Diffusion)")
+ print("✓ Trained on massive datasets with real + AI images")
print("="*80 + "\n")

- # SENTRY-IMAGE MODELS (NeurIPS 2023 Winner)
+ # NEW MODEL CONFIG
MODELS_CONFIG = [
    {
-         "name": "Inf-imagine/Sentry-Image",
-         "weight": 0.65,
-         "type": "Sentry-Image (NeurIPS 2023)",
-         "proven_accuracy": "87% (13% failure rate)",
-         "best_for": "Midjourney v5, DALL-E 3, Stable Diffusion (State-of-art)"
-     },
-     {
-         "name": "Ateeqq/ai-vs-human-image-detector",
-         "weight": 0.35,
-         "type": "SigLIP Ensemble",
-         "proven_accuracy": "88-94% on diffusion models",
-         "best_for": "Modern generators (v6.1+, Flux)"
+         "name": "bcmi/SSP-AI-Generated-Image-Detection",
+         "weight": 1.0,
+         "type": "SSP-AI Detection",
+         "proven_accuracy": "SOTA (claimed >90% on modern generators)",
+         "best_for": "Midjourney v6+, DALL-E 3, Stable Diffusion, GANs, and Hybrid generators"
    },
]

@@ -40,10 +31,11 @@ processors_list = []
model_metadata = []
loaded_count = 0

+ # Load models
for i, config in enumerate(MODELS_CONFIG):
    model_name = config["name"]
    try:
-         print(f"[{i+1}/2] {model_name}")
+         print(f"[{i+1}/{len(MODELS_CONFIG)}] {model_name}")
        print(f" • Type: {config['type']}")
        print(f" • Weight: {int(config['weight']*100)}%")
        print(f" • Accuracy: {config['proven_accuracy']}")
@@ -62,21 +54,13 @@ for i, config in enumerate(MODELS_CONFIG):
        print(f" ⚠️ Warning: Failed to load - {str(e)[:50]}\n")

if loaded_count == 0:
-     # Fallback to Ateeqq only
-     print("Loading fallback model...\n")
-     processor = AutoImageProcessor.from_pretrained("Ateeqq/ai-vs-human-image-detector")
-     model = AutoModelForImageClassification.from_pretrained("Ateeqq/ai-vs-human-image-detector").to(device)
-     model.eval()
-     models_list.append(model)
-     processors_list.append(processor)
-     model_metadata.append(MODELS_CONFIG[1])
-     loaded_count = 1
+     raise RuntimeError("❌ Failed to load SSP-AI model. Check Hugging Face access and model name.")

print("="*80)
- print(f"✅ Successfully loaded {loaded_count} models")
- print(f"📊 Ensemble weight: {sum(m['weight'] for m in model_metadata):.1f}")
+ print(f"✅ Successfully loaded {loaded_count} model(s)")
print("="*80 + "\n")

+
def predict(image):
    if image is None:
        return "❌ No image uploaded", 0.0, "Upload an image to analyze"
@@ -88,64 +72,52 @@ def predict(image):
        all_scores = []
        model_results = []

-         # Run all models
+         # Run models
        for idx, (processor, model) in enumerate(zip(processors_list, models_list)):
-             try:
-                 inputs = processor(images=image, return_tensors="pt").to(device)
-
-                 with torch.no_grad():
-                     outputs = model(**inputs)
-                     logits = outputs.logits
-                     probs = F.softmax(logits, dim=1)[0].cpu().numpy()
-
-                 real_prob = float(probs[0])
-                 ai_prob = float(probs[1])
-
-                 all_scores.append(ai_prob)
-
-                 pred = "🤖 AI-Generated" if ai_prob > real_prob else "✓ Real Photo"
-                 conf = max(ai_prob, real_prob)
-
-                 meta = model_metadata[idx]
-                 model_results.append({
-                     'name': meta["name"].split('/')[-1],
-                     'type': meta['type'],
-                     'weight': meta['weight'],
-                     'prediction': pred,
-                     'ai_score': ai_prob,
-                     'real_score': real_prob,
-                     'confidence': conf,
-                     'accuracy': meta['proven_accuracy']
-                 })
-
-             except Exception as e:
-                 print(f"Model error: {e}")
-                 continue
-
-         if not all_scores:
-             return "❌ Processing error", 0.0, "Could not analyze image"
-
-         # WEIGHTED ENSEMBLE VOTING
+             inputs = processor(images=image, return_tensors="pt").to(device)
+             with torch.no_grad():
+                 outputs = model(**inputs)
+                 logits = outputs.logits
+                 probs = F.softmax(logits, dim=1)[0].cpu().numpy()
+
+             real_prob = float(probs[0])
+             ai_prob = float(probs[1])
+             all_scores.append(ai_prob)
+
+             pred = "🤖 AI-Generated" if ai_prob > real_prob else "✓ Real Photo"
+             conf = max(ai_prob, real_prob)
+             meta = model_metadata[idx]
+
+             model_results.append({
+                 'name': meta["name"].split('/')[-1],
+                 'type': meta['type'],
+                 'weight': meta['weight'],
+                 'prediction': pred,
+                 'ai_score': ai_prob,
+                 'real_score': real_prob,
+                 'confidence': conf,
+                 'accuracy': meta['proven_accuracy']
+             })
+
+         # Weighted score
        weights = [m['weight'] for m in model_metadata[:len(all_scores)]]
        total_weight = sum(weights)
        normalized_weights = [w/total_weight for w in weights]
-
        weighted_ai_score = sum(s * w for s, w in zip(all_scores, normalized_weights))

-         # Threshold
        threshold = 0.50
-
        is_ai = weighted_ai_score > threshold
        final_pred = "🚨 AI-GENERATED" if is_ai else "✅ REAL PHOTO"
        confidence = max(weighted_ai_score, 1 - weighted_ai_score)

+         # Individual votes
        ai_votes = sum(1 for r in model_results if "AI" in r['prediction'])
        total_votes = len(model_results)

-         # BUILD REPORT
+         # Build report
        report = f"""
╔════════════════════════════════════════════════════════════════════════╗
- ║ 🏆 SENTRY-IMAGE NeurIPS 2023 - AI DETECTION REPORT ║
+ ║ 🏆 SSP-AI-Generated-Image Detection Report ║
╚════════════════════════════════════════════════════════════════════════╝

🎯 PREDICTION: {final_pred}
@@ -168,54 +140,6 @@ Model {i}: {result['name']} ({result['type']})
├─ AI Score: {result['ai_score']:.4f} | Real Score: {result['real_score']:.4f}
├─ Model Confidence: {result['confidence']:.4f}
└─ Accuracy: {result['accuracy']}
- """
-
-         report += f"""
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-
- 🏆 SENTRY-IMAGE NeurIPS 2023 BREAKTHROUGH:
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-
- Research Finding:
- • Humans: 61.3% accuracy (38.7% failure rate) - STRUGGLE to detect AI
- • Sentry-Image: 87% accuracy (13% failure rate) - TOP MODEL
- • Improvement: 26% better than human perception!
-
- Dataset (Fake2M):
- ✓ 2M+ real images + AI-generated images
- ✓ Midjourney v5, DALL-E 3, Stable Diffusion
- ✓ Latest diffusion-based generators
- ✓ Covers 8 categories: Landscape, Portrait, Animal, Plant, etc.
-
- Cross-Generator Evaluation (MPBench):
- • Detects: GANs, Diffusion Models, Hybrid generators
- • Generalizes: Works across different image domains
- • Robust: Handles real-world conditions
-
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-
- ✅ DETECTION CAPABILITIES:
- ✓ Midjourney v5 (Sentry trained on)
- ✓ DALL-E 3 (Sentry trained on)
- ✓ Stable Diffusion 2/3 (Sentry trained on)
- ✓ Modern diffusion variants
- ✓ Realistic AI-generated humans
- ✓ Cross-scene generalization
- ✓ Handles compression & post-processing
-
- 📚 CITATION:
- "Seeing is not always believing: Benchmarking Human and Model
- Perception of AI-Generated Images" - NeurIPS 2023
- Authors: Zeyu Lu, Di Huang, Lei Bai, et al.
- Paper: https://github.com/Inf-imagine/Sentry
-
- ⚠️ NOTE:
- This is the SOTA free model for AI detection. Even with 87% accuracy,
- 13% of advanced AI images may still fool the detector. For critical
- applications, always verify with secondary analysis.
-
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
- ╚════════════════════════════════════════════════════════════════════════╝
"""

        return final_pred, round(weighted_ai_score, 4), report
@@ -223,6 +147,7 @@ applications, always verify with secondary analysis.
    except Exception as e:
        return f"❌ Error: {str(e)}", 0.0, f"Processing failed: {str(e)}"

+
# Gradio Interface
demo = gr.Interface(
    fn=predict,
@@ -230,11 +155,11 @@ demo = gr.Interface(
    outputs=[
        gr.Textbox(label="🎯 Detection Result"),
        gr.Number(label="📊 AI Score (0.0-1.0)"),
-         gr.Textbox(label="📋 Sentry-Image Analysis", lines=35)
+         gr.Textbox(label="📋 SSP-AI Analysis", lines=30)
    ],
-     title="🏆 Sentry-Image NeurIPS 2023 Detector",
-     description="Best FREE AI image detector (87% accuracy). NeurIPS 2023 winner - detects Midjourney v5, DALL-E 3, Stable Diffusion with state-of-the-art accuracy."
+     title="🏆 SSP-AI-Generated-Image Detector 2025",
+     description="High-precision AI image detector for latest generators (Midjourney v6+, DALL-E 3, Stable Diffusion, GANs)."
)

if __name__ == "__main__":
-     demo.launch()
+     demo.launch()
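
Illustrative sketch (not part of the commit): the snippet below mirrors the single-model inference path that the updated app.py relies on. It assumes, as the new code does, that the bcmi/SSP-AI-Generated-Image-Detection checkpoint loads through the standard transformers auto classes, and that class index 1 is the AI label; model.config.id2label is the authoritative place to confirm that ordering. The input filename is hypothetical.

# Minimal sketch of the inference path used by the updated app.py (assumptions noted above).
import torch
import torch.nn.functional as F
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

MODEL_ID = "bcmi/SSP-AI-Generated-Image-Detection"  # model name taken from the diff above
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the processor/model pair the same way the updated app.py does.
processor = AutoImageProcessor.from_pretrained(MODEL_ID)
model = AutoModelForImageClassification.from_pretrained(MODEL_ID).to(device)
model.eval()

image = Image.open("example.jpg").convert("RGB")  # hypothetical input image
inputs = processor(images=image, return_tensors="pt").to(device)
with torch.no_grad():
    probs = F.softmax(model(**inputs).logits, dim=1)[0]

# app.py assumes index 0 = real and index 1 = AI; print the label map rather than trusting that blindly.
print(model.config.id2label)
ai_prob = float(probs[1])
print(f"AI score: {ai_prob:.4f} -> {'AI-GENERATED' if ai_prob > 0.5 else 'REAL PHOTO'}")

Because MODELS_CONFIG now holds a single entry with weight 1.0, the weighted average in predict() reduces to this raw softmax probability, so the app's reported AI score should agree with this sketch up to rounding.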