thrimurthi2025 committed
Commit 4c40d3d · verified · 1 Parent(s): 78e86b6

Update app.py

Files changed (1)
  1. app.py +232 -48
app.py CHANGED
@@ -1,70 +1,254 @@
-# app.py (Option B - Minimal local pipeline; may use more RAM)
-import os, io, base64, traceback
 import gradio as gr
 from transformers import pipeline
-from PIL import Image
-
-MODEL_ID = "Ateeqq/ai-vs-human-image-detector"
-HF_TOKEN = os.environ.get("HF_TOKEN")  # set if model private
-
-# Try to load pipeline (defensive)
 pipes = []
-load_error = None
-try:
-    pipes.append((MODEL_ID, pipeline("image-classification", model=MODEL_ID, use_auth_token=HF_TOKEN)))
-    load_error = None
-    print(f"[INFO] Loaded {MODEL_ID}")
-except Exception as e:
-    load_error = repr(e)
-    print("[ERROR] Failed to load pipeline:", load_error)
-
-def predict(image: Image.Image):
-    if image is None:
-        return None, "<div style='color:red;'>Upload an image first</div>", load_error or ""
-    if not pipes:
-        # Show the exact load error to help debugging
-        return image, "<div style='color:red;'>No models loaded</div>", load_error or "No pipeline"
-    model_id, pipe = pipes[0]
     try:
-        res = pipe(image)
-        if not res:
-            return image, "<div style='color:red;'>Model returned no results</div>", ""
-        top = res[0]
-        label = top.get("label","").lower()
-        score = top.get("score", 0.0) * 100
         if "ai" in label or "fake" in label:
             verdict = f"🧠 AI-Generated ({score:.1f}% confidence)"
             color = "#007BFF"
         else:
             verdict = f"🧍 Human-Made ({score:.1f}% confidence)"
             color = "#4CAF50"
         html = f"""
-        <div style='background:linear-gradient(135deg,{color}33,#1a1a1a);
-                    border:2px solid {color}; border-radius:12px; padding:18px;
-                    text-align:center; color:white; font-weight:700;'>
-            {verdict}<div style="font-size:12px;opacity:0.85;margin-top:6px">Model: {model_id}</div>
         </div>
         """
-        return image, html, ""
     except Exception as e:
-        err = repr(e)
-        return image, f"<div style='color:red;'>Inference failed: {err}</div>", err
 
 css = """
-.gradio-container { font-family: 'Poppins', sans-serif; }
 """
 
-with gr.Blocks(css=css) as demo:
-    gr.Markdown("<h2>🔍 Unreal Eye (Local single-model)</h2>")
     with gr.Row():
-        with gr.Column():
-            inp = gr.Image(type="pil", label="Upload an image")
-            btn = gr.Button("Analyze")
-            btn_clear = gr.Button("Clear")
-        with gr.Column():
-            out_img = gr.Image(type="pil", label="Original / Overlay")
-            out_html = gr.HTML()
-    load_box = gr.Textbox(label="Load status / explainability", value=(load_error or "Model loaded" if pipes else "No model loaded"), interactive=False)
-    btn.click(predict, inputs=inp, outputs=[out_img, out_html, load_box])
-    btn_clear.click(lambda: (None, "", ""), outputs=[out_img, out_html, load_box])
 
 demo.launch()

 import gradio as gr
 from transformers import pipeline
+from PIL import Image, ImageFilter, ImageOps
+import numpy as np
+import traceback
+import io
+import base64
+
+# -----------------------------
+# Your original model list
+# -----------------------------
+models = [
+    ("Ateeqq/ai-vs-human-image-detector", "ateeq"),
+    ("umm-maybe/AI-image-detector", "umm_maybe"),
+    ("dima806/ai_vs_human_generated_image_detection", "dimma"),
+]
+
+# load pipelines (same as your working code)
 pipes = []
+for model_id, _ in models:
     try:
+        pipes.append((model_id, pipeline("image-classification", model=model_id)))
+        print(f"Loaded {model_id}")
+    except Exception as e:
+        print(f"Error loading {model_id}: {e}")
+
+# -----------------------------
+# Helper: simple texture-based saliency map (no cv2, no model internals)
+# - This approximates "where the image has high-frequency detail"
+# - Not true Grad-CAM, but a lightweight explainability overlay that's safe to run in Spaces
+# -----------------------------
+def compute_texture_heatmap(pil_img, downsample=128):
+    """
+    Returns a 2D float numpy array (0..1) heatmap highlighting textured/high-frequency regions.
+    Steps:
+    - convert to grayscale
+    - blur to remove low-frequency shading
+    - compute absolute difference between original and blurred to highlight texture
+    - normalize
+    """
+    try:
+        # convert and resize for speed
+        w, h = pil_img.size
+        short = min(downsample, max(64, min(w, h)))
+        img_small = pil_img.convert("L").resize((short, short), resample=Image.BILINEAR)
+        # blurred version
+        blurred = img_small.filter(ImageFilter.GaussianBlur(radius=3))
+        # absolute difference
+        arr_orig = np.array(img_small).astype(np.float32) / 255.0
+        arr_blur = np.array(blurred).astype(np.float32) / 255.0
+        diff = np.abs(arr_orig - arr_blur)
+        # amplify small differences
+        diff = diff ** 0.8
+        # normalize to 0..1
+        diff = diff - diff.min()
+        diff = diff / (diff.max() + 1e-8)
+        return diff
+    except Exception as e:
+        print("compute_texture_heatmap error:", e)
+        return None
+
+def apply_colormap_numpy(heatmap):
+    """
+    Simple jet-like colormap without cv2.
+    heatmap: 2D float array 0..1
+    returns: HxWx3 uint8 RGB
+    """
+    h = np.clip(heatmap, 0.0, 1.0)
+    c = np.zeros((h.shape[0], h.shape[1], 3), dtype=np.float32)
+    c[..., 0] = np.clip(1.5 - 4.0 * np.abs(h - 0.25), 0, 1)  # R
+    c[..., 1] = np.clip(1.5 - 4.0 * np.abs(h - 0.5), 0, 1)   # G
+    c[..., 2] = np.clip(1.5 - 4.0 * np.abs(h - 0.75), 0, 1)  # B
+    return (c * 255).astype(np.uint8)
+
+def overlay_heatmap_on_pil(orig_pil, heatmap, alpha=0.55):
+    """
+    orig_pil: PIL RGB
+    heatmap: small 2D float array (0..1) -> will be resized to image
+    returns: PIL RGB overlay image
+    """
+    try:
+        orig = np.array(orig_pil.convert("RGB")).astype(np.uint8)
+        # resize heatmap to image size using PIL
+        hm_img = Image.fromarray((np.clip(heatmap, 0, 1) * 255).astype(np.uint8))
+        hm_resized = np.array(hm_img.resize((orig.shape[1], orig.shape[0]), resample=Image.BILINEAR)) / 255.0
+        colored = apply_colormap_numpy(hm_resized)
+        overlay = np.clip(orig * (1 - alpha) + colored * alpha, 0, 255).astype(np.uint8)
+        return Image.fromarray(overlay)
+    except Exception as e:
+        print("overlay_heatmap_on_pil error:", e)
+        return orig_pil
+
+# -----------------------------
+# Your original predict function, extended to return overlay + reason
+# -----------------------------
+def predict_image(image: Image.Image):
+    try:
+        results = []
+        for _, pipe in pipes:
+            # some pipelines may raise; make it robust
+            try:
+                res = pipe(image)
+                if isinstance(res, list) and res:
+                    res0 = res[0]
+                elif isinstance(res, dict):
+                    res0 = res
+                else:
+                    res0 = {"label": "error", "score": 0.0}
+            except Exception as e:
+                print("pipeline error:", e)
+                res0 = {"label": "error", "score": 0.0}
+            results.append(res0)
+
+        if not results:
+            return "<div style='color:red;'>No models loaded</div>", None, "no pipelines"
+
+        final_result = results[0]
+        label = final_result.get("label", "").lower()
+        score = final_result.get("score", 0.0) * 100
+
         if "ai" in label or "fake" in label:
             verdict = f"🧠 AI-Generated ({score:.1f}% confidence)"
             color = "#007BFF"
         else:
             verdict = f"🧍 Human-Made ({score:.1f}% confidence)"
             color = "#4CAF50"
+
+        # create the same styled HTML box you had
         html = f"""
+        <div class='result-box' style="
+            background: linear-gradient(135deg, {color}33, #1a1a1a);
+            border: 2px solid {color};
+            border-radius: 15px;
+            padding: 25px;
+            text-align: center;
+            color: white;
+            font-size: 20px;
+            font-weight: 600;
+            box-shadow: 0 0 20px {color}55;
+            animation: fadeIn 0.6s ease-in-out;
+        ">
+            {verdict}
         </div>
         """
+
+        # compute a lightweight texture heatmap (fast) and overlay
+        heatmap = compute_texture_heatmap(image, downsample=160)
+        overlay_img = None
+        explain_reason = ""
+        if heatmap is None:
+            explain_reason = "explainability failed"
+        else:
+            try:
+                overlay_img = overlay_heatmap_on_pil(image, heatmap, alpha=0.55)
+                explain_reason = "Texture-based saliency overlay (approximate explainability)"
+            except Exception as e:
+                print("overlay creation failed:", e)
+                overlay_img = None
+                explain_reason = "overlay failed"
+
+        # return: html string, overlay PIL image (or None), explain_reason text
+        return html, overlay_img, explain_reason
+
     except Exception as e:
+        traceback.print_exc()
+        return f"<div style='color:red;'>Error analyzing image: {str(e)}</div>", None, "error"
 
+# -----------------------------
+# CSS (same as yours)
+# -----------------------------
 css = """
+body, .gradio-container {
+    font-family: 'Poppins', sans-serif !important;
+    background: transparent !important;
+}
+h1 {
+    text-align: center;
+    font-weight: 700;
+    color: #007BFF;
+    margin-bottom: 10px;
+}
+.gr-button-primary {
+    background-color: #007BFF !important;
+    color: white !important;
+    font-weight: 600;
+    border-radius: 10px;
+    height: 45px;
+}
+.gr-button-secondary {
+    background-color: #dc3545 !important;
+    color: white !important;
+    border-radius: 10px;
+    height: 45px;
+}
+#pulse-loader {
+    width: 100%;
+    height: 4px;
+    background: linear-gradient(90deg, #007BFF, #00C3FF);
+    animation: pulse 1.2s infinite ease-in-out;
+    border-radius: 2px;
+    box-shadow: 0 0 10px #007BFF;
+}
+@keyframes pulse {
+    0%   { transform: scaleX(0.1); opacity: 0.6; }
+    50%  { transform: scaleX(1);   opacity: 1; }
+    100% { transform: scaleX(0.1); opacity: 0.6; }
+}
+@keyframes fadeIn {
+    from { opacity: 0; transform: scale(0.95); }
+    to   { opacity: 1; transform: scale(1); }
+}
 """
 
+# -----------------------------
+# Gradio UI (keeps your layout)
+# -----------------------------
+with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
+    gr.Markdown("<h1>🔍 AI Image Detector</h1>")
+
     with gr.Row():
+        with gr.Column(scale=1):
+            image_input = gr.Image(type="pil", label="Upload an image")
+            analyze_button = gr.Button("Analyze", variant="primary")
+            clear_button = gr.Button("Clear", variant="secondary")
+            loader = gr.HTML("")
+        with gr.Column(scale=1):
+            # show original / overlay side-by-side like you had
+            orig_display = gr.Image(type="pil", label="Original")
+            overlay_display = gr.Image(type="pil", label="Overlay")
+            explain_box = gr.Markdown("Explainability:")
+            explain_text = gr.Textbox(label="", interactive=False)
+
+    output = gr.HTML(label="Result")
+
+    def analyze(img):
+        if img is None:
+            # yield instead of return: analyze is a generator, so a bare
+            # return would never surface the error message in the UI
+            yield ("", None, None, "<div style='color:red;'>Please upload an image first!</div>", "")
+            return
+        loader_html = "<div id='pulse-loader'></div>"
+        yield (loader_html, None, None, "", "")  # show loader
+
+        # run prediction + explain
+        html, overlay_img, explain_reason = predict_image(img)
+        note = f"<div style='margin-top:8px; color:#ccc; font-size:12px;'>{explain_reason}</div>"
+
+        # if overlay exists, show both original and overlay
+        if overlay_img is not None:
+            yield ("", img, overlay_img, html + note, explain_reason)
+        else:
+            # no overlay: show the original in both slots
+            yield ("", img, img, html + note, explain_reason)
+
+    # route explain_reason into the explainability textbox too,
+    # so the explain_text widget is actually populated
+    analyze_button.click(analyze, inputs=image_input,
+                         outputs=[loader, orig_display, overlay_display, output, explain_text])
+    clear_button.click(lambda: ("", None, None, "", ""),
+                       outputs=[loader, orig_display, overlay_display, output, explain_text])
+
 demo.launch()
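
As a quick sanity check of the explainability path added in this commit, the sketch below re-implements the same blur-difference saliency and triangle-wave colormap outside the Space and runs them on a synthetic half-gradient/half-noise image. The helper names, the test image, and the output file overlay_demo.png are all local to this sketch (not part of app.py); only the math mirrors compute_texture_heatmap, apply_colormap_numpy, and overlay_heatmap_on_pil above.

import numpy as np
from PIL import Image, ImageFilter

def texture_heatmap(img, size=128):
    # grayscale + downsample, then keep what Gaussian blur removes (high-frequency texture)
    g = img.convert("L").resize((size, size), Image.BILINEAR)
    a = np.asarray(g, dtype=np.float32) / 255.0
    b = np.asarray(g.filter(ImageFilter.GaussianBlur(3)), dtype=np.float32) / 255.0
    d = np.abs(a - b) ** 0.8            # amplify faint texture, as in the commit
    d -= d.min()
    return d / (d.max() + 1e-8)         # normalize to 0..1

def overlay(img, hm, alpha=0.55):
    # upsample the heatmap, color it with the jet-like triangle functions, and blend
    rgb = np.asarray(img.convert("RGB"), dtype=np.float32)
    hm_img = Image.fromarray((hm * 255).astype(np.uint8))
    hm_big = np.asarray(hm_img.resize(img.size, Image.BILINEAR), dtype=np.float32) / 255.0
    jet = np.stack([np.clip(1.5 - 4.0 * np.abs(hm_big - c), 0, 1)
                    for c in (0.25, 0.5, 0.75)], axis=-1)
    return Image.fromarray(np.clip(rgb * (1 - alpha) + jet * 255.0 * alpha, 0, 255).astype(np.uint8))

if __name__ == "__main__":
    # left half: smooth gradient (low texture); right half: noise (high texture)
    left = np.tile(np.linspace(0, 255, 128, dtype=np.uint8), (256, 1))
    right = np.random.default_rng(0).integers(0, 256, (256, 128), dtype=np.uint8)
    test = Image.fromarray(np.hstack([left, right])).convert("RGB")
    overlay(test, texture_heatmap(test)).save("overlay_demo.png")

If the saliency behaves as intended, the noisy right half of overlay_demo.png lights up while the smooth gradient stays near the low end of the colormap.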