Update app.py
app.py CHANGED
@@ -4,14 +4,14 @@ from PIL import Image
 import random
 import traceback
 
-# Load
+# Load models
 models = {
     "Ateeq": pipeline("image-classification", model="ateeqahamed-ai/ai-image-detector"),
     "HuggingFace": pipeline("image-classification", model="umm-maybe/AI-image-detector"),
     "Falconsai": pipeline("image-classification", model="falconsai/nsfw_image_detection"),
 }
 
-#
+# Confidence normalization
 def normalize_confidence(label, confidence):
     if label.lower() in ["ai-generated", "fake", "ai generated", "ai", "generated"]:
         min_c, max_c = 0.75, 0.95
@@ -22,8 +22,7 @@ def normalize_confidence(label, confidence):
     adjusted = max(min_c, min(adjusted, max_c))
     return round(adjusted * 100, 2)
 
-
-# Combine model results with priority weighting
+# Combine model predictions with priority weighting
 def combine_results(results):
     weights = {"Ateeq": 0.6, "HuggingFace": 0.2, "Falconsai": 0.2}
     combined_scores = {"AI-generated": 0, "Real": 0}
@@ -37,8 +36,7 @@ def combine_results(results):
     final_score = combined_scores[final_label]
     return {"label": final_label, "score": final_score}
 
-
-# Main predict function
+# Main prediction logic
 def predict_image(image: Image.Image):
     try:
         results = {name: model(image) for name, model in models.items()}
@@ -47,52 +45,42 @@ def predict_image(image: Image.Image):
         label = combined["label"]
         score = normalize_confidence(label, combined["score"])
 
-        return f"
-
+        return f"🧠 {label}\nConfidence: {score}%"
     except Exception as e:
         traceback.print_exc()
-        return f"Error
+        return f"❌ Error: {str(e)}"
 
-
-# Build Gradio Interface
+# UI
 with gr.Blocks(css="""
-#
-    display: none;
+#result-box {
     text-align: center;
-    font-
-
-
-
-
-
-
-    font-weight: bold;
+    font-size: 22px;
+    font-weight: 600;
+    color: #222;
+    background: #f0f6ff;
+    border-radius: 12px;
+    padding: 15px;
+    box-shadow: 0px 0px 8px #a0c4ff;
     margin-top: 15px;
 }
 """) as demo:
-    gr.Markdown("<h1 style='text-align:center;'>🧠 UnrealEye AI Image Detector</h1>")
-
-    with gr.Row():
-        image_input = gr.Image(type="pil", label="Upload Image", elem_id="input_img")
-
+    gr.Markdown("<h1 style='text-align:center; color:#007bff;'>🧠 UnrealEye AI Image Detector</h1>")
+
     with gr.Row():
-
-        clear_btn = gr.Button("🧹 Clear")
+        image_input = gr.Image(type="pil", label="Upload Image")
 
-
-
+    with gr.Row():
+        analyze_btn = gr.Button("🔍 Analyse", variant="primary")
+        clear_btn = gr.Button("🧹 Clear", variant="stop")
 
-
-        loading.visible = True
-        result = predict_image(image)
-        loading.visible = False
-        return result
+    output_text = gr.Textbox(label="Detection Result", elem_id="result-box")
 
+    # Actions
     analyze_btn.click(
-        predict_image,
-        inputs=
-        outputs=
-        show_progress=True
+        fn=predict_image,
+        inputs=image_input,
+        outputs=output_text,
+        show_progress=True  # shows built-in Gradio loading spinner
    )
 
    clear_btn.click(lambda: (None, ""), None, [image_input, output_text])
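
Note: the middle of normalize_confidence (file lines 18-21) sits outside the diff context, so the actual rescaling step is not visible here. A minimal sketch of how the visible pieces plausibly fit together, assuming the raw 0-1 score is mapped into the label-dependent [min_c, max_c] band before the clamp shown at line 22; the else-branch band and the rescaling line are assumptions, not the committed code:

def normalize_confidence(label, confidence):
    if label.lower() in ["ai-generated", "fake", "ai generated", "ai", "generated"]:
        min_c, max_c = 0.75, 0.95                    # band for AI-style labels (from the diff)
    else:
        min_c, max_c = 0.55, 0.90                    # assumed band for "Real"-style labels
    adjusted = min_c + (max_c - min_c) * confidence  # assumed rescaling of the raw score
    adjusted = max(min_c, min(adjusted, max_c))      # clamp, as shown at line 22
    return round(adjusted * 100, 2)                  # percentage, as shown at line 23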
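
Note: only the head and tail of combine_results appear in the hunks (file lines 29-35 are outside the context), so the voting loop itself is not shown. A hypothetical sketch of the weighted combination implied by the weights and combined_scores dicts, assuming each pipeline's top prediction is bucketed into "AI-generated" or "Real"; the bucketing rule below is an assumption:

def combine_results(results):
    weights = {"Ateeq": 0.6, "HuggingFace": 0.2, "Falconsai": 0.2}
    combined_scores = {"AI-generated": 0, "Real": 0}
    for name, preds in results.items():
        # Each pipeline returns a list like [{"label": ..., "score": ...}, ...]; take the top prediction
        top = max(preds, key=lambda p: p["score"])
        # Assumption: AI/fake-style labels count toward "AI-generated", everything else toward "Real"
        if top["label"].lower() in ("ai-generated", "fake", "ai", "artificial"):
            combined_scores["AI-generated"] += weights.get(name, 0) * top["score"]
        else:
            combined_scores["Real"] += weights.get(name, 0) * top["score"]
    final_label = max(combined_scores, key=combined_scores.get)
    final_score = combined_scores[final_label]
    return {"label": final_label, "score": final_score}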
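
Note: the end of app.py is unchanged and therefore not part of this commit. On a typical Gradio Space the file presumably closes by launching the Blocks app, along the lines of (assumed, not shown in the diff):

if __name__ == "__main__":
    demo.launch()  # assumed standard Gradio launch at the end of app.py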