import gradio as gr
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import torch
import torch.nn.functional as F

# ===================== Startup Info =====================
print("\n" + "="*80)
print("πŸ” BEST FREE AI IMAGE DETECTOR 2025 - ATEEQQ MODEL ONLY")
print("="*80)
print("\nBased on Ateeqq 2025 benchmarks:")
print("βœ“ Diffusion detection (Midjourney, DALL-E, Stable Diffusion): 88-94% accuracy")
print("βœ“ CNN + Semantic Analysis approach")
print("="*80 + "\n")

# ===================== Load Model =====================
MODEL_NAME = "Ateeqq/ai-vs-human-image-detector"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"πŸ–₯️ Device: {str(device).upper()}\n")

try:
    processor = AutoImageProcessor.from_pretrained(MODEL_NAME)
    model = AutoModelForImageClassification.from_pretrained(MODEL_NAME).to(device)
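    # Put the model in inference mode (disables dropout and other training-only behavior)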
    model.eval()
    print(f"βœ… Successfully loaded model: {MODEL_NAME}")
except Exception as e:
    raise RuntimeError(f"❌ Failed to load model: {str(e)}") from e

# ===================== Prediction Function =====================
def predict(image: Image.Image):
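    """Classify a PIL image as AI-generated vs. real; returns (verdict, AI score, text report)."""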
    if image is None:
        return "❌ No image uploaded", 0.0, "Upload an image to analyze"

    if image.mode != "RGB":
        image = image.convert("RGB")

    try:
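        # The processor resizes and normalizes the image into the tensor format the model expects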
        inputs = processor(images=image, return_tensors="pt").to(device)
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits
            probs = F.softmax(logits, dim=1)[0].cpu().numpy()

        # Resolve which output index is the AI class from the model config instead of
        # hard-coding it (falls back to index 1 if no label name starts with "ai").
        ai_idx = next((int(i) for i, lbl in model.config.id2label.items()
                       if str(lbl).lower().startswith("ai")), 1)
        ai_prob, real_prob = float(probs[ai_idx]), float(probs[1 - ai_idx])
        pred = "🚨 AI-GENERATED" if ai_prob > real_prob else "βœ… REAL PHOTO"
        confidence = max(ai_prob, real_prob)

        # Build simple report
        report = f"""
╔════════════════════════════════════════════════════════╗
β•‘          πŸ”¬ Ateeqq AI Image Detection Report          β•‘
β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•

🎯 PREDICTION: {pred}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
AI Probability:       {ai_prob:.4f}
Real Probability:     {real_prob:.4f}
Detection Confidence: {confidence:.4f}

βœ… Detected by: Ateeqq/ai-vs-human-image-detector
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
β€’ High accuracy on DALL-E 3, Midjourney v6+, Stable Diffusion
β€’ CNN + Semantic Analysis approach
β€’ Robust for post-processed AI images
β€’ Free to use for research or analysis
"""
        return pred, round(ai_prob, 4), report

    except Exception as e:
        return f"❌ Error: {str(e)}", 0.0, f"Processing failed: {str(e)}"

# ===================== Gradio Interface =====================
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil", label="πŸ“Έ Upload Image"),
    outputs=[
        gr.Textbox(label="🎯 Detection Result"),
        gr.Number(label="πŸ“Š AI Score (0.0-1.0)"),
        gr.Textbox(label="πŸ“‹ Detection Report", lines=25)
    ],
    title="πŸ” Ateeqq AI Image Detector (2025)",
    description="Detect AI-generated images using the official Ateeqq model from Hugging Face. Works best for DALL-E 3, Midjourney v6+, Stable Diffusion."
)

if __name__ == "__main__":
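    # Note: demo.launch(share=True) would additionally create a temporary public link.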
    demo.launch()