import gradio as gr
import torch
from PIL import Image
from typing import Dict, Optional, Tuple
from transformers import AutoImageProcessor, SiglipForImageClassification

from trufor_runner import TruForEngine, TruForResult, TruForUnavailableError

MODEL_ID = "Ateeqq/ai-vs-human-image-detector"

# Use the GPU when available so inference stays responsive.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

try:
    processor = AutoImageProcessor.from_pretrained(MODEL_ID)
    model = SiglipForImageClassification.from_pretrained(MODEL_ID)
    model.to(device)
    model.eval()
except Exception as exc:  # pragma: no cover - surface loading issues early.
    raise RuntimeError(f"Failed to load model from {MODEL_ID}") from exc

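# TruFor is optional and runs on CPU here; if its weights or assets are missing,
# keep the app usable and surface the reason via the status message shown in the UI.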
try:
    TRUFOR_ENGINE: Optional[TruForEngine] = TruForEngine(device="cpu")
    TRUFOR_STATUS = TRUFOR_ENGINE.status_message
except TruForUnavailableError as exc:
    TRUFOR_ENGINE = None
    TRUFOR_STATUS = str(exc)


def analyze_ai_vs_human(image: Image.Image) -> Tuple[Dict[str, float], str]:
    """Run the Hugging Face detector and return confidences with a readable summary."""
    if image is None:
        empty_scores = {label: 0.0 for label in model.config.id2label.values()}
        return empty_scores, "No image provided."

    image = image.convert("RGB")
    inputs = processor(images=image, return_tensors="pt").to(device)

    with torch.no_grad():
        logits = model(**inputs).logits

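    # Convert logits to per-class probabilities so the Label component can display confidences.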
    probabilities = torch.softmax(logits, dim=-1)[0]
    scores = {
        model.config.id2label[idx]: float(probabilities[idx])
        for idx in range(probabilities.size(0))
    }

    top_idx = int(probabilities.argmax().item())
    top_label = model.config.id2label[top_idx]
    top_score = scores[top_label]
    summary = (
        f"**Predicted Label:** {top_label}  \n"
        f"**Confidence:** {top_score:.4f}"
    )

    return scores, summary


def analyze_trufor(image: Image.Image) -> Tuple[str, Optional[Image.Image], Optional[Image.Image]]:
    """Run TruFor inference when available, otherwise return diagnostics."""
    if TRUFOR_ENGINE is None:
        return TRUFOR_STATUS, None, None

    if image is None:
        return "Upload an image to run TruFor.", None, None

    try:
        result: TruForResult = TRUFOR_ENGINE.infer(image)
    except TruForUnavailableError as exc:
        return str(exc), None, None

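    # Build a Markdown summary: the headline tamper score plus any auxiliary raw scores.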
    summary_lines = []
    if result.score is not None:
        summary_lines.append(f"**Tamper Score:** {result.score:.4f}")
    extras_dict = result.raw_scores.copy()
    if result.score is not None:
        extras_dict.pop("tamper_score", None)
    if extras_dict:
        extras = "  ".join(f"{key}: {value:.4f}" for key, value in extras_dict.items())
        summary_lines.append(f"`{extras}`")
    if not summary_lines:
        summary_lines.append("TruFor returned no scores for this image.")

    return "\n".join(summary_lines), result.map_overlay, result.confidence_overlay


def analyze_image(image: Image.Image) -> Tuple[Dict[str, float], str, str, Optional[Image.Image], Optional[Image.Image]]:
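    """Run both detectors on the uploaded image and return outputs in UI order."""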
    ai_scores, ai_summary = analyze_ai_vs_human(image)
    trufor_summary, tamper_overlay, conf_overlay = analyze_trufor(image)
    return ai_scores, ai_summary, trufor_summary, tamper_overlay, conf_overlay


with gr.Blocks() as demo:
    gr.Markdown(
        """# Image Authenticity Workbench
Upload an image to compare the AI-vs-human classifier with the TruFor forgery detector."""
    )

    status_box = gr.Markdown(f"`{TRUFOR_STATUS}`")

    image_input = gr.Image(label="Input Image", type="pil")
    analyze_button = gr.Button("Analyze", variant="primary", size="sm")

    with gr.Tabs():
        with gr.TabItem("AI vs Human"):
            ai_label_output = gr.Label(label="Prediction", num_top_classes=2)
            ai_summary_output = gr.Markdown("Upload an image to view the prediction.")
        with gr.TabItem("TruFor Forgery Detection"):
            trufor_summary_output = gr.Markdown("Configure TruFor assets to enable tamper analysis.")
            tamper_overlay_output = gr.Image(label="Tamper Heatmap", type="pil", interactive=False)
            conf_overlay_output = gr.Image(label="Confidence Heatmap", type="pil", interactive=False)

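    # Ordered to match the tuple returned by analyze_image.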
    output_components = [
        ai_label_output,
        ai_summary_output,
        trufor_summary_output,
        tamper_overlay_output,
        conf_overlay_output,
    ]

    analyze_button.click(
        fn=analyze_image,
        inputs=image_input,
        outputs=output_components,
    )

    image_input.change(
        fn=analyze_image,
        inputs=image_input,
        outputs=output_components,
    )


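# Running this module directly starts a local Gradio server; demo.launch(share=True)
# would also create a temporary public link if one is needed.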
if __name__ == "__main__":
    demo.launch()