aryn25 committed on
Commit
dd00fe2
·
verified ·
1 Parent(s): 8723f28

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -46
app.py CHANGED
@@ -1,71 +1,53 @@
1
- # Image-to-Claim Verifier v2 (with Verdict using BLIP + DuckDuckGo + Flan-T5)
2
 
3
  import gradio as gr
4
- from transformers import BlipProcessor, BlipForConditionalGeneration, pipeline
5
- from PIL import Image
6
  from duckduckgo_search import DDGS
7
- import torch
8
 
9
# Load BLIP (Salesforce/blip-image-captioning-base) once at import time for
# image captioning; both processor and model share the same checkpoint.
blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

# Load Flan-T5 for final verdict generation; device=-1 pins the pipeline to CPU.
verdict_pipeline = pipeline("text2text-generation", model="google/flan-t5-base", device=-1)

# DuckDuckGo search
18
def retrieve_passages(query, num_results=3):
    """Return up to `num_results` DuckDuckGo snippet bodies for `query`.

    Falls back to a one-element placeholder list when the search yields
    nothing, so the caller always gets a non-empty list.
    """
    with DDGS() as ddgs:
        snippets = [
            hit['body']
            for hit in ddgs.text(query, region='wt-wt', safesearch='Moderate',
                                 max_results=num_results)
        ]
    return snippets or ["No relevant documents found."]
24
 
25
# Main function: caption the image, search the web, and ask the LLM for a verdict.
def verify_claim(image, claim):
    """Verify a user claim about an uploaded image.

    Pipeline: BLIP captions the image, DuckDuckGo retrieves passages for the
    combined caption+claim query, and Flan-T5 judges the claim as
    TRUE / FALSE / UNCERTAIN. Returns a Markdown string; on any failure
    returns an error string instead of raising (Gradio renders it directly).
    """
    try:
        # Step 1: Get caption from BLIP
        # (image arrives as a PIL image — gr.Image(type="pil") in the interface)
        inputs = blip_processor(image, return_tensors="pt")
        out = blip_model.generate(**inputs)
        caption = blip_processor.decode(out[0], skip_special_tokens=True)

        # Step 2: Retrieve info for caption + claim together
        combined_query = f"{caption} {claim}"
        passages = retrieve_passages(combined_query)

        # Step 3: Build prompt for LLM; each passage index is guarded because
        # retrieve_passages may return fewer than three results.
        prompt = f"""
The image shows: {caption}
The user claims: "{claim}"

The following information was found online:
1. {passages[0] if len(passages) > 0 else ''}
2. {passages[1] if len(passages) > 1 else ''}
3. {passages[2] if len(passages) > 2 else ''}

Based on this, is the claim TRUE, FALSE, or UNCERTAIN? Justify your answer in 2-3 lines.
"""

        # Step 4: Get final judgment from the text2text pipeline
        answer = verdict_pipeline(prompt, max_new_tokens=150)[0]['generated_text']

        # passages[0] is safe unguarded: retrieve_passages never returns empty.
        return f"πŸ–Ό **Image Caption:** {caption}\n\nπŸ“š **Search Results:**\n- {passages[0]}\n- {passages[1] if len(passages) > 1 else ''}\n- {passages[2] if len(passages) > 2 else ''}\n\nβœ… **Verdict:**\n{answer}"

    except Exception as e:
        # Broad catch is deliberate: any failure becomes a visible UI message.
        return f"❌ Error: {str(e)}"
57
 
58
# Gradio App: image + claim in, Markdown verdict out.
demo = gr.Interface(
    fn=verify_claim,
    inputs=[
        gr.Image(type="pil", label="Upload Image"),
        gr.Textbox(lines=2, label="Enter a claim about the image")
    ],
    outputs=gr.Markdown(),  # verify_claim returns a Markdown-formatted string
    title="🧠 Image-to-Claim Verifier (with Final Verdict)",
    description="Upload an image and enter a claim. The app uses BLIP to caption the image, searches the web for context, and uses Flan-T5 to verify the claim as TRUE, FALSE, or UNCERTAIN with justification."
)

# Launch the UI only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()
 
1
+ # Text-Only RAG Claim Verifier for Historical & Political Facts
2
 
3
  import gradio as gr
 
 
4
  from duckduckgo_search import DDGS
5
+ from transformers import pipeline
6
 
7
# Load lightweight LLM for verdict generation: Flan-T5 base through the
# text2text-generation pipeline; device=-1 forces CPU inference.
llm_pipeline = pipeline("text2text-generation", model="google/flan-t5-base", device=-1)
 
9
 
10
# Function to retrieve supporting info
def retrieve_context(claim, num_results=3):
    """Retrieve up to `num_results` DuckDuckGo text snippets for a claim.

    Each snippet is formatted as "- <title>: <body>". Returns a one-element
    placeholder list when the search yields nothing, so callers can always
    join the result into a non-empty context block.
    """
    with DDGS() as ddgs:
        # .get() guards against result dicts missing 'title'/'body' so a
        # single malformed hit cannot abort the whole verification (the
        # caller's broad except would otherwise swallow all results).
        docs = [
            f"- {r.get('title', '')}: {r.get('body', '')}"
            for r in ddgs.text(claim, region='wt-wt', safesearch='Moderate',
                               max_results=num_results)
        ]
    return docs or ["No relevant documents found."]
17
 
18
# Function to verify claim
def verify_claim_text_only(claim):
    """Verify a factual claim using web retrieval (RAG) + Flan-T5.

    Returns a Markdown report: the claim, the retrieved snippets, and the
    model's TRUE / FALSE / UNCERTAIN verdict with justification. Any failure
    (network, model, malformed results) is returned as an error string
    rather than raised, so Gradio renders it directly.
    """
    try:
        # Step 1: Retrieve supporting info from the live web
        retrieved_docs = retrieve_context(claim)

        # Step 2: Compose prompt for RAG+LLM
        context_block = "\n".join(retrieved_docs)
        prompt = f"""
Claim: "{claim}"

The following information was retrieved from reliable sources:
{context_block}

Based on the above context, is the claim TRUE, FALSE, or UNCERTAIN? Justify your answer clearly. Also provide the correct version of the claim if it's false.
"""

        # Step 3: Run LLM
        response = llm_pipeline(prompt, max_new_tokens=250)[0]['generated_text']

        # Emoji below were mojibake (mis-decoded UTF-8) in the original
        # source; restored to proper characters so the Markdown renders.
        return f"📜 **Claim**: {claim}\n\n🔍 **Retrieved Info**:\n{context_block}\n\n✅ **Verdict & Justification**:\n{response}"

    except Exception as e:
        # Broad catch is deliberate: surface every failure as a UI message.
        return f"❌ Error: {str(e)}"
42
 
43
# Gradio Interface: single textbox in, Markdown verdict out.
demo = gr.Interface(
    fn=verify_claim_text_only,
    inputs=gr.Textbox(lines=2, label="Enter a historical or political claim"),
    outputs=gr.Markdown(),  # verify_claim_text_only returns Markdown text
    title="🧠 FactCheckGPT – Historical & Political Claim Verifier",
    description="Enter a factual claim (e.g., 'Alexander the Great died in 1971') and the app will verify if it's TRUE, FALSE, or UNCERTAIN using live web retrieval and LLM justification."
)

# Launch the app only when executed directly (not when imported).
if __name__ == "__main__":
    demo.launch()