aryn25 commited on
Commit
c79b66a
·
verified ·
1 Parent(s): dd00fe2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -41
app.py CHANGED
@@ -1,53 +1,41 @@
1
- # Text-Only RAG Claim Verifier for Historical & Political Facts
 
2
 
3
  import gradio as gr
4
- from duckduckgo_search import DDGS
5
  from transformers import pipeline
 
6
 
7
- # Load lightweight LLM for verdict generation
8
- llm_pipeline = pipeline("text2text-generation", model="google/flan-t5-base", device=-1)
9
-
10
# Retrieve web snippets that may support or refute the claim.
def retrieve_context(claim, num_results=3):
    """Fetch up to *num_results* DuckDuckGo text hits for *claim*.

    Returns a list of "- title: body" bullet strings, or a single
    placeholder entry when the search yields nothing.
    """
    with DDGS() as ddgs:
        hits = ddgs.text(
            claim,
            region='wt-wt',
            safesearch='Moderate',
            max_results=num_results,
        )
        bullets = [f"- {hit['title']}: {hit['body']}" for hit in hits]
    # Empty result list -> explicit placeholder so the caller's prompt
    # never contains an empty context section.
    return bullets if bullets else ["No relevant documents found."]
17
-
18
# Verify a claim: retrieve context, then ask the LLM for a verdict.
def verify_claim_text_only(claim):
    """Return a Markdown report verifying *claim* (TRUE/FALSE/UNCERTAIN).

    Pipeline: (1) retrieve web snippets, (2) build a RAG prompt,
    (3) generate a verdict with the seq2seq model. Any failure is
    reported as an error string rather than raised, because this is
    the Gradio UI boundary.
    """
    try:
        # Step 1: Retrieve supporting info
        retrieved_docs = retrieve_context(claim)

        # Step 2: Compose prompt for RAG+LLM
        context_block = "\n".join(retrieved_docs)
        prompt = f"""
Claim: "{claim}"

The following information was retrieved from reliable sources:
{context_block}

Based on the above context, is the claim TRUE, FALSE, or UNCERTAIN? Justify your answer clearly. Also provide the correct version of the claim if it's false.
"""

        # Step 3: Run LLM
        response = llm_pipeline(prompt, max_new_tokens=250)[0]['generated_text']

        # Fix: restore the UTF-8 emoji that were mojibake-garbled
        # ("πŸ“œ"/"πŸ”"/"βœ…") in the user-facing output string.
        return f"📜 **Claim**: {claim}\n\n🔍 **Retrieved Info**:\n{context_block}\n\n✅ **Verdict & Justification**:\n{response}"

    except Exception as e:
        # Broad catch is deliberate: surface any failure in the UI
        # instead of crashing the Gradio worker.
        return f"❌ Error: {str(e)}"
 
42
 
43
# Gradio Interface — single textbox in, Markdown verdict out.
# Fix: restore the mojibake-garbled title characters ("🧠", "–")
# to their intended UTF-8 forms (🧠, –).
demo = gr.Interface(
    fn=verify_claim_text_only,
    inputs=gr.Textbox(lines=2, label="Enter a historical or political claim"),
    outputs=gr.Markdown(),
    title="🧠 FactCheckGPT – Historical & Political Claim Verifier",
    description="Enter a factual claim (e.g., 'Alexander the Great died in 1971') and the app will verify if it's TRUE, FALSE, or UNCERTAIN using live web retrieval and LLM justification."
)
51
 
52
# Entry point: only launch the UI when run as a script, so the module
# stays importable without side effects.
if __name__ == "__main__":
    demo.launch()
 
1
+ # Historical Claim Verifier using RAG (Free Tools Only)
2
+ # Works on Hugging Face Spaces with Gradio + Wikipedia
3
 
4
  import gradio as gr
5
+ from langchain.document_loaders import WikipediaLoader
6
  from transformers import pipeline
7
+ import wikipedia
8
 
9
# Step 1: Define Wikipedia search and summary function
def search_wikipedia(query):
    """Look up *query* on Wikipedia and return joined 3-sentence summaries.

    Searches the top 2 matching page titles. A title whose summary lookup
    fails (typically a DisambiguationError for ambiguous titles) is now
    skipped instead of aborting the whole retrieval — previously one bad
    title inside the comprehension discarded every result. When the search
    itself fails, returns a string prefixed with "Wikipedia search error:".
    """
    try:
        page_titles = wikipedia.search(query, results=2)
        summaries = []
        for title in page_titles:
            try:
                summaries.append(wikipedia.summary(title, sentences=3))
            except Exception:
                # Disambiguation/redirect pages raise; keep the summaries
                # already collected and move on to the next title.
                continue
        return "\n\n".join(summaries)
    except Exception as e:
        return f"Wikipedia search error: {str(e)}"
 
 
 
 
 
 
 
 
17
 
18
# Step 2: Free Hugging Face seq2seq model used for verdict generation.
llm_pipeline = pipeline(
    task="text2text-generation",
    model="google/flan-t5-large",
)
20
 
21
# Step 3: Define the claim verification function
def verify_claim(claim):
    """Verify a historical claim against Wikipedia context via the LLM.

    Returns the model's true/false explanation, or a user-facing notice
    when no usable context could be retrieved.
    """
    context = search_wikipedia(claim)
    # Fix: treat only the retriever's own sentinel prefix as a failure.
    # The previous check — `"error" in context.lower()` — false-positived
    # on legitimate summaries containing the word "error" (e.g. articles
    # mentioning "trial and error").
    if not context.strip() or context.startswith("Wikipedia search error"):
        return "Could not retrieve relevant information. Please try a different claim."

    prompt = f"Claim: {claim}\n\nContext: {context}\n\nIs this claim true or false? Explain."
    # Greedy decoding keeps verdicts deterministic for a given context.
    response = llm_pipeline(prompt, max_length=512, do_sample=False)[0]['generated_text']
    return response
30
 
31
# Step 4: Gradio UI setup — components built separately, then wired
# into the Interface.
claim_input = gr.Textbox(
    label="Enter a historical claim",
    placeholder="e.g., Alexander the Great died in 1971.",
)
verdict_output = gr.Textbox(label="Claim Verification Output")

demo = gr.Interface(
    fn=verify_claim,
    inputs=claim_input,
    outputs=verdict_output,
    title="Historical Claim Verifier (RAG-Based)",
    description="Uses Wikipedia + a free LLM to verify if a historical claim is true or false, and explains why.",
)
39
 
40
# Step 5: Launch (will auto-run on Hugging Face Spaces)
# Fix: guard the launch like the previous revision did, so importing
# this module (e.g. in tests or by the Spaces runtime) does not start
# a server as a side effect; `python app.py` still launches normally.
if __name__ == "__main__":
    demo.launch()