# Historical Claim Verifier using RAG (Free Tools Only)
# Works on Hugging Face Spaces with Gradio + Wikipedia
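# Assumed requirements.txt for such a Space (versions unpinned): gradio, wikipedia,
# transformers, torch, sentencepiece (sentencepiece is typically needed by the T5 tokenizer)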

import gradio as gr
import wikipedia
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import torch

# Step 1: Define Wikipedia search and summary function
def search_wikipedia(query):
    try:
        page_titles = wikipedia.search(query, results=2)
        summaries = []
        for title in page_titles:
            try:
                summaries.append(wikipedia.summary(title, sentences=3))
            except (wikipedia.DisambiguationError, wikipedia.PageError):
                # Skip ambiguous or missing pages instead of discarding the whole lookup
                continue
        return "\n\n".join(summaries)
    except Exception as e:
        return f"Wikipedia search error: {str(e)}"

# Step 2: Load a free Hugging Face LLM manually to avoid TensorFlow dependency
model_name = "google/flan-t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

llm_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer, framework="pt")
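# Note: flan-t5-large is a fairly large checkpoint; on the free CPU tier of Spaces it may
# load slowly, and "google/flan-t5-base" is a lighter drop-in alternative (assumption:
# somewhat lower answer quality).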

# Step 3: Define the claim verification function
def verify_claim(claim):
    context = search_wikipedia(claim)
    if "error" in context.lower() or context.strip() == "":
        return "Could not retrieve relevant information. Please try a different claim."

    prompt = f"Claim: {claim}\n\nContext: {context}\n\nIs this claim true or false? Explain."
    response = llm_pipeline(prompt, max_length=512, do_sample=False)[0]['generated_text']
    return response

# Step 4: Gradio UI setup
demo = gr.Interface(
    fn=verify_claim,
    inputs=gr.Textbox(label="Enter a historical claim", placeholder="e.g., Alexander the Great died in 1971."),
    outputs=gr.Textbox(label="Claim Verification Output"),
    title="Historical Claim Verifier (RAG-Based)",
    description="Uses Wikipedia + a free LLM to verify if a historical claim is true or false, and explains why."
)

# Step 5: Launch (will auto-run on Hugging Face Spaces)
demo.launch()