import os
import traceback
import uuid

from google.adk.agents import Agent, LlmAgent, SequentialAgent
from google.adk.models.google_llm import Gemini
from google.adk.runners import Runner
from google.adk.sessions import InMemorySessionService
from google.adk.memory import InMemoryMemoryService
from google.adk.tools import google_search, load_memory
from google.genai import types


# Read the Gemini API key from the environment (configured as a secret on the Space).
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

if not GOOGLE_API_KEY:
    raise RuntimeError(
        "GOOGLE_API_KEY is not set. Please add it as a secret in your Hugging Face Space."
    )

os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY


# Retry transient Gemini errors (rate limits, server errors) with exponential backoff.
retry_config = types.HttpRetryOptions(
    attempts=5,
    exp_base=7,
    initial_delay=1,
    http_status_codes=[429, 500, 503, 504],
)


APP_NAME = "FakeNewsApp"
USER_ID = "demo_user"

# In-memory services: sessions and memory are lost whenever the Space restarts.
session_service = InMemorySessionService()
memory_service = InMemoryMemoryService()


def generate_session_id():
    return str(uuid.uuid4())


# Agent 1: extract the single main factual claim and search keywords from the message.
claim_extractor_agent = Agent(
    name="ClaimExtractorAgent",
    model=Gemini(
        model="gemini-2.5-flash-lite",
        retry_options=retry_config,
    ),
    instruction="""
You will receive a WhatsApp forward or viral message.

TASK:
1. Identify ONE main factual claim in clean, simple form.
2. Rewrite it in one sentence.
3. Extract 3-5 search keywords.

OUTPUT FORMAT:
Claim: <cleaned claim>
Keywords: <comma separated keywords>
""",
    output_key="extracted_claim",
)


# Agent 2: gather web evidence for the extracted claim via Google Search.
evidence_search_agent = Agent(
    name="EvidenceSearchAgent",
    model=Gemini(
        model="gemini-2.5-flash-lite",
        retry_options=retry_config,
    ),
    tools=[google_search],
    instruction="""
You will receive extracted claim info:

{extracted_claim}

TASK:
1. Use google_search with 2-3 queries:
   - "<claim> fact check"
   - "<keywords> news"
   - "<keywords> official site"
2. Return 5-7 useful results with:
   - title
   - url
   - snippet
   - source type (gov, fact-check, news, blog)

OUTPUT FORMAT:
<list of sources in bullet points>
""",
    output_key="search_results",
)


# Agent 3: weigh the evidence and produce the verdict report shown to the user.
verdict_agent = Agent(
    name="VerdictAgent",
    model=Gemini(
        model="gemini-2.5-flash",
        retry_options=retry_config,
    ),
    instruction="""
You will receive search results:

{search_results}

TASK:
1. For each source, decide SUPPORT / REFUTE / IRRELEVANT.
2. Produce a Markdown table:
   | Source | Type | Stance | Summary |
3. Decide the final verdict:
   - Mostly refute → Likely FALSE
   - Mostly support → Likely TRUE
   - Mixed → Partly true/misleading
   - No credible sources → Unverified → Do not share
4. Write a SIMPLE explanation for elderly users.

OUTPUT FORMAT:
Evidence Table:
<table>

Verdict:
<verdict>

Explanation:
<simple explanation>
""",
    output_key="final_report",
)


# Agent 4: fetch the user's previous fact-checks from long-term memory.
memory_agent = LlmAgent(
    name="MemoryAgent",
    model=Gemini(
        model="gemini-2.5-flash-lite",
        retry_options=retry_config,
    ),
    tools=[load_memory],
    instruction="""
You are a background history fetcher. You are NOT a chatbot.

YOUR STRICT COMMANDS:
1. IGNORE any input text you receive from previous agents.
2. IMMEDIATELY call the function `load_memory` to get the user's past sessions.
3. Once you receive the memory data, extract and list the distinct claims found.
4. Return ONLY a bulleted list of the last 2 claims.

IF NO MEMORY DATA IS RETURNED:
- Output: "No previous checks found."

DO NOT ask "Would you like me to?". DO NOT explain what you are doing. JUST RUN THE TOOL.
""",
    output_key="recent_claims",
)
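

# Data flow: each agent's final response is saved to session.state under its
# `output_key`, and downstream agents read it back through the `{...}` placeholders
# in their instruction strings (e.g. {extracted_claim}, {search_results}).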
root_agent = SequentialAgent(
    name="FakeNewsPipeline",
    sub_agents=[
        claim_extractor_agent,
        evidence_search_agent,
        verdict_agent,
        memory_agent,
    ],
)


runner = Runner(
    agent=root_agent,
    app_name=APP_NAME,
    session_service=session_service,
    memory_service=memory_service,
)


async def run_eldersafe(query: str, session_id: str | None = None) -> dict:
    """
    Runs the full ElderSafe pipeline and returns a dict:
    {
        "clean_claim": str,
        "final_report": str (markdown),
        "memory_context": str,
    }
    This is the function Gradio will call.
    """
    if session_id is None:
        session_id = generate_session_id()

    try:
        # Create the session; ignore the error if it already exists.
        try:
            await session_service.create_session(
                app_name=APP_NAME,
                user_id=USER_ID,
                session_id=session_id,
            )
        except Exception:
            pass

        user_msg = types.Content(
            role="user",
            parts=[types.Part(text=query)],
        )

        # Drain the event stream; each agent writes its output into session state.
        async for _ in runner.run_async(
            user_id=USER_ID,
            session_id=session_id,
            new_message=user_msg,
        ):
            pass

        # Reload the finished session and store it in long-term memory for MemoryAgent.
        session = await session_service.get_session(
            app_name=APP_NAME,
            user_id=USER_ID,
            session_id=session_id,
        )
        await memory_service.add_session_to_memory(session)

        # Pull the agents' outputs out of session state.
        claim = session.state.get("extracted_claim", "No claim extracted.")
        if isinstance(claim, str) and "Claim:" in claim:
            clean_claim = claim.split("Keywords:")[0].replace("Claim:", "").strip()
        else:
            clean_claim = str(claim)

        final_report = session.state.get("final_report", "Analysis failed.")
        memory_context = session.state.get("recent_claims", "")

        return {
            "clean_claim": clean_claim,
            "final_report": final_report,
            "memory_context": memory_context,
        }
    except Exception:
        # Surface the full traceback to the UI instead of crashing the Space.
        return {
            "clean_claim": query,
            "final_report": "❌ An error occurred:\n\n" + traceback.format_exc(),
            "memory_context": "",
        }
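

# Minimal local smoke test (a sketch, not wired into the Gradio UI): running this
# module directly sends one example forward through the pipeline and prints the
# result. It assumes a valid GOOGLE_API_KEY is set in the environment.
if __name__ == "__main__":
    import asyncio

    _result = asyncio.run(
        run_eldersafe("Forwarded message: Drinking hot lemon water cures the flu in one day!")
    )
    print(_result["clean_claim"])
    print(_result["final_report"])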