# ============================================
# AgriCopilot AI Backend — Optimized Stable Release
# ============================================

import os
import logging
import io
import torch
from fastapi import FastAPI, Request, Header, HTTPException, UploadFile, File
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from transformers import pipeline
from PIL import Image
from vector import query_vector

# ==============================
# Logging Setup
# ==============================
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("AgriCopilot")

# ==============================
# FastAPI App Init
# ==============================
app = FastAPI(title="AgriCopilot")

@app.get("/")
async def root():
    return {"status": "✅ AgriCopilot AI Backend is running and stable."}

# ==============================
# Auth Config
# ==============================
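# A demo fallback key is used when the env var is unset, so auth is always on.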
PROJECT_API_KEY = os.getenv("PROJECT_API_KEY", "agricopilot404")

def check_auth(authorization: str | None):
    """Verifies Bearer token for all requests."""
    if not PROJECT_API_KEY:
        return
    if not authorization or not authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Missing bearer token")
    token = authorization.split(" ", 1)[1]
    if token != PROJECT_API_KEY:
        raise HTTPException(status_code=403, detail="Invalid token")
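
# Illustrative request header (assumes the default demo key above):
#   Authorization: Bearer agricopilot404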

# ==============================
# Exception Handler
# ==============================
@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    logger.error(f"Unhandled error: {exc}")
    return JSONResponse(status_code=500, content={"error": str(exc)})

# ==============================
# Request Schemas
# ==============================
class ChatRequest(BaseModel):
    query: str

class DisasterRequest(BaseModel):
    report: str

class MarketRequest(BaseModel):
    product: str

class VectorRequest(BaseModel):
    query: str
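
# Illustrative request bodies (hypothetical values):
#   POST /multilingual-chat   -> {"query": "How do I treat maize rust?"}
#   POST /disaster-summarizer -> {"report": "Flooding damaged lowland rice fields."}
#   POST /marketplace         -> {"product": "cassava"}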

# ==============================
# Hugging Face Config
# ==============================
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

if not HF_TOKEN:
    logger.warning("⚠️ No Hugging Face token found. Gated models may fail to load.")
else:
    logger.info("✅ Hugging Face token detected.")

# Device setup (GPU if available)
device = 0 if torch.cuda.is_available() else -1
logger.info(f"🧠 Using device: {'GPU' if device == 0 else 'CPU'}")

# ==============================
# Pipelines
# ==============================
# Conversational + reasoning model (Meta LLaMA).
# A single shared pipeline serves the chat, disaster, and market endpoints;
# instantiating it three times would load the same 8B model into memory thrice.
chat_pipe = pipeline(
    "text-generation",
    model="meta-llama/Llama-3.1-8B-Instruct",
    token=HF_TOKEN,
    device=device,
)
disaster_pipe = chat_pipe
market_pipe = chat_pipe

# Lightweight Meta Vision backbone (ConvNeXt-Tiny)
crop_vision = pipeline(
    "image-classification",
    model="facebook/convnext-tiny-224",
    token=HF_TOKEN,
    device=device,
)

# ==============================
# Helper Functions
# ==============================
def run_conversational(pipe, prompt: str):
    """Handles conversational tasks safely."""
    try:
        # return_full_text=False keeps the prompt out of the returned reply.
        output = pipe(prompt, max_new_tokens=200, temperature=0.7, do_sample=True, return_full_text=False)
        if isinstance(output, list) and len(output) > 0:
            return output[0].get("generated_text", str(output))
        return str(output)
    except Exception as e:
        logger.error(f"Conversational pipeline error: {e}")
        return f"⚠️ Model error: {str(e)}"
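
# Illustrative call (assumes the shared pipeline loaded successfully):
#   run_conversational(chat_pipe, "Suggest drought-tolerant maize varieties.")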

def run_crop_doctor(image_bytes: bytes, symptoms: str):
    """
    Hybrid Crop Doctor System:
    1. Uses ConvNeXt to classify plant visuals.
    2. Pulls related info from vector dataset.
    3. LLaMA 3.1 generates a short diagnosis and treatment guide.
    """
    try:
        # --- Step 1: Vision Classification ---
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
        vision_results = crop_vision(image)
        if not vision_results or "label" not in vision_results[0]:
            raise ValueError("No vision classification result received.")
        top_label = vision_results[0]["label"]

        # --- Step 2: Vector Knowledge Recall ---
        vector_matches = query_vector(symptoms)
        related_knowledge = " ".join(vector_matches[:3]) if isinstance(vector_matches, list) else str(vector_matches)

        # --- Step 3: Reasoning via LLaMA ---
        prompt = (
            f"A farmer uploaded a maize image showing signs of '{top_label}'. "
            f"Reported symptoms: {symptoms}. "
            f"Knowledge base reference: {related_knowledge}. "
            "Generate a structured diagnostic report with:\n"
            "1. Disease Name\n2. Cause\n3. Treatment\n4. Prevention Tips\n"
            "Keep the explanation short and easy for farmers to understand."
        )

        # Greedy decoding (do_sample=False) gives consistent reports; a sampling
        # temperature would be ignored, so it is omitted. return_full_text=False
        # keeps the prompt out of the generated report.
        response = chat_pipe(prompt, max_new_tokens=250, do_sample=False, truncation=True, return_full_text=False)

        # Extract text output
        if isinstance(response, list) and len(response) > 0:
            text = response[0].get("generated_text", "").strip()
            return text if text else "⚠️ No response generated. Try again with a clearer image or more detailed symptoms."
        return "⚠️ Unexpected response format from reasoning model."

    except Exception as e:
        logger.error(f"Crop Doctor error: {e}")
        return f"⚠️ Crop Doctor encountered an issue: {str(e)}"

# ==============================
# Endpoints
# ==============================
@app.post("/crop-doctor")
async def crop_doctor(
    symptoms: str = Header(...),
    image: UploadFile = File(...),
    authorization: str | None = Header(None)
):
    """Diagnose crop disease from image and text."""
    check_auth(authorization)
    image_bytes = await image.read()
    diagnosis = run_crop_doctor(image_bytes, symptoms)
    return {"diagnosis": diagnosis}

@app.post("/multilingual-chat")
async def multilingual_chat(req: ChatRequest, authorization: str | None = Header(None)):
    check_auth(authorization)
    reply = run_conversational(chat_pipe, req.query)
    return {"reply": reply}

@app.post("/disaster-summarizer")
async def disaster_summarizer(req: DisasterRequest, authorization: str | None = Header(None)):
    check_auth(authorization)
    summary = run_conversational(disaster_pipe, req.report)
    return {"summary": summary}

@app.post("/marketplace")
async def marketplace(req: MarketRequest, authorization: str | None = Header(None)):
    check_auth(authorization)
    recommendation = run_conversational(market_pipe, req.product)
    return {"recommendation": recommendation}

@app.post("/vector-search")
async def vector_search(req: VectorRequest, authorization: str | None = Header(None)):
    check_auth(authorization)
    try:
        results = query_vector(req.query)
        return {"results": results}
    except Exception as e:
        logger.error(f"Vector search error: {e}")
        return {"error": f"Vector search error: {str(e)}"}

# ============================================
# END OF FILE
# ============================================