alaselababatunde committed
Commit f7f10e3 · 1 Parent(s): 766a666
Files changed (1):
  1. app.py +21 -33
app.py CHANGED
@@ -66,37 +66,15 @@ class VectorRequest(BaseModel):
 # ==============================
 # HuggingFace Pipelines
 # ==============================
-def load_pipeline(task: str, model_meta: str, model_fallback: str = None):
-    try:
-        return pipeline(task, model=model_meta)
-    except Exception as e:
-        logger.warning(f"Failed to load {model_meta}: {e}")
-        if model_fallback:
-            logger.info(f"Falling back to {model_fallback}")
-            return pipeline(task, model=model_fallback)
-        raise e
-
-# Public LLM pipelines (text-generation)
-chat_pipe = load_pipeline(
-    "text-generation",
-    model_meta="tiiuae/falcon-7b-instruct",
-    model_fallback="gpt2"
-)
-disaster_pipe = load_pipeline(
-    "text-generation",
-    model_meta="tiiuae/falcon-7b-instruct",
-    model_fallback="gpt2"
-)
-market_pipe = load_pipeline(
-    "text-generation",
-    model_meta="tiiuae/falcon-7b-instruct",
-    model_fallback="gpt2"
-)
-
-# Crop Doctor: image-to-text
-crop_pipe = load_pipeline(
-    "image-to-text",
-    model_meta="Salesforce/blip-image-captioning-base"
+# Conversational endpoints use text-generation
+chat_pipe = pipeline("text-generation", model="meta-llama/Llama-3.1-8B-Instruct")
+disaster_pipe = pipeline("text-generation", model="meta-llama/Llama-3.1-8B-Instruct")
+market_pipe = pipeline("text-generation", model="meta-llama/Llama-3.1-8B-Instruct")
+
+# Crop Doctor uses Meta Vision-Instruct model
+crop_pipe = pipeline(
+    "image-text-to-text",
+    model="meta-llama/Llama-3.2-11B-Vision-Instruct",
 )
 
 # ==============================
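Both replacement checkpoints are gated repositories on the Hugging Face Hub, so the bare pipeline(...) calls above will fail without Hub authentication. A minimal sketch of the same load with an explicit token, assuming the token is exported as HF_TOKEN (that variable name is an assumption, not part of this commit):

import os
from transformers import pipeline

# Assumption: a Hub access token with meta-llama access is exported as HF_TOKEN.
hf_token = os.environ.get("HF_TOKEN")

chat_pipe = pipeline(
    "text-generation",
    model="meta-llama/Llama-3.1-8B-Instruct",
    token=hf_token,  # forwarded to the Hub when downloading gated weights
)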
 
@@ -112,11 +90,21 @@ def run_conversational(pipe, prompt: str):
         logger.error(f"Conversational pipeline error: {e}")
         return f"⚠️ Unexpected model error: {str(e)}"
 
+
 def run_crop_doctor(image_bytes: bytes, symptoms: str):
+    """
+    Diagnose crop issues using Meta's multimodal LLaMA Vision model.
+    The model sees the crop image and reads the farmer's description,
+    then explains the likely disease and simple treatment steps.
+    """
     try:
         image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
-        prompt = f"Farmer reports: {symptoms}. Diagnose the crop disease and suggest treatment in simple language."
-        output = crop_pipe(image, prompt=prompt)
+        prompt = (
+            f"The farmer reports: {symptoms}. "
+            "Analyze the plant image and diagnose the likely crop disease. "
+            "Then provide a simple explanation and possible treatment steps."
+        )
+        output = crop_pipe(image, prompt)
         if isinstance(output, list) and len(output) > 0:
             return output[0].get("generated_text", str(output))
         return str(output)
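The positional call crop_pipe(image, prompt) leans on the image-text-to-text pipeline accepting a plain string prompt. For Llama 3.2 Vision, transformers documents a chat-style message list with an image placeholder; a hedged sketch of that variant, reusing image and prompt from run_crop_doctor above (the exact keyword usage is an assumption about this pipeline version, not code from this commit):

# Chat-format input: the {"type": "image"} placeholder is matched with the
# PIL image supplied via images=. Assumption: this pipeline accepts images=
# alongside chat messages passed in text=.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": prompt},
        ],
    }
]
output = crop_pipe(images=image, text=messages, max_new_tokens=256)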