alaselababatunde committed on
Commit
8a7b78f
·
1 Parent(s): 748afe9
Files changed (1)
  1. app.py +42 -15
app.py CHANGED
@@ -7,7 +7,6 @@ from pydantic import BaseModel
 from langchain.prompts import PromptTemplate
 from langchain_huggingface import HuggingFaceEndpoint
 from huggingface_hub.utils import HfHubHTTPError
-from langchain.schema import HumanMessage
 from vector import query_vector

 # ==============================
@@ -78,7 +77,7 @@ crop_template = PromptTemplate(
 )
 crop_llm = HuggingFaceEndpoint(
     repo_id="meta-llama/Llama-3.2-11B-Vision-Instruct",
-    task="conversational",  # ✅ FIXED
+    task="conversational",
     temperature=0.3,
     top_p=0.9,
     do_sample=True,
@@ -93,7 +92,7 @@ chat_template = PromptTemplate(
 )
 chat_llm = HuggingFaceEndpoint(
     repo_id="meta-llama/Llama-3.1-8B-Instruct",
-    task="conversational",  # ✅ FIXED
+    task="conversational",
     temperature=0.3,
     top_p=0.9,
     do_sample=True,
@@ -108,7 +107,7 @@ disaster_template = PromptTemplate(
 )
 disaster_llm = HuggingFaceEndpoint(
     repo_id="meta-llama/Llama-3.1-8B-Instruct",
-    task="conversational",  # ✅ FIXED
+    task="conversational",
     temperature=0.3,
     top_p=0.9,
     do_sample=True,
@@ -123,7 +122,7 @@ market_template = PromptTemplate(
 )
 market_llm = HuggingFaceEndpoint(
     repo_id="meta-llama/Llama-3.1-8B-Instruct",
-    task="conversational",  # ✅ FIXED
+    task="conversational",
     temperature=0.3,
     top_p=0.9,
     do_sample=True,
@@ -131,6 +130,19 @@ market_llm = HuggingFaceEndpoint(
     max_new_tokens=1024
 )

+# ==============================
+# ENDPOINT HELPERS
+# ==============================
+def run_conversational_model(model, prompt: str):
+    """Wraps prompt into HF conversational format"""
+    return model.invoke({
+        "inputs": {
+            "past_user_inputs": [],
+            "generated_responses": [],
+            "text": prompt
+        }
+    })
+
 # ==============================
 # ENDPOINTS
 # ==============================
@@ -138,32 +150,47 @@ market_llm = HuggingFaceEndpoint(
 async def crop_doctor(req: CropRequest, authorization: str | None = Header(None)):
     check_auth(authorization)
     prompt = crop_template.format(symptoms=req.symptoms)
-    response = crop_llm.invoke([HumanMessage(content=prompt)])
-    return {"diagnosis": str(response)}
+    try:
+        response = run_conversational_model(crop_llm, prompt)
+        return {"diagnosis": str(response)}
+    except HfHubHTTPError as e:
+        return {"error": f"HuggingFace error: {str(e)}"}

 @app.post("/multilingual-chat")
 async def multilingual_chat(req: ChatRequest, authorization: str | None = Header(None)):
     check_auth(authorization)
     prompt = chat_template.format(query=req.query)
-    response = chat_llm.invoke([HumanMessage(content=prompt)])
-    return {"reply": str(response)}
+    try:
+        response = run_conversational_model(chat_llm, prompt)
+        return {"reply": str(response)}
+    except HfHubHTTPError as e:
+        return {"error": f"HuggingFace error: {str(e)}"}

 @app.post("/disaster-summarizer")
 async def disaster_summarizer(req: DisasterRequest, authorization: str | None = Header(None)):
     check_auth(authorization)
     prompt = disaster_template.format(report=req.report)
-    response = disaster_llm.invoke([HumanMessage(content=prompt)])
-    return {"summary": str(response)}
+    try:
+        response = run_conversational_model(disaster_llm, prompt)
+        return {"summary": str(response)}
+    except HfHubHTTPError as e:
+        return {"error": f"HuggingFace error: {str(e)}"}

 @app.post("/marketplace")
 async def marketplace(req: MarketRequest, authorization: str | None = Header(None)):
     check_auth(authorization)
     prompt = market_template.format(product=req.product)
-    response = market_llm.invoke([HumanMessage(content=prompt)])
-    return {"recommendation": str(response)}
+    try:
+        response = run_conversational_model(market_llm, prompt)
+        return {"recommendation": str(response)}
+    except HfHubHTTPError as e:
+        return {"error": f"HuggingFace error: {str(e)}"}

 @app.post("/vector-search")
 async def vector_search(req: VectorRequest, authorization: str | None = Header(None)):
     check_auth(authorization)
-    results = query_vector(req.query)
-    return {"results": results}
+    try:
+        results = query_vector(req.query)
+        return {"results": results}
+    except Exception as e:
+        return {"error": f"Vector search error: {str(e)}"}
 
 
 
 