yugapurush committed on
Commit
ebbf48e
·
1 Parent(s): dd16530

changed model

Browse files
Files changed (1) hide show
  1. doli4.py +18 -4
doli4.py CHANGED
@@ -109,6 +109,17 @@ def dolibarr_interface(method: str, endpoint: str, api_key=os.getenv("DOLIBARR_A
109
  logger.error(f"Unexpected error in dolibarr_interface: {e}")
110
  return json.dumps({"error": f"Unexpected error: {str(e)}"}, indent=2)
111
 
 
 
 
 
 
 
 
 
 
 
 
112
  class OpenAIDolibarrAgent:
113
  def __init__(self, openai_api_key: str, dolibarr_api_key: str, base_url: str = None):
114
  self.client = openai.OpenAI(api_key=openai_api_key, base_url=base_url)
@@ -237,7 +248,8 @@ Current date: """ + datetime.now().strftime("%Y-%m-%d")
237
 
238
  def execute_dolibarr_call(self, method: str, endpoint: str, payload: str = "") -> str:
239
  """Execute the actual Dolibarr API call"""
240
- return dolibarr_interface(method, endpoint, self.dolibarr_api_key, payload)
 
241
 
242
  def chat(self, message: str, history: List[List[str]]) -> str:
243
  """Main chat function that processes user messages"""
@@ -245,7 +257,9 @@ Current date: """ + datetime.now().strftime("%Y-%m-%d")
245
  # Convert Gradio history to OpenAI format
246
  messages = [{"role": "system", "content": self.system_prompt}]
247
 
248
- for human_msg, assistant_msg in history:
 
 
249
  if human_msg:
250
  messages.append({"role": "user", "content": human_msg})
251
  if assistant_msg:
@@ -257,7 +271,7 @@ Current date: """ + datetime.now().strftime("%Y-%m-%d")
257
  # Call OpenAI API with functions
258
  logger.info("Sending request to Nebius API...")
259
  response = self.client.chat.completions.create(
260
- model="gpt-3.5-turbo", # or gpt-4 "Qwen/Qwen3-235B-A22B",
261
  messages=messages,
262
  functions=self.functions,
263
  function_call="auto",
@@ -296,7 +310,7 @@ Current date: """ + datetime.now().strftime("%Y-%m-%d")
296
  # Get final response
297
  logger.info("Getting final response from Nebius...")
298
  final_response = self.client.chat.completions.create(
299
- model="gpt-3.5-turbo",#"Qwen/Qwen3-235B-A22B",
300
  messages=messages,
301
  max_tokens=1500
302
  )
 
109
  logger.error(f"Unexpected error in dolibarr_interface: {e}")
110
  return json.dumps({"error": f"Unexpected error: {str(e)}"}, indent=2)
111
 
112
def format_api_response(api_result, max_items=10):
    """Shrink an over-long JSON list payload so it stays prompt-friendly.

    Args:
        api_result: Raw string returned by the Dolibarr API call; expected
            (but not required) to be JSON text.
        max_items: Maximum list entries to keep before truncating.

    Returns:
        A JSON string with at most ``max_items`` entries plus a trailing
        info record when the input was a longer JSON list; otherwise the
        input string unchanged (short lists, non-lists, or non-JSON).
    """
    try:
        parsed = json.loads(api_result)
        if isinstance(parsed, list) and len(parsed) > max_items:
            shortened = parsed[:max_items] + [
                {"info": f"Showing first {max_items} results. Ask for more if needed."}
            ]
            return json.dumps(shortened, indent=2)
        return api_result
    except Exception:
        return api_result  # fallback if not JSON
123
  class OpenAIDolibarrAgent:
124
  def __init__(self, openai_api_key: str, dolibarr_api_key: str, base_url: str = None):
125
  self.client = openai.OpenAI(api_key=openai_api_key, base_url=base_url)
 
248
 
249
  def execute_dolibarr_call(self, method: str, endpoint: str, payload: str = "") -> str:
250
  """Execute the actual Dolibarr API call"""
251
+ raw_result = dolibarr_interface(method, endpoint, self.dolibarr_api_key, payload)
252
+ return format_api_response(raw_result)
253
 
254
  def chat(self, message: str, history: List[List[str]]) -> str:
255
  """Main chat function that processes user messages"""
 
257
  # Convert Gradio history to OpenAI format
258
  messages = [{"role": "system", "content": self.system_prompt}]
259
 
260
+ # Only keep the last 6 user/assistant exchange pairs (up to 12 messages)
261
+ max_history = 6
262
+ for human_msg, assistant_msg in history[-max_history:]:
263
  if human_msg:
264
  messages.append({"role": "user", "content": human_msg})
265
  if assistant_msg:
 
271
  # Call OpenAI API with functions
272
  logger.info("Sending request to Nebius API...")
273
  response = self.client.chat.completions.create(
274
+ model="gpt-4.1-mini",#"gpt-3.5-turbo", # or gpt-4 "Qwen/Qwen3-235B-A22B",
275
  messages=messages,
276
  functions=self.functions,
277
  function_call="auto",
 
310
  # Get final response
311
  logger.info("Getting final response from Nebius...")
312
  final_response = self.client.chat.completions.create(
313
+ model="gpt-4.1-mini",#"gpt-3.5-turbo",#"Qwen/Qwen3-235B-A22B",
314
  messages=messages,
315
  max_tokens=1500
316
  )