#
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
# SPDX-License-Identifier: Apache-2.0
#
import time
from assets.css.reasoning import styles
from ..response.formatter import assistant_response
from ..reasoning.interface import reasoning_interfaces
from ..reasoning.tool_reasoning import tool_reasoning
from .parser import extract_tool_parameters
from .executor import invoke_tool_function
from config import MAX_TOKENS, REASONING_DELAY


def process_tool_interactions(server, model_name, conversation_messages, tool_definitions, search_engine):
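    """Run a tool-calling retry loop against the model.

    Streams styled log markup (via ``yield``) while tool calls are parsed and
    executed, then returns a tuple
    (conversation_messages, logs_generator, generator_results), where
    generator_results is True when at least one tool produced a result.
    """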
    maximum_iterations = 1
    max_retry_limit = 10
    retry_count = 0
    logs_generator = ""
    tool_results = []
    execution_success = False
    last_error = None
    error_history = []
    iteration_metrics = {
        "attempts": 0,
        "failures": 0,
        "success_rate": 0,
        "error_patterns": {},
        "retry_delays": [0.02, 0.03, 0.04, 0.05, 0.06, 0.07],
        "backoff_multiplier": 1.0
    }
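    # Outer retry loop: keep attempting until a pass succeeds or the
    # iteration budget reaches the retry limit.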
    while maximum_iterations <= max_retry_limit and not execution_success:
        iteration_metrics["attempts"] += 1
        current_iteration_successful = False
        iteration_errors = []
        for iteration_index in range(maximum_iterations):
            try:
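                # Back off before retries, using a capped delay schedule
                # scaled by the backoff multiplier.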
                retry_delay = iteration_metrics["retry_delays"][min(retry_count, len(iteration_metrics["retry_delays"]) - 1)]
                if retry_count > 0:
                    time.sleep(retry_delay * iteration_metrics["backoff_multiplier"])
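                # Ask the model for its next step, letting it decide whether
                # to call a tool.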
                model_response = server.chat.completions.create(
                    model=model_name,
                    messages=conversation_messages,
                    tools=tool_definitions,
                    tool_choice="auto",
                    max_tokens=MAX_TOKENS,
                    temperature=0.6
                )
                response_choice = model_response.choices[0]
                assistant_message = response_choice.message
                formatted_assistant_message = assistant_response(assistant_message)
                conversation_messages.append(
                    {
                        "role": formatted_assistant_message["role"],
                        "content": formatted_assistant_message["content"],
                        "tool_calls": formatted_assistant_message["tool_calls"]
                    }
                )
                pending_tool_calls = assistant_message.tool_calls or []
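                # No tool calls means the model answered directly; collapse
                # the log panel and finish.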
                if not pending_tool_calls:
                    if logs_generator:
                        logs_generator = styles(logs_generator.replace('<br>', '\n').strip(), expanded=False)
                    execution_success = True
                    current_iteration_successful = True
                    break
                tool_execution_errors = []
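                # Execute each tool call the model requested, in order.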
                for tool_invocation in pending_tool_calls:
                    tool_name = tool_invocation.function.name
                    tool_arguments_raw = tool_invocation.function.arguments
                    extracted_arguments, extraction_error = extract_tool_parameters(tool_arguments_raw)
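                    # Argument extraction failed: record the error pattern,
                    # stream it to the log, and pass the error text back as
                    # the tool result.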
                    if extraction_error:
                        error_key = f"{tool_name}_extraction"
                        iteration_metrics["error_patterns"][error_key] = iteration_metrics["error_patterns"].get(error_key, 0) + 1
                        tool_execution_errors.append({
                            "tool": tool_name,
                            "error": extraction_error,
                            "type": "extraction"
                        })
                        reasoning_error = tool_reasoning(tool_name, None, "error", error=extraction_error)
                        for i in range(0, len(reasoning_error), 10):
                            logs_generator = styles(reasoning_interfaces(reasoning_error, i), expanded=True)
                            yield logs_generator
                            time.sleep(REASONING_DELAY)
                        logs_generator = styles(reasoning_error, expanded=True)
                        yield logs_generator
                        tool_execution_result = extraction_error
                    else:
                        reasoning_status = tool_reasoning(tool_name, extracted_arguments, "parsing")
                        for i in range(0, len(reasoning_status), 10):
                            logs_generator = styles(reasoning_interfaces(reasoning_status, i), expanded=True)
                            yield logs_generator
                            time.sleep(REASONING_DELAY)
                        reasoning_start = tool_reasoning(tool_name, extracted_arguments, "executing")
                        for i in range(0, len(reasoning_start), 10):
                            logs_generator = styles(reasoning_interfaces(reasoning_start, i), expanded=True)
                            yield logs_generator
                            time.sleep(REASONING_DELAY)
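                        # Invoke the tool; any exception is captured and its
                        # text becomes the tool result fed back to the model.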
                        try:
                            tool_execution_result = invoke_tool_function(
                                search_engine,
                                tool_name,
                                extracted_arguments
                            )
                            tool_results.append({
                                "tool": tool_name,
                                "arguments": extracted_arguments,
                                "result": tool_execution_result,
                                "iteration": maximum_iterations,
                                "retry_count": retry_count
                            })
                            reasoning_done = tool_reasoning(tool_name, extracted_arguments, "completed", result=tool_execution_result)
                            for i in range(0, len(reasoning_done), 10):
                                logs_generator = styles(reasoning_interfaces(reasoning_done, i), expanded=True)
                                yield logs_generator
                                time.sleep(REASONING_DELAY)
                            logs_generator = styles(reasoning_done, expanded=False)
                            yield logs_generator
                        except Exception as tool_error:
                            error_key = f"{tool_name}_execution"
                            iteration_metrics["error_patterns"][error_key] = iteration_metrics["error_patterns"].get(error_key, 0) + 1
                            tool_execution_errors.append({
                                "tool": tool_name,
                                "error": str(tool_error),
                                "type": "execution",
                                "arguments": extracted_arguments
                            })
                            reasoning_error = tool_reasoning(tool_name, extracted_arguments, "error", error=str(tool_error))
                            for i in range(0, len(reasoning_error), 10):
                                logs_generator = styles(reasoning_interfaces(reasoning_error, i), expanded=True)
                                yield logs_generator
                                time.sleep(REASONING_DELAY)
                            logs_generator = styles(reasoning_error, expanded=True)
                            yield logs_generator
                            tool_execution_result = str(tool_error)
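                    # Feed the tool output (or error text) back into the
                    # conversation so the model can use it.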
                    conversation_messages.append(
                        {
                            "role": "tool",
                            "tool_call_id": tool_invocation.id,
                            "name": tool_name,
                            "content": tool_execution_result
                        }
                    )
                if not tool_execution_errors:
                    execution_success = True
                    current_iteration_successful = True
                    break
                else:
                    iteration_errors.extend(tool_execution_errors)
            except Exception as model_error:
                last_error = str(model_error)
                error_history.append({
                    "iteration": maximum_iterations,
                    "error": last_error,
                    "timestamp": time.time()
                })
                iteration_metrics["failures"] += 1
                iteration_errors.append({
                    "error": last_error,
                    "type": "model"
                })
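        # A successful pass ends the outer loop; otherwise record the errors
        # and escalate before retrying.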
        if current_iteration_successful:
            execution_success = True
            break
        else:
            if iteration_errors:
                error_history.extend(iteration_errors)
            retry_count += 1
            previous_iterations = maximum_iterations
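            # Grow the iteration budget faster when the same error pattern
            # keeps repeating, but never past the retry limit.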
            if iteration_metrics["error_patterns"]:
                frequent_errors = max(iteration_metrics["error_patterns"].values())
                if frequent_errors > 3:
                    maximum_iterations = min(maximum_iterations + 2, max_retry_limit)
                else:
                    maximum_iterations = min(maximum_iterations + 1, max_retry_limit)
            else:
                maximum_iterations = min(maximum_iterations + 1, max_retry_limit)
            if maximum_iterations > previous_iterations:
                retry_reasoning = f"Retrying with increased iterations: {maximum_iterations} (attempt {retry_count + 1})"
                for i in range(0, len(retry_reasoning), 10):
                    logs_generator = styles(reasoning_interfaces(retry_reasoning, i), expanded=True)
                    yield logs_generator
                    time.sleep(REASONING_DELAY)
            if maximum_iterations >= max_retry_limit:
                final_error = f"Maximum retry limit reached after {iteration_metrics['attempts']} attempts with {iteration_metrics['failures']} failures"
                logs_generator = styles(final_error, expanded=True)
                yield logs_generator
                break
    iteration_metrics["success_rate"] = (len(tool_results) / max(iteration_metrics["attempts"], 1)) * 100
    if logs_generator:
        logs_generator = styles(logs_generator.replace('<br>', '\n').strip(), expanded=False)
    generator_results = len(tool_results) > 0
    return conversation_messages, logs_generator, generator_results
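
# Usage sketch (illustrative only; `render` is a hypothetical UI callback and
# the argument values are placeholders). Because this generator also returns
# a value, drive it with next()/StopIteration (or `yield from`) rather than a
# plain for loop, which would discard the returned tuple:
#
#   gen = process_tool_interactions(server, "model-name", messages, tools, engine)
#   try:
#       while True:
#           render(next(gen))
#   except StopIteration as stop:
#       conversation_messages, logs_html, had_results = stop.value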