youssefleb committed on
Commit
4d79a4c
·
verified ·
1 Parent(s): 2f0d926

Update agent_logic.py

Browse files
Files changed (1) hide show
  1. agent_logic.py +71 -25
agent_logic.py CHANGED
@@ -1,8 +1,10 @@
1
- # agent_logic.py (Milestone 5 - FINAL & ROBUST)
2
  import asyncio
3
  from typing import AsyncGenerator, Dict, Optional
4
  import json
5
  import os
 
 
6
  import google.generativeai as genai
7
  from anthropic import AsyncAnthropic
8
  from openai import AsyncOpenAI
@@ -102,31 +104,48 @@ class StrategicSelectorAgent:
102
  yield f"Diagnosis: {classification}"
103
 
104
  async def solve(self, problem: str) -> AsyncGenerator[str, None]:
105
- classification_generator = self._classify_problem(problem)
106
- classification = ""
107
- async for status_update in classification_generator:
108
- yield status_update
109
- if "Diagnosis: " in status_update:
110
- classification = status_update.split(": ")[-1]
111
-
112
- if "Error generating response" in classification:
113
- yield "Classifier failed. Defaulting to Single Agent."
114
- classification = "Direct_Procedure"
115
 
116
- solution_draft = ""
117
- v_fitness_json = {}
118
- scores = {}
119
-
120
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  # --- MAIN LOOP (Self-Correction) ---
122
  for i in range(2):
123
-
124
  current_problem = problem
125
  if i > 0:
126
  yield f"--- (Loop {i}) Score is too low. Initiating Self-Correction... ---"
127
  correction_prompt_text = self.corrector.get_correction_plan(v_fitness_json)
128
  yield f"Diagnosis: {correction_prompt_text.splitlines()[3].strip()}"
129
  current_problem = f"{problem}\n\n{correction_prompt_text}"
 
 
 
 
 
 
130
 
131
  # --- DEPLOY ---
132
  default_persona = PERSONAS_DATA[config.DEFAULT_PERSONA_KEY]["description"]
@@ -142,7 +161,18 @@ class StrategicSelectorAgent:
142
  elif classification == "Cognitive_Labyrinth":
143
  if i == 0:
144
  yield "Deploying: Static Heterogeneous Team (Cognitive Diversity)..."
145
- team_plan, calibration_errors = await self.calibrator.calibrate_team(current_problem)
 
 
 
 
 
 
 
 
 
 
 
146
  if calibration_errors:
147
  yield "--- CALIBRATION WARNINGS ---"
148
  for err in calibration_errors: yield err
@@ -165,11 +195,10 @@ class StrategicSelectorAgent:
165
  yield "Evaluating draft (live)..."
166
  v_fitness_json = await self.evaluator.evaluate(current_problem, solution_draft)
167
 
168
- # --- Robust Normalization of Evaluation Data ---
169
  normalized_fitness = {}
170
  if isinstance(v_fitness_json, dict):
171
  for k, v in v_fitness_json.items():
172
- # Determine score value (safe check for list wrapping, which causes the crash)
173
  if isinstance(v, dict):
174
  score_value = v.get('score')
175
  justification_value = v.get('justification', str(v))
@@ -177,10 +206,9 @@ class StrategicSelectorAgent:
177
  score_value = v[0].get('score')
178
  justification_value = v[0].get('justification', str(v[0]))
179
  else:
180
- score_value = v.get('score', 0) if isinstance(v, dict) else 0 # Fallback check
181
  justification_value = str(v)
182
 
183
- # FIX: Extract the integer score from the string (e.g., "4/5" -> 4)
184
  if isinstance(score_value, str):
185
  try:
186
  score_value = int(re.search(r'\d+', score_value).group())
@@ -193,19 +221,25 @@ class StrategicSelectorAgent:
193
  score_value = 0
194
 
195
  normalized_fitness[k] = {'score': score_value, 'justification': justification_value}
196
-
197
  else:
198
  normalized_fitness = {k: {'score': 0, 'justification': "Invalid JSON structure"} for k in ["Novelty", "Usefulness_Feasibility", "Flexibility", "Elaboration", "Cultural_Appropriateness"]}
199
 
200
  v_fitness_json = normalized_fitness
201
-
202
  scores = {k: v.get('score', 0) for k, v in v_fitness_json.items()}
203
  yield f"Evaluation Score: {scores}"
204
 
 
 
 
 
 
 
 
 
 
205
  if scores.get('Novelty', 0) <= 1:
206
  yield f"⚠️ Low Score Detected. Reason: {v_fitness_json.get('Novelty', {}).get('justification', 'Unknown')}"
207
 
208
- # Check if we passed
209
  if self.corrector.is_good_enough(scores):
210
  yield "--- Solution approved by self-corrector. ---"
211
  break
@@ -221,4 +255,16 @@ class StrategicSelectorAgent:
221
  except Exception as e:
222
  error_msg = f"An error occurred in the agent's solve loop: {e}"
223
  print(error_msg)
224
- yield error_msg
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # agent_logic.py (Milestone 5 - FINAL & ROBUST + LOGGING)
2
  import asyncio
3
  from typing import AsyncGenerator, Dict, Optional
4
  import json
5
  import os
6
+ import uuid
7
+ import datetime
8
  import google.generativeai as genai
9
  from anthropic import AsyncAnthropic
10
  from openai import AsyncOpenAI
 
104
  yield f"Diagnosis: {classification}"
105
 
106
  async def solve(self, problem: str) -> AsyncGenerator[str, None]:
107
+ # --- 1. Initialize Logging ---
108
+ run_id = str(uuid.uuid4())[:8]
109
+ debug_log = {
110
+ "run_id": run_id,
111
+ "timestamp": datetime.datetime.now().isoformat(),
112
+ "problem": problem,
113
+ "classification": "",
114
+ "trace": []
115
+ }
 
116
 
 
 
 
 
117
  try:
118
+ classification_generator = self._classify_problem(problem)
119
+ classification = ""
120
+ async for status_update in classification_generator:
121
+ yield status_update
122
+ if "Diagnosis: " in status_update:
123
+ classification = status_update.split(": ")[-1]
124
+
125
+ debug_log["classification"] = classification
126
+
127
+ if "Error generating response" in classification:
128
+ yield "Classifier failed. Defaulting to Single Agent."
129
+ classification = "Direct_Procedure"
130
+
131
+ solution_draft = ""
132
+ v_fitness_json = {}
133
+ scores = {}
134
+
135
  # --- MAIN LOOP (Self-Correction) ---
136
  for i in range(2):
 
137
  current_problem = problem
138
  if i > 0:
139
  yield f"--- (Loop {i}) Score is too low. Initiating Self-Correction... ---"
140
  correction_prompt_text = self.corrector.get_correction_plan(v_fitness_json)
141
  yield f"Diagnosis: {correction_prompt_text.splitlines()[3].strip()}"
142
  current_problem = f"{problem}\n\n{correction_prompt_text}"
143
+
144
+ debug_log["trace"].append({
145
+ "step_type": "correction_plan",
146
+ "loop_index": i,
147
+ "prompt": correction_prompt_text
148
+ })
149
 
150
  # --- DEPLOY ---
151
  default_persona = PERSONAS_DATA[config.DEFAULT_PERSONA_KEY]["description"]
 
161
  elif classification == "Cognitive_Labyrinth":
162
  if i == 0:
163
  yield "Deploying: Static Heterogeneous Team (Cognitive Diversity)..."
164
+
165
+ # --- UPDATED: Unpack 3 values now ---
166
+ team_plan, calibration_errors, calib_details = await self.calibrator.calibrate_team(current_problem)
167
+
168
+ # LOG CALIBRATION
169
+ debug_log["trace"].append({
170
+ "step_type": "calibration",
171
+ "details": calib_details,
172
+ "errors": calibration_errors,
173
+ "selected_plan": team_plan
174
+ })
175
+
176
  if calibration_errors:
177
  yield "--- CALIBRATION WARNINGS ---"
178
  for err in calibration_errors: yield err
 
195
  yield "Evaluating draft (live)..."
196
  v_fitness_json = await self.evaluator.evaluate(current_problem, solution_draft)
197
 
198
+ # --- Robust Normalization ---
199
  normalized_fitness = {}
200
  if isinstance(v_fitness_json, dict):
201
  for k, v in v_fitness_json.items():
 
202
  if isinstance(v, dict):
203
  score_value = v.get('score')
204
  justification_value = v.get('justification', str(v))
 
206
  score_value = v[0].get('score')
207
  justification_value = v[0].get('justification', str(v[0]))
208
  else:
209
+ score_value = v.get('score', 0) if isinstance(v, dict) else 0
210
  justification_value = str(v)
211
 
 
212
  if isinstance(score_value, str):
213
  try:
214
  score_value = int(re.search(r'\d+', score_value).group())
 
221
  score_value = 0
222
 
223
  normalized_fitness[k] = {'score': score_value, 'justification': justification_value}
 
224
  else:
225
  normalized_fitness = {k: {'score': 0, 'justification': "Invalid JSON structure"} for k in ["Novelty", "Usefulness_Feasibility", "Flexibility", "Elaboration", "Cultural_Appropriateness"]}
226
 
227
  v_fitness_json = normalized_fitness
 
228
  scores = {k: v.get('score', 0) for k, v in v_fitness_json.items()}
229
  yield f"Evaluation Score: {scores}"
230
 
231
+ # LOG ATTEMPT
232
+ debug_log["trace"].append({
233
+ "step_type": "attempt",
234
+ "loop_index": i,
235
+ "draft": solution_draft,
236
+ "scores": scores,
237
+ "full_evaluation": v_fitness_json
238
+ })
239
+
240
  if scores.get('Novelty', 0) <= 1:
241
  yield f"⚠️ Low Score Detected. Reason: {v_fitness_json.get('Novelty', {}).get('justification', 'Unknown')}"
242
 
 
243
  if self.corrector.is_good_enough(scores):
244
  yield "--- Solution approved by self-corrector. ---"
245
  break
 
255
  except Exception as e:
256
  error_msg = f"An error occurred in the agent's solve loop: {e}"
257
  print(error_msg)
258
+ debug_log["error"] = str(e)
259
+ yield error_msg
260
+
261
+ finally:
262
+ # --- SAVE LOG ---
263
+ try:
264
+ os.makedirs("logs", exist_ok=True)
265
+ log_path = f"logs/run_{run_id}.json"
266
+ with open(log_path, "w", encoding="utf-8") as f:
267
+ json.dump(debug_log, f, indent=2)
268
+ print(f"Detailed execution log saved to {log_path}")
269
+ except Exception as log_err:
270
+ print(f"Failed to save log: {log_err}")