rkihacker committed
Commit 8d499f8 · verified · 1 Parent(s): 3a79416

Update main.py

Files changed (1): main.py +2 -2
main.py CHANGED
@@ -38,7 +38,7 @@ LLM_MODEL = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
 MAX_SOURCES_TO_PROCESS = 20 # Increased for more research
 MAX_CONCURRENT_REQUESTS = 2
 SEARCH_TIMEOUT = 300 # 5 minutes for longer research
-TOTAL_TIMEOUT = 360 # Adjusted accordingly
+TOTAL_TIMEOUT = 600 # Increased to allow more time for generation
 REQUEST_DELAY = 3.0
 RETRY_ATTEMPTS = 5
 RETRY_DELAY = 5.0
@@ -643,7 +643,7 @@ async def run_deep_research_stream(query: str, search_time: int = 300) -> AsyncG
     "data": f"Synthesizing comprehensive report from {successful_sources} sources..."
 })

-max_output_tokens = min(16000, int(time_remaining * 6)) # Increased for longer report
+max_output_tokens = 16000 # Fixed to allow long response

 report_prompt = f"""Compose an in-depth analysis report on "{query}".
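
For context, a minimal sketch of how the two changed values interact, assuming time_remaining holds the seconds left of TOTAL_TIMEOUT after the search phase (its computation is outside this diff, so treat that variable as a stand-in here):

SEARCH_TIMEOUT = 300   # research phase budget in seconds
TOTAL_TIMEOUT = 600    # overall budget; leaves roughly 300 s for report synthesis

time_remaining = TOTAL_TIMEOUT - SEARCH_TIMEOUT  # assumed worst case: 300 s left

# Old cap: scale output tokens with remaining time (about 6 tokens per second),
# which yields only min(16000, 300 * 6) = 1800 tokens after a full-length search.
old_max_output_tokens = min(16000, int(time_remaining * 6))

# New cap: always request the full 16000 tokens, relying on the larger
# TOTAL_TIMEOUT to leave enough wall-clock time for the longer response.
new_max_output_tokens = 16000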