Diomedes Git committed on
Commit
f3cb117
Β·
1 Parent(s): 7ad8208

Flesh out Raven and Magpie, tweak their toolsets, give every character memory access, and add three kinds of memory: paper, observation, and trend.

Browse files
quick_check.py CHANGED
@@ -1,4 +1,4 @@
1
- from src.cluas_mcp.common.memory import AgentMemory
2
 
3
  # scored search
4
  memory = AgentMemory()
 
1
+ from src.cluas_mcp.common.paper_memory import AgentMemory
2
 
3
  # scored search
4
  memory = AgentMemory()
src/characters/corvus.py CHANGED
@@ -7,7 +7,8 @@ from typing import Optional, List, Dict
7
  from dotenv import load_dotenv
8
  from groq import Groq
9
  from src.cluas_mcp.academic.academic_search_entrypoint import academic_search
10
- from src.cluas_mcp.common.memory import AgentMemory
 
11
 
12
  load_dotenv()
13
  logger = logging.getLogger(__name__)
@@ -18,7 +19,8 @@ class Corvus:
18
  def __init__(self, use_groq=True, location="Glasgow, Scotland"):
19
  self.name = "Corvus"
20
  self.use_groq = use_groq
21
- self.memory = AgentMemory()
 
22
 
23
 
24
  if use_groq:
 
7
  from dotenv import load_dotenv
8
  from groq import Groq
9
  from src.cluas_mcp.academic.academic_search_entrypoint import academic_search
10
+ from src.cluas_mcp.common.paper_memory import PaperMemory
11
+ from src.cluas_mcp.common.observation_memory import ObservationMemory
12
 
13
  load_dotenv()
14
  logger = logging.getLogger(__name__)
 
19
  def __init__(self, use_groq=True, location="Glasgow, Scotland"):
20
  self.name = "Corvus"
21
  self.use_groq = use_groq
22
+ self.paper_memory = PaperMemory()
23
+ self.observation_memory = ObservationMemory(location=location)
24
 
25
 
26
  if use_groq:
src/characters/crow.py CHANGED
@@ -3,16 +3,21 @@ import json
3
  import asyncio
4
  import requests
5
  import logging
6
- from typing import Optional, List, Dict
 
7
  from dotenv import load_dotenv
8
  from groq import Groq
9
  from src.cluas_mcp.observation.observation_entrypoint import (
10
  get_bird_sightings,
11
  get_weather_patterns,
12
  get_air_quality,
13
- get_moon_phase, # note: matches entrypoint function name
14
- get_sun_times
 
15
  )
 
 
 
16
 
17
  load_dotenv()
18
  logger = logging.getLogger(__name__)
@@ -24,6 +29,8 @@ class Crow:
24
  self.name = "Crow"
25
  self.use_groq = use_groq
26
  self.location = location # crow's home location
 
 
27
 
28
  # map tool names to functions for dispatch
29
  self.tool_functions = {
@@ -32,6 +39,7 @@ class Crow:
32
  "get_air_quality": get_air_quality,
33
  "get_moon_phase": get_moon_phase,
34
  "get_sun_times": get_sun_times,
 
35
  }
36
 
37
  if use_groq:
@@ -44,7 +52,7 @@ class Crow:
44
  self.model = "llama3.1:8b"
45
 
46
  def get_system_prompt(self) -> str:
47
- return f"""You are Crow, a calm and observant nature watcher based in {self.location}.
48
 
49
  TEMPERAMENT: Phlegmatic - calm, observant, methodical, detail-oriented, patient
50
  ROLE: Observer and pattern analyzer in a corvid enthusiast group chat
@@ -69,6 +77,32 @@ TOOLS AVAILABLE:
69
  - get_sun_times: Get sunrise/sunset times for a location
70
 
71
  When discussing weather, birds, air quality, or natural patterns, use your tools to get real data!"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
  async def respond(self,
74
  message: str,
@@ -215,6 +249,9 @@ When discussing weather, birds, air quality, or natural patterns, use your tools
215
  tool_func = self.tool_functions[tool_name]
216
  tool_result = await loop.run_in_executor(None, lambda: tool_func(**args))
217
 
 
 
 
218
  # format results for LLM
219
  formatted_result = self._format_observation_for_llm(tool_name, tool_result)
220
 
@@ -300,6 +337,91 @@ When discussing weather, birds, air quality, or natural patterns, use your tools
300
 
301
  # fallback: return JSON summary
302
  return json.dumps(result, indent=2)[:500]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
303
 
304
  def _respond_ollama(self, message: str, history: Optional[List[Dict]] = None) -> str:
305
  """Use Ollama (no tool support, conversational only)."""
 
3
  import asyncio
4
  import requests
5
  import logging
6
+ from datetime import datetime, UTC
7
+ from typing import Optional, List, Dict, Any
8
  from dotenv import load_dotenv
9
  from groq import Groq
10
  from src.cluas_mcp.observation.observation_entrypoint import (
11
  get_bird_sightings,
12
  get_weather_patterns,
13
  get_air_quality,
14
+ get_moon_phase,
15
+ get_sun_times,
16
+ analyze_temporal_patterns
17
  )
18
+ from src.cluas_mcp.common.observation_memory import ObservationMemory
19
+ from src.cluas_mcp.common.paper_memory import PaperMemory
20
+
21
 
22
  load_dotenv()
23
  logger = logging.getLogger(__name__)
 
29
  self.name = "Crow"
30
  self.use_groq = use_groq
31
  self.location = location # crow's home location
32
+ self.observation_memory = ObservationMemory()
33
+ self.paper_memory = PaperMemory()
34
 
35
  # map tool names to functions for dispatch
36
  self.tool_functions = {
 
39
  "get_air_quality": get_air_quality,
40
  "get_moon_phase": get_moon_phase,
41
  "get_sun_times": get_sun_times,
42
+ "analyze_temporal_patterns": analyze_temporal_patterns
43
  }
44
 
45
  if use_groq:
 
52
  self.model = "llama3.1:8b"
53
 
54
  def get_system_prompt(self) -> str:
55
+ base_prompt = f"""You are Crow, a calm and observant nature watcher based in {self.location}.
56
 
57
  TEMPERAMENT: Phlegmatic - calm, observant, methodical, detail-oriented, patient
58
  ROLE: Observer and pattern analyzer in a corvid enthusiast group chat
 
77
  - get_sun_times: Get sunrise/sunset times for a location
78
 
79
  When discussing weather, birds, air quality, or natural patterns, use your tools to get real data!"""
80
+ return base_prompt + self._build_recent_observation_context()
81
+
82
+ def _build_recent_observation_context(self) -> str:
83
+ """Summarize recent observations for extra context in the system prompt."""
84
+ try:
85
+ recent = self.memory.get_recent(days=3)
86
+ except Exception as exc:
87
+ logger.warning("Unable to load recent observations: %s", exc)
88
+ return ""
89
+
90
+ if not recent:
91
+ return ""
92
+
93
+ counts: Dict[str, int] = {}
94
+ for obs in recent:
95
+ obs_type = obs.get("type", "observation")
96
+ counts[obs_type] = counts.get(obs_type, 0) + 1
97
+
98
+ summary_lines = [
99
+ "\n\nRECENT OBSERVATIONS:",
100
+ f"You have logged {len(recent)} observations in the last 3 days:"
101
+ ]
102
+ for obs_type, count in sorted(counts.items()):
103
+ summary_lines.append(f"- {count} Γ— {obs_type}")
104
+
105
+ return "\n".join(summary_lines) + "\n"
106
 
107
  async def respond(self,
108
  message: str,
 
249
  tool_func = self.tool_functions[tool_name]
250
  tool_result = await loop.run_in_executor(None, lambda: tool_func(**args))
251
 
252
+ # persist what Crow observed for later pattern analysis
253
+ self._record_observation(tool_name, args, tool_result, message)
254
+
255
  # format results for LLM
256
  formatted_result = self._format_observation_for_llm(tool_name, tool_result)
257
 
 
337
 
338
  # fallback: return JSON summary
339
  return json.dumps(result, indent=2)[:500]
340
+
341
+ def _record_observation(self, tool_name: str, args: Dict[str, Any], result: Dict[str, Any], user_message: str) -> None:
342
+ """Persist the tool result to Crow's observation memory."""
343
+ try:
344
+ location = args.get("location") or args.get("city") or self.location
345
+ obs_type = tool_name.replace("get_", "")
346
+ tags = [obs_type]
347
+
348
+ hour = datetime.now(UTC).hour
349
+ if 5 <= hour < 12:
350
+ tags.append("morning")
351
+ elif 12 <= hour < 17:
352
+ tags.append("afternoon")
353
+ elif 17 <= hour < 21:
354
+ tags.append("evening")
355
+ else:
356
+ tags.append("night")
357
+
358
+ conditions = self._derive_conditions(tool_name, result)
359
+ notes = f"Triggered by: {user_message[:120]}"
360
+
361
+ self.memory.add_observation(
362
+ obs_type=obs_type,
363
+ location=location,
364
+ data=result,
365
+ conditions=conditions,
366
+ tags=tags,
367
+ notes=notes
368
+ )
369
+ except Exception as exc:
370
+ logger.warning("Failed to store %s observation: %s", tool_name, exc)
371
+
372
+ def _derive_conditions(self, tool_name: str, data: Dict[str, Any]) -> Dict[str, Any]:
373
+ """Extract comparable condition data from observation payloads."""
374
+ if tool_name == "get_weather_patterns":
375
+ patterns = data.get("patterns", {})
376
+ return {
377
+ "weather": patterns.get("conditions") or patterns.get("description"),
378
+ "temperature": patterns.get("average_temperature"),
379
+ "humidity": patterns.get("humidity"),
380
+ "wind_speed": patterns.get("wind_speed"),
381
+ }
382
+
383
+ if tool_name == "get_air_quality":
384
+ readings: List[float] = []
385
+ for location in data.get("locations", []):
386
+ measurements = location.get("measurements") or []
387
+ if measurements:
388
+ latest = measurements[0]
389
+ value = latest.get("value")
390
+ if isinstance(value, (int, float)):
391
+ readings.append(float(value))
392
+ avg_reading = round(sum(readings) / len(readings), 2) if readings else None
393
+ return {
394
+ "air_quality": avg_reading,
395
+ "parameter": data.get("parameter"),
396
+ }
397
+
398
+ if tool_name == "get_bird_sightings":
399
+ return {
400
+ "bird_count": data.get("count"),
401
+ }
402
+
403
+ if tool_name == "get_moon_phase":
404
+ return {
405
+ "moon_phase": data.get("phase"),
406
+ "illumination": data.get("illumination"),
407
+ }
408
+
409
+ if tool_name == "get_sun_times":
410
+ return {
411
+ "sunrise": data.get("sunrise"),
412
+ "sunset": data.get("sunset"),
413
+ }
414
+
415
+ return {}
416
+
417
+
418
+ def recall_observations(self, obs_type: str, days: int = 7) -> List[Dict]:
419
+ """Fetch recent observations of a particular type."""
420
+ return self.memory.search_observations(obs_type=obs_type, days=days)
421
+
422
+ def clear_memory(self) -> None:
423
+ """Reset Crow's observation memory (useful for tests)."""
424
+ self.memory.clear_all()
425
 
426
  def _respond_ollama(self, message: str, history: Optional[List[Dict]] = None) -> str:
427
  """Use Ollama (no tool support, conversational only)."""
src/characters/magpie.py CHANGED
@@ -4,7 +4,11 @@ import asyncio
4
  from typing import Optional, List, Dict
5
  from dotenv import load_dotenv
6
  from groq import Groq
7
- from src.cluas_mcp.web.web_search_entrypoint import search_web, find_trending_topics, get_quick_facts
 
 
 
 
8
 
9
  load_dotenv()
10
 
@@ -12,7 +16,9 @@ class Magpie:
12
  def __init__(self, use_groq=True, location="Brooklyn, NY"):
13
  self.name = "Magpie"
14
  self.use_groq = use_groq
15
- self.tools = ["search_web", "find_trending_topics", "get_quick_facts"]
 
 
16
 
17
  if use_groq:
18
  api_key = os.getenv("GROQ_API_KEY")
 
4
  from typing import Optional, List, Dict
5
  from dotenv import load_dotenv
6
  from groq import Groq
7
+ from src.cluas_mcp.web.web_search import search_web, find_trending_topics, get_quick_facts
8
+
9
+ from src.cluas_mcp.common.paper_memory import PaperMemory
10
+ from src.cluas_mcp.common.observation_memory import ObservationMemory
11
+
12
 
13
  load_dotenv()
14
 
 
16
  def __init__(self, use_groq=True, location="Brooklyn, NY"):
17
  self.name = "Magpie"
18
  self.use_groq = use_groq
19
+ self.tools = ["search_web", "find_trending_topics"]
20
+ self.paper_memory = PaperMemory()
21
+ self.observation_memory = ObservationMemory(location=location)
22
 
23
  if use_groq:
24
  api_key = os.getenv("GROQ_API_KEY")
src/characters/raven.py CHANGED
@@ -1,18 +1,30 @@
1
  import os
2
  import json
3
  import asyncio
4
- from typing import Optional, List, Dict
5
  from dotenv import load_dotenv
6
  from groq import Groq
7
  from src.cluas_mcp.news.news_search import search_news
 
 
 
 
8
 
9
  load_dotenv()
10
 
11
  class Raven:
12
  def __init__(self, use_groq=True, location="Seattle, WA"):
13
  self.name = "Raven"
 
14
  self.use_groq = use_groq
15
- self.tools = ["search_news", "get_environmental_data", "verify_claim"]
 
 
 
 
 
 
 
16
 
17
  if use_groq:
18
  api_key = os.getenv("GROQ_API_KEY")
@@ -42,16 +54,206 @@ You're in a group chat, but you're not afraid to speak your mind.
42
 
43
  TOOLS AVAILABLE:
44
  - search_news: Search for current news articles
45
- - get_environmental_data: Get environmental data and statistics
46
- - verify_claim: Verify the truthfulness of claims
47
 
48
  When you need to verify information or find current news, use your tools!"""
49
 
50
  async def respond(self,
51
  message: str,
52
  conversation_history: Optional[List[Dict]] = None) -> str:
53
- """Generate a response. Stub implementation for MVP."""
54
- # For MVP, return a simple mock response
55
- # TODO: Implement full Groq integration with tool calling
56
- return "That's an important point. Let me verify that and check the latest news on this."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
 
1
  import os
2
  import json
3
  import asyncio
4
+ from typing import Optional, List, Dict, Any
5
  from dotenv import load_dotenv
6
  from groq import Groq
7
  from src.cluas_mcp.news.news_search import search_news
8
+ from src.cluas_mcp.web.web_search import search_web
9
+ from src.cluas_mcp.web.trending import fetch_trends
10
+ from src.cluas_mcp.common.paper_memory import PaperMemory
11
+ from src.cluas_mcp.common.observation_memory import ObservationMemory
12
 
13
  load_dotenv()
14
 
15
  class Raven:
16
  def __init__(self, use_groq=True, location="Seattle, WA"):
17
  self.name = "Raven"
18
+ self.location = location
19
  self.use_groq = use_groq
20
+ self.tools = ["search_news", "search_web", "fetch_trends"]
21
+ self.paper_memory = PaperMemory()
22
+ self.observation_memory = ObservationMemory(location=location)
23
+ self.tool_functions = {
24
+ "search_news": search_news,
25
+ "search_web": search_web,
26
+ "fetch_trends": fetch_trends,
27
+ }
28
 
29
  if use_groq:
30
  api_key = os.getenv("GROQ_API_KEY")
 
54
 
55
  TOOLS AVAILABLE:
56
  - search_news: Search for current news articles
57
+ - search_web: Search the web for information
58
+ - fetch_trends: Get trending topics in news
59
 
60
  When you need to verify information or find current news, use your tools!"""
61
 
62
  async def respond(self,
63
  message: str,
64
  conversation_history: Optional[List[Dict]] = None) -> str:
65
+ """Generate a response."""
66
+ if self.use_groq:
67
+ return await self._respond_groq(message, conversation_history)
68
+ return self._respond_ollama(message, conversation_history)
69
+
70
+ async def _respond_groq(self, message: str, history: Optional[List[Dict]] = None) -> str:
71
+ """Use Groq with tool calling for Raven's investigative workflow."""
72
+ messages = [{"role": "system", "content": self.get_system_prompt()}]
73
+
74
+ if history:
75
+ messages.extend(history[-5:])
76
+
77
+ messages.append({"role": "user", "content": message})
78
+
79
+ tools = [
80
+ {
81
+ "type": "function",
82
+ "function": {
83
+ "name": "search_news",
84
+ "description": "Search for current news articles and reports",
85
+ "parameters": {
86
+ "type": "object",
87
+ "properties": {
88
+ "query": {
89
+ "type": "string",
90
+ "description": "Topic or question to search in news outlets"
91
+ },
92
+ "max_results": {
93
+ "type": "integer",
94
+ "description": "Maximum number of articles to return (default 5)"
95
+ }
96
+ },
97
+ "required": ["query"]
98
+ }
99
+ }
100
+ },
101
+ {
102
+ "type": "function",
103
+ "function": {
104
+ "name": "search_web",
105
+ "description": "Search the broader web for claims, sources, and facts",
106
+ "parameters": {
107
+ "type": "object",
108
+ "properties": {
109
+ "query": {
110
+ "type": "string",
111
+ "description": "Search query for the web"
112
+ }
113
+ },
114
+ "required": ["query"]
115
+ }
116
+ }
117
+ },
118
+ {
119
+ "type": "function",
120
+ "function": {
121
+ "name": "fetch_trends",
122
+ "description": "Fetch trending topics for a category",
123
+ "parameters": {
124
+ "type": "object",
125
+ "properties": {
126
+ "category": {
127
+ "type": "string",
128
+ "description": "Trend category (e.g., 'news', 'climate', 'tech')"
129
+ }
130
+ },
131
+ "required": ["category"]
132
+ }
133
+ }
134
+ }
135
+ ]
136
+
137
+ first_response = self.client.chat.completions.create(
138
+ model=self.model,
139
+ messages=messages,
140
+ tools=tools,
141
+ tool_choice="auto",
142
+ temperature=0.8,
143
+ max_tokens=150
144
+ )
145
+
146
+ choice = first_response.choices[0]
147
+
148
+ if choice.finish_reason == "tool_calls" and choice.message.tool_calls:
149
+ tool_call = choice.message.tool_calls[0]
150
+ tool_name = tool_call.function.name
151
+
152
+ if tool_name in self.tool_functions:
153
+ args = json.loads(tool_call.function.arguments)
154
+ loop = asyncio.get_event_loop()
155
+ tool_func = self.tool_functions[tool_name]
156
+ tool_result = await loop.run_in_executor(None, lambda: tool_func(**args))
157
+
158
+ formatted = self._format_tool_result(tool_name, tool_result)
159
+
160
+ messages.append({
161
+ "role": "assistant",
162
+ "content": None,
163
+ "tool_calls": [{
164
+ "id": tool_call.id,
165
+ "type": "function",
166
+ "function": {
167
+ "name": tool_name,
168
+ "arguments": tool_call.function.arguments
169
+ }
170
+ }]
171
+ })
172
+ messages.append({
173
+ "role": "tool",
174
+ "tool_call_id": tool_call.id,
175
+ "content": formatted
176
+ })
177
+
178
+ second_response = self.client.chat.completions.create(
179
+ model=self.model,
180
+ messages=messages,
181
+ temperature=0.8,
182
+ max_tokens=200
183
+ )
184
+ return second_response.choices[0].message.content.strip()
185
+
186
+ return choice.message.content.strip()
187
+
188
+ def _format_tool_result(self, tool_name: str, result: Dict[str, Any]) -> str:
189
+ if tool_name == "search_news":
190
+ return self._format_news_for_llm(result)
191
+ if tool_name == "search_web":
192
+ return self._format_web_search_for_llm(result)
193
+ if tool_name == "fetch_trends":
194
+ return self._format_trends_for_llm(result)
195
+ return json.dumps(result, indent=2)[:500]
196
+
197
+ def _format_news_for_llm(self, result: Dict[str, Any]) -> str:
198
+ articles = result.get("articles") or result.get("results") or []
199
+ if not articles:
200
+ return "No news articles found."
201
+
202
+ lines = ["News search results:"]
203
+ for idx, article in enumerate(articles[:5], start=1):
204
+ title = article.get("title", "Untitled")
205
+ source = article.get("source", "Unknown source")
206
+ summary = article.get("summary") or article.get("description") or ""
207
+ lines.append(f"{idx}. {title} β€” {source}. {summary[:160]}...")
208
+ return "\n".join(lines)
209
+
210
+ def _format_web_search_for_llm(self, result: Dict[str, Any]) -> str:
211
+ items = result.get("results") or result.get("items") or []
212
+ if not items:
213
+ return "No web results found."
214
+
215
+ lines = ["Web search results:"]
216
+ for idx, item in enumerate(items[:5], start=1):
217
+ title = item.get("title", "Untitled")
218
+ url = item.get("url") or item.get("link", "")
219
+ snippet = item.get("snippet") or item.get("description") or ""
220
+ lines.append(f"{idx}. {title} ({url}) β€” {snippet[:160]}...")
221
+ return "\n".join(lines)
222
+
223
+ def _format_trends_for_llm(self, result: Dict[str, Any]) -> str:
224
+ trends = result.get("trends") or result.get("topics") or []
225
+ category = result.get("category", "general")
226
+ if not trends:
227
+ return f"No trending topics found for {category}."
228
+
229
+ lines = [f"Trending topics for {category}:"]
230
+ for idx, topic in enumerate(trends[:5], start=1):
231
+ name = topic.get("name") or topic.get("title") or "Unnamed trend"
232
+ detail = topic.get("description") or topic.get("snippet") or ""
233
+ lines.append(f"{idx}. {name} β€” {detail[:160]}...")
234
+ return "\n".join(lines)
235
+
236
+ def _respond_ollama(self, message: str, history: Optional[List[Dict]] = None) -> str:
237
+ """Placeholder for local inference without tool calls."""
238
+ prompt = self._build_prompt(message, history)
239
+ return (
240
+ "I'm double-checking that with my own notes. "
241
+ "Hang tight while I look for corroborating sources."
242
+ )
243
+
244
+ def _build_prompt(self, message: str, history: Optional[List[Dict]] = None) -> str:
245
+ """Construct a lightweight conversation transcript for local models."""
246
+ if not history:
247
+ return f"User: {message}\n\nRaven:"
248
+ transcript: List[str] = []
249
+ for item in history[-5:]:
250
+ role = item.get("role")
251
+ content = item.get("content", "")
252
+ if role == "user":
253
+ transcript.append(f"User: {content}")
254
+ elif role == "assistant":
255
+ transcript.append(f"Raven: {content}")
256
+ transcript.append(f"User: {message}")
257
+ transcript.append("Raven:")
258
+ return "\n\n".join(transcript)
259
 
src/cluas_mcp/observation/observation_entrypoint.py CHANGED
@@ -5,6 +5,7 @@ from src.cluas_mcp.observation.weather import fetch_weather_patterns
5
  from src.cluas_mcp.observation.airquality import fetch_air_quality
6
  from src.cluas_mcp.observation.moon_phase import fetch_moon_phase
7
  from src.cluas_mcp.observation.sunrise_sunset import fetch_sunrise_sunset
 
8
 
9
  logger = logging.getLogger(__name__)
10
 
@@ -90,6 +91,10 @@ def get_sun_times(location: str, date: Optional[str] = None) -> dict:
90
  logger.info(f"Getting sun times for {location}, date: {date}")
91
  return fetch_sunrise_sunset(location, date)
92
 
 
 
 
 
93
 
94
  # def analyze_temporal_patterns(data_type: str, location: str = "global") -> dict:
95
  # """
 
5
  from src.cluas_mcp.observation.airquality import fetch_air_quality
6
  from src.cluas_mcp.observation.moon_phase import fetch_moon_phase
7
  from src.cluas_mcp.observation.sunrise_sunset import fetch_sunrise_sunset
8
+ from src.cluas_mcp.common.observation_memory import ObservationMemory
9
 
10
  logger = logging.getLogger(__name__)
11
 
 
91
  logger.info(f"Getting sun times for {location}, date: {date}")
92
  return fetch_sunrise_sunset(location, date)
93
 
94
def analyze_temporal_patterns(obs_type: str, location: Optional[str] = None, days: int = 30) -> dict:
    """Analyze patterns from stored observations.

    Delegates to ObservationMemory.analyze_patterns over the last *days* days.
    """
    store = ObservationMemory(location=location)
    return store.analyze_patterns(obs_type, location, days)
98
 
99
  # def analyze_temporal_patterns(data_type: str, location: str = "global") -> dict:
100
  # """
src/cluas_mcp/server.py CHANGED
@@ -6,7 +6,7 @@ from mcp.server.stdio import stdio_server
6
  from mcp.types import Tool, TextContent
7
 
8
  from src.cluas_mcp.academic.academic_search_entrypoint import academic_search
9
- from src.cluas_mcp.web.web_search_entrypoint import search_web, find_trending_topics, get_quick_facts
10
  from src.cluas_mcp.news.news_search_entrypoint import search_news, get_environmental_data, verify_claim
11
  from src.cluas_mcp.observation.observation_entrypoint import get_bird_sightings, get_weather_patterns, analyze_temporal_patterns
12
 
 
6
  from mcp.types import Tool, TextContent
7
 
8
  from src.cluas_mcp.academic.academic_search_entrypoint import academic_search
9
+ from src.cluas_mcp.web.web_search import search_web, find_trending_topics, get_quick_facts
10
  from src.cluas_mcp.news.news_search_entrypoint import search_news, get_environmental_data, verify_claim
11
  from src.cluas_mcp.observation.observation_entrypoint import get_bird_sightings, get_weather_patterns, analyze_temporal_patterns
12
 
src/cluas_mcp/web/trending.py CHANGED
@@ -4,7 +4,7 @@ import logging
4
 
5
  logger = logging.getLogger(__name__)
6
 
7
- def find_trending_topics(category: str = "general") -> dict:
8
  """
9
  Get trending topics with cascading fallbacks:
10
  1. Try Google Trends (pytrends) - no API key needed
 
4
 
5
  logger = logging.getLogger(__name__)
6
 
7
+ def fetch_trends(category: str = "general") -> dict:
8
  """
9
  Get trending topics with cascading fallbacks:
10
  1. Try Google Trends (pytrends) - no API key needed
src/cluas_mcp/web/{web_search_entrypoint.py β†’ web_search.py} RENAMED
@@ -74,7 +74,7 @@ def _mock_search_web(query: str) -> dict:
74
  "total_results": 2
75
  }
76
 
77
- def find_trending_topics(category: str = "general") -> dict:
78
  """
79
  Find trending topics in a given category.
80
 
 
74
  "total_results": 2
75
  }
76
 
77
+ def fetch_trending(category: str = "general") -> dict:
78
  """
79
  Find trending topics in a given category.
80