SunDou committed (verified)
Commit fe52d16 · 1 parent: 491984a

Upload data1/main_v2.py with huggingface_hub

Files changed (1): data1/main_v2.py (+1007, -0)
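
The commit message above says the file was pushed with huggingface_hub. For reference, a minimal sketch of such an upload via HfApi.upload_file; the repo id, repo type, and token below are placeholders, not taken from this commit:

from huggingface_hub import HfApi

api = HfApi(token="hf_...")  # placeholder token; usually read from the HF_TOKEN environment variable
api.upload_file(
    path_or_fileobj="data1/main_v2.py",   # local file to upload
    path_in_repo="data1/main_v2.py",      # destination path inside the repo
    repo_id="SunDou/example-dataset",     # hypothetical repo id
    repo_type="dataset",                  # assumed; could also be "model" or "space"
    commit_message="Upload data1/main_v2.py with huggingface_hub",
)
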
data1/main_v2.py ADDED (new file)
# main.py
import os
import time

os.chdir(os.path.dirname(os.path.abspath(__file__)))

from typing import List, Dict, Any, Optional
from pydantic import BaseModel
import pandas as pd
import shutil
import subprocess
import requests
from requests.exceptions import RequestException, Timeout
import argparse
import asyncio
from pathlib import Path
from dotenv import load_dotenv
from datetime import datetime, timedelta
from util import init_logger, logger, call_llm, CODE_EXTENSIONS, extract_final_answer_from_reasoning
from langchain_core.output_parsers import JsonOutputParser


# Pydantic models for structured output
class RelevanceResult(BaseModel):
    relevant: str  # YES or NO
    reason: str


class ExpandedKeywords(BaseModel):
    keywords: List[str]


# Timeout and retry settings for GitHub requests (overridable via environment variables)
GITHUB_TIMEOUT = int(os.environ.get("GITHUB_TIMEOUT", "30"))  # per-request timeout in seconds
GITHUB_MAX_RETRIES = int(os.environ.get("GITHUB_MAX_RETRIES", "3"))  # maximum number of attempts
GITHUB_RETRY_BACKOFF = float(os.environ.get("GITHUB_RETRY_BACKOFF", "1.5"))  # exponential backoff base

def github_get(
    url: str,
    *,
    headers: Optional[Dict[str, str]] = None,
    params: Optional[Dict[str, Any]] = None,
    timeout: Optional[int] = None,
) -> Optional[requests.Response]:
    """
    GitHub GET wrapper with timeout and retries.
    - Timeout and retry counts are controlled via environment variables
    - Retries on Timeout / RequestException
    """
    if timeout is None:
        timeout = GITHUB_TIMEOUT

    last_exc: Optional[BaseException] = None

    for attempt in range(1, GITHUB_MAX_RETRIES + 1):
        try:
            resp = requests.get(url, headers=headers, params=params, timeout=timeout)
            return resp
        except (Timeout, RequestException) as e:
            last_exc = e
            logger.warning(
                f"GitHub request failed (attempt {attempt}/{GITHUB_MAX_RETRIES}): {url} | {e}"
            )
            if attempt < GITHUB_MAX_RETRIES:
                # Exponential backoff: 1, 1.5, 2.25, ...
                sleep_s = GITHUB_RETRY_BACKOFF ** (attempt - 1)
                time.sleep(sleep_s)

    logger.error(f"GitHub request failed after {GITHUB_MAX_RETRIES} attempts, giving up: {url} | last error: {last_exc}")
    return None

def search_github_repos(keywords: List[str], token: str, output_csv: Path):
    """Search GitHub repos with incremental write per keyword using pending logic"""
    headers = {"Authorization": f"Bearer {token}", "Accept": "application/vnd.github.v3+json"}

    # Calculate pending keywords using set difference
    all_keywords = set(k.lower() for k in keywords)
    searched_keywords = set()

    if output_csv.exists():
        df_existing = pd.read_csv(output_csv)
        searched_keywords = set(df_existing["keyword"].str.lower().unique())
        logger.info(f"Resume: Already searched {len(searched_keywords)} keywords")

    # Pending keywords = all - searched
    pending_keywords = all_keywords - searched_keywords
    pending = [k for k in keywords if k.lower() in pending_keywords]

    logger.info(f"Pending: {len(pending)} keywords to search")

    # Load existing URLs for global deduplication
    global_seen = set()
    if output_csv.exists():
        global_seen = set(df_existing["url"].tolist())

    def get_count(query: str) -> int:
        """Get total count without fetching data (with timeout & retries)"""
        try:
            resp = github_get(
                "https://api.github.com/search/repositories",
                headers=headers,
                params={"q": query, "per_page": 1},
            )
            if resp is not None and resp.status_code == 200:
                return resp.json().get("total_count", 0)
        except Exception as e:
            logger.error(f"get_count error for query '{query}': {e}")
        return 0

    def fetch_repos(query: str, keyword: str, local_repos: List[Dict]):
        """Fetch all results for a single query (with timeout & retries)"""
        page = 1
        while page <= 10:
            try:
                resp = github_get(
                    "https://api.github.com/search/repositories",
                    headers=headers,
                    params={"q": query, "per_page": 100, "page": page},
                )
                if resp is None:
                    # All retries failed; give up on this query
                    logger.error(f"Fetch error: all retries failed for query '{query}', page {page}")
                    break
                if resp.status_code != 200:
                    logger.error(
                        f"Fetch error: status_code={resp.status_code} for query '{query}', page {page}"
                    )
                    break
                items = resp.json().get("items", [])
                if not items:
                    break

                for r in items:
                    url = r.get("html_url", "")
                    if url and url not in global_seen:
                        global_seen.add(url)
                        repo_data = {
                            "keyword": keyword,
                            "name": r.get("name", ""),
                            "full_name": r.get("full_name", ""),
                            "owner": r.get("owner", {}).get("login", ""),
                            "url": url,
                            "description": r.get("description") or "",
                            "language": r.get("language") or "",
                            "topics": ",".join(r.get("topics", [])),
                            "stars": r.get("stargazers_count", 0),
                            "forks": r.get("forks_count", 0),
                            "created_at": r.get("created_at", ""),
                            "updated_at": r.get("updated_at", ""),
                            "pushed_at": r.get("pushed_at", ""),
                            "license": r.get("license", {}).get("spdx_id", "") if r.get("license") else "",
                            "default_branch": r.get("default_branch", ""),
                            "open_issues": r.get("open_issues_count", 0),
                            "size": r.get("size", 0),
                            "has_wiki": r.get("has_wiki", False),
                            "archived": r.get("archived", False),
                        }
                        local_repos.append(repo_data)

                if len(items) < 100:
                    break
                page += 1
            except Exception as e:
                logger.error(f"Fetch error (unexpected): {e}")
                break

    def split_by_date(kw: str, keyword: str, start_date: datetime, end_date: datetime, local_repos: List[Dict]):
        """Recursive date splitting with stars>10 and in:readme filters"""
        start_str = start_date.strftime("%Y-%m-%d")
        end_str = end_date.strftime("%Y-%m-%d")
        query = f"{kw} in:readme stars:>10 created:{start_str}..{end_str}"
        count = get_count(query)
        logger.info(f" {start_str} to {end_str}: {count} repos")

        if count == 0:
            return
        elif count <= 1000:
            fetch_repos(query, keyword, local_repos)
        else:
            days = (end_date - start_date).days
            if days == 0:
                logger.warning(f"Single day has {count} repos, getting first 1000: {start_str}")
                fetch_repos(query, keyword, local_repos)
            else:
                mid_days = days // 2
                mid_date = start_date + timedelta(days=mid_days)
                split_by_date(kw, keyword, start_date, mid_date, local_repos)
                split_by_date(kw, keyword, mid_date + timedelta(days=1), end_date, local_repos)

    # Search each pending keyword and write immediately
    for kw in pending:
        logger.info(f"Searching keyword: {kw}")
        keyword_repos = []
        start = datetime(2008, 1, 1)
        end = datetime.now()
        split_by_date(kw, kw, start, end, keyword_repos)

        # Write immediately after each keyword
        if keyword_repos:
            df_new = pd.DataFrame(keyword_repos)
            df_new.to_csv(output_csv, mode="a", header=not output_csv.exists(), index=False, encoding="utf-8")
            logger.info(f"✓ Saved {len(keyword_repos)} repos for keyword: {kw}")
        else:
            logger.info(f"✓ No new repos for keyword: {kw}")

    logger.info(f"Total repos in CSV: {len(global_seen)}")

async def get_readme(owner: str, repo: str, token: str) -> str:
    """Fetch README content from repo (async with timeout & retries)"""
    try:
        # Wrap the synchronous github_get in asyncio.to_thread so it does not block the event loop
        resp = await asyncio.to_thread(
            github_get,
            f"https://api.github.com/repos/{owner}/{repo}/readme",
            headers={"Authorization": f"Bearer {token}", "Accept": "application/vnd.github.v3.raw"},
        )
        if resp is not None and resp.status_code == 200:
            return resp.text
        return ""
    except Exception as e:
        logger.error(f"get_readme error for {owner}/{repo}: {e}")
        return ""

async def check_relevance(
    repo: Dict, keywords: List[str], model: str, base_url: str, api_key: str, token: str, log_file: str
) -> bool:
    """Use LLM to check if repo is relevant to keywords"""
    readme = (await get_readme(repo["owner"], repo["name"], token))[:8000]

    prompt = f"""Determine if this GitHub repository is relevant to the keywords: {', '.join(keywords)}

Repository: {repo['name']}
Description: {repo['description']}
Language: {repo['language']}
README (truncated):
{readme}

Answer 'YES' if the repository is related to any of the keywords, 'NO' otherwise.
Provide your reasoning in the reason field."""

    try:
        result = await call_llm(
            [{"role": "user", "content": prompt}],
            model,
            base_url,
            api_key,
            pydantic_object=RelevanceResult,
            log_file=log_file,
            temperature=0.1,
        )
        return result.get("relevant", "").upper() == "YES"
    except Exception as e:
        logger.error(f"LLM error for {repo['name']}: {e}")
        return False

def save_csv(repos: List[Dict], path: str):
    """Save repos to CSV using pandas"""
    df = pd.DataFrame(repos)
    df.to_csv(path, index=False, encoding="utf-8")
    logger.info(f"Saved {len(repos)} repos to {path}")

def clone_repos_batch(repos: List[Dict], dest_dir: Path) -> List[str]:
    """Clone a batch of repos, return list of successfully cloned full_names"""
    dest_dir.mkdir(parents=True, exist_ok=True)
    cloned = []

    for row in repos:
        full_name = row["full_name"]
        repo_path = dest_dir / full_name.replace("/", "___")  # three underscores
        try:
            subprocess.run(
                ["git", "clone", "--depth", "1", row["url"], str(repo_path)],
                check=True,
                capture_output=True,
                timeout=600  # 10 minutes timeout
            )
            cloned.append(full_name)
            logger.info(f"✓ Cloned: {full_name}")
        except subprocess.TimeoutExpired:
            logger.error(f"✗ Clone timeout: {full_name}")
        except Exception as e:
            logger.error(f"✗ Clone failed {full_name}: {e}")

    return cloned

def filter_code_files(repo_dir: Path, dest_repo: Path) -> int:
    """Filter code files from a repo directory, return file count"""
    # NOTE:
    # - The filtered directory is used for later processing/inspection; oversized files (especially .ipynb)
    #   can make IDEs/tools fail to open them (a common limit is 50MB)
    # - Therefore an optional maximum single-file size limit is supported here to avoid copying oversized files
    file_count = 0

    for root, dirs, files in os.walk(repo_dir):
        # Skip hidden and common build/dependency directories
        dirs[:] = [
            d
            for d in dirs
            if not d.startswith(".")
            and d not in {"node_modules", "__pycache__", "venv", ".git", "build", "dist", "target"}
        ]

        for f in files:
            src = Path(root) / f
            if src.suffix.lower() in CODE_EXTENSIONS and src.exists():
                # By default no size limit is applied (oversized files are kept) to avoid dropping important data.
                # To enforce a limit, set the MAX_FILTER_FILE_SIZE_BYTES environment variable (in bytes).
                # Convention: <= 0 means no limit.
                try:
                    max_bytes = int(os.environ.get("MAX_FILTER_FILE_SIZE_BYTES", "0"))
                except Exception:
                    max_bytes = 0

                try:
                    if max_bytes > 0:
                        size = src.stat().st_size
                        if size > max_bytes:
                            logger.info(
                                f"Skip large file (> {max_bytes} bytes): {size} bytes | {src}"
                            )
                            continue
                except Exception as e:
                    logger.warning(f"Failed to stat {src}, skip: {e}")
                    continue

                rel = src.relative_to(repo_dir)
                dst = dest_repo / rel
                dst.parent.mkdir(parents=True, exist_ok=True)
                try:
                    shutil.copy2(src, dst)
                    file_count += 1
                except Exception as e:
                    logger.warning(f"Failed to copy {src}: {e}")

    return file_count

def process_repos_batch(
    repos: List[Dict],
    batch_dir: Path,
    filtered_dir: Path,
    processed_csv: Path
) -> List[str]:
    """
    Process a batch of repos: clone -> filter -> delete
    Returns list of successfully processed full_names
    """
    # Clone batch
    cloned_fullnames = clone_repos_batch(repos, batch_dir)

    if not cloned_fullnames:
        return []

    # Filter code files for each cloned repo
    processed = []
    for full_name in cloned_fullnames:
        repo_name = full_name.replace("/", "___")
        repo_path = batch_dir / repo_name
        dest_repo = filtered_dir / repo_name

        if not repo_path.exists():
            continue

        file_count = filter_code_files(repo_path, dest_repo)

        if file_count > 0:
            processed.append(full_name)
            logger.info(f"✓ Processed {full_name}: {file_count} code files")
        else:
            # No code files found, remove empty destination
            if dest_repo.exists():
                shutil.rmtree(dest_repo)
            logger.info(f"✗ No code files in {full_name}")

    # Delete cloned repos to save space
    for full_name in cloned_fullnames:
        repo_name = full_name.replace("/", "___")
        repo_path = batch_dir / repo_name
        if repo_path.exists():
            try:
                shutil.rmtree(repo_path)
                logger.debug(f"Deleted {repo_name}")
            except Exception as e:
                logger.warning(f"Failed to delete {repo_name}: {e}")

    # Record processed repos
    if processed:
        # Create mapping from full_name to repo dict
        repo_map = {r["full_name"]: r for r in repos}
        processed_records = [
            {"url": repo_map[fn]["url"], "full_name": fn}
            for fn in processed
            if fn in repo_map
        ]
        if processed_records:
            df_processed = pd.DataFrame(processed_records)
            df_processed.to_csv(processed_csv, mode="a", header=not processed_csv.exists(), index=False, encoding="utf-8")

    return processed

async def main():
    load_dotenv()
    parser = argparse.ArgumentParser(description="GitHub Repo Crawler")
    parser.add_argument(
        "--mode",
        type=str,
        default="all",
        choices=["all", "step2", "step34"],
        help="Run mode: all=Step 2 in parallel with Steps 3&4; step2=relevance check only; step34=clone+filter only",
    )
    parser.add_argument(
        "--watch",
        action="store_true",
        help="Only for --mode step34: keep polling repos_check_history.csv and process new YES entries (without it, run one round and exit)",
    )
    parser.add_argument(
        "--poll_interval",
        type=int,
        default=30,
        help="Only for --mode step34 --watch: polling interval in seconds",
    )
    parser.add_argument(
        "--max_idle",
        type=int,
        default=20,
        help="Only for --mode step34 --watch: maximum number of consecutive idle polls (poll_interval seconds apart) before exiting",
    )
    parser.add_argument(
        "--keywords",
        type=str,
        default="Chemistry, Biology, Biochemistry, Omics, Medicine, Pharmacology, Toxicology, Bioinformatics, Bioengineering, Biophysics, Viral, Microbial, Prediction, Discovery, Protein, Gene, DNA, RNA, Vaccine, Computational Biology, Computational Biochemistry, Computational Chemistry, Computational Materials, Quantum Chemistry, Disease, Biomedical, Material, Pharmacogenetics, Pharmacogenomics, Modeling, Networks, In Silico, Pathology, Physiology, Genomics, Proteomics, Transcriptomics, Metabolomics, Glycomics, Lipidomics, Immunology, Microbiology, Molecular biology, Pharmaceutics, Network pharmacology, Epigenetics, Sequencing, Design, Multi-omics, Biomarker, System biology, Synthetic biology, Cell biology, Cancer biology, Ensemble, Personalized, Lipid, Metabolic, Genesis, Ion, Heterogeneity, Generative, Generate, Human, Receptor, Ligand, Organoid, Evolution, Pathogens, Homeostasis, Allele, Genotype, Phenotype, Antibody, Antigen, Nucleic acids, Carbohydrate, Substrate, Inhibition, Activation, Allosteric, Cofactor, Coenzyme, Enzyme, Redox, Hydrophilic, Hydrophobic, Codon, Transcription, Translation, Pathway, Cycle, Signaling, Dynamics, Kinetics, Docking, Spectrometry, Profiling, Diagnostics, CRISPR, Bio, Marker, Pharmacokinetics, Pharmacodynamics, Absorption, Mechanism of action, Agonist, Antagonist, Bioavailability, Half-life, Reaction, Drug, Biologics, Pharmacometrics, Beta-blocker, Regulatory networks, Multi-scale modeling, Single-cell, Spatial biology, Integration, Monte Carlo, System immunology, Metagenomics, QSAR, QAPR, Chemical space, AlphaFold, Folding, Mechanism, Digital twin, Virtual human, Gene editing, Bio foundation model, Biotechnology, Assay, Lead discovery, High-throughput, Screening, Hit-to-lead, Lead optimization, De novo, ADMET, Translational medicine, Drug repurpose, Conjugate, Agent-based model, Compartmental model, Reproduction number, Nowcasting, Phylodynamic model, Physiologically based pharmacokinetics model, PBPK model, Organ-on-a-chip, Anomaly detection, Stochastic modeling, Genomic surveillance, Antimicrobial resistance modeling, AMR, Pandemic, Digital PCR, Next-generation sequencing, Biosensors, Imaging, Sensors, Quantum mechanics, DFT, Ab initio, Hartree-Fock, Coupled cluster, Electronic structure, Homo-Lumo, Conformation, Cheminformatics, QM/MM, First-principles based DFT, Diffusion, Finite element method, Phase-field technique, Potential, Metamaterial, 2D, 3D, Porous, Crystal, Rosettafold, Gene regulatory networks, Cell atlas, Human atlas, Spatial transcriptomics, Pseudotime analysis, Quantum biology, Metabolic flux analysis, Free energy perturbation, Protein-protein, Explainable AI, Neurology, Reinforcement learning, Generative AI, Flow matching, Generative adversarial networks, GAN, Variational autoencoders, VAE, Autoregressive, Transformer, Recurrent neural networks, RNN, Score",
        help="Comma-separated keywords",
    )
    parser.add_argument("--workdir", type=str, default="./workdir", help="Working directory")
    parser.add_argument("--model", type=str, default=os.getenv("OPENAI_MODEL", "gpt-4o"))
    parser.add_argument("--base_url", type=str, default=os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1"),
                        help="Base URL(s) for VLLM service(s). For multiple GPUs, use comma-separated URLs (e.g., 'http://gpu1:8000/v1,http://gpu2:8000/v1')")
    parser.add_argument("--api_key", type=str, default=os.getenv("OPENAI_API_KEY"))
    parser.add_argument("--batch_size", type=int, default=10, help="Number of repos to process in each batch")
    parser.add_argument("--max_tokens", type=int, default=1000, help="Maximum tokens for LLM generation in relevance check (Step 2)")
    args = parser.parse_args()
    # Unified config
    workdir = Path(args.workdir)
    # Parse base_urls (support multiple VLLM services for parallel processing)
    base_urls = [url.strip() for url in args.base_url.split(",") if url.strip()]
    if not base_urls:
        base_urls = ["https://api.openai.com/v1"]  # Default fallback

    config = {
        "workdir": workdir,
        "keywords_expanded": workdir / "keywords_expanded.json",
        "repos_searched": workdir / "repos_searched.csv",
        "repos_checked": workdir / "repos_checked.csv",
        "repos_raw": workdir / "repos_raw",  # Temporary batch directory
        "repos_filtered": workdir / "repos_filtered",
        "repos_processed": workdir / "repos_processed.csv",  # Track processed repos
        "log_file": str(workdir / "calls_llm.jsonl"),
        "model": args.model,
        "base_url": args.base_url,  # Keep for backward compatibility
        "base_urls": base_urls,  # List of VLLM service URLs
        "api_key": args.api_key,
        "github_token": os.environ.get("GITHUB_TOKEN"),
        "keywords": [k.strip() for k in args.keywords.split(",") if k.strip()],
        "batch_size": args.batch_size,
        "max_tokens": args.max_tokens,
    }

    # Setup
    os.makedirs(config["workdir"], exist_ok=True)
    init_logger(str(config["workdir"] / "run.log"))
    logger.info(f"Base keywords: {config['keywords']}")
    logger.info(f"Model: {config['model']}")
    logger.info(f"Run mode: {args.mode} | step34.watch={args.watch} | poll_interval={args.poll_interval} | max_idle={args.max_idle}")

    # In mode=step34, Steps 0/1/2 are not needed; only repos_check_history + repos_searched (to fill in repo fields) are required
    expanded = None
    if args.mode in {"all", "step2"}:
        # Step 0: Expand keywords with LLM (skip if keywords_expanded.json exists)
        if config["keywords_expanded"].exists():
            logger.info(f"[Skip] Step 0: {config['keywords_expanded']} exists")
            import json

            with open(config["keywords_expanded"], "r") as f:
                expanded = json.load(f)["keywords"]
        else:
            logger.info("=" * 60 + "\nStep 0: Expand Keywords with LLM\n" + "=" * 60)
            parser0 = JsonOutputParser(pydantic_object=ExpandedKeywords)
            messages = [
                {
                    "role": "system",
                    "content": f"You are an assistant that generates diverse and related keywords for scientific disciplines.\n{parser0.get_format_instructions()}",
                },
                {
                    "role": "user",
                    "content": f"""Generate a list of exactly 5 diverse keywords related to these scientific fields: {', '.join(config['keywords'])}.
Make sure that the generated keywords do not stray away from these scientific disciplines and do not contain broad terms that will confuse the search (e.g. machine learning, algorithms, etc).
I would like to use these keywords to retrieve code repositories related to these specific scientific disciplines from GitHub and Papers with Code.""",
                },
            ]

            try:
                result = await call_llm(
                    messages,
                    config["model"],
                    config["base_url"],
                    config["api_key"],
                    pydantic_object=ExpandedKeywords,
                    log_file=config["log_file"],
                    temperature=0.5,
                )
                # Merge with base keywords and dedupe
                expanded = list(set(config["keywords"] + result.get("keywords", [])))
            except Exception as e:
                logger.error(f"Keyword expansion failed: {e}, using base keywords")
                expanded = config["keywords"]

            # Save expanded keywords
            import json

            with open(config["keywords_expanded"], "w") as f:
                json.dump({"keywords": expanded}, f, indent=2)
            logger.info(f"[Done] Step 0: {len(expanded)} keywords: {expanded}")

        # Step 1: Search GitHub repos with pending logic
        logger.info("=" * 60 + "\nStep 1: Search GitHub Repos\n" + "=" * 60)
        search_github_repos(expanded, config["github_token"], config["repos_searched"])

        # Check final results
        if config["repos_searched"].exists():
            df_final = pd.read_csv(config["repos_searched"])
            logger.info(f"[Done] Step 1: {len(df_final)} total repos in CSV")
        else:
            logger.warning("No repos found")
            pd.DataFrame(
                columns=[
                    "keyword",
                    "name",
                    "full_name",
                    "owner",
                    "url",
                    "description",
                    "language",
                    "topics",
                    "stars",
                    "forks",
                    "created_at",
                    "updated_at",
                    "pushed_at",
                    "license",
                    "default_branch",
                    "open_issues",
                    "size",
                    "has_wiki",
                    "archived",
                ]
            ).to_csv(config["repos_searched"], index=False)
            logger.info("[Done] Step 1: 0 repos saved")

    # Step 2: Concurrent relevance check with resume support (batch processing)
    # Step 3 & 4: Batch clone -> filter -> delete (runs in parallel with Step 2)
    repos_check_history = config["workdir"] / "repos_check_history.csv"

    # Step 3 & 4 runner (supports one-shot / watch)
    async def run_step34(*, watch: bool) -> None:
        """Run Step 3 & 4: read YES from repos_check_history.csv, clone->filter->delete, with resume."""
        logger.info("[Step 3&4] " + "=" * 60 + "\n[Step 3&4] Step 3 & 4: Background Processing (Clone -> Filter -> Delete)\n[Step 3&4] " + "=" * 60)

        # Setup directories
        config["repos_raw"].mkdir(parents=True, exist_ok=True)
        config["repos_filtered"].mkdir(parents=True, exist_ok=True)

        # Track processed repos
        processed_urls = set()
        processed_fullnames = set()  # Track by full_name for directory check

        # Check repos_processed.csv for already processed repos
        if config["repos_processed"].exists():
            df_processed = pd.read_csv(config["repos_processed"])
            if not df_processed.empty:
                processed_urls = set(df_processed["url"].tolist())
                if "full_name" in df_processed.columns:
                    processed_fullnames = set(df_processed["full_name"].tolist())
                logger.info(f"[Step 3&4] Already processed: {len(processed_urls)} repos from repos_processed.csv")

        # Also check filtered directory for existing repos (in case CSV is missing but files exist)
        if config["repos_filtered"].exists():
            existing_dirs = [d.name for d in config["repos_filtered"].iterdir() if d.is_dir()]
            existing_fullnames = {name.replace("___", "/") for name in existing_dirs}
            processed_fullnames.update(existing_fullnames)
            logger.info(f"[Step 3&4] Existing directories: {len(existing_fullnames)} repos from repos_filtered directory")

        # Read all repos from repos_searched for mapping
        if not config["repos_searched"].exists():
            logger.error(f"[Step 3&4] {config['repos_searched']} is missing; cannot map urls back to repo metadata. Run step1/step2 first or provide repos_searched.csv")
            return

        df_all_repos = pd.read_csv(config["repos_searched"])
        repo_map = {r["url"]: r for r in df_all_repos.to_dict("records")}

        poll_interval = int(args.poll_interval)
        max_idle = int(args.max_idle)
        consecutive_empty_checks = 0

        async def process_once() -> int:
            """Process pending YES repos once; return processed count in this round."""
            nonlocal consecutive_empty_checks

            if not repos_check_history.exists():
                if watch and consecutive_empty_checks in {0, 4, 9, 19}:
                    logger.info("[Step 3&4] Waiting for repos_check_history.csv to be created...")
                return 0

            df_history = pd.read_csv(repos_check_history)
            if df_history.empty:
                if watch and consecutive_empty_checks in {0, 4, 9, 19}:
                    logger.info("[Step 3&4] repos_check_history.csv is empty, waiting for new data...")
                return 0

            df_relevant_history = df_history[df_history["is_relevant"] == "YES"].copy()
            if df_relevant_history.empty:
                if watch and consecutive_empty_checks in {0, 4, 9, 19}:
                    logger.info("[Step 3&4] No repos marked YES yet, waiting for new data...")
                return 0

            relevant_urls = set(df_relevant_history["url"].tolist())
            pending_urls = relevant_urls - processed_urls
            if not pending_urls:
                # nothing new to do
                return 0

            pending_repos: List[Dict[str, Any]] = []
            for url in pending_urls:
                if url in repo_map:
                    repo = repo_map[url].copy()
                    if repo.get("full_name") and repo["full_name"] not in processed_fullnames:
                        pending_repos.append(repo)
                else:
                    history_record = df_relevant_history[df_relevant_history["url"] == url].iloc[0]
                    full_name = history_record.get("full_name", "")
                    repo = {
                        "full_name": full_name,
                        "url": url,
                        "description": history_record.get("description", ""),
                        "topics": history_record.get("topics", ""),
                        "keyword": history_record.get("keyword", ""),
                        "owner": full_name.split("/")[0] if "/" in full_name else "",
                        "name": full_name.split("/")[1] if "/" in full_name else "",
                    }
                    if repo["full_name"] and repo["full_name"] not in processed_fullnames:
                        pending_repos.append(repo)

            if not pending_repos:
                return 0

            logger.info(
                f"[Step 3&4] 📦 Found {len(pending_repos)} new relevant repos to process (total relevant: {len(relevant_urls)}, already processed: {len(processed_urls)})"
            )

            processed_this_round = 0
            total_batches = (len(pending_repos) + config["batch_size"] - 1) // config["batch_size"]
            for batch_idx in range(total_batches):
                start_idx = batch_idx * config["batch_size"]
                end_idx = min(start_idx + config["batch_size"], len(pending_repos))
                batch_repos = pending_repos[start_idx:end_idx]

                logger.info(f"[Step 3&4] \n{'='*60}")
                logger.info(f"[Step 3&4] Batch {batch_idx + 1}/{total_batches}: Processing {len(batch_repos)} repos")
                logger.info(f"[Step 3&4] {'='*60}")

                processed = await asyncio.to_thread(
                    process_repos_batch,
                    batch_repos,
                    config["repos_raw"],
                    config["repos_filtered"],
                    config["repos_processed"],
                )

                for full_name in processed:
                    processed_fullnames.add(full_name)
                    for repo in batch_repos:
                        if repo.get("full_name") == full_name:
                            processed_urls.add(repo.get("url"))
                            break

                processed_this_round += len(processed)
                logger.info(
                    f"[Step 3&4] ✓ Batch {batch_idx + 1}/{total_batches}: {len(processed)}/{len(batch_repos)} repos processed successfully"
                )

            return processed_this_round

        if not watch:
            try:
                n = await process_once()
                if n == 0:
                    logger.info("[Step 3&4] No new repos to process this round; exiting (--watch not enabled)")
                else:
                    logger.info(f"[Step 3&4] Round finished: {n} newly processed repos; exiting (--watch not enabled)")
            except Exception as e:
                logger.error(f"[Step 3&4] Error while processing relevant repos: {e}")
            return

        # watch loop
        while True:
            try:
                n = await process_once()
                if n > 0:
                    consecutive_empty_checks = 0
                else:
                    consecutive_empty_checks += 1
                    if consecutive_empty_checks >= max_idle:
                        logger.info(f"[Step 3&4] Idle for {consecutive_empty_checks} consecutive polls; exiting (watch mode)")
                        break
                await asyncio.sleep(poll_interval)
            except Exception as e:
                consecutive_empty_checks += 1
                logger.error(f"[Step 3&4] Error while processing relevant repos: {e}")
                if consecutive_empty_checks >= max_idle:
                    logger.error("[Step 3&4] Too many consecutive errors; exiting (watch mode)")
                    break
                await asyncio.sleep(poll_interval)

        logger.info(f"[Step 3&4] [Done] Steps 3 & 4 exiting: {len(processed_urls)} relevant repos processed")

    background_task = None
    if args.mode in {"all"}:
        # Start background task for Step 3 & 4 (will run in parallel with Step 2)
        background_task = asyncio.create_task(run_step34(watch=True))
        logger.info("✓ Background task started: Steps 3 & 4 (clone and filter) will run in parallel with Step 2")
    elif args.mode == "step34":
        # step34 only
        await run_step34(watch=bool(args.watch))
        return

    # At this point args.mode is either "step2" or "all"; both use the same Step 2 banner
    logger.info("[Step 2] " + "=" * 60 + "\n[Step 2] Step 2: Check Relevance with LLM (Batch Concurrent)\n[Step 2] " + "=" * 60)

    # read to check list
    df_to_check = pd.read_csv(config["repos_searched"])
    total_repos = len(df_to_check)
    to_check_urls = set(df_to_check["url"].tolist())

    # read already checked list
    already_checked_urls = set()
    if repos_check_history.exists():
        df_checked = pd.read_csv(repos_check_history)
        already_checked_urls = set(df_checked["url"].tolist())
        logger.info(f"[Step 2] Resume: Already checked {len(already_checked_urls)}/{total_repos} repos")

    # pending repos to check
    pending_urls = to_check_urls - already_checked_urls
    unchecked = df_to_check[df_to_check["url"].isin(pending_urls)].to_dict("records")

    if not unchecked:
        logger.info(f"[Step 2] [Skip] Step 2: All {total_repos} repos have been checked")
        # Still need to generate repos_checked.csv if it doesn't exist
        if not config["repos_checked"].exists():
            # collect all relevant repos from history file
            all_repos = df_to_check.to_dict("records")
            relevant_repos = []
            if repos_check_history.exists():
                df_history = pd.read_csv(repos_check_history)
                if not df_history.empty:
                    relevant_urls = set(df_history[df_history["is_relevant"] == "YES"]["url"].tolist())
                    relevant_repos = [r for r in all_repos if r["url"] in relevant_urls]

            # deduplicate by url and save
            if relevant_repos:
                df_relevant = pd.DataFrame(relevant_repos)
                df_relevant = df_relevant.drop_duplicates(subset=["url"])
                df_relevant.to_csv(config["repos_checked"], index=False, encoding="utf-8")
                logger.info(f"[Step 2] [Done] Step 2: {len(df_relevant)} relevant repos (deduplicated)")
            else:
                pd.DataFrame(columns=df_to_check.columns).to_csv(config["repos_checked"], index=False)
                logger.info("[Step 2] [Done] Step 2: 0 relevant repos")
    else:
        logger.info(f"[Step 2] Pending: {len(unchecked)} repos to check")
        # create parser
        parser = JsonOutputParser(pydantic_object=RelevanceResult)
        format_instructions = parser.get_format_instructions()
        keywords_str = ", ".join(expanded)
        system_content = f"""You are an expert at reading GitHub README.md files thoroughly and determining whether the repository hosts scientific code that is relevant to the scientific disciplines of {keywords_str}.
Your task is to decide if the repository's scientific code is related to these disciplines.
Only answer based on the information available in the README, repository description, and topics.
{format_instructions}"""

        # Check if using reasoning model (e.g., Qwen3-32B-AWQ)
        is_reasoning_model = "qwen" in config["model"].lower() or "reasoning" in config["model"].lower()

        # Get base URLs for parallel processing
        base_urls = config["base_urls"]
        num_gpus = len(base_urls)
        logger.info(f"[Step 2] Using {num_gpus} VLLM service(s): {base_urls}")

        # concurrent check function with base_url parameter
        async def check_one(repo, base_url: str):
            repo_name = repo['full_name']
            logger.info(f"[Step 2] 🔄 Start checking: {repo_name}")
            try:
                readme = (await get_readme(repo["owner"], repo["name"], config["github_token"]))[:8000]
                logger.info(f"[Step 2] ✓ Fetched README: {repo_name}, length: {len(readme)}")
            except Exception as e:
                logger.error(f"[Step 2] ✗ Failed to fetch README: {repo_name}, error: {e}")
                return None
            messages = [
                {"role": "system", "content": system_content},
                {
                    "role": "user",
                    "content": f"""Think before you respond. Your answer should be based on your thorough understanding of the content of the README.md file.
Does the README.md file indicate that the repository hosts code related to the scientific disciplines of {keywords_str}?
Repository: {repo['full_name']}
Description: {repo['description']}
Topics: {repo['topics']}
README: {readme}
Answer by 'YES' or 'NO' in the relevant field. And provide your reasoning in the reason field.""",
                },
            ]
            try:
                logger.info(f"[Step 2] 🤖 Calling LLM: {repo_name} (base_url: {base_url})")
                result = await call_llm(
                    messages,
                    config["model"],
                    base_url,  # Use provided base_url
                    config["api_key"],
                    pydantic_object=RelevanceResult,
                    log_file=config["log_file"],
                    temperature=0.1,
                    max_tokens=config["max_tokens"],  # Limit generation length
                )

                # Check whether the LLM call succeeded
                if result is None:
                    logger.error(f"[Step 2] ❌ LLM call failed (timeout or error): {repo_name}")
                    return None

                logger.info(f"[Step 2] ✓ LLM call finished: {repo_name}")

                # Additional post-processing for reasoning models if needed
                # (call_llm already handles most cases, but this is a safety check)
                if is_reasoning_model:
                    if not isinstance(result, dict) or "relevant" not in result:
                        logger.warning(f"[Step 2] Malformed reasoning-model response, attempting post-processing: {repo['full_name']}")
                        # If result is a string, extract from it
                        if isinstance(result, str):
                            result = extract_final_answer_from_reasoning(result, RelevanceResult)
                        # If result dict is malformed, try to extract from reason field
                        elif isinstance(result, dict):
                            raw_text = result.get("reason", str(result))
                            result = extract_final_answer_from_reasoning(raw_text, RelevanceResult)

                is_relevant = result.get("relevant", "").upper() == "YES"
                reason = result.get("reason", "")

                # return result for batch write
                return {
                    "keyword": repo["keyword"],
                    "full_name": repo["full_name"],
                    "url": repo["url"],
                    "description": repo["description"],
                    "topics": repo["topics"],
                    "is_relevant": "YES" if is_relevant else "NO",
                    "reason": reason,
                    "relevant": is_relevant,
                }

            except Exception as e:
                logger.error(f"[Step 2] Error checking {repo['full_name']}: {e}")
                return None

        # batch processing
        BATCH_SIZE = 20
        total_batches = (len(unchecked) + BATCH_SIZE - 1) // BATCH_SIZE

        for batch_idx in range(total_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = min(start_idx + BATCH_SIZE, len(unchecked))
            batch = unchecked[start_idx:end_idx]

            logger.info(f"[Step 2] \n{'='*60}")
            logger.info(f"[Step 2] 📦 Batch {batch_idx + 1}/{total_batches}: Processing {len(batch)} repos")
            logger.info(f"[Step 2] {'='*60}")

            # Split batch across multiple GPUs for parallel processing
            if num_gpus > 1:
                # Divide batch into sub-batches for each GPU
                sub_batch_size = len(batch) // num_gpus
                sub_batches = []
                for gpu_idx in range(num_gpus):
                    sub_start = gpu_idx * sub_batch_size
                    if gpu_idx == num_gpus - 1:
                        # Last GPU gets remaining items
                        sub_batch = batch[sub_start:]
                    else:
                        sub_batch = batch[sub_start:sub_start + sub_batch_size]
                    if sub_batch:  # Only add non-empty batches
                        sub_batches.append((sub_batch, base_urls[gpu_idx], gpu_idx))
                        logger.info(f"[Step 2] GPU {gpu_idx + 1} ({base_urls[gpu_idx]}): {len(sub_batch)} repos")

                # Process sub-batches in parallel across GPUs
                gpu_tasks = []
                for sub_batch, base_url, gpu_idx in sub_batches:
                    tasks = [check_one(repo, base_url) for repo in sub_batch]
                    gpu_tasks.append(asyncio.gather(*tasks))

                # Execute all GPU tasks in parallel with timeout protection
                if gpu_tasks:
                    logger.info(f"[Step 2] ⏳ Waiting for {len(gpu_tasks)} GPU task group(s) to finish...")
                    try:
                        # Timeout protection for the whole batch: at least 5 minutes (300 s), or 2 seconds per repo, whichever is larger
                        batch_timeout = max(300, len(batch) * 2)
                        gpu_results = await asyncio.wait_for(
                            asyncio.gather(*gpu_tasks, return_exceptions=True),
                            timeout=batch_timeout
                        )
                        # Flatten results from all GPUs
                        batch_results = []
                        for sublist in gpu_results:
                            if isinstance(sublist, Exception):
                                logger.error(f"[Step 2] GPU task group raised an exception: {sublist}")
                                batch_results.append(None)
                            else:
                                batch_results.extend(sublist)
                    except asyncio.TimeoutError:
                        logger.error(f"[Step 2] ❌ Batch {batch_idx + 1} timed out ({batch_timeout}s), skipping remaining tasks")
                        batch_results = [None] * len(batch)  # mark as failed
                else:
                    batch_results = []
            else:
                # Single GPU: use original logic with timeout
                logger.info(f"[Step 2] ⏳ Waiting for {len(batch)} tasks to finish...")
                try:
                    batch_timeout = max(300, len(batch) * 2)
                    batch_results = await asyncio.wait_for(
                        asyncio.gather(*[check_one(r, base_urls[0]) for r in batch], return_exceptions=True),
                        timeout=batch_timeout
                    )
                    # Convert exception results to None
                    batch_results = [r if not isinstance(r, Exception) else None for r in batch_results]
                except asyncio.TimeoutError:
                    logger.error(f"[Step 2] ❌ Batch {batch_idx + 1} timed out ({batch_timeout}s), skipping remaining tasks")
                    batch_results = [None] * len(batch)

            # batch write result to history file
            valid_results = [r for r in batch_results if r is not None]
            if valid_results:
                df_batch = pd.DataFrame(valid_results)
                # only keep columns to write to CSV
                df_batch = df_batch[
                    ["keyword", "full_name", "url", "description", "topics", "is_relevant", "reason"]
                ]
                df_batch.to_csv(
                    repos_check_history,
                    mode="a",
                    header=not repos_check_history.exists(),
                    index=False,
                    encoding="utf-8",
                )

                for result in valid_results:
                    status = "✓ Relevant" if result["relevant"] else "✗ Not relevant"
                    logger.info(f"[Step 2] {status}: {result['full_name']}")

            logger.info(f"[Step 2] ✓ Batch {batch_idx + 1}/{total_batches}: {len(valid_results)} repos saved")

        # collect all relevant repos from history file
        all_repos = df_to_check.to_dict("records")
        relevant_repos = []
        if repos_check_history.exists():
            df_history = pd.read_csv(repos_check_history)
            if not df_history.empty:
                relevant_urls = set(df_history[df_history["is_relevant"] == "YES"]["url"].tolist())
                relevant_repos = [r for r in all_repos if r["url"] in relevant_urls]

        # deduplicate by url and save
        if relevant_repos:
            df_relevant = pd.DataFrame(relevant_repos)
            df_relevant = df_relevant.drop_duplicates(subset=["url"])
            df_relevant.to_csv(config["repos_checked"], index=False, encoding="utf-8")
            logger.info(f"[Step 2] [Done] Step 2: {len(df_relevant)} relevant repos (deduplicated)")
        else:
            pd.DataFrame(columns=df_to_check.columns).to_csv(config["repos_checked"], index=False)
            logger.info("[Step 2] [Done] Step 2: 0 relevant repos")

    if args.mode == "all" and background_task is not None:
        # Step 2 completed, wait for background task to process remaining repos
        logger.info("[Step 2] Step 2 finished; waiting for the Step 3 & 4 background task to process all relevant repos...")
        try:
            await asyncio.wait_for(asyncio.shield(background_task), timeout=600)
            logger.info("[Step 3&4] ✓ Step 3 & 4 background task finished")
        except asyncio.TimeoutError:
            logger.info("[Step 3&4] ⏱️ Waited more than 600 seconds for Steps 3 & 4; they will keep processing in the background (not cancelled)...")
            logger.info("[Step 3&4] Tip: on the next run, already-processed repos are detected automatically and only new ones are processed")


if __name__ == "__main__":
    asyncio.run(main())
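
Typical invocations, based on the flags defined above (paths and endpoints are placeholders for whatever your environment provides):

    python data1/main_v2.py --mode all --workdir ./workdir                # Step 2 plus Steps 3 & 4 in the background
    python data1/main_v2.py --mode step2                                  # relevance check only
    python data1/main_v2.py --mode step34 --watch --poll_interval 30      # keep polling repos_check_history.csv and clone/filter new YES repos

GITHUB_TOKEN and OPENAI_API_KEY are read from the environment (or a .env file via load_dotenv).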