MuLabPKU committed
Commit 2c31aca · verified · 1 parent: 9b3564d

Add files using upload-large-folder tool

__pycache__/generate_content_embeddings.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
__pycache__/generate_content_embeddings_fixed.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
astropy_code_embedding.out ADDED
The diff for this file is too large to render. See raw diff
 
commands.txt ADDED
@@ -0,0 +1,96 @@
1
+ CUDA_VISIBLE_DEVICES=1 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name Fast-F1 --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/Fast-F1 > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_Fast-F1.out
2
+ CUDA_VISIBLE_DEVICES=1 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name Flexget --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/Flexget > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_Flexget.out
3
+ CUDA_VISIBLE_DEVICES=1 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name LLaMA-Factory --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/LLaMA-Factory > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_LLaMA-Factory.out
4
+ CUDA_VISIBLE_DEVICES=3 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name PyBaMM --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/PyBaMM > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_PyBaMM.out
5
+ CUDA_VISIBLE_DEVICES=3 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name PyPSA --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/PyPSA > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_PyPSA.out
6
+ CUDA_VISIBLE_DEVICES=3 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name Radicale --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/Radicale > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_Radicale.out
7
+ CUDA_VISIBLE_DEVICES=4 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name Solaar --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/Solaar > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_Solaar.out
8
+ CUDA_VISIBLE_DEVICES=4 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name WeasyPrint --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/WeasyPrint > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_WeasyPrint.out
9
+ CUDA_VISIBLE_DEVICES=5 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name astroid --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/astroid > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_astroid.out
10
+ CUDA_VISIBLE_DEVICES=5 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name conda --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/conda > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_conda.out
11
+ CUDA_VISIBLE_DEVICES=5 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name django --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/django > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_django.out
12
+ CUDA_VISIBLE_DEVICES=5 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name dspy --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/dspy > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_dspy.out
13
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name dvc --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/dvc > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_dvc.out
14
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name faker --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/faker > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_faker.out
15
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name fastmcp --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/fastmcp > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_fastmcp.out
16
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name faststream --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/faststream > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_faststream.out
17
+
18
+
19
+
20
+
21
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name feature_engine --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/feature_engine > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_feature_engine.out
22
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name filesystem_spec --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/filesystem_spec > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_filesystem_spec.out
23
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name flask --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/flask > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_flask.out
24
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name fonttools --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/fonttools > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_fonttools.out
25
+
26
+
27
+
28
+ CUDA_VISIBLE_DEVICES=1 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name fusesoc --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/fusesoc > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_fusesoc.out
29
+ CUDA_VISIBLE_DEVICES=1 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name geopandas --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/geopandas > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_geopandas.out
30
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name gitingest --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/gitingest > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_gitingest.out
31
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name icloud_photos_downloader --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/icloud_photos_downloader > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_icloud_photos_downloader.out
32
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name instructlab --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/instructlab > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_instructlab.out
33
+ CUDA_VISIBLE_DEVICES=3 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name jax --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/jax > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_jax.out
34
+ CUDA_VISIBLE_DEVICES=3 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name kedro --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/kedro > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_kedro.out
35
+ CUDA_VISIBLE_DEVICES=3 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name kirara-ai --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/kirara-ai > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_kirara-ai.out
36
+ CUDA_VISIBLE_DEVICES=4 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name linkding --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/linkding > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_linkding.out
37
+ CUDA_VISIBLE_DEVICES=4 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name litellm --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/litellm > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_litellm.out
38
+ CUDA_VISIBLE_DEVICES=4 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name llama-stack --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/llama-stack > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_llama-stack.out
39
+ CUDA_VISIBLE_DEVICES=4 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name llama_deploy --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/llama_deploy > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_llama_deploy.out
40
+ CUDA_VISIBLE_DEVICES=5 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name loguru --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/loguru > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_loguru.out
41
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name marshmallow --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/marshmallow > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_marshmallow.out
42
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name mcp-atlassian --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/mcp-atlassian > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_mcp-atlassian.out
43
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name mesa --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/mesa > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_mesa.out
44
+ CUDA_VISIBLE_DEVICES=7 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name mypy --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/mypy > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_mypy.out
45
+ CUDA_VISIBLE_DEVICES=7 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name networkx --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/networkx > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_networkx.out
46
+ CUDA_VISIBLE_DEVICES=7 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name ntc-templates --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/ntc-templates > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_ntc-templates.out
47
+ CUDA_VISIBLE_DEVICES=7 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name openai-agents-python --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/openai-agents-python > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_openai-agents-python.out
48
+
49
+
50
+
51
+
52
+
53
+ CUDA_VISIBLE_DEVICES=1 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name patroni --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/patroni > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_patroni.out
54
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name pdm --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/pdm > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_pdm.out
55
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name pipenv --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/pipenv > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_pipenv.out
56
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name poetry --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/poetry > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_poetry.out
57
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name privacyidea --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/privacyidea > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_privacyidea.out
58
+ CUDA_VISIBLE_DEVICES=3 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name pydicom --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/pydicom > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_pydicom.out
59
+ CUDA_VISIBLE_DEVICES=3 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name pymdown-extensions --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/pymdown-extensions > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_pymdown-extensions.out
60
+ CUDA_VISIBLE_DEVICES=4 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name pyomo --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/pyomo > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_pyomo.out
61
+ CUDA_VISIBLE_DEVICES=4 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name python-control --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/python-control > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_python-control.out
62
+ CUDA_VISIBLE_DEVICES=4 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name python-telegram-bot --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/python-telegram-bot > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_python-telegram-bot.out
63
+ CUDA_VISIBLE_DEVICES=5 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name python --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/python > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_python.out
64
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name pyvista --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/pyvista > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_pyvista.out
65
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name qtile --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/qtile > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_qtile.out
66
+ CUDA_VISIBLE_DEVICES=7 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name reflex --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/reflex > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_reflex.out
67
+ CUDA_VISIBLE_DEVICES=7 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name scipy --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/scipy > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_scipy.out
68
+
69
+
70
+
71
+
72
+ CUDA_VISIBLE_DEVICES=0 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name scrapy-splash --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/scrapy-splash > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_scrapy-splash.out
73
+ CUDA_VISIBLE_DEVICES=1 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name segmentation_models.pytorch --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/segmentation_models.pytorch > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_segmentation_models.pytorch.out
74
+ CUDA_VISIBLE_DEVICES=1 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name sh --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/sh > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_sh.out
75
+ CUDA_VISIBLE_DEVICES=1 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name smart_open --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/smart_open > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_smart_open.out
76
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name smolagents --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/smolagents > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_smolagents.out
77
+ CUDA_VISIBLE_DEVICES=2 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name sqlfluff --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/sqlfluff > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_sqlfluff.out
78
+ CUDA_VISIBLE_DEVICES=3 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name sqllineage --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/sqllineage > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_sqllineage.out
79
+ CUDA_VISIBLE_DEVICES=3 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name starlette --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/starlette > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_starlette.out
80
+ CUDA_VISIBLE_DEVICES=3 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name streamlink --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/streamlink > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_streamlink.out
81
+ CUDA_VISIBLE_DEVICES=4 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name tablib --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/tablib > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_tablib.out
82
+ CUDA_VISIBLE_DEVICES=5 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name torchtune --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/torchtune > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_torchtune.out
83
+ CUDA_VISIBLE_DEVICES=5 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name tox --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/tox > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_tox.out
84
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name transitions --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/transitions > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_transitions.out
85
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name trimesh --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/trimesh > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_trimesh.out
86
+ CUDA_VISIBLE_DEVICES=6 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name twine --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/twine > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_twine.out
87
+ CUDA_VISIBLE_DEVICES=7 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name urllib3 --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/urllib3 > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_urllib3.out
88
+ CUDA_VISIBLE_DEVICES=7 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name wemake-python-styleguide --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/wemake-python-styleguide > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_wemake-python-styleguide.out
89
+ CUDA_VISIBLE_DEVICES=0 nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name yt-dlp --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/yt-dlp > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_yt-dlp.out
90
+
91
+
92
+
93
+
94
+
95
+
96
+
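Each line above is a standalone nohup launch of run_full_generation.py for one repository, pinned to a GPU with a CUDA_VISIBLE_DEVICES prefix and redirected to a per-repo log under logs/. A minimal dispatcher sketch for a file in this format, assuming one complete shell command per non-empty line (the helper below is illustrative only, not part of the commit):

import subprocess
import time
from pathlib import Path

def launch_commands(path: str = "commands.txt", max_parallel: int = 4) -> None:
    """Run each non-empty line of `path` as a shell command, a few at a time."""
    pending = [ln.strip() for ln in Path(path).read_text().splitlines() if ln.strip()]
    running = []
    while pending or running:
        # Drop processes that have finished
        running = [p for p in running if p.poll() is None]
        # Top up to the parallelism limit
        while pending and len(running) < max_parallel:
            cmd = pending.pop(0)
            # shell=True because each line carries an env-var prefix, nohup and a `>` redirection
            running.append(subprocess.Popen(cmd, shell=True))
        time.sleep(5)

if __name__ == "__main__":
    launch_commands()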
find_what_repo_live_need_to_embed.py ADDED
@@ -0,0 +1,66 @@
+ import os
+ import random
+
+ def find_missing_repos():
+     repos_dir = "/data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/repos"
+     embedding_dir = "/data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding"
+     cnt = 0
+     # Walk every folder under the repos directory
+     for folder_name in os.listdir(repos_dir):
+         folder_path = os.path.join(repos_dir, folder_name)
+
+         # Only consider directories
+         if os.path.isdir(folder_path):
+             A = folder_name
+
+             # Check whether a folder named A or output_A already exists under embedding_dir
+             path_A = os.path.join(embedding_dir, A)
+             path_output_A = os.path.join(embedding_dir, f"output_{A}")
+
+             # If neither exists, report the repo
+             if not os.path.exists(path_A) and not os.path.exists(path_output_A):
+                 # Only count repos that have an A.timed.pt file under the pyggraph directory
+                 pyggraph_file = f"/data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/pyggraph/{A}.timed.pt"
+                 if os.path.exists(pyggraph_file):
+                     cnt += 1
+                     print(A)
+
+     print(f"Total missing repos: {cnt}")
+
+ def generate_commands():
+
+     repos_dir = "/data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/repos"
+     embedding_dir = "/data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding"
+     commands = []
+
+     # Walk every folder under the repos directory
+     for folder_name in os.listdir(repos_dir):
+         folder_path = os.path.join(repos_dir, folder_name)
+
+         # Only consider directories
+         if os.path.isdir(folder_path):
+             A = folder_name
+
+             # Check whether a folder named A or output_A already exists under embedding_dir
+             path_A = os.path.join(embedding_dir, A)
+             path_output_A = os.path.join(embedding_dir, f"output_{A}")
+
+             # If neither exists, generate a launch command for this repo
+             if not os.path.exists(path_A) and not os.path.exists(path_output_A):
+                 # Only include repos that have an A.timed.pt file under the pyggraph directory
+                 pyggraph_file = f"/data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/pyggraph/{A}.timed.pt"
+                 if os.path.exists(pyggraph_file):
+                     gpu_id = random.randint(0, 7)
+                     command = f"CUDA_VISIBLE_DEVICES={gpu_id} nohup python /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/run_full_generation.py --repo_name {A} --batch_size 8 --output_dir /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/{A} > /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/logs/node_content_embedding_{A}.out"
+                     commands.append(command)
+
+     # Write the commands to a txt file
+     with open("/data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/commands.txt", "w") as f:
+         for command in commands:
+             f.write(command + "\n")
+
+     print(f"Generated {len(commands)} commands and saved to commands.txt")
+
+ if __name__ == "__main__":
+     find_missing_repos()
+     generate_commands()
generate_content_embeddings.py ADDED
@@ -0,0 +1,382 @@
+ # Generate per-node content embeddings for a repository graph with vLLM and Qwen3-Embedding-8B.
+
+ import torch
+ import datasets
+ import json
+ import numpy as np
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+ import argparse
+ from tqdm import tqdm
+ import gc
+
+ try:
+     from safetensors.torch import save_file
+     SAFETENSORS_AVAILABLE = True
+     print("✅ safetensors available")
+ except ImportError:
+     print("❌ safetensors unavailable, falling back to numpy format")
+     SAFETENSORS_AVAILABLE = False
+
+ # vLLM-related imports
+ try:
+     from vllm import LLM
+     from transformers import AutoTokenizer
+     VLLM_AVAILABLE = True
+     print("✅ vLLM available")
+ except ImportError:
+     print("❌ vLLM unavailable")
+     VLLM_AVAILABLE = False
+
+ class ContentEmbeddingGenerator:
+
+     def __init__(self,
+                  repo_name: str = "astropy",
+                  model_path: str = "/data/wangjuntong/FROM_120/data1/.cache/modelscope/hub/models/Qwen/Qwen3-Embedding-8B",
+                  output_dir: str = "./output"):
+         """
+         Args:
+             repo_name: repository name
+             model_path: path to the embedding model
+             output_dir: output directory
+         """
+         self.repo_name = repo_name
+         self.model_path = model_path
+         self.output_dir = Path(output_dir)
+         self.output_dir.mkdir(parents=True, exist_ok=True)
+
+         self.graph_file = f"/data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/pyggraph/{repo_name}.timed.pt"
+         self.dataset_dir = f"/data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/savedata/repos/{repo_name}/"
+
+         self.model = None
+         self.tokenizer = None
+         self._load_model()
+
+     def _load_model(self):
+         """Load the vLLM embedding model."""
+         if not VLLM_AVAILABLE:
+             print("❌ vLLM unavailable, cannot generate embeddings")
+             return
+
+         try:
+             print(f"🔄 Loading embedding model: {self.model_path}")
+             self.model = LLM(
+                 model=self.model_path,
+                 task="embed",
+                 enforce_eager=True,
+                 gpu_memory_utilization=0.9,
+                 max_model_len=32768
+             )
+
+             self.tokenizer = AutoTokenizer.from_pretrained(
+                 self.model_path,
+                 padding_side='left',
+                 trust_remote_code=True
+             )
+             print("✅ Model loaded successfully")
+
+         except Exception as e:
+             print(f"❌ Model loading failed: {e}")
+             self.model = None
+             self.tokenizer = None
+
+     def load_graph_data(self) -> torch.Tensor:
+         """Load the graph data and return the list of node IDs."""
+         print(f"🔄 Loading graph data: {self.graph_file}")
+
+         try:
+             data = torch.load(self.graph_file, map_location='cpu', weights_only=False)
+             num_nodes = data.num_nodes
+             print(f"✅ Graph data loaded, number of nodes: {num_nodes}")
+
+             node_ids = torch.arange(num_nodes, dtype=torch.long)
+             return node_ids
+
+         except Exception as e:
+             print(f"❌ Failed to load graph data: {e}")
+             raise
+
+     def load_dataset(self) -> datasets.Dataset:
+
+         print(f"🔄 Loading raw dataset: {self.dataset_dir}")
+
+         try:
+             dataset = datasets.Dataset.load_from_disk(self.dataset_dir)
+             print(f"✅ Dataset loaded, number of samples: {len(dataset)}")
+             return dataset
+
+         except Exception as e:
+             print(f"❌ Failed to load dataset: {e}")
+             raise
+
+     def extract_node_contents(self, node_ids: torch.Tensor, dataset: datasets.Dataset) -> Dict[int, Dict]:
+
+         print("🔄 Extracting node contents...")
+
+         node_contents = {}
+         dataset_len = len(dataset)
+
+         for node_id in tqdm(node_ids, desc="Extracting node contents"):
+             node_id_int = int(node_id)
+
+             if node_id_int < dataset_len:
+
+                 sample = dataset[node_id_int]
+
+                 node_contents[node_id_int] = {
+                     'node_id': node_id_int,
+                     'name': sample.get('name', ''),
+                     'path': sample.get('path', ''),
+                     'attr': sample.get('attr', ''),
+                     'type': sample.get('type', ''),
+                     'start_commit': sample.get('start_commit', ''),
+                     'end_commit': sample.get('end_commit', '')
+                 }
+             else:
+
+                 node_contents[node_id_int] = {
+                     'node_id': node_id_int,
+                     'name': '',
+                     'path': '',
+                     'attr': '',
+                     'type': '',
+                     'start_commit': '',
+                     'end_commit': ''
+                 }
+
+         print(f"✅ Node content extraction finished, nodes with content: {sum(1 for v in node_contents.values() if v['attr'])}")
+         return node_contents
+
+     def generate_embeddings(self, node_contents: Dict[int, Dict], batch_size: int = 32) -> Dict[int, np.ndarray]:
+         """Generate embeddings for all nodes, including empty ones."""
+         if not self.model:
+             print("❌ Model not loaded, skipping embedding generation")
+             return {}
+
+         print("🔄 Generating embeddings...")
+
+         texts = []
+         node_id_order = []
+
+         for node_id in sorted(node_contents.keys()):
+             content = node_contents[node_id]
+             text = content['name'] + content['path'] + content['attr']
+
+             if not text or text.strip() == '{}' or text.strip() == '':
+                 text = " "
+
+             texts.append(text)
+             node_id_order.append(node_id)
+
+         print(f"Number of texts to embed: {len(texts)}")
+
+         embeddings = {}
+         total_batches = (len(texts) + batch_size - 1) // batch_size
+
+         for i in tqdm(range(0, len(texts), batch_size), desc="Generating embeddings", total=total_batches):
+             batch_texts = texts[i:i + batch_size]
+             batch_node_ids = node_id_order[i:i + batch_size]
+
+             try:
+                 # Truncate each text to the model's context length before embedding
+                 tokenized = self.tokenizer(
+                     batch_texts,
+                     padding=True,
+                     truncation=True,
+                     max_length=32768,
+                     return_tensors="pt"
+                 )
+
+                 processed_texts = self.tokenizer.batch_decode(
+                     tokenized["input_ids"],
+                     skip_special_tokens=True
+                 )
+
+                 emb_outputs = self.model.embed(processed_texts)
+
+                 for j, node_id in enumerate(batch_node_ids):
+                     emb_output = emb_outputs[j]
+
+                     # The output layout differs across vLLM versions; try the known attributes
+                     if hasattr(emb_output, "embedding"):
+                         embedding = emb_output.embedding
+                     elif hasattr(emb_output, "hidden_states"):
+                         embedding = emb_output.hidden_states
+                     elif hasattr(emb_output, "outputs") and hasattr(emb_output.outputs, "embedding"):
+                         embedding = emb_output.outputs.embedding
+                     elif hasattr(emb_output, "outputs") and hasattr(emb_output.outputs, "hidden_states"):
+                         embedding = emb_output.outputs.hidden_states
+                     else:
+                         raise ValueError("Cannot extract embedding from model output")
+
+                     if hasattr(embedding, "cpu"):
+                         embedding = embedding.cpu().numpy()
+                     elif hasattr(embedding, "numpy"):
+                         embedding = embedding.numpy()
+                     else:
+                         embedding = np.array(embedding)
+
+                     embeddings[node_id] = embedding
+
+                 if torch.cuda.is_available():
+                     torch.cuda.empty_cache()
+                 gc.collect()
+
+             except Exception as e:
+                 print(f"❌ Embedding generation failed for batch {i//batch_size + 1}: {e}")
+                 # Fall back to zero vectors for the failed batch
+                 for node_id in batch_node_ids:
+                     embeddings[node_id] = np.zeros(4096, dtype=np.float32)
+
+         print(f"✅ Embedding generation finished, embeddings produced: {len(embeddings)}")
+         return embeddings
+
+     def save_content_json(self, node_contents: Dict[int, Dict]):
+         """Save node contents as JSON."""
+         output_file = self.output_dir / f"{self.repo_name}_node_contents.json"
+         print(f"🔄 Saving node contents to: {output_file}")
+
+         sorted_contents = {}
+         for node_id in sorted(node_contents.keys()):
+             sorted_contents[str(node_id)] = node_contents[node_id]
+
+         try:
+             with open(output_file, 'w', encoding='utf-8') as f:
+                 json.dump(sorted_contents, f, indent=2, ensure_ascii=False)
+             print("✅ Node contents saved")
+
+         except Exception as e:
+             print(f"❌ Failed to save node contents: {e}")
+             raise
+
+     def save_embeddings_safetensor(self, embeddings: Dict[int, np.ndarray], all_node_ids: List[int] = None):
+         """Save embeddings in safetensors format, ordered by node_id."""
+         if not embeddings:
+             print("⚠️ No embedding data, skipping save")
+             return
+
+         print("🔄 Saving embeddings...")
+
+         # Back-fill any node that is missing an embedding
+         if all_node_ids:
+             for node_id in all_node_ids:
+                 if node_id not in embeddings:
+                     print(f"⚠️ Node {node_id} is missing an embedding, using a zero vector")
+                     embeddings[node_id] = np.zeros(4096, dtype=np.float32)
+
+         sorted_node_ids = sorted(embeddings.keys())
+         embedding_list = []
+
+         for node_id in sorted_node_ids:
+             embedding = embeddings[node_id]
+             if isinstance(embedding, np.ndarray):
+                 embedding_list.append(torch.from_numpy(embedding))
+             else:
+                 embedding_list.append(torch.tensor(embedding))
+
+         embeddings_tensor = torch.stack(embedding_list, dim=0)
+         print(f"Embeddings tensor shape: {embeddings_tensor.shape}")
+
+         print(f"Node ID range: {min(sorted_node_ids)} - {max(sorted_node_ids)}")
+         print(f"Node IDs contiguous: {sorted_node_ids == list(range(min(sorted_node_ids), max(sorted_node_ids) + 1))}")
+
+         if SAFETENSORS_AVAILABLE:
+             output_file = self.output_dir / f"{self.repo_name}_embeddings.safetensors"
+             tensors = {"embeddings": embeddings_tensor}
+             save_file(tensors, output_file)
+             print(f"✅ Safetensors saved to: {output_file}")
+         else:
+             output_file = self.output_dir / f"{self.repo_name}_embeddings.npz"
+             np.savez_compressed(output_file, embeddings=embeddings_tensor.numpy())
+             print(f"✅ Numpy array saved to: {output_file}")
+
+         # Save the index mapping
+         index_file = self.output_dir / f"{self.repo_name}_embedding_index.json"
+         index_mapping = {
+             'node_ids': sorted_node_ids,
+             'embedding_dim': embeddings_tensor.shape[1],
+             'num_nodes': len(sorted_node_ids),
+             'format': 'safetensors' if SAFETENSORS_AVAILABLE else 'numpy',
+             'min_node_id': min(sorted_node_ids),
+             'max_node_id': max(sorted_node_ids),
+             'is_continuous': sorted_node_ids == list(range(min(sorted_node_ids), max(sorted_node_ids) + 1))
+         }
+
+         with open(index_file, 'w', encoding='utf-8') as f:
+             json.dump(index_mapping, f, indent=2)
+         print(f"✅ Index mapping saved to: {index_file}")
+
+     def run(self, batch_size: int = 32):
+         """Run the full pipeline."""
+         print(f"🚀 Processing repository: {self.repo_name}")
+
+         try:
+             # 1. Load graph data to get node IDs
+             node_ids = self.load_graph_data()
+
+             # 2. Load the raw dataset
+             dataset = self.load_dataset()
+
+             # 3. Extract node contents
+             node_contents = self.extract_node_contents(node_ids, dataset)
+
+             # 4. Save node contents as JSON
+             #self.save_content_json(node_contents)
+
+             # 5. Generate embeddings (for all nodes)
+             embeddings = self.generate_embeddings(node_contents, batch_size)
+
+             # 6. Save embeddings, passing all node_ids to guarantee completeness
+             self.save_embeddings_safetensor(embeddings, all_node_ids=[int(nid) for nid in node_ids])
+
+             print("🎉 Done!")
+             print("📊 Statistics:")
+             print(f" - Total nodes: {len(node_contents)}")
+             print(f" - Nodes with content: {sum(1 for v in node_contents.values() if v['attr'] and v['attr'] != '{}')}")
+             print(f" - Embeddings generated: {len(embeddings)}")
+
+         except Exception as e:
+             print(f"❌ Processing failed: {e}")
+             import traceback
+             traceback.print_exc()
+             raise
+
+ def main():
+     parser = argparse.ArgumentParser(description="Generate node contents and embeddings")
+     parser.add_argument('--repo_name', type=str, default='astropy', help='repository name')
+     parser.add_argument('--model_path', type=str,
+                         default='/data/wangjuntong/FROM_120/data1/.cache/modelscope/hub/models/Qwen/Qwen3-Embedding-8B',
+                         help='path to the embedding model')
+     parser.add_argument('--output_dir', type=str, default='./output', help='output directory')
+     parser.add_argument('--batch_size', type=int, default=1, help='batch size')
+
+     args = parser.parse_args()
+
+     generator = ContentEmbeddingGenerator(
+         repo_name=args.repo_name,
+         model_path=args.model_path,
+         output_dir=args.output_dir
+     )
+
+     generator.run(batch_size=args.batch_size)
+
+ if __name__ == "__main__":
+     main()
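save_embeddings_safetensor() stores one "embeddings" tensor with a row per node, sorted by node ID, and writes {repo}_embedding_index.json with the node_ids order, embedding_dim and num_nodes. A minimal reader sketch under those assumptions (illustrative helper, not part of the commit):

import json
from pathlib import Path

from safetensors.torch import load_file

def load_node_embeddings(output_dir: str, repo_name: str):
    """Return the (num_nodes, embedding_dim) tensor and the node-ID order it follows."""
    out = Path(output_dir)
    tensors = load_file(str(out / f"{repo_name}_embeddings.safetensors"))
    embeddings = tensors["embeddings"]          # row i corresponds to node_ids[i]
    index = json.loads((out / f"{repo_name}_embedding_index.json").read_text())
    node_ids = index["node_ids"]
    assert embeddings.shape[0] == index["num_nodes"]
    return embeddings, node_ids

# e.g. emb, ids = load_node_embeddings("./astropy", "astropy")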
generate_rewriter_embedding_vllm.py ADDED
@@ -0,0 +1,125 @@
+ """
+ Generate embedding for Queries from Rewriter's Inferer using vLLM and Qwen3-Embedding-8B
+ """
+
+ from vllm import LLM
+ from transformers import AutoTokenizer
+ import torch
+ import os
+ import tqdm
+ import json
+ import pickle
+ from datasets import Dataset
+
+ # Input and output paths
+ rewriter_output_path = "/data1/wangjuntong/CodeFuse-CGM-wxy/rewriter_output_post.json"
+ rewriter_embedding_path = "rewriter_embedding.pkl"
+
+ # Initialize vLLM model and tokenizer
+ print("Loading Qwen3-Embedding-8B model...")
+ model = LLM(
+     model="Qwen/Qwen3-Embedding-8B",
+     task="embed",
+     trust_remote_code=True
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(
+     'Qwen/Qwen3-Embedding-8B',
+     padding_side='left',
+     trust_remote_code=True,
+     local_files_only=os.path.exists('/home/wangjuntong/.cache/modelscope/hub/models/Qwen/Qwen3-Embedding-8B')
+ )
+
+ max_length = 32768
+
+ def get_embeddings(batch):
+     """
+     Generate embeddings for a batch of texts using vLLM
+     """
+     texts = batch["query"]
+
+     # Tokenize and decode (to ensure proper format)
+     prompt_token_ids = tokenizer(
+         texts,
+         padding=True,
+         truncation=True,
+         max_length=max_length,
+         return_tensors="pt",
+     )
+
+     prompts = tokenizer.batch_decode(prompt_token_ids["input_ids"], skip_special_tokens=True)
+
+     # Get embeddings using vLLM
+     emb_outputs = model.embed(prompts)
+
+     # Extract embeddings
+     embeddings = []
+     for out in emb_outputs:
+         # Extract embedding based on model output structure
+         if hasattr(out, "embedding"):
+             emb = out.embedding
+         elif hasattr(out, "outputs") and hasattr(out.outputs, "embedding"):
+             emb = out.outputs.embedding
+         else:
+             emb = out.outputs[0].embedding if out.outputs else None
+
+         if emb is None:
+             raise ValueError("Cannot extract embedding from model output")
+
+         # Convert to list if it's a tensor
+         if hasattr(emb, "tolist"):
+             emb = emb.tolist()
+         elif hasattr(emb, "cpu"):
+             emb = emb.cpu().numpy().tolist()
+
+         embeddings.append(emb)
+
+     return {"embedding": embeddings}
+
+ if __name__ == "__main__":
+     # Load rewriter output
+     with open(rewriter_output_path, 'r') as file:
+         rewriter_output_dict = json.load(file)
+
+     # Prepare dataset
+     data = []
+     if isinstance(rewriter_output_dict, dict):
+         for instance_id, item in rewriter_output_dict.items():
+             query = item.get("rewriter_inferer", "")
+             if query:  # Skip empty queries
+                 data.append({
+                     "instance_id": instance_id,
+                     "query": query
+                 })
+     elif isinstance(rewriter_output_dict, list):
+         for idx, item in enumerate(rewriter_output_dict):
+             query = item.get("rewriter_inferer", "")
+             if query:  # Skip empty queries
+                 data.append({
+                     "instance_id": str(idx),
+                     "query": query
+                 })
+
+     # Create dataset
+     dataset = Dataset.from_list(data)
+
+     # Process in batches using map
+     embedded_dataset = dataset.map(
+         get_embeddings,
+         batched=True,
+         batch_size=10000,  # Adjust batch size based on your GPU memory
+         remove_columns=["query"]
+     )
+
+     # Create final embedding dictionary
+     query_embedding_dict = {}
+     for item in tqdm.tqdm(embedded_dataset, desc="Organizing embeddings"):
+         instance_id = item["instance_id"]
+         embedding = item["embedding"]
+         query_embedding_dict[instance_id] = embedding
+
+     # Save embeddings
+     with open(rewriter_embedding_path, 'wb') as f:
+         pickle.dump(query_embedding_dict, f)
+
+     print(f"Saved query embeddings to {rewriter_embedding_path}")
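The script writes rewriter_embedding.pkl as a plain dict mapping instance_id to a list of floats. A minimal consumer sketch under that assumption (illustrative, not part of the commit):

import pickle

import torch

with open("rewriter_embedding.pkl", "rb") as f:
    query_embedding_dict = pickle.load(f)

instance_ids = list(query_embedding_dict.keys())
query_matrix = torch.tensor([query_embedding_dict[i] for i in instance_ids], dtype=torch.float32)
print(query_matrix.shape)  # (num_queries, embedding_dim)

# Cosine similarity against a (num_nodes, dim) node-embedding matrix `node_emb` would then be
# torch.nn.functional.normalize(query_matrix, dim=1) @ torch.nn.functional.normalize(node_emb, dim=1).T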
run_full_generation.py ADDED
@@ -0,0 +1,76 @@
+ #!/usr/bin/env python3
+
+ import sys
+ import os
+ import argparse
+ sys.path.append('/data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding')
+
+ from generate_content_embeddings import ContentEmbeddingGenerator
+
+ def main():
+     parser = argparse.ArgumentParser(description="Generate the full set of node contents and embeddings")
+     parser.add_argument('--repo_name', type=str, default='astropy', help='repository name')
+     parser.add_argument('--batch_size', type=int, default=1, help='batch size')
+     parser.add_argument('--output_dir', type=str, default='./output', help='output directory')
+     parser.add_argument('--max_nodes', type=int, default=None, help='maximum number of nodes (for testing)')
+
+     args = parser.parse_args()
+
+     print(f"🚀 Generating full node contents and embeddings for {args.repo_name}")
+     print("Configuration:")
+     print(f" - Repository: {args.repo_name}")
+     print(f" - Batch size: {args.batch_size}")
+     print(f" - Output directory: {args.output_dir}")
+     print(f" - Max nodes: {args.max_nodes or 'all'}")
+
+     generator = ContentEmbeddingGenerator(
+         repo_name=args.repo_name,
+         output_dir=args.output_dir
+     )
+
+     try:
+         # 1. Load graph data to get node IDs
+         node_ids = generator.load_graph_data()
+
+         # Optionally cap the number of nodes for quick tests
+         if args.max_nodes and args.max_nodes < len(node_ids):
+             node_ids = node_ids[:args.max_nodes]
+             print(f"⚠️ Limiting the node count to: {len(node_ids)}")
+
+         # 2. Load the raw dataset
+         dataset = generator.load_dataset()
+
+         # 3. Extract node contents
+         node_contents = generator.extract_node_contents(node_ids, dataset)
+
+         # Count nodes that actually carry content
+         valid_nodes = {k: v for k, v in node_contents.items()
+                        if v['attr'] and v['attr'] != '{}'}
+         print("📊 Node statistics:")
+         print(f" - Total nodes: {len(node_contents)}")
+         print(f" - Valid nodes (with code content): {len(valid_nodes)}")
+         print(f" - Empty nodes: {len(node_contents) - len(valid_nodes)}")
+
+         # 4. Save node contents as JSON
+         #generator.save_content_json(node_contents)
+
+         # 5. Generate embeddings for all nodes (including empty ones)
+         print(f"🔄 Generating embeddings for {len(node_contents)} nodes (including empty ones)...")
+         embeddings = generator.generate_embeddings(node_contents, batch_size=args.batch_size)
+
+         # 6. Save the embeddings
+         if embeddings:
+             generator.save_embeddings_safetensor(embeddings)
+         else:
+             print("⚠️ No valid node contents found, skipping embedding generation")
+
+         print("🎉 Done!")
+
+     except Exception as e:
+         print(f"❌ Processing failed: {e}")
+         import traceback
+         traceback.print_exc()
+
+ if __name__ == "__main__":
+     main()
smart_open/smart_open_embedding_index.json ADDED
The diff for this file is too large to render. See raw diff
 
smolagents/smolagents_embedding_index.json ADDED
The diff for this file is too large to render. See raw diff
 
tablib/tablib_embedding_index.json ADDED
The diff for this file is too large to render. See raw diff
 
test_fixed_version.py ADDED
File without changes
test_small_batch.py ADDED
File without changes
torchtune/torchtune_embedding_index.json ADDED
The diff for this file is too large to render. See raw diff
 
tox/tox_embedding_index.json ADDED
The diff for this file is too large to render. See raw diff
 
transitions/transitions_embedding_index.json ADDED
The diff for this file is too large to render. See raw diff
 
trimesh/trimesh_embedding_index.json ADDED
The diff for this file is too large to render. See raw diff
 
twine/twine_embedding_index.json ADDED
The diff for this file is too large to render. See raw diff
 
urllib3/urllib3_embedding_index.json ADDED
The diff for this file is too large to render. See raw diff
 
wemake-python-styleguide/wemake-python-styleguide_embedding_index.json ADDED
The diff for this file is too large to render. See raw diff