diff --git "a/logs/node_content_embedding_falcon.out" "b/logs/node_content_embedding_falcon.out"
new file mode 100644
--- /dev/null
+++ "b/logs/node_content_embedding_falcon.out"
@@ -0,0 +1,66936 @@
+✅ safetensors available
+INFO 09-07 04:07:29 [__init__.py:244] Automatically detected platform cuda.
+✅ vLLM available
+🚀 Starting generation of complete node content and embeddings for falcon
+Configuration parameters:
+ - Repository name: falcon
+ - Batch size: 8
+ - Output directory: /data/wangjuntong/FROM_120/data1/RepoGNN_backup_0809/get_content_embedding/falcon
+ - Max nodes: all
+🔄 Loading embedding model: /data/wangjuntong/FROM_120/data1/.cache/modelscope/hub/models/Qwen/Qwen3-Embedding-8B
+INFO 09-07 04:07:41 [config.py:484] Found sentence-transformers modules configuration.
+INFO 09-07 04:07:41 [config.py:504] Found pooling configuration.
+WARNING 09-07 04:07:41 [arg_utils.py:1642] --task embed is not supported by the V1 Engine. Falling back to V0.
+WARNING 09-07 04:07:41 [cuda.py:91] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used
+INFO 09-07 04:07:41 [llm_engine.py:230] Initializing a V0 LLM engine (v0.9.1) with config: model='/data/wangjuntong/FROM_120/data1/.cache/modelscope/hub/models/Qwen/Qwen3-Embedding-8B', speculative_config=None, tokenizer='/data/wangjuntong/FROM_120/data1/.cache/modelscope/hub/models/Qwen/Qwen3-Embedding-8B', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=32768, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=/data/wangjuntong/FROM_120/data1/.cache/modelscope/hub/models/Qwen/Qwen3-Embedding-8B, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=None, chunked_prefill_enabled=False, use_async_output_proc=False, pooler_config=PoolerConfig(pooling_type='LAST', normalize=True, softmax=None, step_tag_id=None, returned_token_ids=None), compilation_config={"level":0,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":[],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":0,"cudagraph_capture_sizes":[],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":0,"local_cache_dir":null}, use_cached_outputs=False,
+INFO 09-07 04:07:43 [cuda.py:327] Using Flash Attention backend.
+INFO 09-07 04:07:44 [parallel_state.py:1065] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
+INFO 09-07 04:07:44 [model_runner.py:1171] Starting to load model /data/wangjuntong/FROM_120/data1/.cache/modelscope/hub/models/Qwen/Qwen3-Embedding-8B...
+ Loading safetensors checkpoint shards: 0% Completed | 0/4 [00:00
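
For context, a minimal sketch of how the embedding stage behind this log might be driven through vLLM's offline embedding API, assuming the driver script (not part of this diff) constructs an LLM with task="embed" and the settings visible in the engine config above; the model path, enforce_eager, and max length come from the log, while the helper name embed_batch and the sample inputs are hypothetical:

from vllm import LLM

# Model path as reported in the log above (assumption: the driver loads it directly).
MODEL_PATH = "/data/wangjuntong/FROM_120/data1/.cache/modelscope/hub/models/Qwen/Qwen3-Embedding-8B"

llm = LLM(
    model=MODEL_PATH,
    task="embed",        # embedding task; per the warning above this falls back to the V0 engine
    enforce_eager=True,  # matches enforce_eager=True in the engine config
    max_model_len=32768, # matches max_seq_len=32768 in the engine config
)

def embed_batch(texts):
    """Return one vector per input text (LAST pooling, normalized, per the PoolerConfig above)."""
    outputs = llm.embed(texts)
    return [out.outputs.embedding for out in outputs]

if __name__ == "__main__":
    # Hypothetical node contents; the real script batches repository nodes 8 at a time.
    vectors = embed_batch(["def falcon(): ...", "class Node: ..."])
    print(len(vectors), len(vectors[0]))

The batch size of 8 from the configuration parameters would then govern how many node texts are passed to embed_batch per call; this is a sketch of the interface only, not the actual generation script.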