File size: 3,484 Bytes
3f8009c
 
 
63ada65
c5bc9c7
 
3f8009c
 
 
 
c5bc9c7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3f8009c
 
 
 
c5bc9c7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
#!/bin/bash
set -euo pipefail

# --- Configuration (each value overridable via environment variable) ---
MODEL_REPO="${MODEL_REPO:-stepfun-ai/Step-Audio-R1}"   # Hugging Face repo to download
MODEL_DIR="${MODEL_DIR:-/tmp/models/Step-Audio-R1}"    # local directory holding the model
API_PORT="${API_PORT:-9999}"                           # vLLM OpenAI-compatible API port
GRADIO_PORT="${GRADIO_PORT:-7860}"                     # Gradio web UI port

echo "Starting Step Audio R1 services..."
echo "Model: $MODEL_REPO"
echo "Model Dir: $MODEL_DIR"
echo "API Port: $API_PORT"
echo "Gradio Port: $GRADIO_PORT"
# Download the model if it is not already present locally.
# Presence is detected by the directory existing AND containing config.json,
# so a partially-downloaded directory triggers a re-download.
if [[ ! -d "$MODEL_DIR" ]] || [[ ! -f "$MODEL_DIR/config.json" ]]; then
    echo "Downloading model to: $MODEL_DIR"
    mkdir -p "$MODEL_DIR"

    # Prefer the modern 'hf' CLI; fall back to the legacy 'huggingface-cli'.
    if command -v hf &> /dev/null; then
        hf download "$MODEL_REPO" --local-dir "$MODEL_DIR"
    elif command -v huggingface-cli &> /dev/null; then
        huggingface-cli download "$MODEL_REPO" --local-dir "$MODEL_DIR" --local-dir-use-symlinks False
    else
        # Fatal: without a downloader the services cannot start. The original
        # message claimed "Skipping download." yet exited non-zero — report
        # the real situation, on stderr.
        echo "Error: neither 'hf' nor 'huggingface-cli' found; cannot download model." >&2
        exit 1
    fi

    echo "✓ Model downloaded"
else
    echo "✓ Model already exists locally"
fi

# Jinja chat template for Step-Audio-R1. The '"'"' runs embed literal single
# quotes inside the outer single-quoted string — keep this line verbatim; do
# not reformat or re-quote it.
CHAT_TEMPLATE='{%- macro render_content(content) -%}{%- if content is string -%}{{- content.replace("<audio_patch>\n", "<audio_patch>") -}}{%- elif content is mapping -%}{{- content['"'"'value'"'"'] if '"'"'value'"'"' in content else content['"'"'text'"'"'] -}}{%- elif content is iterable -%}{%- for item in content -%}{%- if item.type == '"'"'text'"'"' -%}{{- item['"'"'value'"'"'] if '"'"'value'"'"' in item else item['"'"'text'"'"'] -}}{%- elif item.type == '"'"'audio'"'"' -%}<audio_patch>{%- endif -%}{%- endfor -%}{%- endif -%}{%- endmacro -%}{%- if tools -%}{{- '"'"'<|BOT|>system\n'"'"' -}}{%- if messages[0]['"'"'role'"'"'] == '"'"'system'"'"' -%}{{- render_content(messages[0]['"'"'content'"'"']) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{{- '"'"'<|BOT|>tool_json_schemas\n'"'"' + tools|tojson + '"'"'<|EOT|>'"'"' -}}{%- else -%}{%- if messages[0]['"'"'role'"'"'] == '"'"'system'"'"' -%}{{- '"'"'<|BOT|>system\n'"'"' + render_content(messages[0]['"'"'content'"'"']) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- endif -%}{%- for message in messages -%}{%- if message["role"] == "user" -%}{{- '"'"'<|BOT|>human\n'"'"' + render_content(message["content"]) + '"'"'<|EOT|>'"'"' -}}{%- elif message["role"] == "assistant" -%}{{- '"'"'<|BOT|>assistant\n'"'"' + (render_content(message["content"]) if message["content"] else '"'"''"'"') -}}{%- set is_last_assistant = true -%}{%- for m in messages[loop.index:] -%}{%- if m["role"] == "assistant" -%}{%- set is_last_assistant = false -%}{%- endif -%}{%- endfor -%}{%- if not is_last_assistant -%}{{- '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- elif message["role"] == "function_output" -%}{%- else -%}{%- if not (loop.first and message["role"] == "system") -%}{{- '"'"'<|BOT|>'"'"' + message["role"] + '"'"'\n'"'"' + render_content(message["content"]) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{- '"'"'<|BOT|>assistant\n<think>\n'"'"' -}}{%- endif -%}'
echo "starting vllm server"

# Launch the vLLM OpenAI-compatible API server in the background.
python3 -m vllm.entrypoints.openai.api_server \
    --model "$MODEL_DIR" \
    --port "$API_PORT" \
    --host 0.0.0.0 \
    --max-model-len 7192 \
    --tensor-parallel-size 4 \
    --gpu-memory-utilization 0.90 \
    --trust-remote-code \
    --interleave-mm-strings \
    --served-model-name Step-Audio-R1 \
    --chat-template "$CHAT_TEMPLATE" \
    &

VLLM_PID=$!
echo "vLLM started (PID: $VLLM_PID)"

# Install the cleanup trap BEFORE the blocking Gradio process. The original
# script set the trap only after app.py returned, so killing the script while
# the UI was running orphaned the background vLLM server. '|| true' keeps
# cleanup quiet if vLLM has already exited on its own.
trap 'kill "$VLLM_PID" 2>/dev/null || true' EXIT

# Run the Gradio frontend in the foreground; when it exits, the EXIT trap
# tears down vLLM.
# NOTE(review): app.py starts immediately, before the vLLM API is necessarily
# ready — presumably it retries its backend connection; confirm, or add a
# readiness poll on $API_PORT here.
python3 app.py --host 0.0.0.0 --port "$GRADIO_PORT"