moevis committed on
Commit
63ada65
·
verified ·
1 Parent(s): bd38123

Update start_services.sh

Browse files
Files changed (1) hide show
  1. start_services.sh +44 -49
start_services.sh CHANGED
@@ -1,61 +1,56 @@
#!/bin/bash
# Launch the Step-Audio-R1 stack:
#   1. download the model weights if they are not present locally,
#   2. start a vLLM OpenAI-compatible API server in the background,
#   3. run the Gradio front-end (app.py) in the foreground.
#
# Environment overrides: MODEL_REPO, MODEL_DIR, API_PORT, GRADIO_PORT.
set -euo pipefail

# Configuration (env-overridable)
MODEL_REPO="${MODEL_REPO:-stepfun-ai/Step-Audio-R1}"
MODEL_DIR="${MODEL_DIR:-/tmp/models/Step-Audio-R1}"
API_PORT="${API_PORT:-9999}"
GRADIO_PORT="${GRADIO_PORT:-7860}"

echo "Starting Step Audio R1 services..."
echo "Model: $MODEL_REPO"
echo "Model Dir: $MODEL_DIR"
echo "API Port: $API_PORT"

# Download the model if needed. config.json doubles as a marker that a
# previous download completed (a bare directory alone is not enough).
if [[ ! -d "$MODEL_DIR" ]] || [[ ! -f "$MODEL_DIR/config.json" ]]; then
  echo "Downloading model to: $MODEL_DIR"
  mkdir -p "$MODEL_DIR"

  if command -v hf &> /dev/null; then
    hf download "$MODEL_REPO" --local-dir "$MODEL_DIR"
  elif command -v huggingface-cli &> /dev/null; then
    huggingface-cli download "$MODEL_REPO" --local-dir "$MODEL_DIR" --local-dir-use-symlinks False
  else
    # Fix: previous message claimed "Skipping download." but the script
    # aborts here — the model is required, so fail loudly on stderr.
    echo "Neither hf nor huggingface-cli found. Cannot download model; aborting." >&2
    exit 1
  fi

  echo "✓ Model downloaded"
else
  echo "✓ Model already exists locally"
fi

# Step-Audio-R1 chat template (Jinja); passed verbatim to vLLM below.
CHAT_TEMPLATE='{%- macro render_content(content) -%}{%- if content is string -%}{{- content.replace("<audio_patch>\n", "<audio_patch>") -}}{%- elif content is mapping -%}{{- content['"'"'value'"'"'] if '"'"'value'"'"' in content else content['"'"'text'"'"'] -}}{%- elif content is iterable -%}{%- for item in content -%}{%- if item.type == '"'"'text'"'"' -%}{{- item['"'"'value'"'"'] if '"'"'value'"'"' in item else item['"'"'text'"'"'] -}}{%- elif item.type == '"'"'audio'"'"' -%}<audio_patch>{%- endif -%}{%- endfor -%}{%- endif -%}{%- endmacro -%}{%- if tools -%}{{- '"'"'<|BOT|>system\n'"'"' -}}{%- if messages[0]['"'"'role'"'"'] == '"'"'system'"'"' -%}{{- render_content(messages[0]['"'"'content'"'"']) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{{- '"'"'<|BOT|>tool_json_schemas\n'"'"' + tools|tojson + '"'"'<|EOT|>'"'"' -}}{%- else -%}{%- if messages[0]['"'"'role'"'"'] == '"'"'system'"'"' -%}{{- '"'"'<|BOT|>system\n'"'"' + render_content(messages[0]['"'"'content'"'"']) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- endif -%}{%- for message in messages -%}{%- if message["role"] == "user" -%}{{- '"'"'<|BOT|>human\n'"'"' + render_content(message["content"]) + '"'"'<|EOT|>'"'"' -}}{%- elif message["role"] == "assistant" -%}{{- '"'"'<|BOT|>assistant\n'"'"' + (render_content(message["content"]) if message["content"] else '"'"''"'"') -}}{%- set is_last_assistant = true -%}{%- for m in messages[loop.index:] -%}{%- if m["role"] == "assistant" -%}{%- set is_last_assistant = false -%}{%- endif -%}{%- endfor -%}{%- if not is_last_assistant -%}{{- '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- elif message["role"] == "function_output" -%}{%- else -%}{%- if not (loop.first and message["role"] == "system") -%}{{- '"'"'<|BOT|>'"'"' + message["role"] + '"'"'\n'"'"' + render_content(message["content"]) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{- '"'"'<|BOT|>assistant\n<think>\n'"'"' -}}{%- endif -%}'

echo "starting vllm server"

# Start the vLLM OpenAI-compatible API in the background.
python3 -m vllm.entrypoints.openai.api_server \
  --model "$MODEL_DIR" \
  --port "$API_PORT" \
  --host 0.0.0.0 \
  --max-model-len 7192 \
  --tensor-parallel-size 4 \
  --gpu-memory-utilization 0.90 \
  --trust-remote-code \
  --interleave-mm-strings \
  --served-model-name Step-Audio-R1 \
  --chat-template "$CHAT_TEMPLATE" \
  &

VLLM_PID=$!
echo "vLLM started (PID: $VLLM_PID)"

# Fix: previously the background vLLM server was orphaned when app.py
# exited (or the script died under `set -e`). Reap it on any exit path.
trap 'kill "$VLLM_PID" 2>/dev/null || true' EXIT

# Start Gradio in the foreground; it reaches vLLM via these env vars.
export API_BASE_URL="http://localhost:$API_PORT/v1"
export MODEL_NAME="Step-Audio-R1"

python3 app.py --host 0.0.0.0 --port "$GRADIO_PORT"
 
#!/bin/bash
# Start the Step-Audio-R1 Gradio front-end (app.py) in the foreground.
#
# NOTE: this version no longer downloads the model or launches the
# background vLLM OpenAI-compatible API server — those steps (the
# MODEL_REPO/MODEL_DIR configuration, the hf/huggingface-cli download
# fallback chain, the CHAT_TEMPLATE definition, and the
# `python3 -m vllm.entrypoints.openai.api_server ... &` launch) are
# disabled in this revision. app.py is started without API_BASE_URL /
# MODEL_NAME being exported here.
set -euo pipefail

# Ports (env-overridable). API_PORT is retained for when the vLLM
# server launch is re-enabled; no active command below consumes it.
API_PORT="${API_PORT:-9999}"
GRADIO_PORT="${GRADIO_PORT:-7860}"

echo "Starting Step Audio R1 services..."

# Run the Gradio UI in the foreground; this is the only service started.
python3 app.py --host 0.0.0.0 --port "$GRADIO_PORT"