euIaxs22 committed
Commit 0aec94d · verified · 1 Parent(s): 72838be

Upload 4 files

Files changed (4):
  1. Dockerfile +119 -0
  2. builder.sh +351 -0
  3. info.sh +154 -0
  4. start.sh +97 -0
Dockerfile ADDED
@@ -0,0 +1,119 @@
+ # =============================================================================
+ # ADUC-SDR Video Suite — High-Perf Diffusers for 8× L40S (SM 8.9)
+ # CUDA 12.8 | PyTorch 2.8.0+cu128 | Ubuntu 22.04
+ # =============================================================================
+ FROM nvidia/cuda:12.8.0-devel-ubuntu22.04
+
+ LABEL maintainer="Carlos Rodrigues dos Santos & Development Partner"
+ LABEL description="High-performance Diffusers stack with FA2/SDPA, 8×L40S"
+ LABEL version="4.4.0"
+ LABEL cuda_version="12.8.0"
+ LABEL python_version="3.10"
+ LABEL pytorch_version="2.8.0+cu128"
+ LABEL gpu_optimized_for="8x_NVIDIA_L40S"
+
+ # ---------------- Core env & caches ----------------
+ ENV DEBIAN_FRONTEND=noninteractive TZ=UTC LANG=C.UTF-8 LC_ALL=C.UTF-8 \
+     PYTHONUNBUFFERED=1 PYTHONDONTWRITEBYTECODE=1 \
+     PIP_NO_CACHE_DIR=1 PIP_DISABLE_PIP_VERSION_CHECK=1
+
+ # GPU/Compute
+ ENV NVIDIA_VISIBLE_DEVICES=all
+ ENV CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+ ENV TORCH_CUDA_ARCH_LIST="8.9"
+ ENV CUDA_DEVICE_ORDER=PCI_BUS_ID
+ ENV CUDA_DEVICE_MAX_CONNECTIONS=32
+
+ # Threads
+ ENV OMP_NUM_THREADS=8 MKL_NUM_THREADS=8 MAX_JOBS=160
+
+ # Alloc/caches
+ ENV PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512,garbage_collection_threshold:0.8
+ ENV CUDA_LAUNCH_BLOCKING=0 CUDA_CACHE_MAXSIZE=2147483648 CUDA_CACHE_DISABLE=0
+
+ # App home
+ ENV APP_HOME=/app
+ WORKDIR $APP_HOME
+
+ # Models dir, symlinked into the user cache
+ ENV MODELS_DIR=/app/models
+ RUN mkdir -p /home/user/.cache/models && ln -sf /home/user/.cache/models /app/models
+
+ # ---------------- System & Python ----------------
+ RUN apt-get update && apt-get install -y --no-install-recommends \
+     build-essential tree cmake git git-lfs curl wget ffmpeg ninja-build \
+     python3.10 python3.10-dev python3.10-distutils python3-pip \
+     && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+ RUN ln -sf /usr/bin/python3.10 /usr/bin/python3 && \
+     ln -sf /usr/bin/python3.10 /usr/bin/python && \
+     python3 -m pip install --upgrade pip
+
+ # ---------------- PyTorch cu128 (pinned) ----------------
+ RUN pip install --index-url https://download.pytorch.org/whl/cu128 \
+     torch==2.8.0+cu128 torchvision==0.23.0+cu128 torchaudio==2.8.0+cu128
+
+ # ---------------- Toolchain, Triton, FA2 (without bitsandbytes) ----------------
+ RUN pip install packaging ninja cmake pybind11 scikit-build cython hf_transfer numpy==1.24.4
+
+ # Triton 3.x (no triton.ops module)
+ RUN pip uninstall -y triton || true && \
+     pip install -v --no-build-isolation triton==3.4.0
+
+ # FlashAttention 2.8.x (fall back through patch releases until one installs)
+ RUN pip install flash-attn==2.8.3 --no-build-isolation || \
+     pip install flash-attn==2.8.2 --no-build-isolation || \
+     pip install flash-attn==2.8.1 --no-build-isolation || \
+     pip install flash-attn==2.8.0.post2 --no-build-isolation
+
+ # Stable Diffusers/Transformers (no dev builds)
+ RUN pip install --no-cache-dir diffusers==0.31.0 transformers==4.44.2 accelerate==0.34.2 omegaconf==2.3.0
+
+ # Optional: custom optimization fork of Diffusers
+ RUN pip install -U git+https://github.com/carlex22/diffusers-aduc-sdr
+
+ # ---------------- Application dependencies ----------------
+ COPY requirements.txt ./requirements.txt
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ RUN pip install --upgrade bitsandbytes
+
+ # Scripts and configs (copied into /app, the WORKDIR)
+ COPY info.sh ./info.sh
+ COPY builder.sh ./builder.sh
+ COPY start.sh ./start.sh
+
+ # ---------------- Code & permissions ----------------
+ COPY . .
+ RUN useradd -m -u 1000 -s /bin/bash appuser && \
+     chown -R appuser:appuser /app && \
+     chmod 0755 /app/start.sh /app/info.sh /app/builder.sh || true
+
+ USER appuser
+
+ # Declare persistent volume for HF Spaces
+ VOLUME /data
+
+ # Cache env vars pointing into /data
+ ENV HF_HOME=/data/.cache/huggingface
+ ENV TORCH_HOME=/data/.cache/torch
+ ENV HF_DATASETS_CACHE=/data/.cache/datasets
+ ENV TRANSFORMERS_CACHE=/data/.cache/transformers
+ ENV DIFFUSERS_CACHE=/data/.cache/diffusers
+ ENV HF_HUB_ENABLE_HF_TRANSFER=1
+ ENV TOKENIZERS_PARALLELISM=false
+
+ VOLUME ["/data/.cache/huggingface/hub", "/data/ckpt/VINCIE-3B"]
+
+ # ---------------- Entry ----------------
+ ENTRYPOINT ["/app/start.sh"]
+ CMD []
builder.sh ADDED
@@ -0,0 +1,351 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ echo "🚀 Builder (FlashAttn LayerNorm extra + Apex + Q8) — runtime with GPU visible"
+
+ # ===== Config and directories =====
+ export SELF_HF_REPO_ID="${SELF_HF_REPO_ID:-euIaxs22/Aduc-sdr}"  # HF repo for wheels
+ export HF_HOME="${HF_HOME:-/app/model_cache}"
+ export HF_HUB_CACHE="${HF_HUB_CACHE:-$HF_HOME/hub}"
+ export TORCH_HOME="${TORCH_HOME:-$HF_HOME/torch}"
+ export HF_HUB_ENABLE_HF_TRANSFER="${HF_HUB_ENABLE_HF_TRANSFER:-1}"
+ export PATH="$HOME/.local/bin:$PATH"
+
+ mkdir -p /app/wheels /app/cuda_cache "$HF_HOME" "$TORCH_HOME" /app/wheels/src
+ chmod -R 777 /app/wheels || true
+ export CUDA_CACHE_PATH="/app/cuda_cache"
+
+ # Preserve the NGC license (if present)
+ if [ -f "/NGC-DL-CONTAINER-LICENSE" ]; then
+   cp -f /NGC-DL-CONTAINER-LICENSE /app/wheels/NGC-DL-CONTAINER-LICENSE || true
+ fi
+
+ # ===== Minimal dependencies =====
+ python -m pip install -v -U pip build setuptools wheel hatchling hatch-vcs scikit-build-core cmake ninja packaging "huggingface_hub[hf_transfer]" || true
+
+ # ===== Environment tags (Python/CUDA/Torch), e.g. cp310 / 2.8.0 / cu128 =====
+ PY_TAG="$(python -c 'import sys; print(f"cp{sys.version_info[0]}{sys.version_info[1]}")' 2>/dev/null || echo cp310)"
+ TORCH_VER="$(python - <<'PY'
+ try:
+     import torch, re
+     v = torch.__version__
+     print(re.sub(r'\+.*$', '', v))  # strip local suffix like +cu128
+ except Exception:
+     print("unknown")
+ PY
+ )"
+ CU_TAG="$(python - <<'PY'
+ try:
+     import torch
+     cu = getattr(torch.version, "cuda", None)
+     print(("cu" + cu.replace(".", "")) if cu else "")
+ except Exception:
+     print("")
+ PY
+ )"
+ echo "[env] PY_TAG=${PY_TAG} TORCH_VER=${TORCH_VER} CU_TAG=${CU_TAG}"
+
+ # ============================================================================
+ # CHECKERS
+ # ============================================================================
+
+ # Checks specifically for the native module required by layer_norm
+ # (without probing the 'flash-attn' package as a whole)
+ check_flash_layer_norm_bin () {
+   python - <<'PY'
+ import importlib
+ ok = False
+ # known extensions produced by csrc/layer_norm
+ for name in [
+     "dropout_layer_norm",         # the native module's name
+     "flash_attn.ops.layer_norm",  # python wrapper that uses the native module
+     "flash_attn.ops.rms_norm",    # may depend on the same backend in some packagings
+ ]:
+     try:
+         importlib.import_module(name)
+         ok = True
+         break
+     except Exception:
+         pass
+ raise SystemExit(0 if ok else 1)
+ PY
+ }
+
+ check_apex () {
+   python - <<'PY'
+ try:
+     from apex.normalization import FusedLayerNorm
+     import importlib; importlib.import_module("fused_layer_norm_cuda")
+     ok = True
+ except Exception:
+     ok = False
+ raise SystemExit(0 if ok else 1)
+ PY
+ }
+
+ check_q8 () {
+   python - <<'PY'
+ import importlib.util
+ spec = importlib.util.find_spec("ltx_q8_kernels") or importlib.util.find_spec("q8_kernels")
+ raise SystemExit(0 if spec else 1)
+ PY
+ }
+
+ # ============================================================================
+ # HUB DOWNLOAD (GENERIC)
+ # ============================================================================
+
+ # Installs a wheel from the HF repo by simple prefix (e.g.: apex-, q8_kernels-)
+ install_from_hf_by_prefix () {
+   local PREFIX="$1"
+   echo "[hub] Searching for '${PREFIX}-*.whl' wheels in ${SELF_HF_REPO_ID} with tags ${PY_TAG}/${CU_TAG}"
+   python - "$PREFIX" "$PY_TAG" "$CU_TAG" <<'PY' || exit 0
+ import os, sys
+ from huggingface_hub import HfApi, hf_hub_download, HfFolder
+
+ prefix, py_tag, cu_tag = sys.argv[1], sys.argv[2], sys.argv[3]
+ repo = os.environ.get("SELF_HF_REPO_ID", "euIaxs22/Aduc-sdr")
+ api = HfApi(token=os.getenv("HF_TOKEN") or HfFolder.get_token())
+ try:
+     files = api.list_repo_files(repo_id=repo, repo_type="model")
+ except Exception:
+     raise SystemExit(0)
+
+ def match(name: str) -> bool:
+     return name.endswith(".whl") and name.rsplit("/", 1)[-1].startswith(prefix + "-") and (py_tag in name)
+
+ cands = [f for f in files if match(f)]
+ pref = [f for f in cands if cu_tag and cu_tag in f] or cands
+ if not pref:
+     raise SystemExit(0)
+
+ target = sorted(pref, reverse=True)[0]
+ print(target)
+ path = hf_hub_download(repo_id=repo, filename=target, repo_type="model", local_dir="/app/wheels")
+ print(path)
+ PY
+ }
+
+ # Installs layer_norm submodule wheels, accepting name variants
+ install_flash_layer_norm_from_hf () {
+   echo "[hub] Searching for FlashAttention LayerNorm wheels in ${SELF_HF_REPO_ID}"
+   python - "$PY_TAG" "$CU_TAG" <<'PY' || exit 0
+ import os, sys, re
+ from huggingface_hub import HfApi, hf_hub_download, HfFolder
+
+ py_tag, cu_tag = sys.argv[1], sys.argv[2]
+ repo = os.environ.get("SELF_HF_REPO_ID", "euIaxs22/Aduc-sdr")
+ api = HfApi(token=os.getenv("HF_TOKEN") or HfFolder.get_token())
+ try:
+     files = api.list_repo_files(repo_id=repo, repo_type="model")
+ except Exception:
+     raise SystemExit(0)
+
+ pats = [
+     r"^flash[_-]?attn[_-]?.*layer[_-]?norm-.*\.whl$",
+     r"^dropout[_-]?layer[_-]?norm-.*\.whl$",
+ ]
+ def ok(fn: str) -> bool:
+     name = fn.rsplit("/", 1)[-1]
+     if py_tag not in name: return False
+     return any(re.search(p, name, flags=re.I) for p in pats)
+
+ cands = [f for f in files if ok(f)]
+ pref = [f for f in cands if cu_tag and cu_tag in f] or cands
+ if not pref:
+     raise SystemExit(0)
+
+ target = sorted(pref, reverse=True)[0]
+ print(target)
+ path = hf_hub_download(repo_id=repo, filename=target, repo_type="model", local_dir="/app/wheels")
+ print(path)
+ PY
+ }
+
+ # ============================================================================
+ # BUILDERS
+ # ============================================================================
+
+ # Extra step: ALWAYS try to install the layer_norm submodule from an HF wheel first;
+ # if no compatible wheel exists, build from csrc/layer_norm and produce a wheel.
+ build_or_install_flash_layer_norm () {
+   echo "[flow] === FlashAttn LayerNorm (extra step) ==="
+
+   # 1) Try an HF wheel first (avoids recompiling)
+   HF_OUT="$(install_flash_layer_norm_from_hf || true)"
+   if [ -n "${HF_OUT:-}" ]; then
+     WHEEL_PATH="$(printf "%s\n" "${HF_OUT}" | tail -n1)"
+     echo "[hub] Downloaded: ${WHEEL_PATH}"
+     python -m pip install -v -U --no-build-isolation --no-deps "${WHEEL_PATH}" || true
+     if check_flash_layer_norm_bin; then
+       echo "[flow] FlashAttn LayerNorm: OK via Hub wheel"
+       return 0
+     fi
+     echo "[flow] Hub wheel did not fix the import; proceeding with build"
+   else
+     echo "[hub] No compatible FlashAttn LayerNorm wheel found"
+   fi
+
+   # 2) Build the csrc/layer_norm submodule from source -> wheel
+   local SRC="/app/wheels/src/flash-attn"
+   echo "[build] Preparing FlashAttention source (layer_norm) in ${SRC}"
+   if [ -d "$SRC/.git" ]; then
+     git -C "$SRC" fetch --all -p || true
+     git -C "$SRC" reset --hard origin/main || true
+     git -C "$SRC" clean -fdx || true
+   else
+     rm -rf "$SRC"
+     git clone --depth 1 https://github.com/Dao-AILab/flash-attention "$SRC"
+   fi
+
+   # Set the target compute capability from the active GPU (cuts build time/noise)
+   export TORCH_CUDA_ARCH_LIST="$(python - <<'PY'
+ import torch
+ try:
+     cc = "%d.%d" % torch.cuda.get_device_capability(0)
+     print(cc)
+ except Exception:
+     print("8.9")  # fallback for Ada (L40S) when building without a visible GPU
+ PY
+ )"
+   echo "[build] TORCH_CUDA_ARCH_LIST=${TORCH_CUDA_ARCH_LIST}"
+
+   pushd "$SRC/csrc/layer_norm" >/dev/null
+   export MAX_JOBS="${MAX_JOBS:-90}"
+   # Produce a reusable wheel
+   python -m pip wheel -v --no-build-isolation --no-deps . -w /app/wheels || true
+   popd >/dev/null
+
+   # Install the generated wheel
+   local W="$(ls -t /app/wheels/*flash*attn*layer*norm*-*.whl 2>/dev/null | head -n1 || true)"
+   if [ -z "${W}" ]; then
+     W="$(ls -t /app/wheels/*dropout*layer*norm*-*.whl 2>/dev/null | head -n1 || true)"
+   fi
+   if [ -z "${W}" ]; then
+     # fall back to any freshly generated .whl
+     W="$(ls -t /app/wheels/*.whl 2>/dev/null | head -n1 || true)"
+   fi
+
+   if [ -n "${W}" ]; then
+     python -m pip install -v -U --no-deps "${W}" || true
+     echo "[build] FlashAttn LayerNorm installed from wheel: ${W}"
+   else
+     echo "[build] No wheel produced; installing straight from source (last resort)"
+     python -m pip install -v --no-build-isolation "$SRC/csrc/layer_norm" || true
+   fi
+
+   # Final check on the native binary
+   if check_flash_layer_norm_bin; then
+     echo "[flow] FlashAttn LayerNorm: import OK after build"
+     return 0
+   fi
+   echo "[flow] FlashAttn LayerNorm: import still failing after build"
+   return 1
+ }
+
+ build_apex () {
+   local SRC="/app/wheels/src/apex"
+   echo "[build] Preparing Apex source in ${SRC}"
+   if [ -d "$SRC/.git" ]; then
+     git -C "$SRC" fetch --all -p || true
+     git -C "$SRC" reset --hard HEAD || true
+     git -C "$SRC" clean -fdx || true
+   else
+     rm -rf "$SRC"
+     git clone --depth 1 https://github.com/NVIDIA/apex "$SRC"
+   fi
+   echo "[build] Building Apex -> wheel"
+   export APEX_CPP_EXT=1 APEX_CUDA_EXT=1 APEX_ALL_CONTRIB_EXT=0
+   python -m pip wheel -v --no-build-isolation --no-deps "$SRC" -w /app/wheels || true
+   local W="$(ls -t /app/wheels/apex-*.whl 2>/dev/null | head -n1 || true)"
+   if [ -n "${W}" ]; then
+     python -m pip install -v -U --no-deps "${W}" || true
+     echo "[build] Apex installed from freshly built wheel: ${W}"
+   else
+     echo "[build] No Apex wheel produced; installing from source"
+     python -m pip install -v --no-build-isolation "$SRC" || true
+   fi
+ }
+
+ Q8_REPO="${Q8_REPO:-https://github.com/Lightricks/LTX-Video-Q8-Kernels.git}"
+ Q8_COMMIT="${Q8_COMMIT:-f3066edea210082799ca5a2bbf9ef0321c5dd8fc}"
+ build_q8 () {
+   local SRC="/app/wheels/src/q8_kernels"
+   rm -rf "$SRC"
+   git clone --filter=blob:none "$Q8_REPO" "$SRC"
+   git -C "$SRC" checkout "$Q8_COMMIT"
+   git -C "$SRC" submodule update --init --recursive
+   echo "[build] Building Q8 Kernels -> wheel"
+   python -m pip wheel -v --no-build-isolation "$SRC" -w /app/wheels || true
+   local W="$(ls -t /app/wheels/q8_kernels-*.whl 2>/dev/null | head -n1 || true)"
+   if [ -n "${W}" ]; then
+     python -m pip install -v -U --no-deps "${W}" || true
+     echo "[build] Q8 installed from freshly built wheel: ${W}"
+   else
+     echo "[build] No q8_kernels wheel produced; installing from source"
+     python -m pip install -v --no-build-isolation "$SRC" || true
+   fi
+ }
+
+ # ============================================================================
+ # EXECUTION
+ # ============================================================================
+
+ # Extra step that does NOT depend on "flash-attn" being installed: handles only layer_norm
+ build_or_install_flash_layer_norm || true
+
+ # Apex (kept)
+ # Try an HF wheel first; if none exists, build and install as a wheel
+ echo "[flow] === apex ==="
+ HF_OUT="$(install_from_hf_by_prefix "apex" || true)"
+ if [ -n "${HF_OUT:-}" ]; then
+   WHEEL_PATH="$(printf "%s\n" "${HF_OUT}" | tail -n1)"
+   echo "[hub] Downloaded: ${WHEEL_PATH}"
+   python -m pip install -v -U --no-build-isolation "${WHEEL_PATH}" || true
+   if ! check_apex; then
+     echo "[flow] apex: import failed after wheel; building"
+     build_apex || true
+   fi
+ else
+   echo "[hub] No compatible apex wheel; building"
+   build_apex || true
+ fi
+
+ # Q8 (optional)
+ # echo "[flow] === q8_kernels ==="
+ # HF_OUT="$(install_from_hf_by_prefix "q8_kernels" || true)"
+ # if [ -n "${HF_OUT:-}" ]; then
+ #   WHEEL_PATH="$(printf "%s\n" "${HF_OUT}" | tail -n1)"
+ #   echo "[hub] Downloaded: ${WHEEL_PATH}"
+ #   python -m pip install -v -U --no-build-isolation "${WHEEL_PATH}" || true
+ #   if ! check_q8; then
+ #     echo "[flow] q8_kernels: import failed after wheel; building"
+ #     build_q8 || true
+ #   fi
+ # else
+ #   echo "[hub] No compatible q8_kernels wheel; building"
+ #   build_q8 || true
+ # fi
+
+ # Upload produced wheels to HF (cache across restarts); non-fatal when HF_TOKEN is absent
+ python - <<'PY' || true
+ import os
+ from huggingface_hub import HfApi, HfFolder
+
+ repo = os.environ.get("SELF_HF_REPO_ID", "euIaxs22/Aduc-sdr")
+ token = os.getenv("HF_TOKEN") or HfFolder.get_token()
+ if not token:
+     raise SystemExit("HF_TOKEN missing; upload disabled")
+
+ api = HfApi(token=token)
+ api.upload_folder(
+     folder_path="/app/wheels",
+     repo_id=repo,
+     repo_type="model",
+     allow_patterns=["*.whl", "NGC-DL-CONTAINER-LICENSE"],
+     ignore_patterns=["**/src/**", "**/*.log", "**/logs/**", ".git/**"],
+ )
+ print("Upload complete (wheels + license).")
+ PY
+
+ chmod -R 777 /app/wheels || true
+ echo "✅ Builder finished."
info.sh ADDED
@@ -0,0 +1,154 @@
+ #!/usr/bin/env bash
+
+ set -euo pipefail
+
+ echo "================= RUNTIME CAPABILITIES ================="
+ date
+
+ echo
+ if command -v nvidia-smi >/dev/null 2>&1; then
+   nvidia-smi
+ else
+   echo "nvidia-smi: not available"
+ fi
+ echo
+
+ echo "CUDA_HOME: ${CUDA_HOME:-/usr/local/cuda}"
+ if command -v nvcc >/dev/null 2>&1; then
+   nvcc --version || true
+ else
+   echo "nvcc: not available"
+ fi
+ echo
+
+ echo "[PyTorch / CUDA backend]"
+ python3 - <<'PY'
+ import json, os, torch, inspect
+
+ # Best-effort truthiness: zero-arg callables are invoked; other callables
+ # (e.g. the enable_*_sdp toggles) merely report that the attribute exists.
+ def to_bool(x):
+     try:
+         if callable(x):
+             try:
+                 sig = inspect.signature(x)
+                 if len(sig.parameters) == 0:
+                     return bool(x())
+             except Exception:
+                 pass
+             return True
+         return bool(x)
+     except Exception:
+         return None
+
+ info = {
+     "torch": getattr(torch, "__version__", None),
+     "cuda_available": torch.cuda.is_available(),
+     "cuda_device_count": torch.cuda.device_count(),
+     "cuda_runtime_version": getattr(torch.version, "cuda", None),
+     "cudnn_version": torch.backends.cudnn.version() if torch.backends.cudnn.is_available() else None,
+     "tf32": (torch.backends.cuda.matmul.allow_tf32 if torch.cuda.is_available() else None),
+     "flash_sdp": (to_bool(getattr(torch.backends.cuda, "enable_flash_sdp", None)) if torch.cuda.is_available() else None),
+     "mem_efficient_sdp": (to_bool(getattr(torch.backends.cuda, "enable_mem_efficient_sdp", None)) if torch.cuda.is_available() else None),
+     "math_sdp": (to_bool(getattr(torch.backends.cuda, "enable_math_sdp", None)) if torch.cuda.is_available() else None),
+ }
+ print(json.dumps(info, indent=2))
+ for i in range(min(torch.cuda.device_count(), 16)):
+     print(f"GPU {i}: {torch.cuda.get_device_name(i)}")
+ PY
+ echo
+
+ echo "[Apex (FusedLayerNorm/RMSNorm)]"
+ python3 - <<'PY'
+ try:
+     from apex.normalization import FusedLayerNorm, FusedRMSNorm
+     import importlib; importlib.import_module("fused_layer_norm_cuda")
+     print("apex.normalization: OK")
+ except Exception as e:
+     print("apex.normalization: FAIL ->", e)
+ PY
+ echo
+
+ echo "[FlashAttention (CUDA/Triton/RMSNorm)]"
+ python3 - <<'PY'
+ import importlib
+ mods = [
+     'flash_attn', 'flash_attn_2_cuda',
+     'flash_attn.ops.rms_norm', 'flash_attn.ops.layer_norm',
+     'flash_attn.layers.layer_norm'
+ ]
+ for m in mods:
+     try:
+         importlib.import_module(m)
+         print(f"{m}: OK")
+     except Exception as e:
+         print(f"{m}: FAIL -> {e}")
+ PY
+ echo
+
+ echo "[FlashAttention version/details]"
+ python3 - <<'PY'
+ try:
+     import flash_attn
+     fa_ver = getattr(flash_attn, "__version__", None)
+     print(f"flash_attn: {fa_ver}")
+ except Exception:
+     print("flash_attn: not importable.")
+ try:
+     import torch
+     print(f"torch: {torch.__version__} | cuda: {getattr(torch.version, 'cuda', None)}")
+ except Exception:
+     pass
+ PY
+ echo
+
+ echo "[Triton]"
+ python3 - <<'PY'
+ try:
+     import triton
+     print("triton:", triton.__version__)
+     try:
+         import triton.ops as _; print("triton.ops: OK")
+     except Exception:
+         print("triton.ops: not present (ok on Triton>=3.x)")
+ except Exception as e:
+     print("triton: FAIL ->", e)
+ PY
+ echo
+
+ echo "[BitsAndBytes (Q8/Q4)]"
+ python3 - <<'PY'
+ try:
+     import bitsandbytes as bnb
+     print("bitsandbytes:", bnb.__version__)
+     try:
+         from bitsandbytes.triton import _custom_ops as _; print("bnb.triton._custom_ops: OK")
+     except Exception as e:
+         print("bnb.triton: partial ->", e)
+ except Exception as e:
+     print("bitsandbytes: FAIL ->", e)
+ PY
+ echo
+
+ echo "[Transformers / Diffusers / XFormers / EcoML]"
+ python3 - <<'PY'
+ def _v(m):
+     try:
+         mod = __import__(m)
+         print(f"{m}: {getattr(mod, '__version__', 'unknown')}")
+     except Exception as e:
+         print(f"{m}: FAIL -> {e}")
+ for m in ("transformers", "diffusers", "xformers", "ecuml", "mlx", "ecobase"):
+     _v(m)
+ PY
+ echo
+
+ echo "[Distributed / NCCL Env]"
+ env | grep -E '^(CUDA_VISIBLE_DEVICES|NCCL_|TORCH_|ENABLE_.*SDP|HF_HUB_.*|CUDA_|NV_.*NCCL.*|PYTORCH_CUDA_ALLOC_CONF)=' | sort
+ echo
+
+ echo "[Output dir/perms]"
+ OUT="/app/outputs"
+ echo "OUT dir: $OUT"
+ mkdir -p "$OUT"
+ ls -la "$OUT" || true
+
+ echo "================= END CAPABILITIES ================="
start.sh ADDED
@@ -0,0 +1,97 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+
+ # =======================
+ # SeedVR — start UI
+ # =======================
+
+ # 1) Builder (ensures Apex/Flash and CUDA deps)
+ echo "🛠️ Running builder.sh to build/install CUDA dependencies..."
+ if [ -f "/app/builder.sh" ]; then
+   /bin/bash /app/builder.sh
+   echo "✅ Builder finished."
+ else
+   echo "⚠️ Warning: builder.sh not found. Skipping the dependency build step."
+ fi
+
+ # Directories and variables (can be overridden via env)
+ export SEEDVR_ROOT="${SEEDVR_ROOT:-/data/SeedVR}"
+ export CKPTS_ROOT="${CKPTS_ROOT:-/data/ckpts/SeedVR2-3B}"
+ export OUTPUT_ROOT="${OUTPUT_ROOT:-/app/outputs}"
+ export INPUT_ROOT="${INPUT_ROOT:-/app/inputs}"
+
+ # Transformers v5 recommends HF_HOME
+ export HF_HOME="${HF_HOME:-/data/.cache/huggingface}"
+ export HF_TOKEN="${HF_TOKEN:-${HUGGINGFACE_TOKEN:-}}"
+
+ # Repo/model identifiers
+ export SEEDVR_GIT_URL="${SEEDVR_GIT_URL:-https://github.com/ByteDance-Seed/SeedVR.git}"
+ export SEEDVR_REPO_ID="${SEEDVR_REPO_ID:-ByteDance-Seed/SeedVR2-3B}"
+
+ # Multi-GPU / torchrun
+ export NUM_GPUS="${NUM_GPUS:-8}"
+ export NCCL_P2P_LEVEL="${NCCL_P2P_LEVEL:-NVL}"
+ export NCCL_ASYNC_ERROR_HANDLING="${NCCL_ASYNC_ERROR_HANDLING:-1}"
+ export OMP_NUM_THREADS="${OMP_NUM_THREADS:-8}"
+
+ # Gradio
+ export GRADIO_SERVER_NAME="${GRADIO_SERVER_NAME:-0.0.0.0}"
+ export GRADIO_SERVER_PORT="${GRADIO_SERVER_PORT:-7860}"
+
+ mkdir -p "$SEEDVR_ROOT" "$CKPTS_ROOT" "$OUTPUT_ROOT" "$INPUT_ROOT" "$HF_HOME"
+
+ echo "[seedvr][start] checking environment..."
+ command -v python >/dev/null || { echo "[seedvr][start] python not found"; exit 1; }
+ command -v nvidia-smi >/dev/null && nvidia-smi || echo "[seedvr][start] warn: nvidia-smi not available"
+
+ echo "[seedvr][start] cloning repo if missing: $SEEDVR_ROOT"
+ if [ ! -d "$SEEDVR_ROOT/.git" ]; then
+   git clone "$SEEDVR_GIT_URL" "$SEEDVR_ROOT"
+ else
+   echo "[seedvr][start] repo present"
+ fi
+
+ echo "[seedvr][start] downloading model (snapshot_download) into $CKPTS_ROOT"
+ python - <<PY
+ import os
+ from pathlib import Path
+ from huggingface_hub import snapshot_download
+
+ repo_id = os.environ["SEEDVR_REPO_ID"]
+ save_dir = os.environ["CKPTS_ROOT"]
+ cache_dir = os.environ["HF_HOME"]
+ token = os.environ.get("HF_TOKEN") or None
+
+ Path(save_dir).mkdir(parents=True, exist_ok=True)
+ snapshot_download(
+     repo_id=repo_id,
+     cache_dir=cache_dir,
+     local_dir=save_dir,
+     local_dir_use_symlinks=False,
+     resume_download=True,
+     allow_patterns=["*.json", "*.safetensors", "*.pth", "*.bin", "*.py", "*.md", "*.txt"],
+     token=token,
+ )
+ print("[seedvr][start] snapshot_download ok:", save_dir)
+ PY
+
+ # OUTPUT_ROOT already defaults to /app/outputs above; just ensure it exists and is writable
+ mkdir -p "$OUTPUT_ROOT" && chmod -R 777 "$OUTPUT_ROOT" || true
+
+ echo "[seedvr][start] ensuring ckpt symlink SeedVR/ckpts/SeedVR2-3B -> $CKPTS_ROOT"
+ mkdir -p "$SEEDVR_ROOT/ckpts"
+ if [ -L "$SEEDVR_ROOT/ckpts/SeedVR2-3B" ]; then
+   target="$(readlink -f "$SEEDVR_ROOT/ckpts/SeedVR2-3B" || true)"
+   if [ "$target" != "$CKPTS_ROOT" ]; then
+     rm -f "$SEEDVR_ROOT/ckpts/SeedVR2-3B"
+   fi
+ fi
+ if [ ! -e "$SEEDVR_ROOT/ckpts/SeedVR2-3B" ]; then
+   ln -s "$CKPTS_ROOT" "$SEEDVR_ROOT/ckpts/SeedVR2-3B"
+ fi
+
+ echo "[seedvr][start] launching app_seedvr.py at ${GRADIO_SERVER_NAME}:${GRADIO_SERVER_PORT}"
+ exec python app_seedvr.py
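
Note that the script exports NUM_GPUS and NCCL settings but launches the UI with plain python, so any multi-GPU fan-out must happen inside app_seedvr.py. If the app were instead launched as a distributed job, the final line would look roughly like this (a sketch, assuming app_seedvr.py can run as a torchrun worker):

    exec torchrun --standalone --nproc_per_node="${NUM_GPUS}" app_seedvr.py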