akra35567 committed on
Commit
dd61879
·
1 Parent(s): 9420d2c

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +19 -10
Dockerfile CHANGED
@@ -1,24 +1,27 @@
1
- FROM python:3.11-slim
 
2
 
3
  ENV DEBIAN_FRONTEND=noninteractive
4
  ENV PYTHONUNBUFFERED=1
5
  ENV HF_HUB_ENABLE_HF_TRANSFER=1
6
 
 
7
  RUN apt-get update && apt-get install -y --no-install-recommends \
8
- ca-certificates curl git && \
9
  rm -rf /var/lib/apt/lists/*
10
 
 
11
  RUN useradd -m -u 1000 user
12
  USER user
13
  ENV PATH="/home/user/.local/bin:$PATH"
14
 
15
  WORKDIR /home/user
16
 
17
- # PASTAS
18
  RUN mkdir -p /home/user/data /home/user/models /home/user/data/finetuned_hermes && \
19
  chown -R user:user /home/user
20
 
21
- # BAIXA O GGUF (4.8 GB) - CACHE PERMANENTE
22
  RUN pip install --no-cache-dir huggingface_hub[hf_transfer] && \
23
  python -c "from huggingface_hub import snapshot_download; \
24
  snapshot_download(repo_id='TheBloke/OpenHermes-2.5-Mistral-7B-GGUF', \
@@ -27,21 +30,27 @@ RUN pip install --no-cache-dir huggingface_hub[hf_transfer] && \
27
  local_dir_use_symlinks=False)" && \
28
  echo 'OPENHERMES Q4_K_M BAIXADO COM SUCESSO! BUÉ FIXE, KOTA!'
29
 
 
30
  COPY --chown=user:user requirements.txt .
31
  RUN pip install --upgrade pip --no-cache-dir && \
32
  pip install --no-cache-dir \
33
  torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 \
34
- --index-url https://download.pytorch.org/whl/cpu && \
35
- pip install --no-cache-dir \
36
- llama-cpp-python==0.2.89 \
37
- --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu && \
38
- pip install --no-cache-dir -r requirements.txt && \
 
 
 
 
39
  pip cache purge
40
 
 
41
  COPY --chown=user:user . .
42
 
43
  EXPOSE 7860
44
  HEALTHCHECK CMD curl --fail http://localhost:7860/_stcore/health || exit 1
45
 
46
- # RODA DIRETO O main.py (100% compatível com Spaces)
47
  CMD ["python", "main.py"]
 
1
# syntax=docker/dockerfile:1
# HF Spaces CPU image: Python 3.11 on Debian bullseye (glibc base, required so
# llama-cpp-python can be compiled and linked against glibc rather than musl).
FROM python:3.11-bullseye

# Build-time only: silence apt prompts. ARG (unlike ENV) does not persist into
# the runtime environment of the final image, but is visible to later RUNs.
ARG DEBIAN_FRONTEND=noninteractive
# Flush Python stdout/stderr straight to the container log.
ENV PYTHONUNBUFFERED=1
# Enable hf_transfer-accelerated downloads from the Hugging Face Hub.
ENV HF_HUB_ENABLE_HF_TRANSFER=1
7
 
8
# System packages: TLS certificates, curl (used by the HEALTHCHECK), git, and
# the C/C++ toolchain + CMake + OpenBLAS dev headers needed to compile
# llama-cpp-python from source. One package per line, sorted, for clean diffs;
# apt lists are removed in the same layer to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        cmake \
        curl \
        git \
        libopenblas-dev \
    && rm -rf /var/lib/apt/lists/*
12
 
13
# Non-root user with uid 1000 (the uid Hugging Face Spaces expects).
RUN useradd -m -u 1000 user
USER user
# Make user-level pip entry points (~/.local/bin) resolvable on PATH.
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /home/user
19
 
20
# Pre-create persistent data/model directories. This layer runs AFTER
# `USER user`, so the directories are created as uid 1000 and already owned
# by `user` — the original trailing `chown -R user:user /home/user` was a
# no-op on files the user already owns, and has been dropped.
RUN mkdir -p /home/user/data /home/user/models /home/user/data/finetuned_hermes
23
 
24
+ # BAIXA O GGUF (4.8 GB) CACHE PERMANENTE
25
  RUN pip install --no-cache-dir huggingface_hub[hf_transfer] && \
26
  python -c "from huggingface_hub import snapshot_download; \
27
  snapshot_download(repo_id='TheBloke/OpenHermes-2.5-Mistral-7B-GGUF', \
 
30
  local_dir_use_symlinks=False)" && \
31
  echo 'OPENHERMES Q4_K_M BAIXADO COM SUCESSO! BUÉ FIXE, KOTA!'
32
 
33
# Copy only the dependency manifest first so the heavy install layers below
# stay cached until requirements.txt itself changes.
COPY --chown=user:user requirements.txt .
# CPU-only PyTorch wheels, version-pinned, from the official CPU wheel index.
RUN pip install --upgrade pip --no-cache-dir && \
    pip install --no-cache-dir \
        torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 \
        --index-url https://download.pytorch.org/whl/cpu
39
+
40
# Force llama-cpp-python to compile from source so it links against this
# image's glibc + OpenBLAS (per the original note, the prebuilt wheel pulled
# in a musl libc dependency — the "libc.musl" error).
# FIX: llama-cpp-python >= 0.2.80 bundles a llama.cpp whose CMake options were
# renamed LLAMA_* -> GGML_*, so -DLLAMA_BLAS=ON is silently ignored and the
# extension builds WITHOUT BLAS. For 0.2.89 the GGML_* names must be used.
RUN CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS" \
    pip install --no-cache-dir --force-reinstall --no-binary=llama-cpp-python \
    llama-cpp-python==0.2.89
44
+
45
# Install the remaining application dependencies.
# The original trailing `pip cache purge` was dropped: with --no-cache-dir
# nothing is written to the cache, and `pip cache purge` can exit non-zero on
# an empty or disabled cache in some pip versions, failing the whole build.
RUN pip install --no-cache-dir -r requirements.txt
48
 
49
# Copy application code last so source edits do not invalidate the dependency
# layers above. (Keep .git, local data, etc. out of the context via .dockerignore.)
COPY --chown=user:user . .

# HF Spaces routes traffic to 7860 (EXPOSE is documentation only; it does not
# publish the port).
EXPOSE 7860

# Generous --start-period so the container is not marked unhealthy while the
# 4.8 GB GGUF model is loading at startup.
# NOTE(review): /_stcore/health is Streamlit's health endpoint, but CMD runs
# main.py directly — confirm main.py actually serves this route.
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
    CMD curl -fsS http://localhost:7860/_stcore/health || exit 1

# Exec-form CMD: python runs as PID 1 and receives SIGTERM from `docker stop`.
CMD ["python", "main.py"]