updated dockerfile

Dockerfile  (+21 -14)  CHANGED
@@ -1,6 +1,6 @@
 FROM python:3.11-slim
 
-# Install required system dependencies
+# Install required system dependencies
 RUN apt-get update && apt-get install -y \
     git curl build-essential cmake \
     && rm -rf /var/lib/apt/lists/*
@@ -8,29 +8,36 @@ RUN apt-get update && apt-get install -y \
 # Set working directory
 WORKDIR /app
 
-# Create writable directories
-# Note: For production, consider using a non-root user and more specific permissions
+# Create writable directories
 RUN mkdir -p /app/.cache /app/vector_database && chmod -R 777 /app
 
-# Set environment variables
+# Set environment variables
 ENV TRANSFORMERS_CACHE=/app/.cache \
     HF_HOME=/app/.cache \
     CHROMADB_DISABLE_TELEMETRY=true
 
-#
-# from your requirements.txt and rely on this explicit, version-pinned installation.
-RUN pip install --no-cache-dir llama-cpp-python==0.2.61
-
-# Install other dependencies from requirements.txt
+# Install dependencies from requirements.txt first
 COPY requirements.txt .
 RUN pip install --no-cache-dir -r requirements.txt
 
-# Copy the
+# ✅ STEP 1: Copy the source data and the Python script into the image
+COPY ./combined_context.jsonl .
+COPY ./create_granular_chunks.py .
+
+# ✅ STEP 2: Run the script to generate the chunks file inside the image
+RUN python create_granular_chunks.py
+
+# ✅ STEP 3: The 'granular_chunks_improved.jsonl' now exists inside the image.
+# We no longer need to copy it from our local machine.
+
+# Note: As recommended before, 'llama-cpp-python' should be removed from requirements.txt
+# to rely on the more stable, version-pinned installation below.
+RUN pip install --no-cache-dir llama-cpp-python==0.2.61
+
+# Copy the rest of the application code
 COPY ./app ./app
-# ✅ CORRECTED FILENAME: Ensure this matches the output of your chunking script
-COPY ./granular_chunks_improved.jsonl .
 
-# Download your fine-tuned TinyLlama GGUF model
+# Download your fine-tuned TinyLlama GGUF model
 RUN curl -fL -o /app/tinyllama_dop_q4_k_m.gguf \
     https://huggingface.co/Kalpokoch/FinetunedQuantizedTinyLama/resolve/main/tinyllama_dop_q4_k_m.gguf \
     && echo "✅ TinyLlama model downloaded."
@@ -38,5 +45,5 @@ RUN curl -fL -o /app/tinyllama_dop_q4_k_m.gguf \
 # Expose the application port
 EXPOSE 7860
 
-# Run the FastAPI application
+# Run the FastAPI application
 CMD ["uvicorn", "app.app:app", "--host", "0.0.0.0", "--port", "7860"]
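The image pre-creates a writable /app/vector_database directory and sets CHROMADB_DISABLE_TELEMETRY=true, which suggests the application persists a Chroma index there. A minimal sketch, assuming the app uses ChromaDB's PersistentClient; the collection name and sample record below are hypothetical, not taken from this commit:

# Illustrative only: using the writable /app/vector_database directory with ChromaDB.
import chromadb

# Open (or create) a persistent store in the directory the Dockerfile prepared.
client = chromadb.PersistentClient(path="/app/vector_database")

# "dop_chunks" is a hypothetical collection name.
collection = client.get_or_create_collection("dop_chunks")
collection.add(ids=["example-0"], documents=["example chunk text"])
print(collection.count())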
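STEP 1 and STEP 2 bake chunk generation into the build: combined_context.jsonl and create_granular_chunks.py are copied in, and the script writes granular_chunks_improved.jsonl inside the image, so the generated file no longer needs to be copied from the local machine. The commit does not include the script itself; the sketch below only illustrates the input/output contract implied by the Dockerfile, and the field names and chunk size are assumptions:

# create_granular_chunks.py -- illustrative sketch only.
import json

INPUT_PATH = "combined_context.jsonl"           # copied in at STEP 1
OUTPUT_PATH = "granular_chunks_improved.jsonl"  # produced at STEP 2
MAX_WORDS = 120                                 # hypothetical chunk size

def split_into_chunks(text, max_words=MAX_WORDS):
    """Split a long passage into word-bounded chunks."""
    words = text.split()
    return [" ".join(words[i:i + max_words]) for i in range(0, len(words), max_words)]

with open(INPUT_PATH, "r", encoding="utf-8") as src, \
     open(OUTPUT_PATH, "w", encoding="utf-8") as dst:
    for line in src:
        record = json.loads(line)
        # "id" and "text" are assumed field names in combined_context.jsonl.
        for i, chunk in enumerate(split_into_chunks(record["text"])):
            dst.write(json.dumps({"id": f"{record.get('id', 'doc')}-{i}", "text": chunk}) + "\n")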
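llama-cpp-python is installed with an explicit version pin (0.2.61) rather than through requirements.txt, and the fine-tuned TinyLlama GGUF is downloaded to /app/tinyllama_dop_q4_k_m.gguf at build time. A minimal sketch of loading that file with llama-cpp-python; the context size, thread count, and prompt are placeholders, not values from the app:

# Illustrative only: loading the downloaded GGUF with the pinned llama-cpp-python.
from llama_cpp import Llama

llm = Llama(
    model_path="/app/tinyllama_dop_q4_k_m.gguf",  # path written by the curl step above
    n_ctx=2048,    # hypothetical context window
    n_threads=2,   # hypothetical thread count for a small CPU Space
)

result = llm("Question: What does this service do?\nAnswer:", max_tokens=128)
print(result["choices"][0]["text"])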
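The CMD runs uvicorn against app.app:app on port 7860, matching EXPOSE 7860 (the default port for Docker Spaces). That means the package copied by COPY ./app ./app must contain an app.py module exposing a FastAPI instance named app. A minimal sketch of that layout; the real application presumably wires the retrieval and generation pipeline into these routes:

# app/app.py -- minimal layout the CMD expects; endpoint is illustrative.
from fastapi import FastAPI

app = FastAPI()

@app.get("/health")
def health():
    # Simple liveness check; the actual app would also expose its query endpoints.
    return {"status": "ok"}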