Spaces: Running

Commit: Update Dockerfile
Browse files — Dockerfile: +2 −3

Dockerfile (CHANGED)
@@ -35,8 +35,7 @@ RUN python create_granular_chunks.py
 RUN pip install --no-cache-dir llama-cpp-python==0.2.61

 # ✅ FIXED: Copy the application files directly to /app (not /app/app/)
-COPY ./app
-COPY ./app/policy_vector_db.py .
+COPY ./app ./app

 # Download your fine-tuned TinyLlama GGUF model
 RUN curl -fL -o /app/tinyllama_dop_q4_k_m.gguf \
@@ -47,4 +46,4 @@ RUN curl -fL -o /app/tinyllama_dop_q4_k_m.gguf \
 EXPOSE 7860

 # ✅ FIXED: Run the FastAPI application with correct module path
-CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
+CMD ["uvicorn", "app.app:app", "--host", "0.0.0.0", "--port", "7860"]