# Spaces: Running
# (Hugging Face Space status header from scrape — kept as a comment so the
#  script below remains valid shell.)
#!/usr/bin/env bash
# Entrypoint for a Hugging Face Space:
#   1. start the Ollama server in the background,
#   2. wait (bounded) until its HTTP API answers,
#   3. pull the target model if it is not already present,
#   4. run the FastAPI app with uvicorn on port 7860 (HF default).
set -euo pipefail

# Model to serve; can be overridden via the MODEL_NAME env var.
MODEL_NAME="${MODEL_NAME:-llama3.1}"
readonly MODEL_NAME
readonly OLLAMA_URL="http://localhost:11434/api/tags"

# Start Ollama in the background and remember its PID so we can clean it
# up if anything below fails (or when uvicorn eventually exits).
ollama serve &
OLLAMA_PID=$!
trap 'kill "$OLLAMA_PID" 2>/dev/null || true' EXIT

# Wait for Ollama to be ready, but give up after 60 seconds instead of
# looping forever (the original loop had no timeout).
echo "Waiting for Ollama to start..."
ready=0
for (( i = 0; i < 60; i++ )); do
  if curl -s "$OLLAMA_URL" > /dev/null; then
    ready=1
    break
  fi
  sleep 1
done
if (( ! ready )); then
  echo "Ollama did not become ready within 60s" >&2
  exit 1
fi
echo "Ollama is ready!"

# Pull the model only if it doesn't exist locally.
# grep -F: match the name literally — the '.' in "llama3.1" is a regex
# metacharacter and would otherwise match any character.
if ! ollama list | grep -qF -- "$MODEL_NAME"; then
  echo "Pulling model $MODEL_NAME..."
  ollama pull "$MODEL_NAME"
else
  echo "Model $MODEL_NAME already exists."
fi

# Start the FastAPI application (blocks until uvicorn exits; the EXIT
# trap then shuts Ollama down with it).
echo "Starting FastAPI app..."
uvicorn src.main:app --host 0.0.0.0 --port 7860