Amaranath committed on
Commit
dee5d65
·
verified ·
1 Parent(s): 8110950

Create start.sh

Browse files
Files changed (1) hide show
  1. start.sh +53 -0
start.sh ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -e
3
+
4
+ echo "πŸš€ Starting Ollama + FastAPI Server..."
5
+
6
+ # Set Ollama environment variables
7
+ export OLLAMA_HOST=0.0.0.0:11434
8
+ export OLLAMA_ORIGINS="*"
9
+ export OLLAMA_KEEP_ALIVE=5m
10
+
11
+ # Start Ollama in background (as root - this is fine in containers)
12
+ echo "πŸ“‘ Starting Ollama service..."
13
+ ollama serve &
14
+ OLLAMA_PID=$!
15
+
16
+ # Wait for Ollama to be ready
17
+ echo "⏳ Waiting for Ollama to be ready..."
18
+ for i in {1..60}; do # Increased timeout to 60 attempts
19
+ if curl -f http://127.0.0.1:11434/api/tags >/dev/null 2>&1; then
20
+ echo "βœ… Ollama is ready!"
21
+ break
22
+ fi
23
+ echo "πŸ”„ Attempt $i/60 - Waiting for Ollama..."
24
+ sleep 2
25
+ done
26
+
27
+ # Check if Ollama is actually running
28
+ if ! curl -f http://127.0.0.1:11434/api/tags >/dev/null 2>&1; then
29
+ echo "❌ Ollama failed to start properly"
30
+ echo "πŸ” Checking Ollama process..."
31
+ ps aux | grep ollama || echo "No Ollama process found"
32
+
33
+ echo "πŸ”„ Attempting to restart Ollama..."
34
+ killall ollama 2>/dev/null || true
35
+ sleep 2
36
+ ollama serve &
37
+ sleep 10
38
+ fi
39
+
40
+ # Try to pull the model (no user switching)
41
+ echo "πŸ“₯ Pulling Llama 3.2 1B model..."
42
+ ollama pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M || {
43
+ echo "⚠️ Model pull failed, trying smaller model..."
44
+ ollama pull llama3.2:1b || echo "⚠️ Fallback model pull also failed, continuing..."
45
+ }
46
+
47
+ echo "πŸŽ‰ Setup complete!"
48
+ echo "πŸ“‘ Ollama API: http://localhost:11434"
49
+ echo "πŸ“– FastAPI: http://localhost:7860"
50
+
51
+ # Start FastAPI in foreground (keeps container alive)
52
+ echo "πŸš€ Starting FastAPI server..."
53
+ python app.py