Amaranath committed
Commit f9806c8 · verified · 1 Parent(s): e277cc2

Update start.sh

Files changed (1)
  1. start.sh +18 -19
start.sh CHANGED
@@ -4,13 +4,19 @@ set -e
 echo "🚀 Starting Ollama + FastAPI Server..."
 
 # Set Ollama environment variables
+export OLLAMA_HOME=/app/.ollama
 export OLLAMA_HOST=0.0.0.0:11434
 export OLLAMA_ORIGINS="*"
 export OLLAMA_KEEP_ALIVE=5m
 
-# Start Ollama in background (as root - this is fine in containers)
+
+# Create Ollama directory if it doesn't exist
+mkdir -p /app/.ollama
+chmod 755 /app/.ollama
+
+# Start Ollama in background
 echo "📡 Starting Ollama service..."
-ollama serve &
+nohup ollama serve > /app/ollama.log 2>&1 &
 OLLAMA_PID=$!
 
 # Wait for Ollama to be ready
@@ -24,26 +30,19 @@ for i in {1..60}; do # Increased timeout to 60 attempts
     sleep 2
 done
 
-# Check if Ollama is actually running
-if ! curl -f http://127.0.0.1:11434/api/tags >/dev/null 2>&1; then
-    echo "❌ Ollama failed to start properly"
-    echo "🔍 Checking Ollama process..."
-    ps aux | grep ollama || echo "No Ollama process found"
+
+# Check if Ollama started successfully
+if curl -f http://127.0.0.1:11434/api/tags >/dev/null 2>&1; then
+    echo "✅ Ollama is running!"
 
-    echo "🔄 Attempting to restart Ollama..."
-    killall ollama 2>/dev/null || true
-    sleep 2
-    ollama serve &
-    sleep 10
+    # Try to pull model in background (don't block startup)
+    echo "📥 Starting model download in background..."
+    nohup ollama pull llama3.2:1b > /app/pull.log 2>&1 &
+    echo "📝 Model download started - check /app/pull.log for progress"
+else
+    echo "⚠️ Ollama not responding - check /app/ollama.log for details"
 fi
 
-# Try to pull the model (no user switching)
-echo "📥 Pulling Llama 3.2 1B model..."
-ollama pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M || {
-    echo "⚠️ Model pull failed, trying smaller model..."
-    ollama pull llama3.2:1b || echo "⚠️ Fallback model pull also failed, continuing..."
-}
-
 echo "🎉 Setup complete!"
 echo "📡 Ollama API: http://localhost:11434"
 echo "📖 FastAPI: http://localhost:7860"