# STARRY Python ML Services - Docker Compose
#
# Setup:
#   ./gen-env.sh /path/to/models            # Generate .env from models.yaml
#   docker compose up -d                    # Start all services
#   docker compose up layout semantic ocr   # Start specific services
#   docker compose logs -f layout           # Follow logs for a service
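#
# gen-env.sh is expected to write a .env file defining the variables this
# compose file interpolates. A hypothetical sketch only (the variable names
# are taken from the services below; the example values are placeholders,
# not the real entries generated from models.yaml):
#
#   MODELS_ROOT=/data/models                    # placeholder
#   DEEP_STARRY_PATH=/home/camus/work/deep-starry
#   STARRY_OCR_PATH=/home/camus/work/starry-ocr
#   LAYOUT_MODEL_PATH=layout/model.chkpt        # placeholder
#   MASK_MODEL_PATH=mask/model.chkpt            # placeholder
#   SEMANTIC_MODEL_PATH=semantic/model.chkpt    # placeholder
#   GAUGE_MODEL_PATH=gauge/model.chkpt          # placeholder
#   LOC_MODEL_PATH=loc/weights.h5               # placeholder
#   OCR_CONFIG=ocr/config.yaml                  # placeholder
#   BRACKETS_CONFIG=brackets/config.yaml        # placeholder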
x-common-settings: &common-settings
  restart: unless-stopped
  networks:
    - starry-ml

x-pytorch-service: &pytorch-service
  <<: *common-settings
  image: starry-pytorch:test
  volumes:
    - ${DEEP_STARRY_PATH:-/home/camus/work/deep-starry}:/app/deep-starry:ro
    - ${MODELS_ROOT}/starry-dist:/models/starry-dist:ro
  deploy:
    resources:
      reservations:
        devices:
          - driver: nvidia
            count: 1
            capabilities: [gpu]
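# The deploy.resources.reservations.devices block above asks Docker to expose
# one NVIDIA GPU to each PyTorch container (this requires the NVIDIA Container
# Toolkit on the host); CUDA_VISIBLE_DEVICES in each service then selects the
# device PyTorch uses.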
x-tensorflow-service: &tensorflow-service
  <<: *common-settings
  image: starry-tensorflow:test
  volumes:
    - ${STARRY_OCR_PATH:-/home/camus/work/starry-ocr}:/app/starry-ocr:ro
    - ${MODELS_ROOT}/ocr-dist:/models/ocr-dist:ro
    - /tmp/starry-logs:/tmp/starry-logs
  environment:
    - TF_USE_LEGACY_KERAS=1
    - LOG_DIR=/tmp/starry-logs
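# Each service below pulls one of these anchors in with a YAML merge key (<<:).
# Keys a service defines itself take precedence, and list values such as
# `environment` are replaced rather than merged, which is why the TensorFlow
# services repeat TF_USE_LEGACY_KERAS and LOG_DIR in their own blocks.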
services:
  # ============================================================
  # PyTorch Services (GPU)
  # ============================================================
  layout:
    <<: *pytorch-service
    container_name: starry-layout
    ports:
      - "12022:12022"
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: >
      python /app/deep-starry/streamPredictor.py
      /models/starry-dist/${LAYOUT_MODEL_PATH}
      -p 12022 -dv cuda -m layout

  mask:
    <<: *pytorch-service
    container_name: starry-mask
    ports:
      - "12024:12024"
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: >
      python /app/deep-starry/streamPredictor.py
      /models/starry-dist/${MASK_MODEL_PATH}
      -p 12024 -dv cuda -m mask

  semantic:
    <<: *pytorch-service
    container_name: starry-semantic
    ports:
      - "12025:12025"
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: >
      python /app/deep-starry/streamPredictor.py
      /models/starry-dist/${SEMANTIC_MODEL_PATH}
      -p 12025 -dv cuda -m semanticCluster

  gauge:
    <<: *pytorch-service
    container_name: starry-gauge
    ports:
      - "12023:12023"
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: >
      python /app/deep-starry/streamPredictor.py
      /models/starry-dist/${GAUGE_MODEL_PATH}
      -p 12023 -dv cuda -m gauge
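  # The four PyTorch services share one GPU (CUDA_VISIBLE_DEVICES=0) and differ
  # only in checkpoint, port, and the -m mode passed to streamPredictor.py:
  # layout 12022, gauge 12023, mask 12024, semantic 12025.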
  # ============================================================
  # TensorFlow Services (CPU)
  # ============================================================
  loc:
    <<: *common-settings
    image: starry-all:test
    container_name: starry-loc
    ports:
      - "12026:12026"
    volumes:
      - ${STARRY_OCR_PATH:-/home/camus/work/starry-ocr}:/app/starry-ocr
      - ${MODELS_ROOT}/ocr-dist:/models/ocr-dist:ro
      - /tmp/starry-logs:/tmp/starry-logs
    working_dir: /app/starry-ocr
    environment:
      - CUDA_VISIBLE_DEVICES=0
      - LOG_DIR=/tmp/starry-logs
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    command: >
      python locPredictor.py
      -w /models/ocr-dist/${LOC_MODEL_PATH}
      -p 12026 -dv cuda
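  # Note: unlike ocr and brackets below, loc uses the starry-all image,
  # reserves a GPU and runs with -dv cuda, and mounts the starry-ocr source
  # without :ro.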
  ocr:
    <<: *tensorflow-service
    container_name: starry-ocr
    ports:
      - "12027:12027"
    environment:
      - CUDA_VISIBLE_DEVICES=-1
      - TF_USE_LEGACY_KERAS=1
      - LOG_DIR=/tmp/starry-logs
    command: >
      python /app/starry-ocr/ocrPredictor.py
      /models/ocr-dist/${OCR_CONFIG}
      -p 12027

  brackets:
    <<: *tensorflow-service
    container_name: starry-brackets
    ports:
      - "12028:12028"
    environment:
      - CUDA_VISIBLE_DEVICES=-1
      - TF_USE_LEGACY_KERAS=1
      - LOG_DIR=/tmp/starry-logs
    command: >
      python /app/starry-ocr/bracketsPredictor.py
      /models/ocr-dist/${BRACKETS_CONFIG}
      -p 12028
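  # CUDA_VISIBLE_DEVICES=-1 hides all GPUs from TensorFlow, so the ocr and
  # brackets predictors run purely on CPU.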
networks:
  starry-ml:
    driver: bridge
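# Other containers on the starry-ml network can reach these services by service
# or container name (e.g. starry-layout:12022); from the host, use the
# published localhost ports 12022-12028.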