# Source: starry/backend/python-services/docker-compose.test.yml
# Author: k-l-lambda
# Commit: 2b7aae2 — feat: add Python ML services (CPU mode) with model download
# STARRY ML Services - Test Docker Compose
#
# Simplified configuration for testing individual services.
# Uses a single all-in-one image to minimize build time.
#
# Usage:
# docker compose -f docker-compose.test.yml build
# docker compose -f docker-compose.test.yml up layout
# docker compose -f docker-compose.test.yml up ocr
services:
  # Score-layout prediction service (GPU). Serves streamPredictor.py in
  # "layout" mode from the deep-starry checkout mounted read-only.
  layout:
    build:
      context: ../../..
      dockerfile: backend/python-services/Dockerfile
      target: all-in-one
    container_name: starry-layout-test
    ports:
      - "12022:12022"
    volumes:
      # Model weights and source tree are host paths — TODO confirm they
      # exist on the test machine before bringing the stack up.
      - /home/camus/data/models/starry/starry-dist:/models/starry-dist:ro
      - /home/camus/work/deep-starry:/app/deep-starry:ro
    environment:
      - CUDA_VISIBLE_DEVICES=0
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    working_dir: /app/deep-starry
    # ">-" folds the command onto one line and strips the trailing newline.
    command: >-
      python streamPredictor.py
      /models/starry-dist/20221125-scorelayout-1121-residue-u-d4-w64-d4-w64
      -p 12022 -dv cuda -m layout
mask:
build:
context: ../../..
dockerfile: backend/python-services/Dockerfile
target: all-in-one
container_name: starry-mask-test
ports:
- "12024:12024"
volumes:
- /home/camus/data/models/starry/starry-dist:/models/starry-dist:ro
- /home/camus/work/deep-starry:/app/deep-starry:ro
environment:
- CUDA_VISIBLE_DEVICES=0
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
working_dir: /app/deep-starry
command: >
python streamPredictor.py
/models/starry-dist/20210918-scorewidgets.mask-unet-5-32
-p 12024 -dv cuda -m mask
semantic:
build:
context: ../../..
dockerfile: backend/python-services/Dockerfile
target: all-in-one
container_name: starry-semantic-test
ports:
- "12025:12025"
volumes:
- /home/camus/data/models/starry/starry-dist:/models/starry-dist:ro
- /home/camus/work/deep-starry:/app/deep-starry:ro
environment:
- CUDA_VISIBLE_DEVICES=0
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
working_dir: /app/deep-starry
command: >
python streamPredictor.py
/models/starry-dist/202302-semanticCluster
-p 12025 -dv cuda -m semanticCluster
loc:
build:
context: ../../..
dockerfile: backend/python-services/Dockerfile
target: all-in-one
container_name: starry-loc-test
ports:
- "12026:12026"
volumes:
- /home/camus/data/models/starry/ocr-dist:/models/ocr-dist:ro
- /home/camus/work/starry-ocr:/app/starry-ocr:ro
environment:
- CUDA_VISIBLE_DEVICES=0
- LOG_DIR=/tmp/starry-logs
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
working_dir: /app/starry-ocr
command: >
python locPredictor.py
-w /models/ocr-dist/DB_gc_loc/v6/model_epoch_88_minibatch_15300
-p 12026 -dv cuda
ocr:
build:
context: ../../..
dockerfile: backend/python-services/Dockerfile
target: all-in-one
container_name: starry-ocr-test
ports:
- "12027:12027"
volumes:
- /home/camus/data/models/starry/ocr-dist:/models/ocr-dist:ro
- /home/camus/work/starry-ocr:/app/starry-ocr:ro
environment:
- CUDA_VISIBLE_DEVICES=-1
- TF_USE_LEGACY_KERAS=1
- LOG_DIR=/tmp/starry-logs
working_dir: /app/starry-ocr
command: >
python ocrPredictor.py
/models/ocr-dist/ocr.yaml
-p 12027
brackets:
build:
context: ../../..
dockerfile: backend/python-services/Dockerfile
target: all-in-one
container_name: starry-brackets-test
ports:
- "12028:12028"
volumes:
- /home/camus/data/models/starry/ocr-dist:/models/ocr-dist:ro
- /home/camus/work/starry-ocr:/app/starry-ocr:ro
environment:
- CUDA_VISIBLE_DEVICES=-1
- TF_USE_LEGACY_KERAS=1
- LOG_DIR=/tmp/starry-logs
working_dir: /app/starry-ocr
command: >
python bracketsPredictor.py
/models/ocr-dist/brackets.yaml
-p 12028