# STARRY Python ML Services - Docker Compose
#
# Setup:
# ./gen-env.sh /path/to/models # Generate .env from models.yaml
# docker compose up -d # Start all services
# docker compose up layout semantic ocr # Start specific services
# docker compose logs -f layout # Follow logs for a service
# Baseline shared by every service via the YAML merge key (<<: *common-settings):
# restart policy and membership in the private starry-ml network.
x-common-settings: &common-settings
  restart: unless-stopped
  networks:
    - starry-ml
# Base for the PyTorch predictors: deep-starry source and model dist mounted
# read-only, plus a reservation for one NVIDIA GPU.
# NOTE: `<<:` is a shallow merge — a service that redeclares a key (e.g.
# `volumes` or `environment`) replaces the anchor's value entirely, it does
# not append to it.
x-pytorch-service: &pytorch-service
  <<: *common-settings
  image: starry-pytorch:test
  volumes:
    - ${DEEP_STARRY_PATH:-/home/camus/work/deep-starry}:/app/deep-starry:ro
    - ${MODELS_ROOT}/starry-dist:/models/starry-dist:ro
  deploy:
    resources:
      reservations:
        devices:
          - driver: nvidia
            count: 1
            capabilities: [gpu]
# Base for the TensorFlow predictors: starry-ocr source and model dist mounted
# read-only, plus a shared host log directory.
# NOTE: services extending this anchor that redeclare `environment` must
# repeat TF_USE_LEGACY_KERAS and LOG_DIR themselves — the merge key replaces
# the whole list rather than appending (see ocr/brackets below).
x-tensorflow-service: &tensorflow-service
  <<: *common-settings
  image: starry-tensorflow:test
  volumes:
    - ${STARRY_OCR_PATH:-/home/camus/work/starry-ocr}:/app/starry-ocr:ro
    - ${MODELS_ROOT}/ocr-dist:/models/ocr-dist:ro
    - /tmp/starry-logs:/tmp/starry-logs
  environment:
    - TF_USE_LEGACY_KERAS=1
    - LOG_DIR=/tmp/starry-logs
services:
# ============================================================
# PyTorch Services (GPU)
# ============================================================
  # Layout model predictor (PyTorch, GPU 0), served on port 12022.
  layout:
    <<: *pytorch-service
    container_name: starry-layout
    ports:
      - "12022:12022"
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: >
      python /app/deep-starry/streamPredictor.py
      /models/starry-dist/${LAYOUT_MODEL_PATH}
      -p 12022 -dv cuda -m layout
  # Mask model predictor (PyTorch, GPU 0), served on port 12024.
  mask:
    <<: *pytorch-service
    container_name: starry-mask
    ports:
      - "12024:12024"
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: >
      python /app/deep-starry/streamPredictor.py
      /models/starry-dist/${MASK_MODEL_PATH}
      -p 12024 -dv cuda -m mask
  # Semantic-cluster model predictor (PyTorch, GPU 0), served on port 12025.
  # Note the predictor mode flag is `semanticCluster`, not `semantic`.
  semantic:
    <<: *pytorch-service
    container_name: starry-semantic
    ports:
      - "12025:12025"
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: >
      python /app/deep-starry/streamPredictor.py
      /models/starry-dist/${SEMANTIC_MODEL_PATH}
      -p 12025 -dv cuda -m semanticCluster
  # Gauge model predictor (PyTorch, GPU 0), served on port 12023.
  gauge:
    <<: *pytorch-service
    container_name: starry-gauge
    ports:
      - "12023:12023"
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: >
      python /app/deep-starry/streamPredictor.py
      /models/starry-dist/${GAUGE_MODEL_PATH}
      -p 12023 -dv cuda -m gauge
# ============================================================
# TensorFlow Services (CPU)
# ============================================================
  # Loc predictor — standalone service that extends only common-settings
  # (different image `starry-all:test`), runs on GPU 0, served on port 12026.
  # Invoked with a relative script path, hence the explicit working_dir.
  loc:
    <<: *common-settings
    image: starry-all:test
    container_name: starry-loc
    ports:
      - "12026:12026"
    volumes:
      # NOTE(review): starry-ocr is mounted read-write here, unlike the :ro
      # mounts everywhere else — confirm whether write access is intended.
      - ${STARRY_OCR_PATH:-/home/camus/work/starry-ocr}:/app/starry-ocr
      - ${MODELS_ROOT}/ocr-dist:/models/ocr-dist:ro
      - /tmp/starry-logs:/tmp/starry-logs
    working_dir: /app/starry-ocr
    environment:
      - CUDA_VISIBLE_DEVICES=0
      - LOG_DIR=/tmp/starry-logs
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    command: >
      python locPredictor.py
      -w /models/ocr-dist/${LOC_MODEL_PATH}
      -p 12026 -dv cuda
  # OCR predictor (TensorFlow, CPU-only via CUDA_VISIBLE_DEVICES=-1),
  # served on port 12027.
  # TF_USE_LEGACY_KERAS and LOG_DIR are deliberately repeated from the
  # anchor: the `<<:` merge replaces the whole `environment` list, so
  # removing them here would drop them — do not "deduplicate".
  ocr:
    <<: *tensorflow-service
    container_name: starry-ocr
    ports:
      - "12027:12027"
    environment:
      - CUDA_VISIBLE_DEVICES=-1
      - TF_USE_LEGACY_KERAS=1
      - LOG_DIR=/tmp/starry-logs
    command: >
      python /app/starry-ocr/ocrPredictor.py
      /models/ocr-dist/${OCR_CONFIG}
      -p 12027
  # Brackets predictor (TensorFlow, CPU-only via CUDA_VISIBLE_DEVICES=-1),
  # served on port 12028.
  # TF_USE_LEGACY_KERAS and LOG_DIR are deliberately repeated from the
  # anchor: the `<<:` merge replaces the whole `environment` list, so
  # removing them here would drop them — do not "deduplicate".
  brackets:
    <<: *tensorflow-service
    container_name: starry-brackets
    ports:
      - "12028:12028"
    environment:
      - CUDA_VISIBLE_DEVICES=-1
      - TF_USE_LEGACY_KERAS=1
      - LOG_DIR=/tmp/starry-logs
    command: >
      python /app/starry-ocr/bracketsPredictor.py
      /models/ocr-dist/${BRACKETS_CONFIG}
      -p 12028
# Private bridge network joined by every service (via common-settings).
networks:
  starry-ml:
    driver: bridge
|