# Stack-2-9-finetuned / stack/voice/docker-compose.yml
# Author: walidsobhie-code
# Last commit: "refactor: Squeeze folders further - cleaner structure" (65888d5)
# (Header above was pasted from the repository UI; kept as comments so the file parses as YAML.)
# Docker Compose stack for the voice pipeline: TTS API, model server,
# Redis cache, and optional STT.
# NOTE(review): the top-level `version` key is obsolete in the Compose
# Specification and ignored by current `docker compose`; kept here only for
# compatibility with older docker-compose v1 CLIs.
version: '3.8'
services:
  # Voice API front end, built from the local Dockerfile in this directory.
  voice-api:
    build: .
    ports:
      - "8000:8000"  # host:container API port (quoted to avoid YAML number traps)
    volumes:
      - ./voice_models:/app/voice_models  # host bind mount for model files
      - ./audio_files:/app/audio_files    # generated / ingested audio
    environment:
      # NOTE(review): MODEL_PATH points at /app/models/coqui_xtts, but only
      # /app/voice_models is mounted above — either the path is baked into the
      # image or this is a mount/path mismatch. Confirm before changing.
      - MODEL_PATH=/app/models/coqui_xtts
      - VOICE_CACHE_DIR=/app/voice_cache
      - WORKERS=4
    # `deploy.resources` is honored under Swarm (or compose compatibility
    # mode); plain `docker compose up` ignores these limits.
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 4G
        reservations:
          cpus: '1.0'
          memory: 2G
    restart: unless-stopped
tts-model:
image: coqui/tts:latest
ports:
- "9000:9000"
volumes:
- ./models:/models
- ./tts_cache:/tts_cache
environment:
- MODEL_NAME=x TTS
- MODEL_PATH=/models/coqui_xtts
- CACHE_DIR=/tts_cache
- GPU_SUPPORT=${GPU_SUPPORT:-false}
deploy:
resources:
limits:
cpus: '4.0'
memory: 8G
${GPU_LIMITS}
reservations:
cpus: '2.0'
memory: 4G
restart: unless-stopped
redis:
image: redis:alpine
ports:
- "6379:6379"
volumes:
- ./redis_data:/data
command: redis-server --appendonly yes
deploy:
resources:
limits:
cpus: '0.5'
memory: 256M
reservations:
cpus: '0.25'
memory: 128M
restart: unless-stopped
  # Optional: Speech-to-text service for voice input.
  # NOTE(review): verify the image name — "vosk/kaldi" may not be a published
  # Docker Hub image (Vosk's server images live under alphacep/*); confirm.
  stt-service:
    image: vosk/kaldi:latest
    ports:
      - "9001:9001"
    volumes:
      # Host bind mount; the named volume `vosk_models` declared at the
      # bottom of this file is never used — confirm which was intended.
      - ./models/vosk:/models/vosk
    environment:
      - MODEL_PATH=/models/vosk/model
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 4G
        reservations:
          cpus: '1.0'
          memory: 2G
    restart: unless-stopped
# NOTE(review): all six named volumes below are declared but never referenced —
# every service above uses host bind mounts (./dir:/container/dir), and a
# top-level `volumes` entry only defines *named* volumes. Either switch the
# services to these named volumes or drop this section; confirm intent first.
volumes:
  voice_models:
    driver: local
  audio_files:
    driver: local
  models:
    driver: local
  tts_cache:
    driver: local
  redis_data:
    driver: local
  vosk_models:
    driver: local
# Explicit default network (bridge is already Compose's default driver).
networks:
  default:
    driver: bridge
# Environment variables for GPU support:
# set GPU_SUPPORT=true when running on a GPU host. Note that Docker Compose
# grants GPU access through `deploy.resources.reservations.devices`
# (driver: nvidia, capabilities: [gpu]) — interpolating a resource-limit
# string such as `nvidia.com/gpu=1` into the file is not a supported mechanism.