office_translator/docker-compose.yml
Sepehr 29178a75a5 feat: Add complete production deployment infrastructure
- Docker configuration:
  - Multi-stage Dockerfiles for backend (Python 3.11) and frontend (Node 20)
  - Production docker-compose.yml with all services
  - Development docker-compose.dev.yml with hot-reload

- Nginx reverse proxy:
  - SSL/TLS termination with modern cipher suites
  - Rate limiting and security headers
  - Caching and compression
  - Load balancing ready

- Kubernetes manifests:
  - Deployment, Service, Ingress configurations
  - ConfigMap and Secrets
  - HPA for auto-scaling
  - PersistentVolumeClaims

- Deployment scripts:
  - deploy.sh: Automated deployment with health checks
  - backup.sh: Automated backup with retention
  - health-check.sh: Service health monitoring
  - setup-ssl.sh: Let's Encrypt SSL automation

- Monitoring:
  - Prometheus configuration
  - Grafana dashboards (optional)
  - Structured logging

- Documentation:
  - DEPLOYMENT_GUIDE.md: Complete deployment instructions
  - Environment templates (.env.production)

Ready for commercial deployment!
2025-11-30 20:56:15 +01:00

# Document Translation API - Production Docker Compose
# Usage: docker compose up -d   (development overrides live in docker-compose.dev.yml)
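# Optional services are gated behind Compose profiles (see the `profiles:` keys below).
# A sketch of typical invocations, assuming the env template from the deployment docs
# has been copied to .env.production:
#   docker compose --env-file .env.production up -d
#   docker compose --profile with-ollama --profile with-cache up -d
#   docker compose --profile with-monitoring up -d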
version: '3.8'

services:
  # ===========================================
  # Backend API Service
  # ===========================================
  backend:
    build:
      context: .
      dockerfile: docker/backend/Dockerfile
    container_name: translate-backend
    restart: unless-stopped
    environment:
      - TRANSLATION_SERVICE=${TRANSLATION_SERVICE:-ollama}
      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://ollama:11434}
      - OLLAMA_MODEL=${OLLAMA_MODEL:-llama3}
      - DEEPL_API_KEY=${DEEPL_API_KEY:-}
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
      - MAX_FILE_SIZE_MB=${MAX_FILE_SIZE_MB:-50}
      - RATE_LIMIT_REQUESTS_PER_MINUTE=${RATE_LIMIT_REQUESTS_PER_MINUTE:-60}
      - RATE_LIMIT_TRANSLATIONS_PER_MINUTE=${RATE_LIMIT_TRANSLATIONS_PER_MINUTE:-10}
      - ADMIN_USERNAME=${ADMIN_USERNAME:-admin}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD:-changeme123}
      - CORS_ORIGINS=${CORS_ORIGINS:-*}
    volumes:
      - uploads_data:/app/uploads
      - outputs_data:/app/outputs
      - logs_data:/app/logs
    networks:
      - translate-network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
    deploy:
      resources:
        limits:
          memory: 2G
        reservations:
          memory: 512M
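
  # The defaults above are development-friendly placeholders. In production the values
  # are expected to come from the .env.production template mentioned in the deployment
  # docs; a minimal sketch (illustrative values, not the actual template):
  #   TRANSLATION_SERVICE=deepl
  #   DEEPL_API_KEY=<your-deepl-key>
  #   ADMIN_PASSWORD=<strong-password>
  #   CORS_ORIGINS=https://translate.example.com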

  # ===========================================
  # Frontend Web Service
  # ===========================================
  frontend:
    build:
      context: .
      dockerfile: docker/frontend/Dockerfile
      args:
        - NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL:-http://backend:8000}
    container_name: translate-frontend
    restart: unless-stopped
    environment:
      - NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL:-http://backend:8000}
    networks:
      - translate-network
    depends_on:
      backend:
        condition: service_healthy
    deploy:
      resources:
        limits:
          memory: 512M
        reservations:
          memory: 128M

  # ===========================================
  # Nginx Reverse Proxy
  # ===========================================
  nginx:
    image: nginx:alpine
    container_name: translate-nginx
    restart: unless-stopped
    ports:
      - "${HTTP_PORT:-80}:80"
      - "${HTTPS_PORT:-443}:443"
    volumes:
      - ./docker/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./docker/nginx/conf.d:/etc/nginx/conf.d:ro
      - ./docker/nginx/ssl:/etc/nginx/ssl:ro
      - nginx_cache:/var/cache/nginx
    networks:
      - translate-network
    depends_on:
      - frontend
      - backend
    healthcheck:
      test: ["CMD", "nginx", "-t"]
      interval: 30s
      timeout: 10s
      retries: 3
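
  # TLS certificates are expected under ./docker/nginx/ssl (mounted read-only above);
  # per the commit notes they are provisioned by setup-ssl.sh (Let's Encrypt automation).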

  # ===========================================
  # Ollama (Optional - Local LLM)
  # ===========================================
  ollama:
    image: ollama/ollama:latest
    container_name: translate-ollama
    restart: unless-stopped
    volumes:
      - ollama_data:/root/.ollama
    networks:
      - translate-network
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    profiles:
      - with-ollama
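
  # The GPU reservation above requires the NVIDIA Container Toolkit on the host; drop the
  # deploy.resources block to run on CPU only. The model is not baked into the image, so
  # it has to be pulled once after start, e.g. (assuming the default OLLAMA_MODEL):
  #   docker compose --profile with-ollama exec ollama ollama pull llama3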

  # ===========================================
  # Redis (Optional - For caching & sessions)
  # ===========================================
  redis:
    image: redis:7-alpine
    container_name: translate-redis
    restart: unless-stopped
    command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
    volumes:
      - redis_data:/data
    networks:
      - translate-network
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
    profiles:
      - with-cache
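
  # Note: nothing in this file points the backend at Redis. If the with-cache profile is
  # used, the backend needs its own connection setting (e.g. a redis://redis:6379/0 URL;
  # the exact variable name depends on the backend configuration, which is not shown here).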

  # ===========================================
  # Prometheus (Optional - Monitoring)
  # ===========================================
  prometheus:
    image: prom/prometheus:latest
    container_name: translate-prometheus
    restart: unless-stopped
    volumes:
      - ./docker/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.enable-lifecycle'
    networks:
      - translate-network
    profiles:
      - with-monitoring
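
  # The scrape configuration lives in the mounted ./docker/prometheus/prometheus.yml,
  # which is not part of this file. A minimal sketch, assuming the backend exposes a
  # Prometheus /metrics endpoint (not confirmed here):
  #   global:
  #     scrape_interval: 15s
  #   scrape_configs:
  #     - job_name: 'backend'
  #       static_configs:
  #         - targets: ['backend:8000']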

  # ===========================================
  # Grafana (Optional - Dashboards)
  # ===========================================
  grafana:
    image: grafana/grafana:latest
    container_name: translate-grafana
    restart: unless-stopped
    environment:
      - GF_SECURITY_ADMIN_USER=${GRAFANA_USER:-admin}
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD:-admin}
      - GF_USERS_ALLOW_SIGN_UP=false
    volumes:
      - grafana_data:/var/lib/grafana
      - ./docker/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
    networks:
      - translate-network
    depends_on:
      - prometheus
    profiles:
      - with-monitoring
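
  # Only dashboards are mounted above; Grafana provisioning also expects a provider file
  # next to the dashboard JSONs, plus a datasource pointing at Prometheus. A sketch of the
  # provider file (assumed name dashboards.yml inside ./docker/grafana/dashboards):
  #   apiVersion: 1
  #   providers:
  #     - name: 'default'
  #       type: file
  #       options:
  #         path: /etc/grafana/provisioning/dashboards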

# ===========================================
# Networks
# ===========================================
networks:
  translate-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.28.0.0/16

# ===========================================
# Volumes
# ===========================================
volumes:
  uploads_data:
    driver: local
  outputs_data:
    driver: local
  logs_data:
    driver: local
  nginx_cache:
    driver: local
  ollama_data:
    driver: local
  redis_data:
    driver: local
  prometheus_data:
    driver: local
  grafana_data:
    driver: local
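
# The named volumes above hold all persistent state (uploads, outputs, logs, model and
# monitoring data). backup.sh from the commit notes presumably snapshots them; a generic
# sketch of the standard approach for one volume (volume names are prefixed with the
# Compose project name, assumed here to be office_translator):
#   docker run --rm \
#     -v office_translator_uploads_data:/data:ro \
#     -v "$(pwd)/backups:/backup" \
#     alpine tar czf /backup/uploads_data.tar.gz -C /data .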