# prvng_platform/docker-compose/docker-compose.enterprise.yaml
# NOTE(review): `version` is obsolete and ignored by Compose v2 (it emits a
# warning); kept for compatibility with legacy docker-compose v1 tooling.
version: '3.8'
# Enterprise Mode - Full production stack with monitoring, KMS, and audit logging
# Usage: docker-compose -f docker-compose.yaml -f docker-compose/docker-compose.multi-user.yaml -f docker-compose/docker-compose.cicd.yaml -f docker-compose/docker-compose.enterprise.yaml up
services:
  # Enterprise-mode overrides for the base orchestrator service:
  # enables audit logging and optional KMS-backed secret management.
  orchestrator:
    environment:
      - PROVISIONING_MODE=enterprise
      - ORCHESTRATOR_AUDIT_ENABLED=true
      # KMS integration is opt-in; defaults to disabled when KMS_ENABLED is unset.
      - ORCHESTRATOR_KMS_ENABLED=${KMS_ENABLED:-false}
      - ORCHESTRATOR_KMS_SERVER=${KMS_SERVER}
    deploy:
      resources:
        limits:
          cpus: '8.0'
          memory: 8192M
    # Cap local json-file logs at 5 x 10 MiB per container.
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "5"
  # Enterprise-mode overrides for the base control-center service.
  control-center:
    environment:
      - PROVISIONING_MODE=enterprise
      - CONTROL_CENTER_AUDIT_ENABLED=true
      # KMS integration is opt-in; defaults to disabled when KMS_ENABLED is unset.
      - CONTROL_CENTER_KMS_ENABLED=${KMS_ENABLED:-false}
  # Use Harbor instead of Zot in enterprise mode: park the base stack's
  # registry under a profile that is never activated, so it does not start.
  oci-registry:
    profiles:
      - disabled
harbor-core:
profiles:
- enterprise
image: goharbor/harbor-core:v2.10.0
container_name: provisioning-harbor-core
environment:
- CORE_SECRET=${HARBOR_CORE_SECRET}
- JOBSERVICE_SECRET=${HARBOR_JOBSERVICE_SECRET}
- POSTGRESQL_HOST=postgres
- POSTGRESQL_PORT=5432
- POSTGRESQL_USERNAME=${POSTGRES_USER}
- POSTGRESQL_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRESQL_DATABASE=harbor
- REGISTRY_URL=http://harbor-registry:5000
- TOKEN_SERVICE_URL=http://harbor-core:8080/service/token
volumes:
- harbor-config:/etc/harbor
- harbor-data:/storage
depends_on:
postgres:
condition: service_healthy
networks:
- provisioning-net-backend
deploy:
resources:
limits:
cpus: '${HARBOR_CORE_CPU_LIMIT:-2000m}'
memory: ${HARBOR_CORE_MEMORY_LIMIT:-2048M}
harbor-registry:
profiles:
- enterprise
image: goharbor/registry-photon:v2.10.0
container_name: provisioning-harbor-registry
volumes:
- harbor-registry-data:/storage
networks:
- provisioning-net-backend
harbor-jobservice:
profiles:
- enterprise
image: goharbor/harbor-jobservice:v2.10.0
container_name: provisioning-harbor-jobservice
environment:
- CORE_SECRET=${HARBOR_CORE_SECRET}
- JOBSERVICE_SECRET=${HARBOR_JOBSERVICE_SECRET}
networks:
- provisioning-net-backend
# Cosmian KMS for secret management
kms:
profiles:
- enterprise
image: ghcr.io/cosmian/kms:latest
container_name: provisioning-kms
ports:
- "9998:9998"
volumes:
- kms-data:/data
- ./kms/config:/etc/kms:ro
environment:
- KMS_DATABASE_PATH=/data/kms.db
- KMS_AUTH_METHOD=${KMS_AUTH_METHOD:-certificate}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9998/health"]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
networks:
- provisioning-net-backend
deploy:
resources:
limits:
cpus: '${KMS_CPU_LIMIT:-1000m}'
memory: ${KMS_MEMORY_LIMIT:-1024M}
# Prometheus for metrics
prometheus:
profiles:
- enterprise
image: prom/prometheus:latest
container_name: provisioning-prometheus
ports:
- "${PROMETHEUS_PORT:-9090}:9090"
volumes:
- ../monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
- ../monitoring/prometheus/rules:/etc/prometheus/rules:ro
- prometheus-data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--storage.tsdb.retention.time=${PROMETHEUS_RETENTION_TIME:-15d}'
- '--web.console.libraries=/usr/share/prometheus/console_libraries'
- '--web.console.templates=/usr/share/prometheus/consoles'
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9090/-/healthy"]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
networks:
- provisioning-net-backend
deploy:
resources:
limits:
cpus: '${PROMETHEUS_CPU_LIMIT:-2000m}'
memory: ${PROMETHEUS_MEMORY_LIMIT:-2048M}
# Grafana for visualization
grafana:
profiles:
- enterprise
image: grafana/grafana:latest
container_name: provisioning-grafana
ports:
- "${GRAFANA_PORT:-3001}:3000"
volumes:
- ../monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
- ../monitoring/grafana/datasources:/etc/grafana/provisioning/datasources:ro
- grafana-data:/var/lib/grafana
environment:
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
- GF_INSTALL_PLUGINS=
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
depends_on:
prometheus:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/api/health"]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
networks:
- provisioning-net-backend
- provisioning-net-frontend
deploy:
resources:
limits:
cpus: '${GRAFANA_CPU_LIMIT:-500m}'
memory: ${GRAFANA_MEMORY_LIMIT:-512M}
# Loki for log aggregation
loki:
profiles:
- enterprise
image: grafana/loki:latest
container_name: provisioning-loki
ports:
- "${LOKI_PORT:-3100}:3100"
volumes:
- ../monitoring/loki/loki-config.yml:/etc/loki/local-config.yaml:ro
- loki-data:/loki
command: -config.file=/etc/loki/local-config.yaml
restart: unless-stopped
networks:
- provisioning-net-backend
deploy:
resources:
limits:
cpus: '${LOKI_CPU_LIMIT:-1000m}'
memory: ${LOKI_MEMORY_LIMIT:-1024M}
  # Promtail for log shipping: tails host and container logs and pushes
  # them to loki over the backend network.
  promtail:
    profiles:
      - enterprise
    image: grafana/promtail:latest
    container_name: provisioning-promtail
    volumes:
      - ../monitoring/promtail/promtail-config.yml:/etc/promtail/config.yml:ro
      # Host log sources, mounted read-only.
      - /var/log:/var/log:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
    command: -config.file=/etc/promtail/config.yml
    # Short-form depends_on (start-order only): loki defines no healthcheck,
    # so condition: service_healthy is not available here.
    depends_on:
      - loki
    restart: unless-stopped
    networks:
      - provisioning-net-backend
# Elasticsearch for audit logs
elasticsearch:
profiles:
- enterprise
image: docker.elastic.co/elasticsearch/elasticsearch:8.11.0
container_name: provisioning-elasticsearch
ports:
- "${ELASTICSEARCH_PORT:-9200}:9200"
environment:
- discovery.type=single-node
- cluster.name=${ELASTICSEARCH_CLUSTER_NAME:-provisioning-logs}
- ES_JAVA_OPTS=-Xms${ELASTICSEARCH_HEAP_SIZE:-1g} -Xmx${ELASTICSEARCH_HEAP_SIZE:-1g}
- xpack.security.enabled=false
volumes:
- elasticsearch-data:/usr/share/elasticsearch/data
healthcheck:
test: ["CMD-SHELL", "curl -s http://localhost:9200/_cluster/health | grep -vq '\"status\":\"red\"'"]
interval: 30s
timeout: 10s
retries: 5
restart: unless-stopped
networks:
- provisioning-net-backend
deploy:
resources:
limits:
cpus: '${ELASTICSEARCH_CPU_LIMIT:-2000m}'
memory: ${ELASTICSEARCH_MEMORY_LIMIT:-2048M}
# Kibana for log analysis
kibana:
profiles:
- enterprise
image: docker.elastic.co/kibana/kibana:8.11.0
container_name: provisioning-kibana
ports:
- "${KIBANA_PORT:-5601}:5601"
environment:
- ELASTICSEARCH_HOSTS=http://elasticsearch:9200
depends_on:
elasticsearch:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status | grep -q '\"level\":\"available\"'"]
interval: 30s
timeout: 10s
retries: 5
restart: unless-stopped
networks:
- provisioning-net-backend
- provisioning-net-frontend
deploy:
resources:
limits:
cpus: '${KIBANA_CPU_LIMIT:-1000m}'
memory: ${KIBANA_MEMORY_LIMIT:-1024M}
# Nginx reverse proxy
nginx:
profiles:
- enterprise
image: nginx:alpine
container_name: provisioning-nginx
ports:
- "${NGINX_HTTP_PORT:-80}:80"
- "${NGINX_HTTPS_PORT:-443}:443"
volumes:
- ../nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- ../nginx/conf.d:/etc/nginx/conf.d:ro
- ../nginx/ssl:/etc/nginx/ssl:ro
- nginx-logs:/var/log/nginx
depends_on:
- orchestrator
- control-center
- api-server
- grafana
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:80/health"]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
networks:
- provisioning-net-frontend
deploy:
resources:
limits:
cpus: '${NGINX_CPU_LIMIT:-500m}'
memory: ${NGINX_MEMORY_LIMIT:-256M}
# Named volumes for persistent state. `driver: local` is the default but is
# made explicit for each volume.
volumes:
  harbor-config:
    driver: local
  harbor-data:
    driver: local
  harbor-registry-data:
    driver: local
  kms-data:
    driver: local
  prometheus-data:
    driver: local
  grafana-data:
    driver: local
  loki-data:
    driver: local
  elasticsearch-data:
    driver: local
  nginx-logs:
    driver: local