# meldestelle/docker-compose.yml
# StefanMoCoAt 1ecac43d72 (vision) SCS/DDD
# Service Discovery einführen:
#   - Consul als Service-Registry implementieren
#   - Services für automatische Registrierung konfigurieren
#   - Dynamisches Service-Routing im API-Gateway einrichten
#   - Health-Checks für jeden Service implementieren
# 2025-07-21 23:54:13 +02:00
#
# 324 lines
# 9.2 KiB
# YAML
---
services:
  # API gateway: single public entry point; talks to 'db' and 'redis' via
  # environment-injected hostnames and waits for both to be healthy.
  api-gateway:
    build:
      context: .  # Build with Dockerfile in root
    image: meldestelle/api-gateway:latest
    container_name: meldestelle-api-gateway
    restart: unless-stopped
    ports:
      - "8080:8081"  # host 8080 -> container 8081
    environment:
      # Database credentials come from .env (shared with the 'db' service)
      - DB_USER=${POSTGRES_USER}
      - DB_PASSWORD=${POSTGRES_PASSWORD}
      - DB_NAME=${POSTGRES_DB}
      - DB_HOST=db
      - DB_PORT=5432
      - REDIS_HOST=redis
      - REDIS_PORT=6379
      - JAVA_OPTS=-Xms512m -Xmx1024m
    depends_on:
      # Wait for healthchecks, not just container start
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      meldestelle-net:
        aliases:
          - server  # additional DNS name for this container on the network
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 1536M
        reservations:
          cpus: '0.5'
          memory: 512M
    # Healthcheck is now defined in Dockerfile
# Redis for caching
redis:
image: redis:7-alpine
container_name: meldestelle-redis
restart: unless-stopped
command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
volumes:
- redis_data:/data
ports:
- "127.0.0.1:6379:6379"
networks:
- meldestelle-net
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 3
start_period: 10s
deploy:
resources:
limits:
cpus: '1'
memory: 384M
reservations:
cpus: '0.2'
memory: 128M
# PostgreSQL Datenbank (Service-Name 'db')
db:
image: postgres:16-alpine # Spezifische Version
container_name: meldestelle-db
restart: unless-stopped
environment:
# Liest Werte aus .env
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_DB}
# PostgreSQL performance tuning
POSTGRES_INITDB_ARGS: "--data-checksums"
POSTGRES_INITDB_WALDIR: "/var/lib/postgresql/wal"
# PostgreSQL configuration
POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-256MB}
POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-768MB}
POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-16MB}
POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}
POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100}
# PGDATA nicht nötig, Standard verwenden
volumes:
# Benanntes Volume für Daten auf Standardpfad
- postgres_data:/var/lib/postgresql/data
- postgres_wal:/var/lib/postgresql/wal
# Add custom PostgreSQL configuration
- ./config/postgres/postgresql.conf:/etc/postgresql/postgresql.conf:ro
command: ["postgres", "-c", "config_file=/etc/postgresql/postgresql.conf"]
networks:
- meldestelle-net # <--- Muss zum Netzwerk-Namen passen
healthcheck: # Wichtig für depends_on
test: [ "CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}" ] # Doppelte $$ beachten!
interval: 10s
timeout: 5s
retries: 5
start_period: 20s
ports: # Nur bei Bedarf freigeben, z.B. für lokalen Zugriff
- "127.0.0.1:54321:5432" # Host-Port 54321 → Container-Port 5432
deploy:
resources:
limits:
cpus: '2'
memory: 1024M
reservations:
cpus: '0.5'
memory: 256M
# PgAdmin Service
pgadmin:
image: dpage/pgadmin4:latest # Oder spezifische Version
container_name: meldestelle-pgadmin
restart: unless-stopped
environment:
# Werte aus .env lesen (oder Defaults nutzen)
PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL:-admin@example.com}
PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD:-admin_password_change_me} # UNBEDINGT IN .env SETZEN!
PGADMIN_CONFIG_SERVER_MODE: 'False'
volumes:
- pgadmin_data:/var/lib/pgadmin # Benanntes Volume
ports:
# Port 5050 auf dem Host (nur localhost) → Port 80 im Container
- "${PGADMIN_PORT:-127.0.0.1:5050}:80"
networks:
- meldestelle-net # <--- Muss zum Netzwerk-Namen passen
depends_on: # PgAdmin braucht die DB
- db
# Prometheus Service
prometheus:
image: prom/prometheus:latest
container_name: meldestelle-prometheus
restart: unless-stopped
volumes:
- ./config/monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--web.enable-lifecycle'
ports:
- "9090:9090"
networks:
- meldestelle-net
depends_on:
- api-gateway
# Grafana Service
grafana:
image: grafana/grafana:latest
container_name: meldestelle-grafana
restart: unless-stopped
volumes:
- ./config/monitoring/grafana/provisioning:/etc/grafana/provisioning
- ./config/monitoring/grafana/dashboards:/var/lib/grafana/dashboards
- grafana_data:/var/lib/grafana
environment:
- GF_SECURITY_ADMIN_USER=${GRAFANA_ADMIN_USER:-admin}
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin}
- GF_USERS_ALLOW_SIGN_UP=false
ports:
- "3000:3000"
networks:
- meldestelle-net
depends_on:
- prometheus
# Alertmanager Service
alertmanager:
image: prom/alertmanager:latest
container_name: meldestelle-alertmanager
restart: unless-stopped
volumes:
- ./config/monitoring/alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml
- alertmanager_data:/alertmanager
command:
- '--config.file=/etc/alertmanager/alertmanager.yml'
- '--storage.path=/alertmanager'
ports:
- "9093:9093"
networks:
- meldestelle-net
depends_on:
- prometheus
# Elasticsearch Service
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:8.12.2
container_name: meldestelle-elasticsearch
restart: unless-stopped
environment:
- discovery.type=single-node
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- xpack.security.enabled=false
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- elasticsearch_data:/usr/share/elasticsearch/data
- ./config/monitoring/elk/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
ports:
- "9200:9200"
networks:
- meldestelle-net
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9200"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
deploy:
resources:
limits:
cpus: '2'
memory: 1024M
reservations:
cpus: '0.5'
memory: 512M
# Logstash Service
logstash:
image: docker.elastic.co/logstash/logstash:8.12.2
container_name: meldestelle-logstash
restart: unless-stopped
volumes:
- ./config/monitoring/elk/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro
ports:
- "5044:5044"
- "5000:5000/tcp"
- "5000:5000/udp"
- "9600:9600"
environment:
LS_JAVA_OPTS: "-Xmx256m -Xms256m"
networks:
- meldestelle-net
depends_on:
elasticsearch:
condition: service_healthy
deploy:
resources:
limits:
cpus: '1'
memory: 512M
reservations:
cpus: '0.2'
memory: 256M
# Kibana Service
kibana:
image: docker.elastic.co/kibana/kibana:8.12.2
container_name: meldestelle-kibana
restart: unless-stopped
ports:
- "5601:5601"
environment:
ELASTICSEARCH_URL: http://elasticsearch:9200
ELASTICSEARCH_HOSTS: http://elasticsearch:9200
networks:
- meldestelle-net
depends_on:
elasticsearch:
condition: service_healthy
deploy:
resources:
limits:
cpus: '1'
memory: 512M
reservations:
cpus: '0.2'
memory: 256M
# Consul Service for Service Discovery
consul:
image: consul:1.15
container_name: meldestelle-consul
restart: unless-stopped
ports:
- "8500:8500" # HTTP UI and API
- "8600:8600/udp" # DNS interface
volumes:
- consul_data:/consul/data
environment:
- CONSUL_BIND_INTERFACE=eth0
- CONSUL_CLIENT_INTERFACE=eth0
command: "agent -server -ui -bootstrap-expect=1 -client=0.0.0.0"
networks:
- meldestelle-net
healthcheck:
test: ["CMD", "consul", "members"]
interval: 10s
timeout: 5s
retries: 3
start_period: 10s
deploy:
resources:
limits:
cpus: '1'
memory: 512M
reservations:
cpus: '0.2'
memory: 128M
# Shared bridge network; every service above attaches to it
networks:
  meldestelle-net:
    driver: bridge
# Named volumes ('local' is the Docker default driver, so the explicit
# 'driver: local' entries below are equivalent to the bare declarations)
volumes:
  postgres_data:  # consistent name, used by 'db'
  postgres_wal:  # PostgreSQL WAL files
    driver: local
  pgadmin_data:  # consistent name, used by 'pgadmin'
  prometheus_data:  # Prometheus data
  grafana_data:  # Grafana data
  alertmanager_data:  # Alertmanager data
  elasticsearch_data:  # Elasticsearch data
  redis_data:  # Redis data
    driver: local
  consul_data:  # Consul data
    driver: local