Docker-Compose Dateien vereinfachen und redundante entfernen

This commit is contained in:
stefan 2025-09-02 15:08:28 +02:00
parent 4fcb4bb48c
commit abd2543caf
9 changed files with 314 additions and 2014 deletions

279
Makefile
View File

@@ -8,9 +8,23 @@
.PHONY: clients-up clients-down clients-restart clients-logs
.PHONY: prod-up prod-down prod-restart prod-logs
.PHONY: infrastructure-up infrastructure-down infrastructure-logs
.PHONY: dev-tools-up dev-tools-down status health-check
.PHONY: dev-tools-up dev-tools-down status health-check logs shell env-template dev-info clean-all build-service build-client
.ONESHELL:
# Choose docker compose CLI (prefers new plugin)
DOCKER_COMPOSE_PLUGIN := $(shell docker compose version >/dev/null 2>&1 && echo 1 || echo 0)
DOCKER_COMPOSE_LEGACY := $(shell command -v docker-compose >/dev/null 2>&1 && echo 1 || echo 0)
ifeq ($(DOCKER_COMPOSE_PLUGIN),1)
COMPOSE = docker compose
else ifeq ($(DOCKER_COMPOSE_LEGACY),1)
COMPOSE = docker-compose
else
COMPOSE = docker compose
endif
# Default target
.DEFAULT_GOAL := help
help: ## Show this help message
@echo "Meldestelle Docker Development Commands"
@echo "======================================"
@@ -20,163 +34,124 @@ help: ## Show this help message
# Development Workflow Commands
# ===================================================================
dev-up: ## Start full development environment (infrastructure + services + clients)
@echo "🚀 Starting full development environment..."
docker-compose \
-f docker-compose.yml \
-f docker-compose.services.yml \
-f docker-compose.clients.yml \
up -d
dev-up: ## Start development environment (single compose)
@echo "🚀 Starting development environment..."
$(COMPOSE) -f docker-compose.yml up -d
@$(MAKE) dev-info
dev-down: ## Stop full development environment
@echo "🛑 Stopping full development environment..."
docker-compose \
-f docker-compose.yml \
-f docker-compose.services.yml \
-f docker-compose.clients.yml \
down
dev-down: ## Stop development environment
@echo "🛑 Stopping development environment..."
$(COMPOSE) -f docker-compose.yml down
dev-restart: ## Restart full development environment
@$(MAKE) dev-down
@$(MAKE) dev-up
dev-logs: ## Show logs for all development services
docker-compose \
-f docker-compose.yml \
-f docker-compose.services.yml \
-f docker-compose.clients.yml \
logs -f
$(COMPOSE) -f docker-compose.yml logs -f
# ===================================================================
# Layer-specific Commands
# ===================================================================
infrastructure-up: ## Start only infrastructure services (postgres, redis, etc.)
infrastructure-up: ## Start only infrastructure services (postgres, redis, keycloak, consul)
@echo "🏗️ Starting infrastructure services..."
docker-compose -f docker-compose.yml up -d
$(COMPOSE) -f docker-compose.yml up -d
@echo "✅ Infrastructure services started"
@echo "📊 Grafana: http://localhost:3000 (admin/admin)"
@echo "🔍 Prometheus: http://localhost:9090"
@echo "🗄️ PostgreSQL: localhost:5432"
@echo "🔴 Redis: localhost:6379"
@echo "🗄️ PostgreSQL: localhost:5432"
@echo "🔴 Redis: localhost:6379"
@echo "🔐 Keycloak: http://localhost:8180"
@echo "🧭 Consul: http://localhost:8500"
infrastructure-down: ## Stop infrastructure services
docker-compose -f docker-compose.yml down
$(COMPOSE) -f docker-compose.yml down
infrastructure-logs: ## Show infrastructure logs
docker-compose -f docker-compose.yml logs -f
$(COMPOSE) -f docker-compose.yml logs -f
services-up: ## Start application services (requires infrastructure)
@echo "⚙️ Starting application services..."
docker-compose \
-f docker-compose.yml \
-f docker-compose.services.yml \
up -d
@echo "✅ Application services started"
@echo "🚪 API Gateway: http://localhost:8080"
@echo "🔐 Auth Server: http://localhost:8081"
@echo "📊 Monitoring Server: http://localhost:8083"
@echo "🏓 Ping Service: http://localhost:8082"
services-up: ## Start application services (simplified: base compose only)
@echo "⚙️ Starting services (simplified setup using docker-compose.yml only)..."
$(COMPOSE) -f docker-compose.yml up -d
@echo "✅ Services started (based on docker-compose.yml)"
services-down: ## Stop application services
docker-compose -f docker-compose.services.yml down
services-down: ## Stop application services (simplified)
$(COMPOSE) -f docker-compose.yml down
services-restart: ## Restart application services
@$(MAKE) services-down
@$(MAKE) services-up
services-logs: ## Show application services logs
docker-compose -f docker-compose.services.yml logs -f
services-logs: ## Show application services logs (simplified)
$(COMPOSE) -f docker-compose.yml logs -f
clients-up: ## Start client applications (requires services)
@echo "💻 Starting client applications..."
docker-compose \
-f docker-compose.yml \
-f docker-compose.services.yml \
-f docker-compose.clients.yml \
up -d
@echo "✅ Client applications started"
@echo "🌐 Web App: http://localhost:3001"
clients-up: ## Start client applications (simplified using base compose)
@echo "💻 Starting client applications (simplified)..."
$(COMPOSE) -f docker-compose.yml up -d
@echo "✅ Client applications started (docker-compose.yml)"
clients-down: ## Stop client applications
docker-compose -f docker-compose.clients.yml down
clients-down: ## Stop client applications (simplified)
$(COMPOSE) -f docker-compose.yml down
clients-restart: ## Restart client applications
@$(MAKE) clients-down
@$(MAKE) clients-up
clients-logs: ## Show client application logs
docker-compose -f docker-compose.clients.yml logs -f
clients-logs: ## Show client application logs (simplified)
$(COMPOSE) -f docker-compose.yml logs -f
# ===================================================================
# Production Commands
# ===================================================================
prod-up: ## Start production environment
@echo "🚀 Starting production environment..."
prod-up: ## Start production environment (simplified)
@echo "🚀 Starting production environment (simplified)..."
@echo "⚠️ Make sure environment variables are properly set!"
docker-compose \
-f docker-compose.prod.yml \
-f docker-compose.services.yml \
up -d
@echo "✅ Production environment started"
$(COMPOSE) -f docker-compose.yml up -d
@echo "✅ Production environment started (docker-compose.yml)"
prod-down: ## Stop production environment
docker-compose \
-f docker-compose.prod.yml \
-f docker-compose.services.yml \
down
prod-down: ## Stop production environment (simplified)
$(COMPOSE) -f docker-compose.yml down
prod-restart: ## Restart production environment
@$(MAKE) prod-down
@$(MAKE) prod-up
prod-logs: ## Show production logs
docker-compose \
-f docker-compose.prod.yml \
-f docker-compose.services.yml \
logs -f
prod-logs: ## Show production logs (simplified)
$(COMPOSE) -f docker-compose.yml logs -f
# ===================================================================
# Development Tools
# ===================================================================
dev-tools-up: ## Start development tools (pgAdmin, Redis Commander)
@echo "🔧 Starting development tools..."
docker-compose --profile dev-tools up -d pgadmin redis-commander
@echo "✅ Development tools started"
@echo "🐘 pgAdmin: http://localhost:5050 (admin@meldestelle.dev/admin)"
@echo "🔴 Redis Commander: http://localhost:8081"
dev-tools-up: ## Info: development tool containers were removed (use local tools instead)
@echo " Development tool containers are not part of the simplified setup."
@echo "Use your local tools instead (e.g., pgAdmin, TablePlus, DBeaver, RedisInsight)."
@echo "Connection hints:"
@echo " PostgreSQL: localhost:5432 (user/password per .env or defaults)"
@echo " Redis: localhost:6379"
@echo " Consul: http://localhost:8500"
@echo " Keycloak: http://localhost:8180"
dev-tools-down: ## Stop development tools
docker-compose --profile dev-tools down pgadmin redis-commander
dev-tools-down: ## Info: nothing to stop for dev tools in simplified setup
@echo " No dev-tool containers to stop in the simplified setup."
# ===================================================================
# Build and Maintenance Commands
# ===================================================================
build: ## Build all custom Docker images
@echo "🔨 Building all custom Docker images..."
docker-compose \
-f docker-compose.yml \
-f docker-compose.services.yml \
-f docker-compose.clients.yml \
build --no-cache
build: ## Build all custom Docker images (simplified)
@echo "🔨 Building all custom Docker images (using docker-compose.yml)..."
$(COMPOSE) -f docker-compose.yml build --no-cache
build-service: ## Build specific service (usage: make build-service SERVICE=auth-server)
@test -n "$(SERVICE)" || (echo "❌ SERVICE parameter required. Usage: make build-service SERVICE=auth-server"; exit 1)
@echo "🔨 Building $(SERVICE)..."
docker-compose \
-f docker-compose.services.yml \
build --no-cache $(SERVICE)
$(COMPOSE) -f docker-compose.yml build --no-cache $(SERVICE)
build-client: ## Build specific client (usage: make build-client CLIENT=web-app)
@test -n "$(CLIENT)" || (echo "❌ CLIENT parameter required. Usage: make build-client CLIENT=web-app"; exit 1)
@echo "🔨 Building $(CLIENT)..."
docker-compose \
-f docker-compose.clients.yml \
build --no-cache $(CLIENT)
$(COMPOSE) -f docker-compose.yml build --no-cache $(CLIENT)
clean: ## Clean up Docker resources
@echo "🧹 Cleaning up Docker resources..."
@@ -196,28 +171,32 @@ clean-all: ## Clean up all Docker resources (including images)
status: ## Show status of all containers
@echo "📊 Container Status:"
docker-compose \
-f docker-compose.yml \
-f docker-compose.services.yml \
-f docker-compose.clients.yml \
ps
$(COMPOSE) -f docker-compose.yml ps
health-check: ## Check health of all services
health-check: ## Check health of core infrastructure services
@echo "🏥 Health Check Results:"
@echo "========================"
@curl -s http://localhost:8080/actuator/health | jq -r '"API Gateway: " + .status' || echo "API Gateway: ❌ Not accessible"
@curl -s http://localhost:8081/actuator/health | jq -r '"Auth Server: " + .status' || echo "Auth Server: ❌ Not accessible"
@curl -s http://localhost:8082/actuator/health | jq -r '"Ping Service: " + .status' || echo "Ping Service: ❌ Not accessible"
@curl -s http://localhost:8083/actuator/health | jq -r '"Monitoring Server: " + .status' || echo "Monitoring Server: ❌ Not accessible"
@curl -s http://localhost:3001/health | grep -q healthy && echo "Web App: UP" || echo "Web App: ❌ Not accessible"
@$(COMPOSE) ps
@echo "-- Postgres --"
@$(COMPOSE) exec -T postgres pg_isready -U meldestelle -d meldestelle >/dev/null \
&& echo "PostgreSQL: ✅ Ready" || echo "PostgreSQL: ❌ Not ready"
@echo "-- Redis --"
@$(COMPOSE) exec -T redis redis-cli ping | grep -q PONG \
&& echo "Redis: ✅ PONG" || echo "Redis: ❌ Not responding"
@echo "-- Consul --"
@curl -sf http://localhost:8500/v1/status/leader >/dev/null \
&& echo "Consul: ✅ Leader elected" || echo "Consul: ❌ Not accessible"
@echo "-- Keycloak --"
@curl -sf http://localhost:8180/health/ready >/dev/null \
&& echo "Keycloak: ✅ Ready" || echo "Keycloak: ❌ Not accessible"
logs: ## Show logs for specific service (usage: make logs SERVICE=auth-server)
@test -n "$(SERVICE)" || (echo "❌ SERVICE parameter required. Usage: make logs SERVICE=auth-server"; exit 1)
docker-compose logs -f $(SERVICE)
logs: ## Show logs for specific service (usage: make logs SERVICE=postgres)
@test -n "$(SERVICE)" || (echo "❌ SERVICE parameter required. Usage: make logs SERVICE=postgres"; exit 1)
$(COMPOSE) logs -f $(SERVICE)
shell: ## Open shell in specific container (usage: make shell SERVICE=auth-server)
@test -n "$(SERVICE)" || (echo "❌ SERVICE parameter required. Usage: make shell SERVICE=auth-server"; exit 1)
docker-compose exec $(SERVICE) sh
shell: ## Open shell in specific container (usage: make shell SERVICE=postgres)
@test -n "$(SERVICE)" || (echo "❌ SERVICE parameter required. Usage: make shell SERVICE=postgres"; exit 1)
$(COMPOSE) exec $(SERVICE) sh
# ===================================================================
# Testing Commands
@@ -250,64 +229,54 @@ dev-info: ## Show development environment information
@echo "🚀 Meldestelle Development Environment Ready!"
@echo "============================================="
@echo ""
@echo "📊 Monitoring & Management:"
@echo " Grafana: http://localhost:3000 (admin/admin)"
@echo " Prometheus: http://localhost:9090"
@echo "🧭 Service Discovery:"
@echo " Consul: http://localhost:8500"
@echo ""
@echo "🔧 Application Services:"
@echo " API Gateway: http://localhost:8080"
@echo " Auth Server: http://localhost:8081"
@echo " Monitoring: http://localhost:8083"
@echo " Ping Service: http://localhost:8082"
@echo ""
@echo "💻 Client Applications:"
@echo " Web App: http://localhost:3001"
@echo "🔐 Authentication:"
@echo " Keycloak: http://localhost:8180 (admin/admin by default)"
@echo ""
@echo "🗄️ Infrastructure:"
@echo " PostgreSQL: localhost:5432 (meldestelle/meldestelle)"
@echo " PostgreSQL: localhost:5432 (default user: meldestelle)"
@echo " Redis: localhost:6379"
@echo " Keycloak: http://localhost:8180"
@echo ""
@echo "🔧 Development Tools (optional):"
@echo " make dev-tools-up to start pgAdmin & Redis Commander"
@echo " Tips: Use 'make health-check' to verify services, and 'make logs SERVICE=postgres' for logs."
@echo ""
env-template: ## Create .env template file
@echo "📝 Creating .env template..."
@cat > .env.template << 'EOF'
# ===================================================================
# Meldestelle Environment Variables Template
# Copy to .env and customize for your environment
# ===================================================================
@cat > .env.template <<-'EOF'
# ===================================================================
# Meldestelle Environment Variables Template
# Copy to .env and customize for your environment
# ===================================================================
# Database Configuration
POSTGRES_USER=meldestelle
POSTGRES_PASSWORD=meldestelle
POSTGRES_DB=meldestelle
# Database Configuration
POSTGRES_USER=meldestelle
POSTGRES_PASSWORD=meldestelle
POSTGRES_DB=meldestelle
# Redis Configuration
REDIS_PASSWORD=
# Redis Configuration
REDIS_PASSWORD=
# Keycloak Configuration
KEYCLOAK_ADMIN=admin
KEYCLOAK_ADMIN_PASSWORD=admin
KC_DB=postgres
KC_DB_URL=jdbc:postgresql://postgres:5432/keycloak
KC_DB_USERNAME=meldestelle
KC_DB_PASSWORD=meldestelle
# Keycloak Configuration
KEYCLOAK_ADMIN=admin
KEYCLOAK_ADMIN_PASSWORD=admin
KC_DB=postgres
KC_DB_URL=jdbc:postgresql://postgres:5432/keycloak
KC_DB_USERNAME=meldestelle
KC_DB_PASSWORD=meldestelle
# JWT Configuration
JWT_SECRET=meldestelle-auth-secret-key-change-in-production
JWT_EXPIRATION=86400
# JWT Configuration
JWT_SECRET=meldestelle-auth-secret-key-change-in-production
JWT_EXPIRATION=86400
# Monitoring Configuration
GF_SECURITY_ADMIN_USER=admin
GF_SECURITY_ADMIN_PASSWORD=admin
# Monitoring Configuration
GF_SECURITY_ADMIN_USER=admin
GF_SECURITY_ADMIN_PASSWORD=admin
# Production URLs (for production environment)
KC_HOSTNAME=auth.meldestelle.at
GRAFANA_HOSTNAME=monitor.meldestelle.at
PROMETHEUS_HOSTNAME=metrics.meldestelle.at
EOF
# Production URLs (for production environment)
KC_HOSTNAME=auth.meldestelle.at
GRAFANA_HOSTNAME=monitor.meldestelle.at
PROMETHEUS_HOSTNAME=metrics.meldestelle.at
EOF
@echo "✅ .env.template created"

View File

@@ -31,6 +31,9 @@ subprojects {
// Dedicated performance test task per JVM subproject
plugins.withId("java") {
val javaExt = extensions.getByType<JavaPluginExtension>()
// Ensure a full JDK toolchain with compiler is available (Gradle will auto-download if missing)
javaExt.toolchain.languageVersion.set(JavaLanguageVersion.of(21))
tasks.register<Test>("perfTest") {
description = "Runs tests tagged with 'perf'"
group = "verification"

View File

@@ -1,97 +0,0 @@
# ===================================================================
# Docker Compose - Client Applications
# Meldestelle Project - Client Layer Configuration
# ===================================================================
# Usage:
# Development: docker-compose -f docker-compose.yml -f docker-compose.services.yml -f docker-compose.clients.yml up
# Production: docker-compose -f docker-compose.prod.yml -f docker-compose.services.yml -f docker-compose.clients.yml up
# Clients only: docker-compose -f docker-compose.clients.yml up
# ===================================================================
#version: '3.8'
services:
# ===================================================================
# Web Application (Kotlin Multiplatform Client)
# ===================================================================
web-app:
build:
context: .
dockerfile: dockerfiles/clients/web-app/Dockerfile
args:
GRADLE_VERSION: 8.14
JAVA_VERSION: 21
NGINX_VERSION: alpine
image: meldestelle/web-app:latest
container_name: meldestelle-web-app
ports:
- "3001:80"
depends_on:
- api-gateway
environment:
# Nginx Configuration
- NGINX_HOST=localhost
- NGINX_PORT=80
# Backend API Configuration
- API_BASE_URL=http://api-gateway:8080
- AUTH_SERVER_URL=http://auth-server:8081
# Application Configuration
- APP_NAME=Meldestelle Web App
- APP_VERSION=1.0.0
- NODE_ENV=production
networks:
- meldestelle-network
volumes:
# Nginx logs
- web-app-logs:/var/log/nginx
# Static assets cache (optional)
- web-app-cache:/var/cache/nginx
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/health"]
interval: 30s
timeout: 5s
retries: 3
start_period: 15s
restart: unless-stopped
labels:
- "traefik.enable=true"
- "traefik.http.routers.web-app.rule=Host(`localhost`) || Host(`web.meldestelle.local`)"
- "traefik.http.services.web-app.loadbalancer.server.port=80"
- "prometheus.scrape=false" # Nginx metrics handled separately if needed
# ===================================================================
# Future Client Applications
# ===================================================================
# Mobile App (if implemented as PWA proxy)
# mobile-app:
# build:
# context: .
# dockerfile: dockerfiles/clients/mobile-app/Dockerfile
# image: meldestelle/mobile-app:latest
# container_name: meldestelle-mobile-app
# ports:
# - "3002:80"
# depends_on:
# - api-gateway
# networks:
# - meldestelle-network
# restart: unless-stopped
# ===================================================================
# Volumes for Client Applications
# ===================================================================
volumes:
web-app-logs:
driver: local
web-app-cache:
driver: local
# ===================================================================
# Networks (inherits from main docker-compose.yml or creates if standalone)
# ===================================================================
networks:
meldestelle-network:
driver: bridge

View File

@@ -1,235 +0,0 @@
# ===================================================================
# Docker Compose Override - Development Configuration
# Meldestelle Project - Development Workflow Optimizations
# ===================================================================
# This file is automatically loaded by docker-compose in development
# Usage: docker-compose up (automatically includes this override)
# ===================================================================
# Features:
# - Hot-reload for frontend development
# - Debug port exposure for backend services
# - Volume mounts for live code changes
# - Development-specific environment variables
# - Faster startup times
# ===================================================================
#version: '3.8'
services:
# ===================================================================
# Web Application - Development with Hot Reload
# ===================================================================
web-app:
# Override build for development - use Node.js dev server instead of production build
build:
target: development # Use development stage if multi-stage build supports it
ports:
- "3001:80"
- "3002:3000" # Additional port for webpack dev server if needed
volumes:
# Mount source code for hot-reload (read-only to prevent container changes)
- ./client/web-app/src:/workspace/client/web-app/src:ro
- ./client/common-ui/src:/workspace/client/common-ui/src:ro
# Mount build configuration for live updates
- ./client/web-app/build.gradle.kts:/workspace/client/web-app/build.gradle.kts:ro
- ./client/common-ui/build.gradle.kts:/workspace/client/common-ui/build.gradle.kts:ro
environment:
# Development-specific environment
- NODE_ENV=development
- WEBPACK_DEV_SERVER=true
- HOT_RELOAD=true
- API_BASE_URL=http://localhost:8080 # Direct to host for easier debugging
command: >
sh -c "
echo 'Starting Web App in DEVELOPMENT mode with hot-reload...';
nginx -t && nginx -g 'daemon off;'
"
# ===================================================================
# API Gateway - Development Debug Configuration
# ===================================================================
api-gateway:
ports:
- "8080:8080"
- "5005:5005" # Debug port for IDE attachment
environment:
# Enable debug mode
- DEBUG=true
- SPRING_PROFILES_ACTIVE=docker,debug
- LOGGING_LEVEL_ROOT=INFO
- LOGGING_LEVEL_AT_MOCODE=DEBUG
- SPRING_DEVTOOLS_RESTART_ENABLED=true
# Development CORS settings
- SPRING_CLOUD_GATEWAY_GLOBALCORS_CORSCONFIGURATIONS_[/**]_ALLOWEDORIGINS=http://localhost:3001,http://localhost:3002
- SPRING_CLOUD_GATEWAY_GLOBALCORS_CORSCONFIGURATIONS_[/**]_ALLOWEDMETHODS=GET,POST,PUT,DELETE,OPTIONS
- SPRING_CLOUD_GATEWAY_GLOBALCORS_CORSCONFIGURATIONS_[/**]_ALLOWEDHEADERS=*
- SPRING_CLOUD_GATEWAY_GLOBALCORS_CORSCONFIGURATIONS_[/**]_ALLOWCREDENTIALS=true
volumes:
# Mount logs for easier debugging
- ./logs/gateway:/app/logs
# ===================================================================
# Auth Server - Development Debug Configuration
# ===================================================================
auth-server:
ports:
- "8081:8081"
- "5006:5005" # Debug port (different from gateway)
environment:
# Enable debug mode
- DEBUG=true
- SPRING_PROFILES_ACTIVE=docker,debug
- LOGGING_LEVEL_ROOT=INFO
- LOGGING_LEVEL_AT_MOCODE=DEBUG
- SPRING_DEVTOOLS_RESTART_ENABLED=true
# Development JWT settings (shorter expiration for testing)
- JWT_EXPIRATION=3600 # 1 hour instead of 24 hours
- JWT_SECRET=development-secret-key-not-for-production
volumes:
# Mount logs for easier debugging
- ./logs/auth:/app/logs
# ===================================================================
# Monitoring Server - Development Debug Configuration
# ===================================================================
monitoring-server:
ports:
- "8083:8083"
- "5007:5005" # Debug port
environment:
# Enable debug mode
- DEBUG=true
- SPRING_PROFILES_ACTIVE=docker,debug
- LOGGING_LEVEL_ROOT=INFO
- LOGGING_LEVEL_AT_MOCODE=DEBUG
- LOGGING_LEVEL_MICROMETER=DEBUG
- SPRING_DEVTOOLS_RESTART_ENABLED=true
volumes:
# Mount logs for easier debugging
- ./logs/monitoring:/app/logs
# ===================================================================
# Ping Service - Development Debug Configuration
# ===================================================================
ping-service:
ports:
- "8082:8082"
- "5008:5005" # Debug port
environment:
# Enable debug mode
- DEBUG=true
- SPRING_PROFILES_ACTIVE=docker,debug
- LOGGING_LEVEL_ROOT=INFO
- SPRING_DEVTOOLS_RESTART_ENABLED=true
volumes:
# Mount logs for easier debugging
- ./logs/ping:/app/logs
# ===================================================================
# Infrastructure Services - Development Optimizations
# ===================================================================
postgres:
ports:
- "5432:5432" # Expose for external DB tools
environment:
# Development database settings
- POSTGRES_DB=meldestelle_dev
- POSTGRES_USER=meldestelle_dev
- POSTGRES_PASSWORD=meldestelle_dev
volumes:
# Use local directory for easier database inspection
- ./dev-data/postgres:/var/lib/postgresql/data
- ./logs/postgres:/var/log/postgresql
redis:
ports:
- "6379:6379" # Expose for Redis CLI access
volumes:
# Use local directory for easier cache inspection
- ./dev-data/redis:/data
command: redis-server --appendonly yes --save 60 1000 # More frequent saves in dev
prometheus:
ports:
- "9090:9090"
volumes:
# Development prometheus config with more scraping
- ./config/monitoring/prometheus.dev.yml:/etc/prometheus/prometheus.yml:ro
- ./dev-data/prometheus:/prometheus
grafana:
ports:
- "3000:3000"
environment:
# Development admin credentials
- GF_SECURITY_ADMIN_USER=admin
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_INSTALL_PLUGINS=grafana-piechart-panel,grafana-worldmap-panel
- GF_USERS_ALLOW_SIGN_UP=true # Allow signup in development
volumes:
# Development dashboards and data
- ./config/monitoring/grafana/dev-dashboards:/var/lib/grafana/dashboards:ro
- ./dev-data/grafana:/var/lib/grafana
consul:
ports:
- "8500:8500"
- "8600:8600/udp"
volumes:
# Development consul data
- ./dev-data/consul:/consul/data
# ===================================================================
# Development-Only Services
# ===================================================================
# PostgreSQL Admin Interface (optional)
pgadmin:
image: dpage/pgadmin4:latest
container_name: meldestelle-pgadmin-dev
ports:
- "5050:80"
environment:
- PGADMIN_DEFAULT_EMAIL=admin@meldestelle.dev
- PGADMIN_DEFAULT_PASSWORD=admin
- PGADMIN_CONFIG_SERVER_MODE=False
volumes:
- pgadmin-data:/var/lib/pgadmin
networks:
- meldestelle-network
depends_on:
- postgres
restart: unless-stopped
profiles:
- dev-tools # Only start with: docker-compose --profile dev-tools up
# Redis Admin Interface (optional)
redis-commander:
image: rediscommander/redis-commander:latest
container_name: meldestelle-redis-commander-dev
ports:
- "8081:8081"
environment:
- REDIS_HOSTS=local:redis:6379
networks:
- meldestelle-network
depends_on:
- redis
restart: unless-stopped
profiles:
- dev-tools # Only start with: docker-compose --profile dev-tools up
# ===================================================================
# Development Volumes
# ===================================================================
volumes:
pgadmin-data:
driver: local
# ===================================================================
# Networks - Same as main compose
# ===================================================================
networks:
meldestelle-network:
driver: bridge

View File

@@ -1,440 +0,0 @@
version: '3.8'
services:
postgres:
image: postgres:16-alpine
environment:
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: ${POSTGRES_DB}
# Production security settings
POSTGRES_INITDB_ARGS: "--auth-host=scram-sha-256 --auth-local=scram-sha-256"
ports:
# Only expose internally, not to host
- "5432"
volumes:
- postgres-data:/var/lib/postgresql/data
- ./docker/services/postgres:/docker-entrypoint-initdb.d
# TLS certificates for PostgreSQL
- ./config/ssl/postgres:/var/lib/postgresql/ssl:ro
networks:
- meldestelle-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 20s
restart: unless-stopped
# Security: Run as non-root user
user: postgres
# Resource limits
deploy:
resources:
limits:
memory: 1G
cpus: '0.5'
reservations:
memory: 512M
cpus: '0.25'
redis:
image: redis:7-alpine
environment:
# Redis with authentication
REDIS_PASSWORD: ${REDIS_PASSWORD}
ports:
# Only expose internally
- "6379"
volumes:
- redis-data:/data
- ./config/redis/redis.conf:/usr/local/etc/redis/redis.conf:ro
# TLS certificates for Redis
- ./config/ssl/redis:/tls:ro
command: >
redis-server /usr/local/etc/redis/redis.conf
--requirepass ${REDIS_PASSWORD}
--appendonly yes
--appendfsync everysec
--save 900 1
--save 300 10
--save 60 10000
--maxmemory 256mb
--maxmemory-policy allkeys-lru
--tcp-keepalive 300
--timeout 0
--tcp-backlog 511
--databases 16
--stop-writes-on-bgsave-error yes
--rdbcompression yes
--rdbchecksum yes
--dir /data
networks:
- meldestelle-network
healthcheck:
test: ["CMD", "redis-cli", "--no-auth-warning", "-a", "${REDIS_PASSWORD}", "ping"]
interval: 10s
timeout: 5s
retries: 3
start_period: 10s
restart: unless-stopped
# Security: Run as non-root user
user: redis
# Resource limits
deploy:
resources:
limits:
memory: 512M
cpus: '0.25'
reservations:
memory: 256M
cpus: '0.1'
keycloak:
image: quay.io/keycloak/keycloak:23.0
environment:
KEYCLOAK_ADMIN: ${KEYCLOAK_ADMIN}
KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD}
KC_DB: ${KC_DB}
KC_DB_URL: ${KC_DB_URL}
KC_DB_USERNAME: ${KC_DB_USERNAME}
KC_DB_PASSWORD: ${KC_DB_PASSWORD}
# Production settings
KC_HOSTNAME: ${KC_HOSTNAME}
KC_HOSTNAME_STRICT: true
KC_HOSTNAME_STRICT_HTTPS: true
KC_HTTP_ENABLED: false
KC_HTTPS_PORT: 8443
KC_HTTPS_CERTIFICATE_FILE: /opt/keycloak/conf/server.crt.pem
KC_HTTPS_CERTIFICATE_KEY_FILE: /opt/keycloak/conf/server.key.pem
KC_PROXY: edge
KC_LOG_LEVEL: WARN
KC_METRICS_ENABLED: true
KC_HEALTH_ENABLED: true
# Security headers
KC_HTTP_RELATIVE_PATH: /auth
ports:
# HTTPS only in production
- "8443:8443"
depends_on:
postgres:
condition: service_healthy
volumes:
- ./docker/services/keycloak:/opt/keycloak/data/import
# TLS certificates
- ./config/ssl/keycloak:/opt/keycloak/conf:ro
command: start --import-realm --optimized
networks:
- meldestelle-network
healthcheck:
test: ["CMD", "curl", "--fail", "--insecure", "https://localhost:8443/auth/health/ready"]
interval: 10s
timeout: 5s
retries: 5
start_period: 60s
restart: unless-stopped
# Resource limits
deploy:
resources:
limits:
memory: 1G
cpus: '0.5'
reservations:
memory: 512M
cpus: '0.25'
zookeeper:
image: confluentinc/cp-zookeeper:7.5.0
environment:
ZOOKEEPER_CLIENT_PORT: ${ZOOKEEPER_CLIENT_PORT}
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_MAX_CLIENT_CNXNS: 60
ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT: 3
ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL: 24
# Security settings
ZOOKEEPER_AUTH_PROVIDER_SASL: org.apache.zookeeper.server.auth.SASLAuthenticationProvider
ZOOKEEPER_REQUIRE_CLIENT_AUTH_SCHEME: sasl
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/zookeeper_jaas.conf"
ports:
# Only expose internally
- "2181"
volumes:
- zookeeper-data:/var/lib/zookeeper/data
- zookeeper-logs:/var/lib/zookeeper/log
- ./config/kafka/secrets:/etc/kafka/secrets:ro
networks:
- meldestelle-network
healthcheck:
test: ["CMD", "nc", "-z", "localhost", "2181"]
interval: 10s
timeout: 5s
retries: 3
start_period: 10s
restart: unless-stopped
# Resource limits
deploy:
resources:
limits:
memory: 512M
cpus: '0.25'
reservations:
memory: 256M
cpus: '0.1'
kafka:
  image: confluentinc/cp-kafka:7.5.0
  depends_on:
    zookeeper:
      condition: service_healthy
  ports:
    # Only expose internally
    - "9092"
    - "9093"
  environment:
    KAFKA_BROKER_ID: ${KAFKA_BROKER_ID}
    KAFKA_ZOOKEEPER_CONNECT: ${KAFKA_ZOOKEEPER_CONNECT}
    # Production security settings with SASL/SSL
    KAFKA_ADVERTISED_LISTENERS: SASL_SSL://kafka:9093,SASL_PLAINTEXT://kafka:9092
    KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: SASL_SSL:SASL_SSL,SASL_PLAINTEXT:SASL_PLAINTEXT
    KAFKA_INTER_BROKER_LISTENER_NAME: SASL_SSL
    KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_SSL
    KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
    KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
    # SSL Configuration
    KAFKA_SSL_KEYSTORE_FILENAME: kafka.server.keystore.jks
    KAFKA_SSL_KEYSTORE_CREDENTIALS: kafka_keystore_creds
    KAFKA_SSL_KEY_CREDENTIALS: kafka_ssl_key_creds
    KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.server.truststore.jks
    KAFKA_SSL_TRUSTSTORE_CREDENTIALS: kafka_truststore_creds
    KAFKA_SSL_CLIENT_AUTH: required
    # Performance and reliability settings
    # NOTE(review): replication factor 3 / min ISR 2 require at least 3
    # brokers; only one kafka service is defined here — confirm additional
    # brokers come from scaling or another file, otherwise creation of the
    # internal offsets/transaction topics will fail.
    KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
    KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 3
    KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 2
    KAFKA_DEFAULT_REPLICATION_FACTOR: 3
    KAFKA_MIN_INSYNC_REPLICAS: 2
    KAFKA_NUM_PARTITIONS: 3
    KAFKA_LOG_RETENTION_HOURS: 168
    KAFKA_LOG_SEGMENT_BYTES: 1073741824
    KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: 300000
    KAFKA_AUTO_CREATE_TOPICS_ENABLE: false
    # JVM settings
    KAFKA_HEAP_OPTS: "-Xmx512M -Xms512M"
    KAFKA_JVM_PERFORMANCE_OPTS: "-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35"
    # Security
    KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/secrets/kafka_jaas.conf"
  volumes:
    - kafka-data:/var/lib/kafka/data
    - ./config/kafka/secrets:/etc/kafka/secrets:ro
  networks:
    - meldestelle-network
  healthcheck:
    # Fix: the previous check ran `kafka-topics --bootstrap-server localhost:9092`
    # without any SASL credentials, but 9092 is a SASL_PLAINTEXT listener, so the
    # unauthenticated admin client is always rejected and the container never
    # becomes healthy (blocking every `service_healthy` dependant). A plain TCP
    # probe — the same approach the zookeeper service uses — verifies the
    # listener is up without needing authentication.
    # NOTE(review): confirm `nc` is available in the cp-kafka image (the
    # sibling cp-zookeeper healthcheck already relies on it).
    test: ["CMD-SHELL", "nc -z localhost 9092"]
    interval: 10s
    timeout: 5s
    retries: 3
    start_period: 30s
  restart: unless-stopped
  # Resource limits
  deploy:
    resources:
      limits:
        memory: 1G
        cpus: '0.5'
      reservations:
        memory: 512M
        cpus: '0.25'
# Distributed tracing backend (Zipkin) backed by Elasticsearch storage.
zipkin:
  image: openzipkin/zipkin:2
  ports:
    # Only expose internally
    - "9411"
  environment:
    # Production settings
    JAVA_OPTS: "-Xms256m -Xmx512m"
    # NOTE(review): storage points at an `elasticsearch` host, but no
    # elasticsearch service is defined in the visible part of this file —
    # confirm it is provided by another compose file or an external stack.
    STORAGE_TYPE: elasticsearch
    ES_HOSTS: http://elasticsearch:9200
  networks:
    - meldestelle-network
  healthcheck:
    # NOTE(review): assumes `wget` exists inside the Zipkin image — confirm.
    test: ["CMD", "wget", "-q", "-O", "-", "http://localhost:9411/health"]
    interval: 10s
    timeout: 5s
    retries: 3
    start_period: 15s
  restart: unless-stopped
  # Resource limits
  deploy:
    resources:
      limits:
        memory: 512M
        cpus: '0.25'
      reservations:
        memory: 256M
        cpus: '0.1'
# Production monitoring services
prometheus:
  image: prom/prometheus:v2.48.1
  volumes:
    - ./config/monitoring/prometheus.prod.yml:/etc/prometheus/prometheus.yml:ro
    - prometheus-data:/prometheus
    # TLS certificates
    - ./config/ssl/prometheus:/etc/ssl/prometheus:ro
  command:
    - '--config.file=/etc/prometheus/prometheus.yml'
    - '--storage.tsdb.path=/prometheus'
    # Keep 30 days / at most 10GB of TSDB data
    - '--storage.tsdb.retention.time=30d'
    - '--storage.tsdb.retention.size=10GB'
    - '--web.console.libraries=/etc/prometheus/console_libraries'
    - '--web.console.templates=/etc/prometheus/consoles'
    # Enables config reload via HTTP POST to /-/reload
    - '--web.enable-lifecycle'
    # NOTE(review): the admin API allows TSDB deletion/snapshots — confirm
    # it is intentionally enabled in production.
    - '--web.enable-admin-api'
    - '--web.external-url=https://${PROMETHEUS_HOSTNAME}'
    - '--web.config.file=/etc/ssl/prometheus/web.yml'
  ports:
    # Only expose internally
    - "9090"
  networks:
    - meldestelle-network
  healthcheck:
    # NOTE(review): probes plain HTTP; if the web.config.file turns on TLS
    # for the listener this check will fail — confirm the listener scheme.
    test: ["CMD", "wget", "-q", "-O", "-", "http://localhost:9090/-/healthy"]
    interval: 10s
    timeout: 5s
    retries: 3
    start_period: 15s
  restart: unless-stopped
  # Security: Run as non-root user (65534 = nobody)
  user: "65534:65534"
  # Resource limits
  deploy:
    resources:
      limits:
        memory: 1G
        cpus: '0.5'
      reservations:
        memory: 512M
        cpus: '0.25'
# Dashboards/visualisation, served over HTTPS with provisioned datasources.
grafana:
  image: grafana/grafana:10.2.3
  volumes:
    - ./config/monitoring/grafana/provisioning:/etc/grafana/provisioning:ro
    - ./config/monitoring/grafana/dashboards:/var/lib/grafana/dashboards:ro
    - grafana-data:/var/lib/grafana
    # TLS certificates
    - ./config/ssl/grafana:/etc/ssl/grafana:ro
  environment:
    # Security settings
    - GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER}
    - GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD}
    - GF_USERS_ALLOW_SIGN_UP=false
    - GF_USERS_ALLOW_ORG_CREATE=false
    - GF_AUTH_ANONYMOUS_ENABLED=false
    # HTTPS settings
    - GF_SERVER_PROTOCOL=https
    - GF_SERVER_CERT_FILE=/etc/ssl/grafana/server.crt
    - GF_SERVER_CERT_KEY=/etc/ssl/grafana/server.key
    - GF_SERVER_DOMAIN=${GRAFANA_HOSTNAME}
    - GF_SERVER_ROOT_URL=https://${GRAFANA_HOSTNAME}
    # Security headers
    - GF_SECURITY_STRICT_TRANSPORT_SECURITY=true
    - GF_SECURITY_STRICT_TRANSPORT_SECURITY_MAX_AGE_SECONDS=31536000
    - GF_SECURITY_CONTENT_TYPE_PROTECTION=true
    - GF_SECURITY_X_CONTENT_TYPE_OPTIONS=nosniff
    - GF_SECURITY_X_XSS_PROTECTION=true
    # Session settings
    # Sessions are stored in Redis db 2; NOTE(review): confirm db 2 is not
    # reused by other services sharing this Redis instance.
    - GF_SESSION_PROVIDER=redis
    - GF_SESSION_PROVIDER_CONFIG=addr=redis:6379,pool_size=100,db=2,password=${REDIS_PASSWORD}
    - GF_SESSION_COOKIE_SECURE=true
    - GF_SESSION_COOKIE_SAMESITE=strict
    # Logging
    - GF_LOG_MODE=console
    - GF_LOG_LEVEL=warn
  ports:
    # HTTPS only (host 3443 -> container 3000)
    - "3443:3000"
  networks:
    - meldestelle-network
  depends_on:
    prometheus:
      condition: service_healthy
    redis:
      condition: service_healthy
  healthcheck:
    # --no-check-certificate because the server cert is self-managed.
    # NOTE(review): confirm the image's wget supports this flag.
    test: ["CMD", "wget", "-q", "-O", "-", "--no-check-certificate", "https://localhost:3000/api/health"]
    interval: 10s
    timeout: 5s
    retries: 3
    start_period: 20s
  restart: unless-stopped
  # Security: Run as non-root user (472 = grafana)
  user: "472:472"
  # Resource limits
  deploy:
    resources:
      limits:
        memory: 512M
        cpus: '0.25'
      reservations:
        memory: 256M
        cpus: '0.1'
# Reverse proxy for production
nginx:
  image: nginx:alpine
  ports:
    - "80:80"
    - "443:443"
  volumes:
    - ./config/nginx/nginx.prod.conf:/etc/nginx/nginx.conf:ro
    - ./config/nginx/conf.d:/etc/nginx/conf.d:ro
    # TLS material served by nginx
    - ./config/ssl/nginx:/etc/ssl/nginx:ro
    - ./logs/nginx:/var/log/nginx
  networks:
    - meldestelle-network
  # NOTE(review): no `condition:` is given, so this only orders container
  # startup — nginx may come up before keycloak/grafana are ready to serve.
  # Also confirm a `keycloak` service exists in this file or a layered one;
  # it is not visible here.
  depends_on:
    - keycloak
    - grafana
  healthcheck:
    # Requires a /health location in the nginx configuration
    test: ["CMD", "wget", "-q", "-O", "-", "http://localhost/health"]
    interval: 10s
    timeout: 5s
    retries: 3
    start_period: 10s
  restart: unless-stopped
  # Resource limits
  deploy:
    resources:
      limits:
        memory: 256M
        cpus: '0.1'
      reservations:
        memory: 128M
        cpus: '0.05'
# Named volumes for persistent service data
volumes:
  postgres-data:
    driver: local
  redis-data:
    driver: local
  kafka-data:
    driver: local
  zookeeper-data:
    driver: local
  zookeeper-logs:
    driver: local
  prometheus-data:
    driver: local
  grafana-data:
    driver: local

# Single bridge network shared by all services
networks:
  meldestelle-network:
    driver: bridge
    ipam:
      config:
        # NOTE(review): fixed subnet — confirm 172.20.0.0/16 does not collide
        # with other Docker networks or VPN routes on the host.
        - subnet: 172.20.0.0/16

View File

@ -1,866 +0,0 @@
# ===================================================================
# Docker Compose - Application Services
# Meldestelle Project - Service Layer Configuration
# ===================================================================
# Usage:
# Development: docker-compose -f docker-compose.yml -f docker-compose.services.yml up
# Production: docker-compose -f docker-compose.prod.yml -f docker-compose.services.yml up
# ===================================================================
# Optimized version with:
# - Standardized build arguments and environment variables
# - Enhanced health checks and resource constraints
# - Improved security configurations
# - Debug support and development features
# - Comprehensive monitoring and logging
# ===================================================================
version: '3.8'
services:
# ===================================================================
# Authentication Server
# ===================================================================
# Issues and validates JWTs for the other services (REST on 8081).
auth-server:
  build:
    context: .
    dockerfile: dockerfiles/infrastructure/auth-server/Dockerfile
    args:
      GRADLE_VERSION: ${GRADLE_VERSION:-8.14}
      JAVA_VERSION: ${JAVA_VERSION:-21}
      SPRING_PROFILES_ACTIVE: ${SPRING_PROFILES_ACTIVE:-docker}
      APP_USER: ${AUTH_APP_USER:-authuser}
      APP_UID: ${AUTH_APP_UID:-1002}
      APP_GID: ${AUTH_APP_GID:-1002}
  image: meldestelle/auth-server:latest
  container_name: meldestelle-auth-server
  ports:
    - "${AUTH_SERVER_PORT:-8081}:8081"
    # NOTE(review): despite the "(conditional)" remark, this port is published
    # unconditionally — Compose cannot toggle a mapping on DEBUG=false. Also
    # verify the host debug-port defaults are unique across this file.
    - "${AUTH_DEBUG_PORT:-5005}:5005" # Debug port (conditional)
  depends_on:
    postgres:
      condition: service_healthy
    consul:
      condition: service_healthy
    redis:
      condition: service_healthy
  environment:
    # Spring Boot Configuration
    - SPRING_PROFILES_ACTIVE=${SPRING_PROFILES_ACTIVE:-docker}
    - SERVER_PORT=8081
    - MANAGEMENT_SERVER_PORT=8081
    - DEBUG=${DEBUG:-false}
    # Service Discovery
    - SPRING_CLOUD_CONSUL_HOST=consul
    - SPRING_CLOUD_CONSUL_PORT=8500
    - SPRING_APPLICATION_NAME=auth-server
    # Database Configuration
    - SPRING_DATASOURCE_URL=jdbc:postgresql://postgres:5432/${POSTGRES_DB:-meldestelle}
    - SPRING_DATASOURCE_USERNAME=${POSTGRES_USER:-meldestelle}
    - SPRING_DATASOURCE_PASSWORD=${POSTGRES_PASSWORD:-meldestelle}
    - SPRING_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE=${AUTH_DB_POOL_SIZE:-10}
    - SPRING_DATASOURCE_HIKARI_MINIMUM_IDLE=${AUTH_DB_MIN_IDLE:-5}
    # Redis Configuration
    - SPRING_REDIS_HOST=redis
    - SPRING_REDIS_PORT=6379
    - SPRING_REDIS_PASSWORD=${REDIS_PASSWORD:-}
    - SPRING_REDIS_TIMEOUT=${REDIS_TIMEOUT:-2000ms}
    - SPRING_REDIS_LETTUCE_POOL_MAX_ACTIVE=${REDIS_POOL_MAX_ACTIVE:-8}
    # Security Configuration
    # NOTE(review): insecure fallback secret — deployments must set JWT_SECRET.
    - JWT_SECRET=${JWT_SECRET:-meldestelle-auth-secret-key-change-in-production}
    - JWT_EXPIRATION=${JWT_EXPIRATION:-86400}
    - JWT_REFRESH_EXPIRATION=${JWT_REFRESH_EXPIRATION:-604800}
    # Monitoring & Observability
    - MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE=health,info,metrics,prometheus,configprops
    - MANAGEMENT_ENDPOINT_HEALTH_SHOW_DETAILS=always
    - MANAGEMENT_ENDPOINT_HEALTH_PROBES_ENABLED=true
    - MANAGEMENT_TRACING_SAMPLING_PROBABILITY=${TRACING_SAMPLING:-0.1}
    - MANAGEMENT_ZIPKIN_TRACING_ENDPOINT=http://zipkin:9411/api/v2/spans
    # Performance Tuning
    - JAVA_OPTS=-XX:MaxRAMPercentage=75.0 -XX:+UseG1GC -XX:+UseStringDeduplication
    - LOGGING_LEVEL_ROOT=${LOG_LEVEL:-INFO}
    - LOGGING_LEVEL_AT_MOCODE=${APP_LOG_LEVEL:-DEBUG}
  networks:
    - meldestelle-network
  volumes:
    - auth-logs:/app/logs
    - auth-temp:/app/tmp
  healthcheck:
    test: ["CMD", "curl", "-fsS", "--max-time", "3", "http://localhost:8081/actuator/health/readiness"]
    interval: 15s
    timeout: 5s
    retries: 5
    start_period: 60s
  restart: unless-stopped
  # Resource constraints
  deploy:
    resources:
      limits:
        memory: 512M
        cpus: '1.0'
  # Enhanced labels
  labels:
    - "traefik.enable=true"
    - "traefik.http.routers.auth-server.rule=Host(`auth.meldestelle.local`)"
    - "traefik.http.services.auth-server.loadbalancer.server.port=8081"
    - "prometheus.scrape=true"
    - "prometheus.port=8081"
    - "prometheus.path=/actuator/prometheus"
    - "prometheus.service=auth-server"
    - "service.name=auth-server"
    - "service.version=1.0.0"
    - "service.environment=${SPRING_PROFILES_ACTIVE:-docker}"
# ===================================================================
# Monitoring Server
# ===================================================================
# Aggregates monitoring data from Prometheus/Grafana/Zipkin (REST on 8083).
monitoring-server:
  build:
    context: .
    dockerfile: dockerfiles/infrastructure/monitoring-server/Dockerfile
    args:
      GRADLE_VERSION: ${GRADLE_VERSION:-8.14}
      JAVA_VERSION: ${JAVA_VERSION:-21}
      SPRING_PROFILES_ACTIVE: ${SPRING_PROFILES_ACTIVE:-docker}
  image: meldestelle/monitoring-server:latest
  container_name: meldestelle-monitoring-server
  ports:
    - "${MONITORING_SERVER_PORT:-8083}:8083"
    # NOTE(review): verify the host debug-port defaults are unique across
    # the services in this file (5006 appears more than once).
    - "${MONITORING_DEBUG_PORT:-5006}:5006" # Debug port
  depends_on:
    consul:
      condition: service_healthy
    # NOTE(review): `prometheus` is not defined in this file — it must be
    # provided by the infrastructure compose file this one is layered on.
    prometheus:
      condition: service_healthy
  environment:
    # Spring Boot Configuration
    - SPRING_PROFILES_ACTIVE=${SPRING_PROFILES_ACTIVE:-docker}
    - SERVER_PORT=8083
    - MANAGEMENT_SERVER_PORT=8083
    - DEBUG=${DEBUG:-false}
    # Service Discovery
    - SPRING_CLOUD_CONSUL_HOST=consul
    - SPRING_CLOUD_CONSUL_PORT=8500
    - SPRING_APPLICATION_NAME=monitoring-server
    # Monitoring Configuration
    - PROMETHEUS_URL=http://prometheus:9090
    - GRAFANA_URL=http://grafana:3000
    - ZIPKIN_URL=http://zipkin:9411
    - MONITORING_REFRESH_INTERVAL=${MONITORING_REFRESH_INTERVAL:-30s}
    - MONITORING_ALERT_THRESHOLD=${MONITORING_ALERT_THRESHOLD:-0.8}
    # Metrics Collection
    - MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE=health,info,metrics,prometheus,env,configprops,beans
    - MANAGEMENT_ENDPOINT_HEALTH_SHOW_DETAILS=always
    - MANAGEMENT_ENDPOINT_HEALTH_PROBES_ENABLED=true
    - MANAGEMENT_METRICS_EXPORT_PROMETHEUS_ENABLED=true
    - MANAGEMENT_TRACING_SAMPLING_PROBABILITY=${TRACING_SAMPLING:-0.1}
    - MANAGEMENT_ZIPKIN_TRACING_ENDPOINT=http://zipkin:9411/api/v2/spans
    # Performance Tuning
    - JAVA_OPTS=-XX:MaxRAMPercentage=70.0 -XX:+UseG1GC -XX:+UseStringDeduplication
    - LOGGING_LEVEL_ROOT=${LOG_LEVEL:-INFO}
    - LOGGING_LEVEL_AT_MOCODE=${APP_LOG_LEVEL:-DEBUG}
    # NOTE(review): DEBUG-level Micrometer logging is unconditional here —
    # consider gating it behind an env var for production profiles.
    - LOGGING_LEVEL_MICROMETER=DEBUG
    - LOGGING_LEVEL_IO_MICROMETER=DEBUG
  networks:
    - meldestelle-network
  volumes:
    - monitoring-logs:/app/logs
    - monitoring-temp:/app/tmp
    - monitoring-data:/app/data
  healthcheck:
    test: ["CMD", "curl", "-fsS", "--max-time", "3", "http://localhost:8083/actuator/health/readiness"]
    interval: 10s
    timeout: 5s
    retries: 3
    start_period: 45s
  restart: unless-stopped
  # Resource constraints
  deploy:
    resources:
      limits:
        memory: 384M
        cpus: '0.75'
  # Enhanced labels
  labels:
    - "traefik.enable=true"
    - "traefik.http.routers.monitoring-server.rule=Host(`monitoring.meldestelle.local`)"
    - "traefik.http.services.monitoring-server.loadbalancer.server.port=8083"
    - "prometheus.scrape=true"
    - "prometheus.port=8083"
    - "prometheus.path=/actuator/prometheus"
    - "prometheus.service=monitoring-server"
    - "service.name=monitoring-server"
    - "service.version=1.0.0"
    - "service.environment=${SPRING_PROFILES_ACTIVE:-docker}"
# ===================================================================
# API Gateway (Enhanced Configuration)
# ===================================================================
# Spring Cloud Gateway fronting the service mesh (REST on 8080).
api-gateway:
  build:
    context: .
    dockerfile: dockerfiles/infrastructure/gateway/Dockerfile
    args:
      GRADLE_VERSION: ${GRADLE_VERSION:-8.14}
      JAVA_VERSION: ${JAVA_VERSION:-21}
      SPRING_PROFILES_ACTIVE: ${SPRING_PROFILES_ACTIVE:-docker}
  image: meldestelle/api-gateway:latest
  container_name: meldestelle-api-gateway
  ports:
    - "${API_GATEWAY_PORT:-8080}:8080"
    # NOTE(review): verify the host debug-port defaults are unique across
    # the services in this file (5007 appears more than once).
    - "${GATEWAY_DEBUG_PORT:-5007}:5007" # Debug port
  depends_on:
    consul:
      condition: service_healthy
    auth-server:
      condition: service_healthy
  environment:
    # Spring Boot Configuration
    - SPRING_PROFILES_ACTIVE=${SPRING_PROFILES_ACTIVE:-docker}
    - SERVER_PORT=8080
    - DEBUG=${DEBUG:-false}
    # Service Discovery
    - SPRING_CLOUD_CONSUL_HOST=consul
    - SPRING_CLOUD_CONSUL_PORT=8500
    - SPRING_APPLICATION_NAME=api-gateway
    # Gateway Configuration
    - SPRING_CLOUD_GATEWAY_DISCOVERY_LOCATOR_ENABLED=true
    - SPRING_CLOUD_GATEWAY_DISCOVERY_LOCATOR_LOWER_CASE_SERVICE_ID=true
    - SPRING_CLOUD_GATEWAY_HTTPCLIENT_CONNECT_TIMEOUT=${GATEWAY_CONNECT_TIMEOUT:-5000}
    - SPRING_CLOUD_GATEWAY_HTTPCLIENT_RESPONSE_TIMEOUT=${GATEWAY_RESPONSE_TIMEOUT:-30s}
    - SPRING_CLOUD_GATEWAY_HTTPCLIENT_POOL_MAX_CONNECTIONS=${GATEWAY_POOL_MAX_CONNECTIONS:-100}
    # Security Configuration
    - AUTH_SERVER_URL=http://auth-server:8081
    # NOTE(review): insecure fallback secret — deployments must set JWT_SECRET.
    - JWT_SECRET=${JWT_SECRET:-meldestelle-auth-secret-key-change-in-production}
    - CORS_ALLOWED_ORIGINS=${CORS_ALLOWED_ORIGINS:-http://localhost:3001,http://web-app}
    - CORS_ALLOWED_METHODS=${CORS_ALLOWED_METHODS:-GET,POST,PUT,DELETE,OPTIONS}
    # Circuit Breaker & Resilience
    - RESILIENCE4J_CIRCUITBREAKER_INSTANCES_DEFAULT_SLIDING_WINDOW_SIZE=${CB_SLIDING_WINDOW:-10}
    - RESILIENCE4J_CIRCUITBREAKER_INSTANCES_DEFAULT_FAILURE_RATE_THRESHOLD=${CB_FAILURE_RATE:-50}
    - RESILIENCE4J_CIRCUITBREAKER_INSTANCES_DEFAULT_WAIT_DURATION_IN_OPEN_STATE=${CB_WAIT_DURATION:-60s}
    - RESILIENCE4J_RETRY_INSTANCES_DEFAULT_MAX_ATTEMPTS=${RETRY_MAX_ATTEMPTS:-3}
    - RESILIENCE4J_TIMELIMITER_INSTANCES_DEFAULT_TIMEOUT_DURATION=${TIMEOUT_DURATION:-10s}
    # Rate Limiting
    - SPRING_CLOUD_GATEWAY_FILTER_REQUEST_RATE_LIMITER_REDIS_RATE_LIMITER_REPLENISH_RATE=${RATE_LIMIT_REPLENISH:-10}
    - SPRING_CLOUD_GATEWAY_FILTER_REQUEST_RATE_LIMITER_REDIS_RATE_LIMITER_BURST_CAPACITY=${RATE_LIMIT_BURST:-20}
    # Monitoring & Observability
    - MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE=health,info,metrics,prometheus,gateway,configprops
    - MANAGEMENT_ENDPOINT_HEALTH_SHOW_DETAILS=always
    - MANAGEMENT_ENDPOINT_HEALTH_PROBES_ENABLED=true
    - MANAGEMENT_TRACING_SAMPLING_PROBABILITY=${TRACING_SAMPLING:-0.1}
    - MANAGEMENT_ZIPKIN_TRACING_ENDPOINT=http://zipkin:9411/api/v2/spans
    # Performance Tuning
    - JAVA_OPTS=-XX:MaxRAMPercentage=75.0 -XX:+UseG1GC -XX:+UseStringDeduplication -XX:+OptimizeStringConcat
    - LOGGING_LEVEL_ROOT=${LOG_LEVEL:-INFO}
    - LOGGING_LEVEL_AT_MOCODE=${APP_LOG_LEVEL:-DEBUG}
    - LOGGING_LEVEL_REACTOR_NETTY=${NETTY_LOG_LEVEL:-INFO}
  networks:
    - meldestelle-network
  volumes:
    - gateway-logs:/app/logs
    - gateway-temp:/app/tmp
  healthcheck:
    test: ["CMD", "curl", "-fsS", "--max-time", "3", "http://localhost:8080/actuator/health/readiness"]
    interval: 15s
    timeout: 5s
    retries: 3
    start_period: 45s
  restart: unless-stopped
  # Resource constraints
  deploy:
    resources:
      limits:
        memory: 768M
        cpus: '1.5'
  # Enhanced labels
  labels:
    - "traefik.enable=true"
    - "traefik.http.routers.api-gateway.rule=Host(`api.meldestelle.local`)"
    - "traefik.http.services.api-gateway.loadbalancer.server.port=8080"
    - "prometheus.scrape=true"
    - "prometheus.port=8080"
    - "prometheus.path=/actuator/prometheus"
    - "prometheus.service=api-gateway"
    - "service.name=api-gateway"
    - "service.version=1.0.0"
    - "service.environment=${SPRING_PROFILES_ACTIVE:-docker}"
# ===================================================================
# Ping Service (Enhanced for Integration Testing)
# ===================================================================
# Lightweight service used for connectivity / integration smoke tests (8082).
ping-service:
  build:
    context: .
    dockerfile: dockerfiles/services/ping-service/Dockerfile
    args:
      GRADLE_VERSION: ${GRADLE_VERSION:-8.14}
      JAVA_VERSION: ${JAVA_VERSION:-21}
      SPRING_PROFILES_ACTIVE: ${SPRING_PROFILES_ACTIVE:-docker}
  image: meldestelle/ping-service:latest
  container_name: meldestelle-ping-service
  ports:
    - "${PING_SERVICE_PORT:-8082}:8082"
    - "${PING_DEBUG_PORT:-5008}:5008" # Debug port
  depends_on:
    consul:
      condition: service_healthy
  environment:
    # Spring Boot Configuration
    - SPRING_PROFILES_ACTIVE=${SPRING_PROFILES_ACTIVE:-docker}
    - SERVER_PORT=8082
    - DEBUG=${DEBUG:-false}
    # Service Discovery
    - SPRING_CLOUD_CONSUL_HOST=consul
    - SPRING_CLOUD_CONSUL_PORT=8500
    - SPRING_APPLICATION_NAME=ping-service
    # Integration Testing Configuration
    - PING_TEST_INTERVAL=${PING_TEST_INTERVAL:-30s}
    - PING_TIMEOUT=${PING_TIMEOUT:-5s}
    - PING_MAX_RETRIES=${PING_MAX_RETRIES:-3}
    - INTEGRATION_TEST_ENABLED=${INTEGRATION_TEST_ENABLED:-true}
    # Monitoring & Observability
    - MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE=health,info,metrics,prometheus,configprops
    - MANAGEMENT_ENDPOINT_HEALTH_SHOW_DETAILS=always
    - MANAGEMENT_ENDPOINT_HEALTH_PROBES_ENABLED=true
    - MANAGEMENT_TRACING_SAMPLING_PROBABILITY=${TRACING_SAMPLING:-0.1}
    - MANAGEMENT_ZIPKIN_TRACING_ENDPOINT=http://zipkin:9411/api/v2/spans
    # Performance Tuning
    - JAVA_OPTS=-XX:MaxRAMPercentage=60.0 -XX:+UseG1GC -XX:+UseStringDeduplication
    - LOGGING_LEVEL_ROOT=${LOG_LEVEL:-INFO}
    - LOGGING_LEVEL_AT_MOCODE=${APP_LOG_LEVEL:-DEBUG}
    - LOGGING_LEVEL_SPRING_WEB=${WEB_LOG_LEVEL:-INFO}
  networks:
    - meldestelle-network
  volumes:
    - ping-logs:/app/logs
    - ping-temp:/app/tmp
  healthcheck:
    test: ["CMD", "curl", "-fsS", "--max-time", "3", "http://localhost:8082/actuator/health/readiness"]
    interval: 10s
    timeout: 3s
    retries: 3
    start_period: 30s
  restart: unless-stopped
  # Resource constraints (lightweight service)
  deploy:
    resources:
      limits:
        memory: 256M
        cpus: '0.5'
  # Enhanced labels
  labels:
    - "traefik.enable=true"
    - "traefik.http.routers.ping-service.rule=Host(`ping.meldestelle.local`)"
    - "traefik.http.services.ping-service.loadbalancer.server.port=8082"
    - "prometheus.scrape=true"
    - "prometheus.port=8082"
    - "prometheus.path=/actuator/prometheus"
    - "prometheus.service=ping-service"
    - "service.name=ping-service"
    - "service.version=1.0.0"
    - "service.environment=${SPRING_PROFILES_ACTIVE:-docker}"
# ===================================================================
# Members Service
# ===================================================================
# Domain service for member data (REST on 8084).
members-service:
  build:
    context: .
    dockerfile: dockerfiles/services/members-service/Dockerfile
    args:
      GRADLE_VERSION: ${GRADLE_VERSION:-8.14}
      JAVA_VERSION: ${JAVA_VERSION:-21}
      SPRING_PROFILES_ACTIVE: ${SPRING_PROFILES_ACTIVE:-docker}
      APP_USER: ${MEMBERS_APP_USER:-membersuser}
      APP_UID: ${MEMBERS_APP_UID:-1004}
      APP_GID: ${MEMBERS_APP_GID:-1004}
  image: meldestelle/members-service:latest
  container_name: meldestelle-members-service
  ports:
    - "${MEMBERS_SERVICE_PORT:-8084}:8084"
    - "${MEMBERS_DEBUG_PORT:-5004}:5004" # Debug port
  depends_on:
    postgres:
      condition: service_healthy
    consul:
      condition: service_healthy
    redis:
      condition: service_healthy
    auth-server:
      condition: service_healthy
  environment:
    # Spring Boot Configuration
    - SPRING_PROFILES_ACTIVE=${SPRING_PROFILES_ACTIVE:-docker}
    - SERVER_PORT=8084
    - MANAGEMENT_SERVER_PORT=8084
    - DEBUG=${DEBUG:-false}
    # Service Discovery
    - SPRING_CLOUD_CONSUL_HOST=consul
    - SPRING_CLOUD_CONSUL_PORT=8500
    - SPRING_APPLICATION_NAME=members-service
    # Database Configuration
    - SPRING_DATASOURCE_URL=jdbc:postgresql://postgres:5432/${POSTGRES_DB:-meldestelle}
    - SPRING_DATASOURCE_USERNAME=${POSTGRES_USER:-meldestelle}
    - SPRING_DATASOURCE_PASSWORD=${POSTGRES_PASSWORD:-meldestelle}
    - SPRING_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE=${MEMBERS_DB_POOL_SIZE:-20}
    - SPRING_DATASOURCE_HIKARI_MINIMUM_IDLE=${MEMBERS_DB_MIN_IDLE:-10}
    # Redis Configuration
    - SPRING_REDIS_HOST=redis
    - SPRING_REDIS_PORT=6379
    - SPRING_REDIS_PASSWORD=${REDIS_PASSWORD:-}
    - SPRING_REDIS_TIMEOUT=${REDIS_TIMEOUT:-2000ms}
    # Security Configuration
    - AUTH_SERVER_URL=http://auth-server:8081
    # NOTE(review): insecure fallback secret — deployments must set JWT_SECRET.
    - JWT_SECRET=${JWT_SECRET:-meldestelle-auth-secret-key-change-in-production}
    # Monitoring & Observability
    - MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE=health,info,metrics,prometheus,configprops
    - MANAGEMENT_ENDPOINT_HEALTH_SHOW_DETAILS=always
    - MANAGEMENT_ENDPOINT_HEALTH_PROBES_ENABLED=true
    - MANAGEMENT_TRACING_SAMPLING_PROBABILITY=${TRACING_SAMPLING:-0.1}
    - MANAGEMENT_ZIPKIN_TRACING_ENDPOINT=http://zipkin:9411/api/v2/spans
    # Performance Tuning
    - JAVA_OPTS=-XX:MaxRAMPercentage=80.0 -XX:+UseG1GC -XX:+UseStringDeduplication
    - LOGGING_LEVEL_ROOT=${LOG_LEVEL:-INFO}
    - LOGGING_LEVEL_AT_MOCODE_MEMBERS=${MEMBERS_LOG_LEVEL:-DEBUG}
  networks:
    - meldestelle-network
  volumes:
    - members-logs:/app/logs
    - members-temp:/app/tmp
  healthcheck:
    test: ["CMD", "curl", "-fsS", "--max-time", "3", "http://localhost:8084/actuator/health/readiness"]
    interval: 15s
    timeout: 5s
    retries: 3
    start_period: 45s
  restart: unless-stopped
  deploy:
    resources:
      limits:
        memory: 1024M
        cpus: '1.5'
  labels:
    - "traefik.enable=true"
    - "traefik.http.routers.members-service.rule=Host(`members.meldestelle.local`)"
    - "traefik.http.services.members-service.loadbalancer.server.port=8084"
    - "prometheus.scrape=true"
    - "prometheus.port=8084"
    - "prometheus.path=/actuator/prometheus"
    - "prometheus.service=members-service"
    - "service.name=members-service"
    - "service.version=1.0.0"
    - "service.environment=${SPRING_PROFILES_ACTIVE:-docker}"
# ===================================================================
# Horses Service
# ===================================================================
# Domain service for horse data (REST on 8085).
horses-service:
  build:
    context: .
    dockerfile: dockerfiles/services/horses-service/Dockerfile
    args:
      GRADLE_VERSION: ${GRADLE_VERSION:-8.14}
      JAVA_VERSION: ${JAVA_VERSION:-21}
      SPRING_PROFILES_ACTIVE: ${SPRING_PROFILES_ACTIVE:-docker}
      APP_USER: ${HORSES_APP_USER:-horsesuser}
      APP_UID: ${HORSES_APP_UID:-1005}
      APP_GID: ${HORSES_APP_GID:-1005}
  image: meldestelle/horses-service:latest
  container_name: meldestelle-horses-service
  ports:
    - "${HORSES_SERVICE_PORT:-8085}:8085"
    # Fix: host-port default changed from 5005 to 5015 — auth-server in this
    # file already defaults its debug port to host 5005, so both services
    # could not start together with default settings (host port bind
    # conflict). Overriding via HORSES_DEBUG_PORT is still supported; the
    # container-side debug port remains 5005.
    - "${HORSES_DEBUG_PORT:-5015}:5005" # Debug port
  depends_on:
    postgres:
      condition: service_healthy
    consul:
      condition: service_healthy
    redis:
      condition: service_healthy
    auth-server:
      condition: service_healthy
  environment:
    # Spring Boot Configuration
    - SPRING_PROFILES_ACTIVE=${SPRING_PROFILES_ACTIVE:-docker}
    - SERVER_PORT=8085
    - MANAGEMENT_SERVER_PORT=8085
    - DEBUG=${DEBUG:-false}
    # Service Discovery
    - SPRING_CLOUD_CONSUL_HOST=consul
    - SPRING_CLOUD_CONSUL_PORT=8500
    - SPRING_APPLICATION_NAME=horses-service
    # Database Configuration
    - SPRING_DATASOURCE_URL=jdbc:postgresql://postgres:5432/${POSTGRES_DB:-meldestelle}
    - SPRING_DATASOURCE_USERNAME=${POSTGRES_USER:-meldestelle}
    - SPRING_DATASOURCE_PASSWORD=${POSTGRES_PASSWORD:-meldestelle}
    - SPRING_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE=${HORSES_DB_POOL_SIZE:-20}
    - SPRING_DATASOURCE_HIKARI_MINIMUM_IDLE=${HORSES_DB_MIN_IDLE:-10}
    # Redis Configuration
    - SPRING_REDIS_HOST=redis
    - SPRING_REDIS_PORT=6379
    - SPRING_REDIS_PASSWORD=${REDIS_PASSWORD:-}
    - SPRING_REDIS_TIMEOUT=${REDIS_TIMEOUT:-2000ms}
    # Security Configuration
    - AUTH_SERVER_URL=http://auth-server:8081
    # NOTE(review): insecure fallback secret — deployments must set JWT_SECRET.
    - JWT_SECRET=${JWT_SECRET:-meldestelle-auth-secret-key-change-in-production}
    # Monitoring & Observability
    - MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE=health,info,metrics,prometheus,configprops
    - MANAGEMENT_ENDPOINT_HEALTH_SHOW_DETAILS=always
    - MANAGEMENT_ENDPOINT_HEALTH_PROBES_ENABLED=true
    - MANAGEMENT_TRACING_SAMPLING_PROBABILITY=${TRACING_SAMPLING:-0.1}
    - MANAGEMENT_ZIPKIN_TRACING_ENDPOINT=http://zipkin:9411/api/v2/spans
    # Performance Tuning
    - JAVA_OPTS=-XX:MaxRAMPercentage=80.0 -XX:+UseG1GC -XX:+UseStringDeduplication
    - LOGGING_LEVEL_ROOT=${LOG_LEVEL:-INFO}
    - LOGGING_LEVEL_AT_MOCODE_HORSES=${HORSES_LOG_LEVEL:-DEBUG}
  networks:
    - meldestelle-network
  volumes:
    - horses-logs:/app/logs
    - horses-temp:/app/tmp
  healthcheck:
    test: ["CMD", "curl", "-fsS", "--max-time", "3", "http://localhost:8085/actuator/health/readiness"]
    interval: 15s
    timeout: 5s
    retries: 3
    start_period: 45s
  restart: unless-stopped
  deploy:
    resources:
      limits:
        memory: 1024M
        cpus: '1.5'
  labels:
    - "traefik.enable=true"
    - "traefik.http.routers.horses-service.rule=Host(`horses.meldestelle.local`)"
    - "traefik.http.services.horses-service.loadbalancer.server.port=8085"
    - "prometheus.scrape=true"
    - "prometheus.port=8085"
    - "prometheus.path=/actuator/prometheus"
    - "prometheus.service=horses-service"
    - "service.name=horses-service"
    - "service.version=1.0.0"
    - "service.environment=${SPRING_PROFILES_ACTIVE:-docker}"
# ===================================================================
# Events Service
# ===================================================================
# Domain service for event data (REST on 8086).
events-service:
  build:
    context: .
    dockerfile: dockerfiles/services/events-service/Dockerfile
    args:
      GRADLE_VERSION: ${GRADLE_VERSION:-8.14}
      JAVA_VERSION: ${JAVA_VERSION:-21}
      SPRING_PROFILES_ACTIVE: ${SPRING_PROFILES_ACTIVE:-docker}
      APP_USER: ${EVENTS_APP_USER:-eventsuser}
      APP_UID: ${EVENTS_APP_UID:-1006}
      APP_GID: ${EVENTS_APP_GID:-1006}
  image: meldestelle/events-service:latest
  container_name: meldestelle-events-service
  ports:
    - "${EVENTS_SERVICE_PORT:-8086}:8086"
    # Fix: host-port default changed from 5006 to 5016 — monitoring-server in
    # this file already defaults its debug port to host 5006, so both services
    # could not start together with default settings (host port bind
    # conflict). Overriding via EVENTS_DEBUG_PORT is still supported; the
    # container-side debug port remains 5006.
    - "${EVENTS_DEBUG_PORT:-5016}:5006" # Debug port
  depends_on:
    postgres:
      condition: service_healthy
    consul:
      condition: service_healthy
    redis:
      condition: service_healthy
    auth-server:
      condition: service_healthy
  environment:
    # Spring Boot Configuration
    - SPRING_PROFILES_ACTIVE=${SPRING_PROFILES_ACTIVE:-docker}
    - SERVER_PORT=8086
    - MANAGEMENT_SERVER_PORT=8086
    - DEBUG=${DEBUG:-false}
    # Service Discovery
    - SPRING_CLOUD_CONSUL_HOST=consul
    - SPRING_CLOUD_CONSUL_PORT=8500
    - SPRING_APPLICATION_NAME=events-service
    # Database Configuration
    - SPRING_DATASOURCE_URL=jdbc:postgresql://postgres:5432/${POSTGRES_DB:-meldestelle}
    - SPRING_DATASOURCE_USERNAME=${POSTGRES_USER:-meldestelle}
    - SPRING_DATASOURCE_PASSWORD=${POSTGRES_PASSWORD:-meldestelle}
    - SPRING_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE=${EVENTS_DB_POOL_SIZE:-20}
    - SPRING_DATASOURCE_HIKARI_MINIMUM_IDLE=${EVENTS_DB_MIN_IDLE:-10}
    # Redis Configuration
    - SPRING_REDIS_HOST=redis
    - SPRING_REDIS_PORT=6379
    - SPRING_REDIS_PASSWORD=${REDIS_PASSWORD:-}
    - SPRING_REDIS_TIMEOUT=${REDIS_TIMEOUT:-2000ms}
    # Security Configuration
    - AUTH_SERVER_URL=http://auth-server:8081
    # NOTE(review): insecure fallback secret — deployments must set JWT_SECRET.
    - JWT_SECRET=${JWT_SECRET:-meldestelle-auth-secret-key-change-in-production}
    # Monitoring & Observability
    - MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE=health,info,metrics,prometheus,configprops
    - MANAGEMENT_ENDPOINT_HEALTH_SHOW_DETAILS=always
    - MANAGEMENT_ENDPOINT_HEALTH_PROBES_ENABLED=true
    - MANAGEMENT_TRACING_SAMPLING_PROBABILITY=${TRACING_SAMPLING:-0.1}
    - MANAGEMENT_ZIPKIN_TRACING_ENDPOINT=http://zipkin:9411/api/v2/spans
    # Performance Tuning
    - JAVA_OPTS=-XX:MaxRAMPercentage=80.0 -XX:+UseG1GC -XX:+UseStringDeduplication
    - LOGGING_LEVEL_ROOT=${LOG_LEVEL:-INFO}
    - LOGGING_LEVEL_AT_MOCODE_EVENTS=${EVENTS_LOG_LEVEL:-DEBUG}
  networks:
    - meldestelle-network
  volumes:
    - events-logs:/app/logs
    - events-temp:/app/tmp
  healthcheck:
    test: ["CMD", "curl", "-fsS", "--max-time", "3", "http://localhost:8086/actuator/health/readiness"]
    interval: 15s
    timeout: 5s
    retries: 3
    start_period: 45s
  restart: unless-stopped
  deploy:
    resources:
      limits:
        memory: 1024M
        cpus: '1.5'
  labels:
    - "traefik.enable=true"
    - "traefik.http.routers.events-service.rule=Host(`events.meldestelle.local`)"
    - "traefik.http.services.events-service.loadbalancer.server.port=8086"
    - "prometheus.scrape=true"
    - "prometheus.port=8086"
    - "prometheus.path=/actuator/prometheus"
    - "prometheus.service=events-service"
    - "service.name=events-service"
    - "service.version=1.0.0"
    - "service.environment=${SPRING_PROFILES_ACTIVE:-docker}"
# ===================================================================
# Masterdata Service
# ===================================================================
# Domain service for master data (REST on 8087).
masterdata-service:
  build:
    context: .
    dockerfile: dockerfiles/services/masterdata-service/Dockerfile
    args:
      GRADLE_VERSION: ${GRADLE_VERSION:-8.14}
      JAVA_VERSION: ${JAVA_VERSION:-21}
      SPRING_PROFILES_ACTIVE: ${SPRING_PROFILES_ACTIVE:-docker}
      APP_USER: ${MASTERDATA_APP_USER:-masterdatauser}
      APP_UID: ${MASTERDATA_APP_UID:-1007}
      APP_GID: ${MASTERDATA_APP_GID:-1007}
  image: meldestelle/masterdata-service:latest
  container_name: meldestelle-masterdata-service
  ports:
    - "${MASTERDATA_SERVICE_PORT:-8087}:8087"
    # Fix: host-port default changed from 5007 to 5017 — api-gateway in this
    # file already defaults its debug port to host 5007, so both services
    # could not start together with default settings (host port bind
    # conflict). Overriding via MASTERDATA_DEBUG_PORT is still supported;
    # the container-side debug port remains 5007.
    - "${MASTERDATA_DEBUG_PORT:-5017}:5007" # Debug port
  depends_on:
    postgres:
      condition: service_healthy
    consul:
      condition: service_healthy
    redis:
      condition: service_healthy
    auth-server:
      condition: service_healthy
  environment:
    # Spring Boot Configuration
    - SPRING_PROFILES_ACTIVE=${SPRING_PROFILES_ACTIVE:-docker}
    - SERVER_PORT=8087
    - MANAGEMENT_SERVER_PORT=8087
    - DEBUG=${DEBUG:-false}
    # Service Discovery
    - SPRING_CLOUD_CONSUL_HOST=consul
    - SPRING_CLOUD_CONSUL_PORT=8500
    - SPRING_APPLICATION_NAME=masterdata-service
    # Database Configuration
    - SPRING_DATASOURCE_URL=jdbc:postgresql://postgres:5432/${POSTGRES_DB:-meldestelle}
    - SPRING_DATASOURCE_USERNAME=${POSTGRES_USER:-meldestelle}
    - SPRING_DATASOURCE_PASSWORD=${POSTGRES_PASSWORD:-meldestelle}
    - SPRING_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE=${MASTERDATA_DB_POOL_SIZE:-15}
    - SPRING_DATASOURCE_HIKARI_MINIMUM_IDLE=${MASTERDATA_DB_MIN_IDLE:-8}
    # Redis Configuration
    - SPRING_REDIS_HOST=redis
    - SPRING_REDIS_PORT=6379
    - SPRING_REDIS_PASSWORD=${REDIS_PASSWORD:-}
    - SPRING_REDIS_TIMEOUT=${REDIS_TIMEOUT:-2000ms}
    # Security Configuration
    - AUTH_SERVER_URL=http://auth-server:8081
    # NOTE(review): insecure fallback secret — deployments must set JWT_SECRET.
    - JWT_SECRET=${JWT_SECRET:-meldestelle-auth-secret-key-change-in-production}
    # Monitoring & Observability
    - MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE=health,info,metrics,prometheus,configprops
    - MANAGEMENT_ENDPOINT_HEALTH_SHOW_DETAILS=always
    - MANAGEMENT_ENDPOINT_HEALTH_PROBES_ENABLED=true
    - MANAGEMENT_TRACING_SAMPLING_PROBABILITY=${TRACING_SAMPLING:-0.1}
    - MANAGEMENT_ZIPKIN_TRACING_ENDPOINT=http://zipkin:9411/api/v2/spans
    # Performance Tuning
    - JAVA_OPTS=-XX:MaxRAMPercentage=80.0 -XX:+UseG1GC -XX:+UseStringDeduplication
    - LOGGING_LEVEL_ROOT=${LOG_LEVEL:-INFO}
    - LOGGING_LEVEL_AT_MOCODE_MASTERDATA=${MASTERDATA_LOG_LEVEL:-DEBUG}
  networks:
    - meldestelle-network
  volumes:
    - masterdata-logs:/app/logs
    - masterdata-temp:/app/tmp
  healthcheck:
    test: ["CMD", "curl", "-fsS", "--max-time", "3", "http://localhost:8087/actuator/health/readiness"]
    interval: 15s
    timeout: 5s
    retries: 3
    start_period: 45s
  restart: unless-stopped
  deploy:
    resources:
      limits:
        memory: 768M
        cpus: '1.0'
  labels:
    - "traefik.enable=true"
    - "traefik.http.routers.masterdata-service.rule=Host(`masterdata.meldestelle.local`)"
    - "traefik.http.services.masterdata-service.loadbalancer.server.port=8087"
    - "prometheus.scrape=true"
    - "prometheus.port=8087"
    - "prometheus.path=/actuator/prometheus"
    - "prometheus.service=masterdata-service"
    - "service.name=masterdata-service"
    - "service.version=1.0.0"
    - "service.environment=${SPRING_PROFILES_ACTIVE:-docker}"
# ===================================================================
# Volumes for Service Data, Logs, and Temporary Files
# ===================================================================
volumes:
  # NOTE(review): the bind-style volumes below (driver_opts type:none/o:bind)
  # require the ./logs/* and ./data/* host directories to exist before
  # `docker compose up` — Docker does not create the bind source for named
  # volumes with driver_opts, and startup fails if a directory is missing.
  # Authentication Server
  auth-logs:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./logs/auth-server
  auth-temp:
    driver: local
  # Monitoring Server
  monitoring-logs:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./logs/monitoring-server
  monitoring-temp:
    driver: local
  monitoring-data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./data/monitoring-server
  # API Gateway
  gateway-logs:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./logs/api-gateway
  gateway-temp:
    driver: local
  # Ping Service
  ping-logs:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./logs/ping-service
  ping-temp:
    driver: local
  # Members Service
  members-logs:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./logs/members-service
  members-temp:
    driver: local
  # Horses Service
  horses-logs:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./logs/horses-service
  horses-temp:
    driver: local
  # Events Service
  events-logs:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./logs/events-service
  events-temp:
    driver: local
  # Masterdata Service
  masterdata-logs:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./logs/masterdata-service
  masterdata-temp:
    driver: local
# ===================================================================
# Networks (inherits from main docker-compose.yml)
# ===================================================================
networks:
  # Shared bridge network; when this file is layered on the base compose
  # file, Compose merges this definition with the one defined there.
  meldestelle-network:
    driver: bridge

View File

@ -1,8 +1,18 @@
#version: '3.8'
# ===================================================================
# Docker Compose - base infrastructure
# Meldestelle project - essential services
# ===================================================================
# Usage:
#   Development & default: docker-compose up -d
# ===================================================================
services:
  # =================================================================
  # Database
  # =================================================================
  postgres:
    image: postgres:16-alpine
    container_name: meldestelle-postgres
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-meldestelle}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-meldestelle}
# NOTE(review): diff-hunk marker below — the lines between the environment
# block and `networks:` (e.g. POSTGRES_DB, ports, volumes) are elided in
# this view; restore them from the committed file.
@ -15,15 +25,19 @@ services:
    networks:
      - meldestelle-network
    healthcheck:
# NOTE(review): the next two lines are the old and new variant of the same
# healthcheck `test` entry (diff residue) — only one should remain. Also,
# the check hardcodes user/db; consider ${POSTGRES_USER}/${POSTGRES_DB}.
      test: ["CMD-SHELL", "pg_isready -U meldestelle -d meldestelle"]
      test: [ "CMD-SHELL", "pg_isready -U meldestelle -d meldestelle" ]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 20s
    restart: unless-stopped
  # =================================================================
  # Cache
  # =================================================================
  redis:
    image: redis:7-alpine
    container_name: meldestelle-redis
    ports:
      - "6379:6379"
    volumes:
# NOTE(review): diff-hunk marker below — the volume entries (and any command
# line) between here and `networks:` are elided in this view.
@ -32,22 +46,26 @@ services:
    networks:
      - meldestelle-network
    healthcheck:
# NOTE(review): old and new variant of the same `test` entry (diff residue);
# only one should remain.
      test: ["CMD", "redis-cli", "ping"]
      test: [ "CMD", "redis-cli", "ping" ]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 10s
    restart: unless-stopped
  # =================================================================
  # Authentication
  # =================================================================
  keycloak:
    image: quay.io/keycloak/keycloak:23.0
    container_name: meldestelle-keycloak
    environment:
      KEYCLOAK_ADMIN: ${KEYCLOAK_ADMIN:-admin}
      KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD:-admin}
# NOTE(review): the next two groups are the old and new variants of the same
# DB settings (diff residue — duplicate YAML keys). Only the second group,
# which reuses the POSTGRES_* variables, should remain. Also note the new
# KC_DB_URL stores Keycloak tables in the main application database instead
# of the former dedicated 'keycloak' database — confirm this is intended.
      KC_DB: ${KC_DB:-postgres}
      KC_DB_URL: ${KC_DB_URL:-jdbc:postgresql://postgres:5432/keycloak}
      KC_DB_USERNAME: ${KC_DB_USERNAME:-meldestelle}
      KC_DB_PASSWORD: ${KC_DB_PASSWORD:-meldestelle}
      KC_DB: postgres
      KC_DB_URL: jdbc:postgresql://postgres:5432/${POSTGRES_DB:-meldestelle}
      KC_DB_USERNAME: ${POSTGRES_USER:-meldestelle}
      KC_DB_PASSWORD: ${POSTGRES_PASSWORD:-meldestelle}
    ports:
      - "8180:8080"   # Keycloak HTTP/admin console exposed on host port 8180
    depends_on:
# NOTE(review): diff-hunk marker below — the depends_on conditions and the
# Keycloak start command/volumes are elided in this view.
@ -59,159 +77,44 @@ services:
    networks:
      - meldestelle-network
    healthcheck:
# NOTE(review): old and new variant of the same `test` entry (diff residue).
      test: ["CMD", "curl", "--fail", "http://localhost:8080/health/ready"]
      test: [ "CMD", "curl", "--fail", "http://localhost:8080/health/ready" ]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    restart: unless-stopped
  # NOTE(review): the commit intent is to remove redundant services; these
  # three services carry no updated lines in the diff, so they may be
  # deletions rather than kept content — verify against the committed file.
  # Kafka coordination service (required by the cp-kafka broker below)
  zookeeper:
    image: confluentinc/cp-zookeeper:7.5.0
    environment:
      ZOOKEEPER_CLIENT_PORT: ${ZOOKEEPER_CLIENT_PORT:-2181}
    ports:
      - "2181:2181"
    networks:
      - meldestelle-network
    healthcheck:
      test: ["CMD", "nc", "-z", "localhost", "2181"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 10s
    restart: unless-stopped
  # Event broker (single-node development setup)
  kafka:
    image: confluentinc/cp-kafka:7.5.0
    depends_on:
      zookeeper:
        condition: service_healthy
    ports:
      - "9092:9092"
    environment:
      KAFKA_BROKER_ID: ${KAFKA_BROKER_ID:-1}
      KAFKA_ZOOKEEPER_CONNECT: ${KAFKA_ZOOKEEPER_CONNECT:-zookeeper:2181}
      # internal listener kafka:29092 for containers, localhost:9092 for host
      KAFKA_ADVERTISED_LISTENERS: ${KAFKA_ADVERTISED_LISTENERS:-PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092}
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: ${KAFKA_LISTENER_SECURITY_PROTOCOL_MAP:-PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT}
      KAFKA_INTER_BROKER_LISTENER_NAME: ${KAFKA_INTER_BROKER_LISTENER_NAME:-PLAINTEXT}
      # single broker -> replication factor must be 1
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: ${KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR:-1}
    networks:
      - meldestelle-network
    healthcheck:
      test: ["CMD", "kafka-topics", "--bootstrap-server", "localhost:9092", "--list"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s
    restart: unless-stopped
  # Distributed tracing collector/UI
  zipkin:
    image: openzipkin/zipkin:2
    ports:
      - "9411:9411"
    networks:
      - meldestelle-network
    healthcheck:
      test: ["CMD", "wget", "-q", "-O", "-", "http://localhost:9411/health"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 10s
    restart: unless-stopped
  # =================================================================
  # Service Discovery
  # =================================================================
  consul:
    image: hashicorp/consul:1.15
    container_name: meldestelle-consul
    ports:
      - "8500:8500"       # HTTP API + web UI
      - "8600:8600/udp"   # DNS interface
    # single-node dev server with UI, bound to all interfaces
    command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0
    networks:
      - meldestelle-network
    healthcheck:
# NOTE(review): old and new variant of the same `test` entry (diff residue);
# only one should remain.
      test: ["CMD", "curl", "-f", "http://localhost:8500/v1/status/leader"]
      test: [ "CMD", "curl", "-f", "http://localhost:8500/v1/status/leader" ]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 15s
    restart: unless-stopped
  # =================================================================
  # Application Services moved to docker-compose.services.yml
  # Usage: docker-compose -f docker-compose.yml -f docker-compose.services.yml up
  # =================================================================
  # Optional monitoring services
  # NOTE(review): these services carry no updated lines in the diff; they may
  # be deletions in this commit — verify against the committed file.
  prometheus:
    image: prom/prometheus:v2.48.1
    volumes:
      - ./config/monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus-data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--web.enable-lifecycle'   # enables config reload via HTTP lifecycle endpoint
    ports:
      - "9090:9090"
    networks:
      - meldestelle-network
    healthcheck:
      test: ["CMD", "wget", "-q", "-O", "-", "http://localhost:9090/-/healthy"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 15s
    restart: unless-stopped
    # Security: run as a non-root user (uid/gid 65534 = nobody)
    user: "65534:65534"
    # Resource limits for development
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
  grafana:
    image: grafana/grafana:10.2.3
    volumes:
      - ./config/monitoring/grafana/provisioning:/etc/grafana/provisioning
      - ./config/monitoring/grafana/dashboards:/var/lib/grafana/dashboards
      - grafana-data:/var/lib/grafana
    environment:
      - GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER:-admin}
      - GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD:-admin}
      - GF_USERS_ALLOW_SIGN_UP=${GF_USERS_ALLOW_SIGN_UP:-false}
    ports:
      - "3000:3000"
    networks:
      - meldestelle-network
    depends_on:
      prometheus:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "wget", "-q", "-O", "-", "http://localhost:3000/api/health"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 20s
    restart: unless-stopped
    # Security: run as a non-root user (uid/gid 472)
    user: "472:472"
    # Resource limits for development
    deploy:
      resources:
        limits:
          memory: 256M
          cpus: '0.25'
# ===================================================================
# Volumes
# ===================================================================
volumes:
  postgres-data:
    driver: local
  redis-data:
  prometheus-data:
  grafana-data:
    driver: local
# NOTE(review): the placement of the two `driver: local` lines above looks
# like diff residue (old vs. new variants interleaved); each named volume
# should declare (or omit) `driver: local` consistently — verify against
# the committed file.
# ===================================================================
# Networks
# ===================================================================
networks:
  meldestelle-network:
    driver: bridge

View File

@ -29,3 +29,7 @@ kotlin.build.report.single_file=false
# Compose Experimental Features
org.jetbrains.compose.experimental.jscanvas.enabled=true
# Java Toolchain: ensure Gradle auto-downloads a full JDK when needed
org.gradle.java.installations.auto-download=true
org.gradle.java.installations.auto-detect=true

229
test-dockerfile.sh Normal file → Executable file
View File

@ -1,104 +1,163 @@
#!/bin/bash
# Test script to validate the kotlin-multiplatform-web.Dockerfile template
# - Robust pre-checks (Docker, buildx, file existence)
# - Safer bash settings, clear diagnostics
# - Uses ephemeral ports for container run test (avoids conflicts)
# - Cleans up containers/images even on failure
#
# NOTE(review): this span was a diff-interleaved mix of the old and new script
# versions; reconstructed here as the coherent new version, with one bug fix
# (see HOST_PORT detection below: `\d` is not valid POSIX/GNU ERE).

set -Eeuo pipefail

DOCKERFILE_PATH="dockerfiles/templates/kotlin-multiplatform-web.Dockerfile"
# shellcheck disable=SC2034  # kept for diagnostics/log prefixes
SCRIPT_NAME="$(basename "$0")"

# Unique suffix to avoid tag/container collisions with parallel runs
RAND_SUFFIX=$(date +%s)-$RANDOM
IMAGE_DEFAULT="test-kotlin-web:default-${RAND_SUFFIX}"
IMAGE_CUSTOM="test-kotlin-web:custom-${RAND_SUFFIX}"
CONTAINER_NAME="test-container-${RAND_SUFFIX}"

# Remove the test container and images; runs on every exit (success or failure).
cleanup() {
    echo "[cleanup] Stopping/removing test resources (if any)..."
    docker rm -f "$CONTAINER_NAME" >/dev/null 2>&1 || true
    docker rmi "$IMAGE_DEFAULT" "$IMAGE_CUSTOM" >/dev/null 2>&1 || true
}
trap cleanup EXIT

# Simple log helpers; fail() terminates the script with a non-zero status.
info()    { echo "[INFO] $*"; }
success() { echo "[ OK ] $*"; }
warn()    { echo "[WARN] $*"; }
fail()    { echo "[FAIL] $*"; exit 1; }

info "Testing Kotlin Multiplatform Web Dockerfile Template"
echo "======================================================="

# -------------------------------------------------------------------
# 0. Pre-checks
# -------------------------------------------------------------------
command -v docker >/dev/null 2>&1 || fail "Docker is not installed or not in PATH"

if ! docker info >/dev/null 2>&1; then
    fail "Docker does not seem to be running or accessible for the current user"
fi

if [ ! -f "$DOCKERFILE_PATH" ]; then
    fail "Dockerfile not found at: $DOCKERFILE_PATH"
fi

HAS_BUILDX=1
if ! docker buildx version >/dev/null 2>&1; then
    HAS_BUILDX=0
    warn "docker buildx not available; skipping buildx-specific syntax check"
fi

# -------------------------------------------------------------------
# 1. Static checks on Dockerfile structure
# -------------------------------------------------------------------
info "1) Validating Dockerfile structure and ARG definitions"

# Required ARG variables must be defined (somewhere in the file)
if grep -q "^ARG CLIENT_PATH=" "$DOCKERFILE_PATH" \
   && grep -q "^ARG CLIENT_MODULE=" "$DOCKERFILE_PATH" \
   && grep -q "^ARG CLIENT_NAME=" "$DOCKERFILE_PATH"; then
    success "Required ARG declarations found"
else
    fail "Missing required ARG declarations (CLIENT_PATH, CLIENT_MODULE, CLIENT_NAME)"
fi

# Ensure expected stages are present (case-insensitive AS keyword)
if grep -qiE "^FROM .* as kotlin-builder" "$DOCKERFILE_PATH" && \
   grep -qiE "^FROM .* as runtime" "$DOCKERFILE_PATH"; then
    success "Build stages 'kotlin-builder' and 'runtime' found"
else
    fail "Expected stages 'kotlin-builder' and/or 'runtime' not found"
fi

# Verify that ARGs are re-declared in both stages (search ~40 lines after each stage marker)
kotlin_builder_args=$(grep -n "^FROM .* [Aa][Ss] kotlin-builder" "$DOCKERFILE_PATH" | cut -d: -f1 | xargs -I{} sh -c "sed -n '{}','{}+40p' '$DOCKERFILE_PATH' | grep -c '^ARG'" || echo 0)
runtime_args=$(grep -n "^FROM .* [Aa][Ss] runtime" "$DOCKERFILE_PATH" | cut -d: -f1 | xargs -I{} sh -c "sed -n '{}','{}+40p' '$DOCKERFILE_PATH' | grep -c '^ARG'" || echo 0)
if [ "${kotlin_builder_args:-0}" -ge 3 ] && [ "${runtime_args:-0}" -ge 3 ]; then
    success "ARG declarations appear in both build stages"
else
    fail "ARG declarations appear to be missing in one or both build stages"
fi

# Optional: attempt a lightweight parsing via buildx (no image kept).
# NOTE(review): this actually starts a build; output is only scanned for
# solver errors, so a heavy compile may still be triggered — confirm this is
# acceptable for CI runtime.
if [ "$HAS_BUILDX" -eq 1 ]; then
    info "Performing basic Dockerfile parsing with buildx (no image kept)"
    # Don't fail the whole flow on noisy build output; look only for solver errors
    if docker buildx build --no-cache -f "$DOCKERFILE_PATH" --platform linux/amd64 . \
        2>&1 | head -50 | grep -q "ERROR.*failed to solve"; then
        fail "Dockerfile has parsing errors (buildx failed to solve)"
    else
        success "Dockerfile basic parsing passed"
    fi
else
    warn "Skipping buildx parsing check"
fi

# -------------------------------------------------------------------
# 2. Build with default arguments (web-app)
# -------------------------------------------------------------------
info "2) Building image with default arguments (web-app)"
if docker build --no-cache -f "$DOCKERFILE_PATH" -t "$IMAGE_DEFAULT" .; then
    success "Build with default arguments successful"
else
    fail "Build with default arguments failed"
fi

# -------------------------------------------------------------------
# 3. Build with custom arguments (desktop-app scenario)
# -------------------------------------------------------------------
info "3) Building image with custom arguments (desktop-app scenario)"
if docker build --no-cache -f "$DOCKERFILE_PATH" \
    --build-arg CLIENT_PATH=client/desktop-app \
    --build-arg CLIENT_MODULE=client:desktop-app \
    --build-arg CLIENT_NAME=desktop-app \
    -t "$IMAGE_CUSTOM" .; then
    success "Build with custom arguments successful"
else
    # Non-fatal: demonstrates the template accepts alternative client modules
    warn "Build with custom arguments failed (this can be expected if desktop-app lacks proper assets/nginx.conf)"
fi

# -------------------------------------------------------------------
# 4. Run container and validate it responds over HTTP
# -------------------------------------------------------------------
info "4) Running container from default image and validating HTTP response"
# -P maps exposed ports to random host ports; then detect the mapped port
if docker run --rm -d --name "$CONTAINER_NAME" -P "$IMAGE_DEFAULT" >/dev/null; then
    sleep 3
    # Determine mapped host port for container port 80.
    # FIX: previous expression used `\d`, which POSIX/GNU ERE does not
    # support (it matched a literal 'd'); use an explicit digit class.
    HOST_PORT=$(docker port "$CONTAINER_NAME" 80/tcp | sed -E 's/.*:([0-9]+)$/\1/' | head -n1 || true)
    if [ -z "${HOST_PORT:-}" ]; then
        docker logs "$CONTAINER_NAME" || true
        fail "Could not determine mapped host port for container"
    fi
    # Try a few times to allow nginx to start
    for i in {1..10}; do
        if curl -fsS "http://127.0.0.1:${HOST_PORT}" >/dev/null 2>&1; then
            success "Container responded over HTTP on localhost:${HOST_PORT}"
            break
        fi
        sleep 1
    done
    # Final check (if not succeeded yet)
    if ! curl -fsS "http://127.0.0.1:${HOST_PORT}" >/dev/null 2>&1; then
        docker logs "$CONTAINER_NAME" || true
        fail "Container started but did not respond on HTTP port"
    fi
else
    fail "Container failed to start"
fi

# -------------------------------------------------------------------
# 5. Done (cleanup happens via trap)
# -------------------------------------------------------------------
echo ""
echo "======================================================="
success "All tests completed successfully. The Dockerfile template looks healthy."
echo "Highlights:"
echo "  - Verified presence of required ARGs and stages"
echo "  - Performed basic parsing (when buildx available)"
echo "  - Built images (default + custom args)"
echo "  - Validated container HTTP responsiveness via ephemeral port"
echo "======================================================="