chore(MP-23): network DI client, frontend architecture guards, detekt & ktlint setup, docs, ping DI factory (#21)

* chore(MP-21): snapshot pre-refactor state (Epic 1)

* chore(MP-22): scaffold new repo structure, relocate Docker Compose, move frontend/backend modules, update Makefile; add docs mapping and env template

* MP-22 Epic 2: Erfolgreich umgesetzt und verifiziert

* MP-23 Epic 3: Gradle/Build Governance zentralisieren
This commit is contained in:
StefanMo 2025-11-30 23:14:00 +01:00 committed by GitHub
parent 89bbd42245
commit 034892e890
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
101 changed files with 857 additions and 407 deletions

View File

@ -38,19 +38,19 @@ help: ## Show this help message
dev-up: ## Start development environment (single compose) dev-up: ## Start development environment (single compose)
@echo "🚀 Starting development environment..." @echo "🚀 Starting development environment..."
$(COMPOSE) -f docker-compose.yml up -d $(COMPOSE) -f docker/docker-compose.yml up -d
@$(MAKE) dev-info @$(MAKE) dev-info
dev-down: ## Stop development environment dev-down: ## Stop development environment
@echo "🛑 Stopping development environment..." @echo "🛑 Stopping development environment..."
$(COMPOSE) -f docker-compose.yml down $(COMPOSE) -f docker/docker-compose.yml down
dev-restart: ## Restart full development environment dev-restart: ## Restart full development environment
@$(MAKE) dev-down @$(MAKE) dev-down
@$(MAKE) dev-up @$(MAKE) dev-up
dev-logs: ## Show logs for all development services dev-logs: ## Show logs for all development services
$(COMPOSE) -f docker-compose.yml logs -f $(COMPOSE) -f docker/docker-compose.yml logs -f
# =================================================================== # ===================================================================
# Layer-specific Commands # Layer-specific Commands
@ -58,7 +58,7 @@ dev-logs: ## Show logs for all development services
infrastructure-up: ## Start only infrastructure services (postgres, redis, keycloak, consul) infrastructure-up: ## Start only infrastructure services (postgres, redis, keycloak, consul)
@echo "🏗️ Starting infrastructure services..." @echo "🏗️ Starting infrastructure services..."
$(COMPOSE) -f docker-compose.yml up -d $(COMPOSE) -f docker/docker-compose.yml up -d
@echo "✅ Infrastructure services started" @echo "✅ Infrastructure services started"
@echo "🗄️ PostgresQL: localhost:5432" @echo "🗄️ PostgresQL: localhost:5432"
@echo "🔴 Redis: localhost:6379" @echo "🔴 Redis: localhost:6379"
@ -66,14 +66,14 @@ infrastructure-up: ## Start only infrastructure services (postgres, redis, keycl
@echo "🧭 Consul: http://localhost:8500" @echo "🧭 Consul: http://localhost:8500"
infrastructure-down: ## Stop infrastructure services infrastructure-down: ## Stop infrastructure services
$(COMPOSE) -f docker-compose.yml down $(COMPOSE) -f docker/docker-compose.yml down
infrastructure-logs: ## Show infrastructure logs infrastructure-logs: ## Show infrastructure logs
$(COMPOSE) -f docker-compose.yml logs -f $(COMPOSE) -f docker/docker-compose.yml logs -f
services-up: ## Start application services (infrastructure + microservices) services-up: ## Start application services (infrastructure + microservices)
@echo "⚙️ Starting application services..." @echo "⚙️ Starting application services..."
$(COMPOSE) -f docker-compose.yml -f docker-compose.services.yml up -d $(COMPOSE) -f docker/docker-compose.yml -f docker/docker-compose.services.yml up -d
@echo "✅ Application services started" @echo "✅ Application services started"
@echo "🔗 Gateway: http://localhost:8081" @echo "🔗 Gateway: http://localhost:8081"
@echo "🏓 Ping Service: http://localhost:8082" @echo "🏓 Ping Service: http://localhost:8082"
@ -83,32 +83,32 @@ services-up: ## Start application services (infrastructure + microservices)
@echo "📊 Master Service: http://localhost:8086" @echo "📊 Master Service: http://localhost:8086"
services-down: ## Stop application services services-down: ## Stop application services
$(COMPOSE) -f docker-compose.yml -f docker-compose.services.yml down $(COMPOSE) -f docker/docker-compose.yml -f docker/docker-compose.services.yml down
services-restart: ## Restart application services services-restart: ## Restart application services
@$(MAKE) services-down @$(MAKE) services-down
@$(MAKE) services-up @$(MAKE) services-up
services-logs: ## Show application services logs services-logs: ## Show application services logs
$(COMPOSE) -f docker-compose.yml -f docker-compose.services.yml logs -f $(COMPOSE) -f docker/docker-compose.yml -f docker/docker-compose.services.yml logs -f
clients-up: ## Start client applications (infrastructure + clients) clients-up: ## Start client applications (infrastructure + clients)
@echo "💻 Starting client applications..." @echo "💻 Starting client applications..."
$(COMPOSE) -f docker-compose.yml -f docker-compose.clients.yml up -d $(COMPOSE) -f docker/docker-compose.yml -f docker/docker-compose.clients.yml up -d
@echo "✅ Client applications started" @echo "✅ Client applications started"
@echo "🌐 Web App: http://localhost:4000" @echo "🌐 Web App: http://localhost:4000"
@echo "🔐 Auth Server: http://localhost:8087" @echo "🔐 Auth Server: http://localhost:8087"
@echo "📈 Monitoring: http://localhost:8088" @echo "📈 Monitoring: http://localhost:8088"
clients-down: ## Stop client applications clients-down: ## Stop client applications
$(COMPOSE) -f docker-compose.yml -f docker-compose.clients.yml down $(COMPOSE) -f docker/docker-compose.yml -f docker/docker-compose.clients.yml down
clients-restart: ## Restart client applications clients-restart: ## Restart client applications
@$(MAKE) clients-down @$(MAKE) clients-down
@$(MAKE) clients-up @$(MAKE) clients-up
clients-logs: ## Show client application logs clients-logs: ## Show client application logs
$(COMPOSE) -f docker-compose.yml -f docker-compose.clients.yml logs -f $(COMPOSE) -f docker/docker-compose.yml -f docker/docker-compose.clients.yml logs -f
# =================================================================== # ===================================================================
# Full System Commands # Full System Commands
@ -116,7 +116,7 @@ clients-logs: ## Show client application logs
full-up: ## Start complete system (infrastructure + services + clients) full-up: ## Start complete system (infrastructure + services + clients)
@echo "🚀 Starting complete Meldestelle system..." @echo "🚀 Starting complete Meldestelle system..."
$(COMPOSE) -f docker-compose.yml -f docker-compose.services.yml -f docker-compose.clients.yml up -d $(COMPOSE) -f docker/docker-compose.yml -f docker/docker-compose.services.yml -f docker/docker-compose.clients.yml up -d
@echo "✅ Complete system started" @echo "✅ Complete system started"
@echo "" @echo ""
@echo "🌐 Frontend & APIs:" @echo "🌐 Frontend & APIs:"
@ -141,14 +141,14 @@ full-up: ## Start complete system (infrastructure + services + clients)
@echo " Monitoring: http://localhost:8088" @echo " Monitoring: http://localhost:8088"
full-down: ## Stop complete system full-down: ## Stop complete system
$(COMPOSE) -f docker-compose.yml -f docker-compose.services.yml -f docker-compose.clients.yml down $(COMPOSE) -f docker/docker-compose.yml -f docker/docker-compose.services.yml -f docker/docker-compose.clients.yml down
full-restart: ## Restart complete system full-restart: ## Restart complete system
@$(MAKE) full-down @$(MAKE) full-down
@$(MAKE) full-up @$(MAKE) full-up
full-logs: ## Show all system logs full-logs: ## Show all system logs
$(COMPOSE) -f docker-compose.yml -f docker-compose.services.yml -f docker-compose.clients.yml logs -f $(COMPOSE) -f docker/docker-compose.yml -f docker/docker-compose.services.yml -f docker/docker-compose.clients.yml logs -f
# =================================================================== # ===================================================================
# SSoT Developer UX (optional helpers) # SSoT Developer UX (optional helpers)

7
backend/README.md Normal file
View File

@ -0,0 +1,7 @@
# Backend
Domänenspezifische Services und Gateway.
- gateway: API Gateway/Auth/Routing
- discovery: Service Registry/Discovery (optional)
- services: Microservices, pro Domäne ein Service

View File

@ -19,7 +19,7 @@ dependencies {
// Platform und Core Dependencies // Platform und Core Dependencies
implementation(projects.platform.platformDependencies) implementation(projects.platform.platformDependencies)
implementation(projects.services.ping.pingApi) implementation(project(":backend:services:ping:ping-api"))
implementation(projects.infrastructure.monitoring.monitoringClient) implementation(projects.infrastructure.monitoring.monitoringClient)
// Spring Boot Service Complete Bundle // Spring Boot Service Complete Bundle

View File

@ -1,23 +1,26 @@
plugins { plugins {
// Version management plugin for dependency updates // Version management plugin for dependency updates
id("com.github.ben-manes.versions") version "0.51.0" id("com.github.ben-manes.versions") version "0.51.0"
// Kotlin plugins declared here with 'apply false' to centralize version management // Kotlin plugins declared here with 'apply false' to centralize version management
// This prevents "plugin loaded multiple times" errors in Gradle 9.1.0+ // This prevents "plugin loaded multiple times" errors in Gradle 9.1.0+
// Subprojects apply these plugins via version catalog: alias(libs.plugins.kotlinJvm) // Subprojects apply these plugins via version catalog: alias(libs.plugins.kotlinJvm)
alias(libs.plugins.kotlinJvm) apply false alias(libs.plugins.kotlinJvm) apply false
alias(libs.plugins.kotlinMultiplatform) apply false alias(libs.plugins.kotlinMultiplatform) apply false
alias(libs.plugins.kotlinSerialization) apply false alias(libs.plugins.kotlinSerialization) apply false
alias(libs.plugins.kotlinSpring) apply false alias(libs.plugins.kotlinSpring) apply false
alias(libs.plugins.kotlinJpa) apply false alias(libs.plugins.kotlinJpa) apply false
alias(libs.plugins.composeMultiplatform) apply false alias(libs.plugins.composeMultiplatform) apply false
alias(libs.plugins.composeCompiler) apply false alias(libs.plugins.composeCompiler) apply false
alias(libs.plugins.spring.boot) apply false alias(libs.plugins.spring.boot) apply false
alias(libs.plugins.spring.dependencyManagement) apply false alias(libs.plugins.spring.dependencyManagement) apply false
// Dokka plugin applied at root to create multi-module collector tasks // Dokka plugin applied at root to create multi-module collector tasks
alias(libs.plugins.dokka) alias(libs.plugins.dokka)
// Static analysis (enabled at root and inherited by subprojects)
id("io.gitlab.arturbosch.detekt") version "1.23.6"
id("org.jlleitschuh.gradle.ktlint") version "12.1.1"
} }
// ################################################################## // ##################################################################
@ -25,90 +28,196 @@ plugins {
// ################################################################## // ##################################################################
allprojects { allprojects {
group = "at.mocode" group = "at.mocode"
version = "1.0.0-SNAPSHOT" version = "1.0.0-SNAPSHOT"
// Apply common repository configuration // Apply common repository configuration
repositories { repositories {
mavenCentral() mavenCentral()
google() google()
maven { url = uri("https://jitpack.io") } maven { url = uri("https://jitpack.io") }
maven { url = uri("https://oss.sonatype.org/content/repositories/snapshots/") } maven { url = uri("https://oss.sonatype.org/content/repositories/snapshots/") }
maven { url = uri("https://maven.pkg.jetbrains.space/public/p/compose/dev") } maven { url = uri("https://maven.pkg.jetbrains.space/public/p/compose/dev") }
maven { url = uri("https://us-central1-maven.pkg.dev/varabyte-repos/public") } maven { url = uri("https://us-central1-maven.pkg.dev/varabyte-repos/public") }
} }
} }
subprojects { subprojects {
// Note: Kotlin compiler configuration is handled by individual modules // Note: Kotlin compiler configuration is handled by individual modules
// a Root project doesn't apply Kotlin plugins, so we can't configure KotlinCompile tasks here // a Root project doesn't apply Kotlin plugins, so we can't configure KotlinCompile tasks here
tasks.withType<Test>().configureEach { tasks.withType<Test>().configureEach {
useJUnitPlatform { useJUnitPlatform {
excludeTags("perf") excludeTags("perf")
}
// Configure CDS in auto-mode to prevent bootstrap classpath warnings
jvmArgs("-Xshare:auto", "-Djdk.instrument.traceUsage=false")
// Increase test JVM memory with a stable configuration
minHeapSize = "512m"
maxHeapSize = "2g"
// Parallel test execution for better performance
maxParallelForks = (Runtime.getRuntime().availableProcessors() / 2).coerceAtLeast(1)
// Removed byte-buddy-agent configuration to fix Gradle 9.0.0 deprecation warning
// The agent configuration was causing Task.project access at execution time
} }
// Configure CDS in auto-mode to prevent bootstrap classpath warnings
jvmArgs("-Xshare:auto", "-Djdk.instrument.traceUsage=false")
// Increase test JVM memory with a stable configuration
minHeapSize = "512m"
maxHeapSize = "2g"
// Parallel test execution for better performance
maxParallelForks = (Runtime.getRuntime().availableProcessors() / 2).coerceAtLeast(1)
// Removed byte-buddy-agent configuration to fix Gradle 9.0.0 deprecation warning
// The agent configuration was causing Task.project access at execution time
}
// Erzwinge eine stabile Version von kotlinx-serialization-json für alle Konfigurationen, // Erzwinge eine stabile Version von kotlinx-serialization-json für alle Konfigurationen,
// um Auflösungsfehler (z.B. 1.10.2, nicht verfügbar auf Maven Central) zu vermeiden // um Auflösungsfehler (z.B. 1.10.2, nicht verfügbar auf Maven Central) zu vermeiden
configurations.configureEach { configurations.configureEach {
resolutionStrategy { resolutionStrategy {
force("org.jetbrains.kotlinx:kotlinx-serialization-json:1.7.3") force("org.jetbrains.kotlinx:kotlinx-serialization-json:1.7.3")
}
} }
}
// Dedicated performance test task per JVM subproject // Dedicated performance test task per JVM subproject
plugins.withId("java") { plugins.withId("java") {
val javaExt = extensions.getByType<JavaPluginExtension>() val javaExt = extensions.getByType<JavaPluginExtension>()
// Ensure a full JDK toolchain with compiler is available (Gradle will auto-download if missing) // Ensure a full JDK toolchain with compiler is available (Gradle will auto-download if missing)
javaExt.toolchain.languageVersion.set(JavaLanguageVersion.of(21)) javaExt.toolchain.languageVersion.set(JavaLanguageVersion.of(21))
tasks.register<Test>("perfTest") { tasks.register<Test>("perfTest") {
description = "Runs tests tagged with 'perf'" description = "Runs tests tagged with 'perf'"
group = "verification" group = "verification"
// Use the regular test source set outputs // Use the regular test source set outputs
testClassesDirs = javaExt.sourceSets.getByName("test").output.classesDirs testClassesDirs = javaExt.sourceSets.getByName("test").output.classesDirs
classpath = javaExt.sourceSets.getByName("test").runtimeClasspath classpath = javaExt.sourceSets.getByName("test").runtimeClasspath
useJUnitPlatform { useJUnitPlatform {
includeTags("perf") includeTags("perf")
}
shouldRunAfter("test")
// Keep the same JVM settings for consistency
jvmArgs("-Xshare:auto", "-Djdk.instrument.traceUsage=false")
maxHeapSize = "2g"
dependsOn("testClasses")
}
}
// Suppress Node.js deprecation warnings (e.g., DEP0040 punycode) during Kotlin/JS npm/yarn tasks
// Applies to all Exec-based tasks (covers Yarn/NPM invocations used by Kotlin JS plugin)
tasks.withType<Exec>().configureEach {
// Merge existing NODE_OPTIONS with --no-deprecation
val current = (environment["NODE_OPTIONS"] as String?) ?: System.getenv("NODE_OPTIONS")
val merged = if (current.isNullOrBlank()) "--no-deprecation" else "$current --no-deprecation"
environment("NODE_OPTIONS", merged)
// Also set the legacy switch to silence warnings entirely
environment("NODE_NO_WARNINGS", "1")
// Set Chrome binary path to avoid snap permission issues
environment("CHROME_BIN", "/usr/bin/google-chrome-stable")
environment("CHROMIUM_BIN", "/usr/bin/chromium")
environment("PUPPETEER_EXECUTABLE_PATH", "/usr/bin/chromium")
}
tasks.withType<org.jetbrains.kotlin.gradle.tasks.KotlinCompile> {
compilerOptions {
freeCompilerArgs.add("-Xannotation-default-target=param-property")
}
}
// ------------------------------
// Detekt & Ktlint default setup
// ------------------------------
plugins.withId("io.gitlab.arturbosch.detekt") {
extensions.configure(io.gitlab.arturbosch.detekt.extensions.DetektExtension::class.java) {
buildUponDefaultConfig = true
allRules = false
autoCorrect = false
config.setFrom(files(rootProject.file("config/detekt/detekt.yml")))
basePath = rootDir.absolutePath
}
tasks.withType<io.gitlab.arturbosch.detekt.Detekt>().configureEach {
jvmTarget = "21"
reports {
xml.required.set(false)
txt.required.set(false)
sarif.required.set(false)
html.required.set(true)
}
}
}
plugins.withId("org.jlleitschuh.gradle.ktlint") {
extensions.configure(org.jlleitschuh.gradle.ktlint.KtlintExtension::class.java) {
android.set(false)
outputToConsole.set(true)
ignoreFailures.set(false)
reporters {
reporter(org.jlleitschuh.gradle.ktlint.reporter.ReporterType.CHECKSTYLE)
reporter(org.jlleitschuh.gradle.ktlint.reporter.ReporterType.PLAIN)
}
}
}
}
// ==================================================================
// Architecture Guards (lightweight, fast checks)
// ==================================================================
// Fails if any source file contains manual Authorization header setting.
// Policy: Authorization must be injected by the DI-provided HttpClient (apiClient).
tasks.register("archGuardForbiddenAuthorizationHeader") {
group = "verification"
description = "Fail build if code sets Authorization header manually."
doLast {
val forbiddenPatterns =
listOf(
".header(\"Authorization\"",
"setHeader(\"Authorization\"",
"headers[\"Authorization\"]",
"headers[\'Authorization\']",
)
// Scope: Frontend-only enforcement. Backend/Test code is excluded.
val srcDirs = listOf("clients", "frontend")
val violations = mutableListOf<File>()
srcDirs.map { file(it) }
.filter { it.exists() }
.forEach { rootDir ->
rootDir.walkTopDown()
.filter { it.isFile && (it.extension == "kt" || it.extension == "kts") }
.forEach { f ->
val text = f.readText()
// Skip test sources
val path = f.invariantSeparatorsPath
val isTest =
path.contains("/src/commonTest/") ||
path.contains("/src/jsTest/") ||
path.contains("/src/jvmTest/") ||
path.contains("/src/test/")
if (!isTest && forbiddenPatterns.any { text.contains(it) }) {
violations += f
} }
shouldRunAfter("test") }
// Keep the same JVM settings for consistency }
jvmArgs("-Xshare:auto", "-Djdk.instrument.traceUsage=false") if (violations.isNotEmpty()) {
maxHeapSize = "2g" val msg =
dependsOn("testClasses") buildString {
appendLine("Forbidden manual Authorization header usage found in:")
violations.take(50).forEach { appendLine(" - ${it.path}") }
if (violations.size > 50) appendLine(" ... and ${violations.size - 50} more files")
appendLine()
appendLine("Policy: Use DI-provided apiClient (Koin named \"apiClient\").")
} }
throw GradleException(msg)
} }
}
}
// Suppress Node.js deprecation warnings (e.g., DEP0040 punycode) during Kotlin/JS npm/yarn tasks // Aggregate convenience task
// Applies to all Exec-based tasks (covers Yarn/NPM invocations used by Kotlin JS plugin) tasks.register("archGuards") {
tasks.withType<Exec>().configureEach { group = "verification"
// Merge existing NODE_OPTIONS with --no-deprecation description = "Run all architecture guard checks"
val current = (environment["NODE_OPTIONS"] as String?) ?: System.getenv("NODE_OPTIONS") dependsOn("archGuardForbiddenAuthorizationHeader")
val merged = if (current.isNullOrBlank()) "--no-deprecation" else "$current --no-deprecation" }
environment("NODE_OPTIONS", merged)
// Also set the legacy switch to silence warnings entirely
environment("NODE_NO_WARNINGS", "1")
// Set Chrome binary path to avoid snap permission issues
environment("CHROME_BIN", "/usr/bin/google-chrome-stable")
environment("CHROMIUM_BIN", "/usr/bin/chromium")
environment("PUPPETEER_EXECUTABLE_PATH", "/usr/bin/chromium")
}
tasks.withType<org.jetbrains.kotlin.gradle.tasks.KotlinCompile> { // Composite verification task including static analyzers if present
compilerOptions { tasks.register("staticAnalysis") {
freeCompilerArgs.add("-Xannotation-default-target=param-property") group = "verification"
} description = "Run static analysis (detekt, ktlint) and architecture guards"
} // These tasks are provided by plugins; only depend if tasks exist
dependsOn(
tasks.matching { it.name == "detekt" },
tasks.matching { it.name == "ktlintCheck" },
tasks.named("archGuards"),
)
} }
// ################################################################## // ##################################################################
@ -117,70 +226,80 @@ subprojects {
// Apply Dokka automatically to Kotlin subprojects to enable per-module docs // Apply Dokka automatically to Kotlin subprojects to enable per-module docs
subprojects { subprojects {
plugins.withId("org.jetbrains.kotlin.jvm") { plugins.withId("org.jetbrains.kotlin.jvm") {
apply(plugin = "org.jetbrains.dokka") apply(plugin = "org.jetbrains.dokka")
} }
plugins.withId("org.jetbrains.kotlin.multiplatform") { plugins.withId("org.jetbrains.kotlin.multiplatform") {
apply(plugin = "org.jetbrains.dokka") apply(plugin = "org.jetbrains.dokka")
} }
// Minimal sourceLink configuration when running in GitHub Actions // Minimal sourceLink configuration when running in GitHub Actions
tasks.withType(org.jetbrains.dokka.gradle.DokkaTask::class.java).configureEach { tasks.withType(org.jetbrains.dokka.gradle.DokkaTask::class.java).configureEach {
dokkaSourceSets.configureEach { dokkaSourceSets.configureEach {
val repo = System.getenv("GITHUB_REPOSITORY") val repo = System.getenv("GITHUB_REPOSITORY")
if (!repo.isNullOrBlank()) { if (!repo.isNullOrBlank()) {
sourceLink { sourceLink {
localDirectory.set(project.file("src")) localDirectory.set(project.file("src"))
remoteUrl.set(java.net.URI.create("https://github.com/$repo/blob/main/" + project.path.trimStart(':').replace(':', '/') + "/src").toURL()) remoteUrl.set(
} java.net.URI.create(
} "https://github.com/$repo/blob/main/" + project.path.trimStart(':').replace(':', '/') + "/src",
// Keep module names short and stable ).toURL(),
moduleName.set(project.path.trimStart(':')) )
} }
}
// Keep module names short and stable
moduleName.set(project.path.trimStart(':'))
} }
}
} }
// Aggregate tasks to build multi-module docs in Markdown (GFM) and HTML // Aggregate tasks to build multi-module docs in Markdown (GFM) and HTML
val dokkaGfmAll = tasks.register("dokkaGfmAll") { val dokkaGfmAll =
tasks.register("dokkaGfmAll") {
group = "documentation" group = "documentation"
description = "Builds Dokka GFM for all modules and aggregates outputs under build/dokka/gfm" description = "Builds Dokka GFM for all modules and aggregates outputs under build/dokka/gfm"
// Depend on all dokkaGfm tasks that exist in subprojects // Depend on all dokkaGfm tasks that exist in subprojects
dependsOn(subprojects dependsOn(
subprojects
.filter { it.plugins.hasPlugin("org.jetbrains.dokka") } .filter { it.plugins.hasPlugin("org.jetbrains.dokka") }
.map { "${it.path}:dokkaGfm" }) .map { "${it.path}:dokkaGfm" },
)
doLast { doLast {
val dest = layout.buildDirectory.dir("dokka/gfm").get().asFile val dest = layout.buildDirectory.dir("dokka/gfm").get().asFile
if (dest.exists()) dest.deleteRecursively() if (dest.exists()) dest.deleteRecursively()
dest.mkdirs() dest.mkdirs()
subprojects.filter { it.plugins.hasPlugin("org.jetbrains.dokka") }.forEach { p -> subprojects.filter { it.plugins.hasPlugin("org.jetbrains.dokka") }.forEach { p ->
val out = p.layout.buildDirectory.dir("dokka/gfm").get().asFile val out = p.layout.buildDirectory.dir("dokka/gfm").get().asFile
if (out.exists()) { if (out.exists()) {
out.copyRecursively(File(dest, p.path.trimStart(':').replace(':', '/')), overwrite = true) out.copyRecursively(File(dest, p.path.trimStart(':').replace(':', '/')), overwrite = true)
}
} }
println("[DOKKA] Aggregated GFM into ${dest.absolutePath}") }
println("[DOKKA] Aggregated GFM into ${dest.absolutePath}")
} }
} }
val dokkaHtmlAll = tasks.register("dokkaHtmlAll") { val dokkaHtmlAll =
tasks.register("dokkaHtmlAll") {
group = "documentation" group = "documentation"
description = "Builds Dokka HTML for all modules and aggregates outputs under build/dokka/html" description = "Builds Dokka HTML for all modules and aggregates outputs under build/dokka/html"
dependsOn(subprojects dependsOn(
subprojects
.filter { it.plugins.hasPlugin("org.jetbrains.dokka") } .filter { it.plugins.hasPlugin("org.jetbrains.dokka") }
.map { "${it.path}:dokkaHtml" }) .map { "${it.path}:dokkaHtml" },
)
doLast { doLast {
val dest = layout.buildDirectory.dir("dokka/html").get().asFile val dest = layout.buildDirectory.dir("dokka/html").get().asFile
if (dest.exists()) dest.deleteRecursively() if (dest.exists()) dest.deleteRecursively()
dest.mkdirs() dest.mkdirs()
subprojects.filter { it.plugins.hasPlugin("org.jetbrains.dokka") }.forEach { p -> subprojects.filter { it.plugins.hasPlugin("org.jetbrains.dokka") }.forEach { p ->
val out = p.layout.buildDirectory.dir("dokka/html").get().asFile val out = p.layout.buildDirectory.dir("dokka/html").get().asFile
if (out.exists()) { if (out.exists()) {
out.copyRecursively(File(dest, p.path.trimStart(':').replace(':', '/')), overwrite = true) out.copyRecursively(File(dest, p.path.trimStart(':').replace(':', '/')), overwrite = true)
}
} }
println("[DOKKA] Aggregated HTML into ${dest.absolutePath}") }
println("[DOKKA] Aggregated HTML into ${dest.absolutePath}")
} }
} }
// ################################################################## // ##################################################################
// ### DOKU-AGGREGATOR ### // ### DOKU-AGGREGATOR ###
@ -188,26 +307,26 @@ val dokkaHtmlAll = tasks.register("dokkaHtmlAll") {
// Leichter Aggregator im Root-Projekt, ruft die eigentlichen Tasks im :docs Subprojekt auf // Leichter Aggregator im Root-Projekt, ruft die eigentlichen Tasks im :docs Subprojekt auf
tasks.register("docs") { tasks.register("docs") {
description = "Aggregates documentation tasks from :docs" description = "Aggregates documentation tasks from :docs"
group = "documentation" group = "documentation"
dependsOn(":docs:generateAllDocs") dependsOn(":docs:generateAllDocs")
} }
// Wrapper-Konfiguration // Wrapper-Konfiguration
// Apply Node warning suppression on root project Exec tasks as well // Apply Node warning suppression on root project Exec tasks as well
// Ensures aggregated Kotlin/JS tasks created at root (e.g., kotlinNpmInstall) inherit the env // Ensures aggregated Kotlin/JS tasks created at root (e.g., kotlinNpmInstall) inherit the env
tasks.withType<Exec>().configureEach { tasks.withType<Exec>().configureEach {
val current = (environment["NODE_OPTIONS"] as String?) ?: System.getenv("NODE_OPTIONS") val current = (environment["NODE_OPTIONS"] as String?) ?: System.getenv("NODE_OPTIONS")
val merged = if (current.isNullOrBlank()) "--no-deprecation" else "$current --no-deprecation" val merged = if (current.isNullOrBlank()) "--no-deprecation" else "$current --no-deprecation"
environment("NODE_OPTIONS", merged) environment("NODE_OPTIONS", merged)
environment("NODE_NO_WARNINGS", "1") environment("NODE_NO_WARNINGS", "1")
// Set Chrome binary path to avoid snap permission issues // Set Chrome binary path to avoid snap permission issues
environment("CHROME_BIN", "/usr/bin/google-chrome-stable") environment("CHROME_BIN", "/usr/bin/google-chrome-stable")
environment("CHROMIUM_BIN", "/usr/bin/chromium") environment("CHROMIUM_BIN", "/usr/bin/chromium")
environment("PUPPETEER_EXECUTABLE_PATH", "/usr/bin/chromium") environment("PUPPETEER_EXECUTABLE_PATH", "/usr/bin/chromium")
} }
tasks.wrapper { tasks.wrapper {
gradleVersion = "9.1.0" gradleVersion = "9.1.0"
distributionType = Wrapper.DistributionType.BIN distributionType = Wrapper.DistributionType.BIN
} }

View File

@ -42,8 +42,8 @@ kotlin {
sourceSets { sourceSets {
commonMain.dependencies { commonMain.dependencies {
// UI Kit // UI Kit (Design System)
implementation(project(":clients:shared:common-ui")) implementation(project(":frontend:core:design-system"))
// Shared Konfig & Utilities (AppConfig + BuildConfig) // Shared Konfig & Utilities (AppConfig + BuildConfig)
implementation(project(":clients:shared")) implementation(project(":clients:shared"))

View File

@ -43,10 +43,10 @@ kotlin {
sourceSets { sourceSets {
commonMain.dependencies { commonMain.dependencies {
// Contract from backend // Contract from backend
implementation(projects.services.ping.pingApi) implementation(project(":backend:services:ping:ping-api"))
// UI Kit // UI Kit (Design System)
implementation(project(":clients:shared:common-ui")) implementation(project(":frontend:core:design-system"))
// Shared Konfig & Utilities // Shared Konfig & Utilities
implementation(project(":clients:shared")) implementation(project(":clients:shared"))
@ -65,6 +65,9 @@ kotlin {
// Coroutines and serialization // Coroutines and serialization
implementation(libs.bundles.kotlinx.core) implementation(libs.bundles.kotlinx.core)
// DI (Koin) for resolving apiClient from container
implementation(libs.koin.core)
// ViewModel lifecycle // ViewModel lifecycle
implementation(libs.bundles.compose.common) implementation(libs.bundles.compose.common)

View File

@ -63,6 +63,9 @@ kotlin {
implementation(libs.koin.compose) implementation(libs.koin.compose)
implementation(libs.koin.compose.viewmodel) implementation(libs.koin.compose.viewmodel)
// Network module (provides DI `apiClient`)
implementation(project(":frontend:core:network"))
// Compose für shared UI components (common) // Compose für shared UI components (common)
implementation(compose.runtime) implementation(compose.runtime)
implementation(compose.foundation) implementation(compose.foundation)

View File

@ -1,6 +1,7 @@
package at.mocode.clients.shared.di package at.mocode.clients.shared.di
import at.mocode.clients.shared.core.devConfig import at.mocode.clients.shared.core.devConfig
import at.mocode.frontend.core.network.networkModule
import org.koin.core.context.startKoin import org.koin.core.context.startKoin
import org.koin.dsl.KoinAppDeclaration import org.koin.dsl.KoinAppDeclaration
import org.koin.dsl.module import org.koin.dsl.module
@ -10,14 +11,16 @@ val configModule = module {
single { devConfig } // Später können wir hier PROD/DEV umschalten single { devConfig } // Später können wir hier PROD/DEV umschalten
} }
// Alle Module zusammen // Basismodule, die immer geladen werden sollen (ohne Feature/Core-Cross-Imports)
val sharedModules = listOf( val baseSharedModules = listOf(
configModule, configModule,
// Network module provides DI-only HttpClient (safe to be shared across features)
networkModule networkModule
) )
// Helper zum Starten von Koin (wird von der App aufgerufen) // Helper zum Starten von Koin (wird von der App aufgerufen)
// Weitere Module (z. B. networkModule) können über appDeclaration hinzugefügt werden.
fun initKoin(appDeclaration: KoinAppDeclaration = {}) = startKoin { fun initKoin(appDeclaration: KoinAppDeclaration = {}) = startKoin {
modules(baseSharedModules)
appDeclaration() appDeclaration()
modules(sharedModules)
} }

View File

@ -1,190 +0,0 @@
name: meldestelle-hardcoded
services:
# --- DATENBANK ---
postgres:
image: postgres:16-alpine
container_name: meldestelle-postgres
restart: unless-stopped
ports:
- "5432:5432"
environment:
POSTGRES_USER: pg-user
POSTGRES_PASSWORD: pg-password
POSTGRES_DB: meldestelle
volumes:
- postgres-data:/var/lib/postgresql/data
# Falls du Init-Scripte hast, lassen wir die erstmal weg,
# um Fehlerquellen zu reduzieren, oder lassen den Pfad, falls er existiert:
- ./docker/core/postgres:/docker-entrypoint-initdb.d:Z
healthcheck:
test: [ "CMD-SHELL", "pg_isready -U pg-user -d meldestelle" ]
interval: 1s
timeout: 5s
retries: 3
start_period: 30s
networks:
- meldestelle-network
# --- DATENBANK-MANAGEMENT-TOOL ---
pgadmin:
image: dpage/pgadmin4:8
container_name: pgadmin4_container
restart: unless-stopped
ports:
- "8888:80"
environment:
PGADMIN_DEFAULT_EMAIL: user@domain.com
PGADMIN_DEFAULT_PASSWORD: strong-password
volumes:
- pgadmin-data:/var/lib/pgadmin
healthcheck:
test: [ "CMD-SHELL", "wget --spider -q http://localhost:80/ || exit 1" ]
interval: 1s
timeout: 5s
retries: 3
start_period: 30s
networks:
- meldestelle-network
# --- CACHE ---
redis:
image: redis:7-alpine
container_name: meldestelle-redis
restart: unless-stopped
ports:
- "6379:6379"
volumes:
- redis-data:/data
command: redis-server --appendonly yes
healthcheck:
test: [ "CMD", "redis-cli" ]
interval: 1s
timeout: 5s
retries: 3
networks:
- meldestelle-network
# --- IDENTITY PROVIDER (Wartet auf Postgres) ---
keycloak:
image: quay.io/keycloak/keycloak:26.4
container_name: meldestelle-keycloak
restart: unless-stopped
environment:
KC_HEALTH_ENABLED: true
KC_METRICS_ENABLED: true
KC_BOOTSTRAP_ADMIN_USERNAME: kc-admin
KC_BOOTSTRAP_ADMIN_PASSWORD: kc-password
KC_DB: postgres
KC_DB_URL: jdbc:postgresql://postgres:5432/meldestelle
KC_DB_USERNAME: pg-user
KC_DB_PASSWORD: pg-password
KC_HOSTNAME: localhost
ports:
- "8180:8080"
depends_on:
postgres:
condition: service_healthy
volumes:
- ./docker/core/keycloak:/opt/keycloak/data/import:Z
command: start-dev --import-realm
healthcheck:
test: [ "CMD-SHELL", "exec 3<>/dev/tcp/127.0.0.1/9000" ]
interval: 20s
timeout: 10s
retries: 5
start_period: 60s
networks:
- meldestelle-network
# --- MONITORING ---
prometheus:
image: prom/prometheus:v2.54.1
container_name: meldestelle-prometheus
restart: unless-stopped
ports:
- "9090:9090"
volumes:
- prometheus-data:/prometheus
- ./docker/monitoring/prometheus:/etc/prometheus:Z
command:
- --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.retention.time=15d
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:9090/-/healthy" ]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
networks:
- meldestelle-network
grafana:
image: grafana/grafana:11.3.0
container_name: meldestelle-grafana
environment:
GF_SECURITY_ADMIN_USER: gf-admin
GF_SECURITY_ADMIN_PASSWORD: gf-password
ports:
- "3000:3000"
volumes:
- grafana-data:/var/lib/grafana
- ./docker/monitoring/grafana:/etc/grafana/provisioning:Z
depends_on:
- prometheus
healthcheck:
test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/api/health" ]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
networks:
- meldestelle-network
# --- CLIENTS: WEB APP (Kotlin/JS, no WASM) ---
web-app:
build:
context: .
dockerfile: dockerfiles/clients/web-app/Dockerfile
args:
GRADLE_VERSION: 9.1.0
JAVA_VERSION: 21
NODE_VERSION: 22.21.0
NGINX_IMAGE_TAG: 1.28.0-alpine
WEB_BUILD_PROFILE: dev
container_name: meldestelle-web-app
restart: unless-stopped
ports:
- "4000:4000"
depends_on:
- api-gateway
networks:
- meldestelle-network
# --- CLIENTS: DESKTOP APP (VNC + noVNC) ---
desktop-app:
build:
context: .
dockerfile: dockerfiles/clients/desktop-app/Dockerfile
container_name: meldestelle-desktop-app
restart: unless-stopped
environment:
- API_BASE_URL=http://api-gateway:8081
ports:
- "5901:5901" # VNC
- "6080:6080" # noVNC
depends_on:
- api-gateway
networks:
- meldestelle-network
volumes:
postgres-data:
pgadmin-data:
redis-data:
prometheus-data:
grafana-data:
networks:
meldestelle-network:
driver: bridge

43
docker/.env.example Normal file
View File

@ -0,0 +1,43 @@
# Core project name used as prefix for container names
COMPOSE_PROJECT_NAME=meldestelle
# Ports
POSTGRES_PORT=5432:5432
REDIS_PORT=6379:6379
KC_PORT=8180:8080
CONSUL_PORT=8500:8500
PROMETHEUS_PORT=9090:9090
GF_PORT=3000:3000
WEB_APP_PORT=4000:80
PING_SERVICE_PORT=8082:8082
PING_DEBUG_PORT=5006:5006
GATEWAY_PORT=8081:8081
GATEWAY_DEBUG_PORT=5005:5005
GATEWAY_SERVER_PORT=8081
DESKTOP_APP_VNC_PORT=5900:5900
DESKTOP_APP_NOVNC_PORT=6080:6080
# Postgres
POSTGRES_USER=meldestelle
POSTGRES_PASSWORD=meldestelle
POSTGRES_DB=meldestelle
# Keycloak
KC_ADMIN_USER=admin
KC_ADMIN_PASSWORD=admin
KC_HOSTNAME=localhost
# PgAdmin
PGADMIN_EMAIL=admin@example.com
PGADMIN_PASSWORD=admin
# Grafana
GF_ADMIN_USER=admin
GF_ADMIN_PASSWORD=admin
# Docker build versions (optional overrides)
DOCKER_GRADLE_VERSION=9.1.0
DOCKER_JAVA_VERSION=21
DOCKER_NODE_VERSION=22.21.0
DOCKER_NGINX_VERSION=1.28.0-alpine
WEB_BUILD_PROFILE=dev

View File

@ -0,0 +1 @@
services: {}

View File

@ -0,0 +1 @@
services: {}

View File

@ -16,7 +16,7 @@ services:
POSTGRES_DB: ${POSTGRES_DB} POSTGRES_DB: ${POSTGRES_DB}
volumes: volumes:
- postgres-data:/var/lib/postgresql/data - postgres-data:/var/lib/postgresql/data
- ./docker/core/postgres:/docker-entrypoint-initdb.d:Z - ./core/postgres:/docker-entrypoint-initdb.d:Z
healthcheck: healthcheck:
test: [ "CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}" ] test: [ "CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}" ]
interval: 5s interval: 5s
@ -72,7 +72,7 @@ services:
postgres: postgres:
condition: service_healthy condition: service_healthy
volumes: volumes:
- ./docker/core/keycloak:/opt/keycloak/data/import:Z - ./core/keycloak:/opt/keycloak/data/import:Z
command: start-dev --import-realm command: start-dev --import-realm
healthcheck: healthcheck:
test: [ "CMD-SHELL", "exec 3<>/dev/tcp/127.0.0.1/9000" ] test: [ "CMD-SHELL", "exec 3<>/dev/tcp/127.0.0.1/9000" ]
@ -112,7 +112,7 @@ services:
- "${PROMETHEUS_PORT}" - "${PROMETHEUS_PORT}"
volumes: volumes:
- prometheus-data:/prometheus - prometheus-data:/prometheus
- ./docker/monitoring/prometheus:/etc/prometheus:Z - ./monitoring/prometheus:/etc/prometheus:Z
command: command:
- --config.file=/etc/prometheus/prometheus.yml - --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.retention.time=15d - --storage.tsdb.retention.time=15d
@ -138,7 +138,7 @@ services:
- "${GF_PORT}" - "${GF_PORT}"
volumes: volumes:
- grafana-data:/var/lib/grafana - grafana-data:/var/lib/grafana
- ./docker/monitoring/grafana:/etc/grafana/provisioning:Z - ./monitoring/grafana:/etc/grafana/provisioning:Z
depends_on: depends_on:
- prometheus - prometheus
healthcheck: healthcheck:
@ -175,7 +175,7 @@ services:
api-gateway: api-gateway:
build: build:
context: . context: ..
dockerfile: dockerfiles/infrastructure/gateway/Dockerfile dockerfile: dockerfiles/infrastructure/gateway/Dockerfile
args: args:
# Build-Args aus deinen .env Dateien (werden hier statisch benötigt für den Build) # Build-Args aus deinen .env Dateien (werden hier statisch benötigt für den Build)
@ -224,7 +224,7 @@ services:
# ========================================== # ==========================================
ping-service: ping-service:
build: build:
context: . context: ..
dockerfile: dockerfiles/services/ping-service/Dockerfile dockerfile: dockerfiles/services/ping-service/Dockerfile
args: args:
GRADLE_VERSION: 9.1.0 GRADLE_VERSION: 9.1.0
@ -246,16 +246,13 @@ services:
SPRING_CLOUD_CONSUL_PORT: 8500 SPRING_CLOUD_CONSUL_PORT: 8500
SPRING_CLOUD_CONSUL_DISCOVERY_HOSTNAME: ping-service SPRING_CLOUD_CONSUL_DISCOVERY_HOSTNAME: ping-service
# --- DATENBANK VERBINDUNG --- # - DATENBANK VERBINDUNG -
# Wir nutzen die Container-Namen aus deiner .env Variable
SPRING_DATASOURCE_URL: jdbc:postgresql://${COMPOSE_PROJECT_NAME}-postgres:5432/${POSTGRES_DB} SPRING_DATASOURCE_URL: jdbc:postgresql://${COMPOSE_PROJECT_NAME}-postgres:5432/${POSTGRES_DB}
SPRING_DATASOURCE_USERNAME: ${POSTGRES_USER} SPRING_DATASOURCE_USERNAME: ${POSTGRES_USER}
SPRING_DATASOURCE_PASSWORD: ${POSTGRES_PASSWORD} SPRING_DATASOURCE_PASSWORD: ${POSTGRES_PASSWORD}
# WICHTIG: Wir wollen nur validieren, nichts erstellen.
SPRING_JPA_HIBERNATE_DDL_AUTO: validate SPRING_JPA_HIBERNATE_DDL_AUTO: validate
# --- REDIS --- # --- REDIS ---
# Wir nutzen den Service-Namen, genau wie bei Postgres
SPRING_DATA_REDIS_HOST: ${COMPOSE_PROJECT_NAME}-redis SPRING_DATA_REDIS_HOST: ${COMPOSE_PROJECT_NAME}-redis
SPRING_DATA_REDIS_PORT: 6379 SPRING_DATA_REDIS_PORT: 6379
depends_on: depends_on:
@ -275,7 +272,7 @@ services:
# ========================================== # ==========================================
web-app: web-app:
build: build:
context: . context: ..
dockerfile: dockerfiles/clients/web-app/Dockerfile dockerfile: dockerfiles/clients/web-app/Dockerfile
args: args:
GRADLE_VERSION: ${DOCKER_GRADLE_VERSION:-9.1.0} GRADLE_VERSION: ${DOCKER_GRADLE_VERSION:-9.1.0}
@ -297,7 +294,7 @@ services:
desktop-app: desktop-app:
build: build:
context: . context: ..
dockerfile: dockerfiles/clients/desktop-app/Dockerfile dockerfile: dockerfiles/clients/desktop-app/Dockerfile
container_name: ${COMPOSE_PROJECT_NAME}-desktop-app container_name: ${COMPOSE_PROJECT_NAME}-desktop-app
restart: unless-stopped restart: unless-stopped

View File

@ -15,22 +15,21 @@ COPY gradle ./gradle
COPY gradlew ./ COPY gradlew ./
# Kopiere alle notwendigen Module für Multi-Modul-Projekt # Kopiere alle notwendigen Module für Multi-Modul-Projekt
COPY clients ./clients COPY frontend ./frontend
COPY backend ./backend
COPY core ./core COPY core ./core
COPY domains ./domains COPY domains ./domains
COPY platform ./platform COPY platform ./platform
COPY infrastructure ./infrastructure
COPY services ./services
COPY docs ./docs COPY docs ./docs
# Setze Gradle-Wrapper Berechtigung # Setze Gradle-Wrapper Berechtigung
RUN chmod +x ./gradlew RUN chmod +x ./gradlew
# Dependencies downloaden (für besseres Caching) # Dependencies downloaden (für besseres Caching)
RUN ./gradlew :clients:app:dependencies --no-configure-on-demand RUN ./gradlew :frontend:shells:meldestelle-portal:dependencies --no-configure-on-demand
# Desktop-App kompilieren (createDistributable für native Distribution) # Desktop-App kompilieren (createDistributable für native Distribution)
RUN ./gradlew :clients:app:createDistributable --no-configure-on-demand RUN ./gradlew :frontend:shells:meldestelle-portal:createDistributable --no-configure-on-demand
# =================================================================== # ===================================================================
# Stage 2: Runtime Stage - Ubuntu mit VNC + noVNC # Stage 2: Runtime Stage - Ubuntu mit VNC + noVNC
@ -59,7 +58,7 @@ RUN apt-get update && apt-get install -y \
WORKDIR /app WORKDIR /app
# Kopiere kompilierte Desktop-App von Build-Stage # Kopiere kompilierte Desktop-App von Build-Stage
COPY --from=builder /app/clients/app/build/compose/binaries/main/desktop/ ./desktop-app/ COPY --from=builder /app/frontend/shells/meldestelle-portal/build/compose/binaries/main/desktop/ ./desktop-app/
# Kopiere Scripts # Kopiere Scripts
COPY dockerfiles/clients/desktop-app/entrypoint.sh /entrypoint.sh COPY dockerfiles/clients/desktop-app/entrypoint.sh /entrypoint.sh

View File

@ -30,29 +30,28 @@ COPY gradle ./gradle
COPY gradlew ./ COPY gradlew ./
# Kopiere alle notwendigen Module für Multi-Modul-Projekt # Kopiere alle notwendigen Module für Multi-Modul-Projekt
COPY clients ./clients COPY frontend ./frontend
COPY backend ./backend
COPY core ./core COPY core ./core
COPY domains ./domains COPY domains ./domains
COPY platform ./platform COPY platform ./platform
COPY infrastructure ./infrastructure
COPY services ./services
COPY docs ./docs COPY docs ./docs
# Setze Gradle-Wrapper Berechtigung # Setze Gradle-Wrapper Berechtigung
RUN chmod +x ./gradlew RUN chmod +x ./gradlew
# Dependencies downloaden (für besseres Caching) # Dependencies downloaden (für besseres Caching)
RUN ./gradlew :clients:app:dependencies --no-configure-on-demand RUN ./gradlew :frontend:shells:meldestelle-portal:dependencies --no-configure-on-demand
# Kotlin/JS Web-App kompilieren (Profil wählbar über WEB_BUILD_PROFILE) # Kotlin/JS Web-App kompilieren (Profil wählbar über WEB_BUILD_PROFILE)
# - dev → jsBrowserDevelopmentExecutable (schneller, Source Maps) # - dev → jsBrowserDevelopmentExecutable (schneller, Source Maps)
# - prod → jsBrowserDistribution (minifiziert, optimiert) # - prod → jsBrowserDistribution (minifiziert, optimiert)
RUN if [ "$WEB_BUILD_PROFILE" = "prod" ]; then \ RUN if [ "$WEB_BUILD_PROFILE" = "prod" ]; then \
./gradlew :clients:app:jsBrowserDistribution --no-configure-on-demand -Pproduction=true; \ ./gradlew :frontend:shells:meldestelle-portal:jsBrowserDistribution --no-configure-on-demand -Pproduction=true; \
mkdir -p /app/web-dist && cp -r clients/app/build/dist/js/productionExecutable/* /app/web-dist/; \ mkdir -p /app/web-dist && cp -r frontend/shells/meldestelle-portal/build/dist/js/productionExecutable/* /app/web-dist/; \
else \ else \
./gradlew :clients:app:jsBrowserDevelopmentExecutable --no-configure-on-demand; \ ./gradlew :frontend:shells:meldestelle-portal:jsBrowserDevelopmentExecutable --no-configure-on-demand; \
mkdir -p /app/web-dist && cp -r clients/app/build/dist/js/developmentExecutable/* /app/web-dist/; \ mkdir -p /app/web-dist && cp -r frontend/shells/meldestelle-portal/build/dist/js/developmentExecutable/* /app/web-dist/; \
fi fi
# =================================================================== # ===================================================================

67
docs/ARCHITECTURE.md Normal file
View File

@ -0,0 +1,67 @@
Repository-Architektur (MP-22)
Dieses Dokument beschreibt die Zielstruktur und das Mapping vom bisherigen Stand (Ist) zur neuen Struktur (Soll). Es begleitet Epic 2 (MP-22).
Zielstruktur (Top-Level)
backend/ Gateway, Discovery (optional), Services
gateway
discovery
services
frontend/ KMP Frontend
shells Ausführbare Apps (Assembler)
features Vertical Slices (kein Feature→Feature)
core Shared Foundation (Design-System, Network, Local-DB, Auth, Domain)
docker/ Docker Compose, .env.example, Monitoring-/Core-Konfiguration
docs/ Architektur, ADRs, C4-Modelle, Guides
Ist → Soll Mapping (erste Tranche)
- Frontend
- clients/app → frontend/shells/meldestelle-portal (verschieben in Folge-Commit)
- clients/shared/common-ui → frontend/core/design-system (verschieben in Folge-Commit)
- clients/shared/navigation → frontend/core/navigation (verschieben in Folge-Commit)
- Backend
- infrastructure/gateway → backend/gateway (verschieben in Folge-Commit)
- services/* → backend/services/* (verschieben in Folge-Commit)
- Discovery (falls genutzt) → backend/discovery
- Docker
- compose.yaml → docker/docker-compose.yml (neu angelegt, Makefile angepasst)
- .env Handling → docker/.env.example (neu, als Template)
Build/Gradle
- settings.gradle.kts bleibt vorerst unverändert. Modul-Verschiebungen folgen in einem separaten Schritt mit angepassten include-Pfaden.
- Version Catalog (gradle/libs.versions.toml) bleibt die einzige Quelle der Versionswahrheit.
Richtlinien (Kurzfassung)
- Features kommunizieren ausschließlich über Routen (Navigation) und Shared-Modelle in frontend/core/domain.
- Kein manueller Authorization-Header – nur der DI-verwaltete apiClient aus frontend/core/network (Koin Named Binding).
- SQLDelight als Offline-SSoT: Schema/Migrationen zentral versionieren, UI liest stets lokal und synchronisiert im Hintergrund.
DI-Policy & Architecture Guards (MP-23)
- DI-Policy (Frontend)
- HTTP-Requests erfolgen ausschließlich über den via Koin bereitgestellten `apiClient` (named Binding) aus `:frontend:core:network`.
- Manuelles Setzen des `Authorization`-Headers ist verboten. Token-Handling wird zentral im `apiClient` konfiguriert (Auth-Plugin/Interceptor).
- Basis-URL wird plattformspezifisch aufgelöst:
- JVM/Desktop: Env `API_BASE_URL` (Fallback `http://localhost:8081`).
- Web/JS: `globalThis.API_BASE_URL` (z. B. per `index.html` oder Proxy), sonst `window.location.origin`, Fallback `http://localhost:8081`.
- Architecture Guards (Frontend-Scope)
- Root-Task `archGuards` bricht den Build ab, wenn verbotene Muster gefunden werden (manuelle `Authorization`-Header). Tests sind ausgenommen; Backend ist ausgenommen.
- Statische Analyse verfügbar über `detekt` und `ktlintCheck`; Aggregator `staticAnalysis` führt alles zusammen.
- Hinweise für Features
- Features importieren keine anderen Features (Kommunikation über Navigation + Shared-Domain-Modelle). Eine explizite Detekt-Regel folgt.
- Netzwerkzugriffe in Features nutzen Koin über die App-Shell (DI-Bootstrap). Für schrittweise Migration kann eine Factory den `apiClient` optional beziehen.
Nächste Schritte (MP-22 Folgetasks)
1. Physisches Verschieben der Frontend-Module gemäß Mapping und Anpassung von settings.gradle.kts.
2. Physisches Verschieben der Backend-Komponenten in backend/* inkl. evtl. Package-Pfade, sofern notwendig.
3. Ergänzung von docker-compose.services.yml und docker-compose.clients.yml mit echten Overlays.
4. Erstellen der ersten ADRs unter docs/adr (Koin, SQLDelight, Optimistic Locking, Freshness UI, Core Domain).

13
docs/adr/README.md Normal file
View File

@ -0,0 +1,13 @@
Architecture Decision Records (ADRs)
Dieses Verzeichnis enthält Architekturentscheidungen in kurzer, überprüfbarer Form.
Namensschema: ADR-XXX-title.md mit fortlaufender Nummerierung.
- ADR-001 Koin als DI
- ADR-002 SQLDelight als Offline-DB
- ADR-003 Optimistic Locking (409) als Konfliktstrategie
- ADR-004 Freshness UI (Ampel)
- ADR-005 Core Domain & Feature Isolation
Siehe Template: ADR-000-template.md.

View File

@ -0,0 +1,160 @@
### 1\. Welche DI-Lösung? (Dependency Injection)
**Entscheidung:** Wir nutzen **Koin**.
**Begründung (ADR):**
* **Warum nicht Dagger/Hilt?** Hilt ist stark auf Android (Context, Lifecycles) fixiert. Dagger ist extrem komplex im Setup für Multiplatform (Kapt/KSP Setup über alle Targets).
* **Warum Koin?** Es ist ein reines Kotlin-Framework ("Service Locator" Pattern). Es funktioniert identisch auf JVM (Desktop), JS (Web) und Android. Es benötigt keine Annotation-Processing-Magie, was die Build-Zeiten im Monorepo niedrig hält.
**Eintrag im Guide:**
```kotlin
// GUIDELINE: Dependency Injection
// Wir nutzen Koin. Module werden im `di` Package des Features definiert.
// 1. Definition (Feature Module)
val inventoryModule = module {
// Singletons für Services
single<InventoryRepository> { InventoryRepositoryImpl(get(), get()) }
// ViewModels (Factory scope)
viewModel { InventoryViewModel(get()) }
}
// 2. Nutzung des ApiClients (Best Practice)
// Wir injizieren IMMER den "apiClient" (mit Auth-Header), niemals den Default Client.
val networkModule = module {
single(named("apiClient")) { ... } // Konfiguriert in :core:network
}
val myFeatureModule = module {
single {
// Explizites Holen des authentifizierten Clients
MyFeatureApi(httpClient = get(named("apiClient")))
}
}
```
-----
### 2\. Welche Offline-DB/ORM?
**Entscheidung:** Wir nutzen **SQLDelight**.
**Begründung (ADR):**
* **Warum nicht Room (KMP)?** Room ist für KMP noch sehr neu (Alpha/Beta Status) und bringt viel Overhead mit sich (SQLite Bundling etc.).
* **Warum SQLDelight?**
1. **Schema First:** Du schreibst SQL (`.sq`), und Kotlin-Code wird *generiert*. Das zwingt Entwickler dazu, über ihr Datenmodell nachzudenken, bevor sie Code schreiben.
2. **Performance:** Es ist extrem leichtgewichtig und typ-sicher.
3. **Migrationen:** SQLDelight hat ein exzellentes System für Schema-Migrationen (`1.sqm`, `2.sqm`), was für Desktop-Apps (die nicht einfach "neu geladen" werden können wie Webseiten) essenziell ist.
**Eintrag im Guide:**
> **DB-Guideline:**
>
> * Jedes Feature definiert sein Schema in `:frontend:core:local-db/src/commonMain/sqldelight/...`.
> * Business-Logik darf niemals SQL-Strings enthalten. Nutze die generierten `Queries`-Objekte.
> * Migrationen sind Pflicht bei Schema-Änderungen\! (Kein `DROP TABLE` in Production).
-----
### 3\. Konfliktstrategie bei Sync?
**Entscheidung:** **Optimistic Locking** (Server Wins).
**Begründung (ADR):**
* In einem System mit Offline-Clients ist "Last Write Wins" gefährlich (Lagerbestand wird überschrieben).
* **Strategie:**
1. Jedes Entity hat eine `lastUpdated` (Timestamp) Spalte.
2. Der Client sendet beim Update die Version mit, die er *kennt*.
3. Wenn Server-Version \> Client-Version → **HTTP 409 Conflict**.
4. Client muss Daten neu laden (Refresh) und User fragen/informieren.
**Eintrag im Guide:**
```kotlin
// GUIDELINE: Sync & Conflicts
// Das Frontend führt KEIN komplexes Merging durch.
suspend fun updateStock(item: Item) {
try {
api.update(item.id, item.newStock, currentVersion = item.version)
// Happy Path: DB Update
} catch (e: ConflictException) { // HTTP 409
// 1. Markiere Item in UI als "Out of Sync" (Rot)
// 2. Trigger automatischen Refresh vom Server
// 3. Zeige User Toast: "Daten waren veraltet. Bitte prüfen."
repo.refreshSingleItem(item.id)
}
}
```
-----
### 4\. Error Budgets / SLIs (Stale Data Indikatoren)
**Entscheidung:** **Visual Freshness Indicators** (Ampel-System).
**Begründung (ADR):**
* Ein User muss wissen, ob der Lagerbestand "live" ist oder "von gestern".
* Wir definieren keine harten Timeouts (App blockieren), sondern weiche UI-Hinweise.
**Eintrag im Guide:**
> **UI-Regel "Data Freshness":**
> Jedes Entity in der lokalen DB hat ein Feld `lastSyncedAt`. Das UI reagiert darauf:
>
> * **\< 5 min:** ✅ Normalzustand (Kein Indikator).
> * **\> 5 min:** ⚠️ Kleines gelbes "Wolke"-Icon oder ausgegrauter Text (Warnung).
> * **\> 1 Stunde:** ❌ Roter Banner "Offline-Daten: Bestand nicht garantiert".
> * **Aktion:** Schreibende Operationen sind bei "Rot" für kritische Bereiche (z.B. Inventur-Abschluss) gesperrt, für unkritische (z.B. Notiz anlegen) erlaubt (Queue).
-----
### 5\. API-Verträge und Kapselung der Feature-Teams
**Entscheidung:** **Loose Coupling via Navigation Routes & Shared Data Models (Core)**.
**Begründung (ADR):**
* Wir wollen vermeiden, dass Team A (Inventory) direkt Klassen von Team B (Checkout) importiert. Das führt zum "Monolithen-Klumpen".
* Wir nutzen **keine** separaten Gradle-Module pro Feature-API (`:inventory-api`, `:inventory-impl`), da dies den Build-Graph unnötig aufbläht ("Gradle Overhead").
**Strategie:**
1. **Schnittstelle:** Die einzige "Public API" eines Features ist sein `EntryPoint` (Composable) und seine `Route` (String).
2. **Datenaustausch:**
* *Minimal:* Über URL-Parameter (IDs). `navigator.navigate("inventory/details/123")`.
* *Objekte:* Wenn komplexe Objekte geteilt werden müssen (z.B. `UserProfile`), gehören diese in **`:frontend:core:domain`** (Shared Kernel).
**Eintrag im Guide:**
```kotlin
// GUIDELINE: Feature Isolation
// 1. Features importieren NIEMALS andere Features im `build.gradle.kts`.
// 2. Kommunikation nur über Navigation (Router).
// 3. Gemeinsam genutzte Datenobjekte (z.B. UserID, ShopID) liegen in :core:domain.
// FALSCH:
import com.project.features.billing.Invoice // Abhängigkeit zu anderem Feature!
// RICHTIG:
// Feature A navigiert zu Feature B via Route
navigator.navigateTo("billing/create?orderId=123")
```
-----
### Zusammenfassung für dein Dokument
Diese 5 Punkte schließen den Kreis:
1. **Koin** hält den Code sauber.
2. **SQLDelight** hält die Daten sicher.
3. **Optimistic Locking** verhindert Datenmüll.
4. **Freshness UI** managed die Erwartungshaltung des Users.
5. **Core Domain** verhindert Spaghetti-Code zwischen Features.

View File

@ -0,0 +1,155 @@
# 🏗 Project Architecture & Structure Guide
> **"Code is a liability. Structure is an asset."**
> Wir bauen dieses System nicht für den schnellsten Start, sondern für die **Wartbarkeit über Jahre**, Offline-Fähigkeit und Skalierbarkeit über mehrere Teams hinweg.
-----
## 1\. Die Große Übersicht: The Monorepo Strategy
Wir organisieren Backend und Frontend in einem einzigen Repository (Monorepo).
### **Warum Monorepo? (Decision Record)**
* ❌ **Alternative:** Getrennte Repositories für Backend, Web-Frontend, Desktop-App.
* **Problem dabei:** "Version Hell". Backend ändert API v1 zu v2, aber Frontend-Repo ist noch auf v1. Refactorings über die ganze Kette sind schmerzhaft.
* ✅ **Unsere Entscheidung:** Monorepo.
* **Atomic Commits:** Ein Pull Request enthält Backend-Änderungen UND die dazugehörige Frontend-Anpassung.
* **Single Versioning:** Wir nutzen `gradle/libs.versions.toml` als einzige Quelle der Wahrheit für Library-Versionen (z.B. Kotlin Version) über das gesamte System hinweg.
-----
## 2\. Der "Deep Dive" in die Ordnerstruktur
Hier ist der detaillierte Aufriss unseres Dateisystems. Jeder Ordner hat einen spezifischen architektonischen Zweck.
```text
/my-project-root
├── ⚙️ docker-compose.yml <-- Die lokale "Cloud". Startet DBs, Gateway & Services.
├── 📄 settings.gradle.kts <-- Definiert die Module (Frontend & Backend).
├── 📂 gradle
│ └── libs.versions.toml <-- 🛑 STOP! Hier werden Versionen definiert. Nirgendwo sonst.
├── 📂 backend <-- ARCHITEKTUR: Hexagonal / DDD
│ ├── 📂 gateway <-- Der "Türsteher". Routing & Auth-Check.
│ ├── 📂 discovery <-- Das "Telefonbuch" (Consul/Service Registry).
│ └── 📂 services <-- Die Business Logic (Microservices)
│ ├── 📂 inventory-service
│ │ ├── 📄 Dockerfile <-- Jedes Service ist ein isolierter Container!
│ │ └── 📂 src/main/kotlin/.../domain <-- Reine Logik, kein Spring!
│ └── 📂 auth-service
└── 📂 frontend <-- ARCHITEKTUR: Kotlin Multiplatform (KMP)
├── 📂 shells <-- 💡 CONCEPT: "The Assembler"
│ │ Das sind die ausführbaren Anwendungen. Sie enthalten KEINE Logik.
│ │ Sie "kleben" nur Features zusammen und konfigurieren DI.
│ │
│ ├── 📂 warehouse-app <-- Desktop-App (Windows/Linux) für Lageristen
│ │ └── build.gradle.kts (bindet :features:inventory ein)
│ └── 📂 admin-portal <-- Web-App (JS/Wasm) für Management
│ └── build.gradle.kts (bindet alle Features ein)
├── 📂 features <-- 💡 CONCEPT: "Vertical Slices" (Micro-Frontends)
│ │ Hier passiert die Arbeit. Ein Feature gehört einem Team.
│ │
│ ├── 📂 inventory-feature
│ │ ├── 📂 src/commonMain
│ │ │ ├── 📂 api <-- Public Interface (Der Vertrag nach außen)
│ │ │ ├── 📂 ui <-- Screens & Components (Internal)
│ │ │ └── 📂 data <-- Repository & SSoT (Internal)
│ │ └── build.gradle.kts
│ └── 📂 auth-feature
└── 📂 core <-- 💡 CONCEPT: "Shared Foundation"
│ Code, der sich selten ändert, aber überall genutzt wird.
├── 📂 design-system <-- UI-Baukasten (Farben, Typo, Buttons)
├── 📂 network <-- HTTP Clients & Auth-Interceptor
├── 📂 local-db <-- SQLDelight Schemas (Die Offline-Wahrheit)
└── 📂 auth <-- OAuth2 Logik (Browser Bridge für Desktop)
```
-----
## 3\. Architectural Decision Records (ADRs)
Warum haben wir das so gebaut? Hier sind die Antworten auf die "Warum nicht X?" Fragen.
### ADR 001: Kotlin Multiplatform vs. Electron / Web-Wrapper
* **Kontext:** Wir brauchen eine Web-App UND eine Desktop-App.
* **Entscheidung:** Wir nutzen **Kotlin Multiplatform (Compose)**.
* **Begründung:**
* *Performance:* Electron braucht pro App \~200MB RAM (Chromium Instanz). Unsere Desktop-Apps (Lager, Kasse) laufen auf schwacher Hardware. JVM/Native ist effizienter.
* *Type Safety:* Wir teilen Business-Logik (Validation, SSoT) zwischen Web und Desktop. Mit JS/Electron müssten wir Logik duplizieren oder transpilen.
* *Offline:* Echte SQL-Datenbank (SQLite) Integration ist in nativem Code robuster als im Browser-Storage.
### ADR 002: Multiple App Shells vs. One "Super-App"
* **Kontext:** Wir haben Lagerarbeiter, Kassierer und Manager.
* **Entscheidung:** Wir bauen **pro Rolle eine eigene "Shell"** (Executable).
* **Begründung:**
* *Security (Web):* "Tree Shaking". Wenn der Code für "Admin-User-Löschen" gar nicht erst in der `warehouse-app.js` enthalten ist, kann er auch nicht gehackt werden.
* *Focus (Desktop):* Die Lager-App startet schneller und hat weniger Bugs, weil sie den Code für das Rechnungswesen gar nicht lädt.
* *Flexibilität:* Wir können Features wiederverwenden. Das Feature `auth-feature` ist in ALLEN Apps, `inventory-feature` nur in zweien.
### ADR 003: Single Source of Truth (SSoT) via Database
* **Kontext:** Desktop-Apps werden in Hallen mit schlechtem WLAN genutzt.
* **Entscheidung:** **Database First Architecture**.
* **Begründung:**
* Klassisch (`UI -> API -> UI`) führt zu weißen Screens und Ladekreisen bei Netzschwankungen.
* Wir nutzen `UI -> Local DB <- Sync -> API`.
* Das UI zeigt **immer** Daten an (auch wenn sie 10 Minuten alt sind). Der User kann arbeiten. Sync passiert transparent im Hintergrund.
### ADR 004: Docker für alles (außer Desktop Runtime)
* **Kontext:** "Bei mir läuft's aber..." Probleme.
* **Entscheidung:** Das gesamte Backend + Web-Frontend Build-Pipeline läuft in Docker.
* **Begründung:**
* Die `docker-compose.yml` ist die Wahrheit.
* Für die Desktop-Entwicklung nutzen wir Gradle lokal, aber der Server, gegen den entwickelt wird, läuft im Container. Das garantiert Identität zwischen Dev und Prod.
-----
## 4\. Guidelines: Wo gehört mein Code hin?
Wenn du neuen Code schreibst, stelle dir diese Fragen:
### Q1: Ist es Business Logik (z.B. "Preis berechnen")?
* ➡️ Gehört in **`/backend/services/.../domain`** (Server-Side Validierung ist Pflicht).
* ➡️ UND optional in **`/frontend/features/.../domain`** (für schnelle UI-Feedback, aber Server hat das letzte Wort).
### Q2: Ist es ein UI-Element (z.B. "Runder Button")?
* ➡️ Gehört in **`/frontend/core/design-system`**.
* 🛑 *Stop\!* Baue keine Custom Buttons in deinem Feature-Ordner. Nutze das Design System. Wenn etwas fehlt, erweitere das Design System.
### Q3: Ich brauche Daten von einem anderen Service.
* **Szenario:** Im "Checkout" (Kasse) brauche ich den Produktnamen aus dem "Inventory".
* ❌ **Falsch:** `CheckoutService` ruft `InventoryService` Datenbank direkt ab.
* ✅ **Richtig (Backend):** `CheckoutService` ruft `InventoryService` via REST/gRPC über das Gateway.
* ✅ **Richtig (Frontend):** Das `Checkout-Feature` kennt das `Inventory-Feature` nicht. Es bekommt nur eine `productId`. Wenn es Details anzeigen muss, nutzt es entweder ein eigenes minimales Datenmodell oder fragt das Backend.
### Q4: Auth Token Handling
* ❌ **Niemals:** `httpClient.header("Authorization", token)` manuell aufrufen.
* ✅ **Immer:** Nutze den konfigurierten Client aus dem DI-Container: `get(named("apiClient"))`. Die Architektur kümmert sich um Refresh und Injection.
-----
## 5\. Das "Mental Model" für Entwickler
Stell dir unsere App wie einen **Lego-Baukasten** vor.
1. **Core (Platte):** Das Fundament (Auth, Network, Design). Muss immer da sein.
2. **Features (Steine):** Bunte Bausteine (Inventory, Cart, Profile). Sie berühren sich seitlich nicht (keine direkten Abhängigkeiten).
3. **Shells (Modelle):** Das fertige Haus.
* Haus A (Admin Portal) nutzt alle Steine.
* Haus B (Lager App) nutzt nur die grünen Steine (Inventory).
Dein Job als Entwickler ist es meistens, **einen neuen Stein (Feature)** zu bauen oder einen bestehenden zu verbessern. Du musst dich selten um das Fundament oder das fertige Haus kümmern.

7
frontend/README.md Normal file
View File

@ -0,0 +1,7 @@
# Frontend
Kotlin Multiplatform Frontend layer.
- shells: ausführbare Anwendungen (Assembler)
- features: Vertical Slices (kein Feature→Feature Import)
- core: gemeinsame Basis (Design-System, Network, Local-DB, Auth, Domain)

0
frontend/core/.gitkeep Normal file
View File

View File

View File

@ -76,11 +76,15 @@ kotlin {
commonMain.dependencies { commonMain.dependencies {
// Shared modules // Shared modules
implementation(project(":clients:shared")) implementation(project(":clients:shared"))
implementation(project(":clients:shared:common-ui")) implementation(project(":frontend:core:design-system"))
implementation(project(":clients:shared:navigation")) implementation(project(":frontend:core:navigation"))
implementation(project(":frontend:core:network"))
implementation(project(":clients:auth-feature")) implementation(project(":clients:auth-feature"))
implementation(project(":clients:ping-feature")) implementation(project(":clients:ping-feature"))
// DI (Koin) needed to call initKoin { modules(...) }
implementation(libs.koin.core)
// Compose Multiplatform // Compose Multiplatform
implementation(compose.runtime) implementation(compose.runtime)
implementation(compose.foundation) implementation(compose.foundation)
@ -100,6 +104,7 @@ kotlin {
implementation(compose.desktop.currentOs) implementation(compose.desktop.currentOs)
implementation(libs.kotlinx.coroutines.swing) implementation(libs.kotlinx.coroutines.swing)
implementation(libs.kotlinx.coroutines.core) implementation(libs.kotlinx.coroutines.core)
implementation(libs.koin.core)
} }
jsMain.dependencies { jsMain.dependencies {

View File

@ -2,10 +2,40 @@ import androidx.compose.ui.ExperimentalComposeUiApi
import androidx.compose.ui.window.ComposeViewport import androidx.compose.ui.window.ComposeViewport
import kotlinx.browser.document import kotlinx.browser.document
import org.w3c.dom.HTMLElement import org.w3c.dom.HTMLElement
import at.mocode.clients.shared.di.initKoin
import at.mocode.frontend.core.network.networkModule
import kotlinx.coroutines.MainScope
import kotlinx.coroutines.launch
import org.koin.core.context.GlobalContext
import org.koin.core.qualifier.named
import io.ktor.client.HttpClient
import io.ktor.client.call.body
import io.ktor.client.request.get
@OptIn(ExperimentalComposeUiApi::class) @OptIn(ExperimentalComposeUiApi::class)
fun main() { fun main() {
console.log("[WebApp] main() entered") console.log("[WebApp] main() entered")
// Initialize DI (Koin) with shared modules + network module
try {
initKoin { modules(networkModule) }
console.log("[WebApp] Koin initialized with networkModule")
} catch (e: dynamic) {
console.warn("[WebApp] Koin initialization warning:", e)
}
// Simple smoke request using DI apiClient
try {
val client = GlobalContext.get().get<HttpClient>(named("apiClient"))
MainScope().launch {
try {
val resp: String = client.get("/api/ping/health").body()
console.log("[WebApp] /api/ping/health → ", resp)
} catch (e: dynamic) {
console.warn("[WebApp] /api/ping/health failed:", e?.message ?: e)
}
}
} catch (e: dynamic) {
console.warn("[WebApp] Unable to resolve apiClient from Koin:", e)
}
fun startApp() { fun startApp() {
try { try {
console.log("[WebApp] startApp(): readyState=", document.asDynamic().readyState) console.log("[WebApp] startApp(): readyState=", document.asDynamic().readyState)

View File

Before

Width:  |  Height:  |  Size: 560 KiB

After

Width:  |  Height:  |  Size: 560 KiB

View File

Before

Width:  |  Height:  |  Size: 667 KiB

After

Width:  |  Height:  |  Size: 667 KiB

View File

@ -12,6 +12,24 @@
<div id="ComposeTarget"> <div id="ComposeTarget">
<div class="loading">Loading...</div> <div class="loading">Loading...</div>
</div> </div>
<script>
// Prefer explicit query param override (?apiBaseUrl=http://host:port),
// then fall back to same-origin. This avoids Docker secrets and works with Nginx proxy.
(function(){
try {
const params = new URLSearchParams(window.location.search);
const override = params.get('apiBaseUrl');
if (override) {
globalThis.API_BASE_URL = override.replace(/\/$/, '');
} else {
globalThis.API_BASE_URL = window.location.origin.replace(/\/$/, '');
}
} catch (e) {
globalThis.API_BASE_URL = 'http://localhost:8081';
}
})();
// KMP bundle will read globalThis.API_BASE_URL in PlatformConfig.js
</script>
<script src="web-app.js"></script> <script src="web-app.js"></script>
<script> <script>
// Register Service Worker only in non-localhost environments // Register Service Worker only in non-localhost environments

View File

@ -2,8 +2,17 @@ import androidx.compose.ui.window.Window
import androidx.compose.ui.window.application import androidx.compose.ui.window.application
import androidx.compose.ui.window.WindowState import androidx.compose.ui.window.WindowState
import androidx.compose.ui.unit.dp import androidx.compose.ui.unit.dp
import at.mocode.clients.shared.di.initKoin
import at.mocode.frontend.core.network.networkModule
fun main() = application { fun main() = application {
// Initialize DI (Koin) with shared modules + network module
try {
initKoin { modules(networkModule) }
println("[DesktopApp] Koin initialized with networkModule")
} catch (e: Exception) {
println("[DesktopApp] Koin initialization warning: ${e.message}")
}
Window( Window(
onCloseRequest = ::exitApplication, onCloseRequest = ::exitApplication,
title = "Meldestelle - Desktop Development", title = "Meldestelle - Desktop Development",

View File

@ -34,7 +34,7 @@ if (config.devServer) {
...config.devServer, ...config.devServer,
historyApiFallback: true, historyApiFallback: true,
hot: true, hot: true,
// API Proxy für Backend-Anfragen (Array-Format für moderne Webpack) // API Proxy für Backend-Anfragen (Array-Format für modernen Webpack)
proxy: [ proxy: [
{ {
context: ['/api'], context: ['/api'],

Some files were not shown because too many files have changed in this diff Show More