provisioning/schemas/platform/templates/kubernetes/control-center-deployment.yaml.ncl
Jesús Pérez 44648e3206
chore: complete nickel migration and consolidate legacy configs
- Remove KCL ecosystem (~220 files deleted)
- Migrate all infrastructure to Nickel schema system
- Consolidate documentation: legacy docs → provisioning/docs/src/
- Add CI/CD workflows (.github/) and Rust build config (.cargo/)
- Update core system for Nickel schema parsing
- Update README.md and CHANGES.md for v5.0.0 release
- Fix pre-commit hooks: end-of-file, trailing-whitespace
- Breaking changes: KCL workspaces require migration
- Migration bridge available in docs/src/development/
2026-01-08 09:55:37 +00:00

320 lines
8.6 KiB
Plaintext

# Control Center Kubernetes Deployment
# Policy and RBAC management service
# Supports 4 deployment modes: solo, multiuser, cicd, enterprise
#
# Usage:
# nickel eval --format json control-center-deployment.yaml.ncl | yq -P > control-center-deployment.yaml
# kubectl apply -f control-center-deployment.yaml
{
  apiVersion = "apps/v1",
  kind = "Deployment",
  metadata = {
    name = "control-center",
    labels = {
      app = "control-center",
      component = "provisioning-platform",
    },
  },
  spec = {
    # Replica guidance per deployment mode:
    #   solo: 1 (single developer), multiuser: 1-2 (team),
    #   cicd: 1 (stateless), enterprise: 2 (HA with database backend)
    replicas = 2, # Override per mode
    selector = {
      matchLabels = {
        app = "control-center",
      },
    },
    template = {
      metadata = {
        labels = {
          app = "control-center",
          component = "provisioning-platform",
        },
        annotations = {
          # NOTE(review): scrape port is 8080 here, but a dedicated "metrics"
          # containerPort 8081 is declared below — confirm which port actually
          # serves /metrics and align the two.
          "prometheus.io/scrape" = "true",
          "prometheus.io/port" = "8080",
          "prometheus.io/path" = "/metrics",
        },
      },
      spec = {
        serviceAccountName = "control-center",
        # Init container: best-effort wait for PostgreSQL.
        # FIX: the previous command was `until nc -z postgres 5432 || true; ...`,
        # where `|| true` made the until-condition always succeed, so the loop
        # body never ran and the container exited immediately without waiting.
        # The bounded loop below retries for up to ~60s (30 x 2s), then
        # continues anyway so database-less modes (solo/cicd, where the
        # database_url secret is optional) are never blocked forever.
        initContainers = [
          {
            name = "wait-for-db",
            image = "busybox:1.35",
            command = ["sh", "-c", "i=0; while [ $i -lt 30 ]; do nc -z postgres 5432 && exit 0; echo waiting for db; sleep 2; i=$((i+1)); done; echo db not reachable, continuing; exit 0"],
            imagePullPolicy = "IfNotPresent",
          },
        ],
        containers = [
          {
            name = "control-center",
            image = "provisioning-control-center:latest",
            imagePullPolicy = "Always",
            ports = [
              {
                name = "http",
                containerPort = 8080,
                protocol = "TCP",
              },
              {
                name = "metrics",
                containerPort = 8081,
                protocol = "TCP",
              },
            ],
            env = [
              {
                name = "CONTROL_CENTER_MODE",
                value = "kubernetes",
              },
              {
                name = "CONTROL_CENTER_SERVER_HOST",
                value = "0.0.0.0",
              },
              {
                name = "CONTROL_CENTER_SERVER_PORT",
                value = "8080",
              },
              {
                name = "CONTROL_CENTER_DATABASE",
                valueFrom = {
                  configMapKeyRef = {
                    name = "control-center-config",
                    key = "database_type",
                  },
                },
              },
              # PostgreSQL connection (multiuser/enterprise modes).
              # Marked optional so solo/cicd modes work without the secret.
              {
                name = "CONTROL_CENTER_DATABASE_URL",
                valueFrom = {
                  secretKeyRef = {
                    name = "control-center-secrets",
                    key = "database_url",
                    optional = true,
                  },
                },
              },
              # RBAC configuration
              {
                name = "CONTROL_CENTER_RBAC_ENABLED",
                valueFrom = {
                  configMapKeyRef = {
                    name = "control-center-config",
                    key = "rbac_enabled",
                  },
                },
              },
              # JWT configuration (secret holds the signing key; issuer and
              # audience are non-sensitive and come from the ConfigMap)
              {
                name = "CONTROL_CENTER_JWT_SECRET",
                valueFrom = {
                  secretKeyRef = {
                    name = "control-center-secrets",
                    key = "jwt_secret",
                  },
                },
              },
              {
                name = "CONTROL_CENTER_JWT_ISSUER",
                valueFrom = {
                  configMapKeyRef = {
                    name = "control-center-config",
                    key = "jwt_issuer",
                  },
                },
              },
              {
                name = "CONTROL_CENTER_JWT_AUDIENCE",
                valueFrom = {
                  configMapKeyRef = {
                    name = "control-center-config",
                    key = "jwt_audience",
                  },
                },
              },
              # MFA configuration
              {
                name = "CONTROL_CENTER_MFA_REQUIRED",
                valueFrom = {
                  configMapKeyRef = {
                    name = "control-center-config",
                    key = "mfa_required",
                  },
                },
              },
              # Logging
              {
                name = "CONTROL_CENTER_LOG_LEVEL",
                valueFrom = {
                  configMapKeyRef = {
                    name = "control-center-config",
                    key = "log_level",
                  },
                },
              },
              # Pod metadata exposed via the downward API
              {
                name = "POD_NAME",
                valueFrom = {
                  fieldRef = {
                    fieldPath = "metadata.name",
                  },
                },
              },
              {
                name = "POD_NAMESPACE",
                valueFrom = {
                  fieldRef = {
                    fieldPath = "metadata.namespace",
                  },
                },
              },
            ],
            # Health checks: liveness restarts a hung process, readiness gates
            # traffic until the service reports ready.
            livenessProbe = {
              httpGet = {
                path = "/health",
                port = 8080,
              },
              initialDelaySeconds = 30,
              periodSeconds = 10,
              timeoutSeconds = 5,
              failureThreshold = 3,
            },
            readinessProbe = {
              httpGet = {
                path = "/ready",
                port = 8080,
              },
              initialDelaySeconds = 20,
              periodSeconds = 5,
              timeoutSeconds = 3,
              failureThreshold = 3,
            },
            # Resource requests/limits
            resources = {
              requests = {
                cpu = "250m",
                memory = "256Mi",
              },
              limits = {
                cpu = "1",
                memory = "1Gi",
              },
            },
            # Volume mounts
            volumeMounts = [
              {
                name = "control-center-config",
                mountPath = "/etc/provisioning/control-center",
                readOnly = true,
              },
              {
                name = "control-center-logs",
                mountPath = "/var/log/provisioning/control-center",
              },
            ],
            # Container security context: unprivileged, non-root, no extra
            # capabilities. Root filesystem stays writable — presumably the
            # service writes outside the logs mount; TODO confirm before
            # tightening to readOnlyRootFilesystem = true.
            securityContext = {
              allowPrivilegeEscalation = false,
              runAsNonRoot = true,
              runAsUser = 1000,
              capabilities = {
                drop = ["ALL"],
              },
              readOnlyRootFilesystem = false,
            },
          },
        ],
        # Volumes backing the mounts above
        volumes = [
          {
            name = "control-center-config",
            configMap = {
              name = "control-center-config",
              defaultMode = 420, # 0644 in octal
            },
          },
          {
            name = "control-center-logs",
            persistentVolumeClaim = {
              claimName = "control-center-logs",
            },
          },
        ],
        # Soft anti-affinity: prefer spreading replicas across nodes
        affinity = {
          podAntiAffinity = {
            preferredDuringSchedulingIgnoredDuringExecution = [
              {
                weight = 100,
                podAffinityTerm = {
                  labelSelector = {
                    matchExpressions = [
                      {
                        key = "app",
                        operator = "In",
                        values = ["control-center"],
                      },
                    ],
                  },
                  topologyKey = "kubernetes.io/hostname",
                },
              },
            ],
          },
        },
        # Tolerate node problems for 5 minutes before eviction
        tolerations = [
          {
            key = "node.kubernetes.io/not-ready",
            operator = "Exists",
            effect = "NoExecute",
            tolerationSeconds = 300,
          },
          {
            key = "node.kubernetes.io/unreachable",
            operator = "Exists",
            effect = "NoExecute",
            tolerationSeconds = 300,
          },
        ],
        restartPolicy = "Always",
        terminationGracePeriodSeconds = 30,
      },
    },
    # Zero-downtime rollout: bring one new pod up before taking any down
    strategy = {
      type = "RollingUpdate",
      rollingUpdate = {
        maxSurge = 1,
        maxUnavailable = 0,
      },
    },
    revisionHistoryLimit = 5,
  },
}