From 27dbc5cd08735711f3d29a475637d536d49fb02e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jesu=CC=81s=20Pe=CC=81rez?= Date: Sat, 17 Jan 2026 03:58:28 +0000 Subject: [PATCH] chore: review docs from scratch --- config/config.defaults.toml | 14 + core | 2 +- docs/.markdownlint-cli2.jsonc | 34 +- docs/README.md | 138 - docs/book.toml | 94 +- docs/book/404.html | 36 +- docs/book/CNAME | 1 - .../FontAwesome/fonts/fontawesome-webfont.svg | 1418 +- .../adr/ADR-009-security-system-complete.html | 780 - .../architecture/integration-patterns.html | 599 +- .../architecture/multi-repo-strategy.html | 1109 - .../orchestrator-auth-integration.html | 756 - .../orchestrator-integration-model.html | 917 - docs/book/clipboard.min.js | 4 +- docs/book/development/build-system.html | 1189 +- .../development/distribution-process.html | 1046 - .../development/implementation-guide.html | 1020 - docs/book/development/integration.html | 1302 - docs/book/development/project-structure.html | 558 - docs/book/development/workflow.html | 1099 - docs/book/elasticlunr.min.js | 2 +- docs/book/fonts/SOURCE-CODE-PRO-LICENSE.txt | 2 +- .../book/guides/customize-infrastructure.html | 915 - docs/book/guides/from-scratch.html | 1505 +- docs/book/guides/update-infrastructure.html | 862 - docs/book/highlight.js | 2 +- docs/book/index.html | 422 +- docs/book/print.html | 101909 +++------------ docs/book/searchindex.js | 2 +- docs/book/toc.html | 4 +- docs/book/toc.js | 2 +- .../workspaces/cost-optimized/README.md | 1 - .../workspaces/cost-optimized/index.html | 227 - .../multi-provider-web-app/README.md | 1 - .../multi-provider-web-app/index.html | 227 - .../workspaces/multi-region-ha/README.md | 1 - .../workspaces/multi-region-ha/index.html | 227 - docs/fix-markdown.nu | 74 + docs/src/PROVISIONING.md | 944 - docs/src/README.md | 408 +- docs/src/SUMMARY.md | 342 +- docs/src/ai/README.md | 364 +- docs/src/ai/ai-agents.md | 532 - docs/src/ai/ai-architecture.md | 439 + docs/src/ai/ai-assisted-forms.md | 438 - docs/src/ai/ai-service-crate.md | 479 + docs/src/ai/architecture.md | 194 - docs/src/ai/config-generation.md | 64 - docs/src/ai/configuration.md | 601 - docs/src/ai/cost-management.md | 497 - docs/src/ai/mcp-integration.md | 594 - docs/src/ai/natural-language-config.md | 469 - .../src/ai/natural-language-infrastructure.md | 436 + docs/src/ai/rag-and-knowledge.md | 381 + docs/src/ai/rag-system.md | 450 - docs/src/ai/security-policies.md | 537 - docs/src/ai/troubleshooting-with-ai.md | 502 - docs/src/ai/typedialog-integration.md | 385 + docs/src/api-reference/README.md | 338 +- docs/src/api-reference/cli-commands.md | 1152 + docs/src/api-reference/control-center-api.md | 1 + .../api-reference/control-center-endpoints.md | 177 + docs/src/api-reference/examples.md | 1 + .../api-reference/extension-registry-api.md | 72 + docs/src/api-reference/extensions.md | 1205 - .../src/api-reference/integration-examples.md | 1592 - docs/src/api-reference/nushell-api.md | 111 - docs/src/api-reference/nushell-libraries.md | 1 + docs/src/api-reference/orchestrator-api.md | 1 + .../api-reference/orchestrator-endpoints.md | 185 + docs/src/api-reference/path-resolution.md | 730 - docs/src/api-reference/provider-api.md | 186 - docs/src/api-reference/rest-api.md | 2045 +- docs/src/api-reference/schemas/openapi.yaml | 1157 - docs/src/api-reference/sdks.md | 1097 - docs/src/api-reference/websocket.md | 892 - docs/src/architecture/README.md | 178 +- .../adr/ADR-001-project-structure.md | 118 - .../adr/ADR-002-distribution-strategy.md | 179 - .../adr/ADR-003-workspace-isolation.md | 191 - .../adr/ADR-004-hybrid-architecture.md | 210 - .../adr/ADR-005-extension-framework.md | 284 - .../ADR-006-provisioning-cli-refactoring.md | 390 - .../adr/ADR-007-kms-simplification.md | 266 - .../adr/ADR-008-cedar-authorization.md | 352 - .../adr/ADR-009-security-system-complete.md | 661 - docs/src/architecture/adr/README.md | 99 +- .../architecture/adr/adr-001-modular-cli.md | 57 + .../adr/adr-002-workspace-first.md | 55 + .../adr/adr-003-nickel-as-source-of-truth.md | 106 + .../adr/adr-004-microservice-distribution.md | 125 + .../adr/adr-005-service-communication.md | 156 + .../adr/adr-006-post-quantum-cryptography.md | 156 + .../adr/adr-007-data-encryption-strategy.md | 237 + .../adr-008-observability-and-monitoring.md | 268 + .../adr/adr-009-slo-error-budgets.md | 231 + .../adr-010-configuration-format-strategy.md | 413 - .../adr-010-incident-response-automation.md | 409 + .../adr/adr-011-nickel-migration.md | 479 - ...r-012-nushell-nickel-plugin-cli-wrapper.md | 379 - .../adr/adr-013-typdialog-integration.md | 592 - .../adr/adr-014-secretumvault-integration.md | 659 - .../adr-015-ai-integration-architecture.md | 1123 - ...r-016-schema-driven-accessor-generation.md | 159 - ...17-plugin-wrapper-abstraction-framework.md | 225 - .../adr-018-help-system-fluent-integration.md | 280 - ...019-configuration-loader-modularization.md | 262 - ...dr-020-command-handler-domain-splitting.md | 312 - .../src/architecture/architecture-overview.md | 1337 - .../architecture/component-architecture.md | 426 + .../config-loading-architecture.md | 266 - .../database-and-config-architecture.md | 385 - docs/src/architecture/design-patterns.md | 142 + docs/src/architecture/design-principles.md | 676 +- .../src/architecture/ecosystem-integration.md | 523 - docs/src/architecture/integration-patterns.md | 631 +- .../architecture/multi-repo-architecture.md | 710 - docs/src/architecture/multi-repo-strategy.md | 1025 - .../nickel-executable-examples.md | 773 - .../architecture/nickel-vs-kcl-comparison.md | 1207 - .../orchestrator-auth-integration.md | 621 - docs/src/architecture/orchestrator-info.md | 149 - .../orchestrator-integration-model.md | 803 - .../architecture/package-and-loader-system.md | 410 - docs/src/architecture/repo-dist-analysis.md | 1611 - docs/src/architecture/system-overview.md | 811 +- .../typedialog-nickel-integration.md | 952 - docs/src/configuration/config-validation.md | 631 - docs/src/development/README.md | 110 + docs/src/development/api-guide.md | 74 + docs/src/development/auth-metadata-guide.md | 536 - docs/src/development/build-system.md | 1401 +- docs/src/development/command-handler-guide.md | 614 - docs/src/development/command-reference.md | 54 - docs/src/development/contributing.md | 258 + .../ctrl-c-implementation-notes.md | 295 - .../custom-detector-development.md | 527 + .../custom-provider-development.md | 663 + docs/src/development/custom-task-services.md | 420 + docs/src/development/dev-configuration.md | 984 - .../development/dev-workspace-management.md | 915 - docs/src/development/distribution-process.md | 1005 - docs/src/development/extension-development.md | 484 + docs/src/development/glossary.md | 1760 - docs/src/development/implementation-guide.md | 897 - .../infrastructure-specific-extensions.md | 1230 - docs/src/development/integration.md | 1219 - docs/src/development/kms-simplification.md | 570 - docs/src/development/mcp-server.md | 114 - .../nushell-script-best-practices.md | 398 + docs/src/development/plugin-development.md | 232 + docs/src/development/project-structure.md | 411 - docs/src/development/provider-development.md | 287 + .../provider-agnostic-architecture.md | 348 - .../providers/provider-comparison.md | 400 - .../providers/provider-development-guide.md | 718 - .../providers/provider-distribution-guide.md | 681 - .../providers/quick-provider-guide.md | 322 - .../provisioning-daemon-internals.md | 556 + .../taskservs/taskserv-categorization.md | 70 - .../taskservs/taskserv-quick-guide.md | 249 - .../src/development/testing-infrastructure.md | 395 + docs/src/development/testing.md | 488 + .../typedialog-platform-config-guide.md | 1006 - docs/src/development/workflow.md | 1065 - docs/src/examples/README.md | 150 + docs/src/examples/basic-setup.md | 78 + docs/src/examples/batch-operations-example.md | 303 + docs/src/examples/cicd-pipeline-examples.md | 536 + .../examples/compliance-and-audit-example.md | 435 + .../examples/cost-optimization-examples.md | 446 + docs/src/examples/custom-workflows.md | 41 + docs/src/examples/database-examples.md | 436 + docs/src/examples/disaster-recovery-drills.md | 451 + docs/src/examples/encryption-examples.md | 476 + docs/src/examples/finops-cost-governance.md | 381 + .../examples/high-availability-examples.md | 505 + docs/src/examples/iac-testing-strategies.md | 462 + docs/src/examples/kubernetes-deployment.md | 61 + docs/src/examples/legacy-system-migration.md | 432 + .../machine-learning-infrastructure.md | 477 + docs/src/examples/microservices-deployment.md | 145 + docs/src/examples/monitoring-examples.md | 555 + docs/src/examples/multi-cloud.md | 52 + docs/src/examples/nickel-config-examples.md | 529 + docs/src/examples/plugin-examples.md | 390 + docs/src/examples/provider-specific-aws.md | 262 + .../src/examples/provider-specific-hetzner.md | 166 + .../src/examples/provider-specific-upcloud.md | 116 + docs/src/examples/real-world-scenario.md | 235 + docs/src/examples/security-examples.md | 72 + docs/src/examples/terraform-migration.md | 95 + docs/src/examples/workspace-examples.md | 444 + docs/src/features/README.md | 136 + docs/src/features/batch-workflows.md | 1 + docs/src/features/cli-architecture.md | 673 + docs/src/features/configuration-system.md | 1 + docs/src/features/detector-system.md | 563 + docs/src/features/extension-registry.md | 618 + docs/src/features/installer.md | 1 + docs/src/features/interactive-guides.md | 1 + docs/src/features/multilingual-support.md | 388 + docs/src/features/orchestrator.md | 1 + docs/src/features/plugins.md | 718 + docs/src/features/provisioning-daemon.md | 472 + docs/src/features/security-system.md | 1 + docs/src/features/test-environment.md | 1 + docs/src/features/version-management.md | 1 + docs/src/features/workspace-management.md | 471 + docs/src/getting-started/01-prerequisites.md | 251 - docs/src/getting-started/02-installation.md | 235 - .../getting-started/03-first-deployment.md | 273 - docs/src/getting-started/04-verification.md | 342 - .../05-platform-configuration.md | 499 - docs/src/getting-started/README.md | 179 + docs/src/getting-started/first-deployment.md | 455 + docs/src/getting-started/getting-started.md | 551 - .../src/getting-started/installation-guide.md | 536 - .../installation-validation-guide.md | 622 - docs/src/getting-started/installation.md | 309 + docs/src/getting-started/prerequisites.md | 318 + docs/src/getting-started/quick-start.md | 366 + .../getting-started/quickstart-cheatsheet.md | 1107 - docs/src/getting-started/quickstart.md | 29 - docs/src/getting-started/setup-profiles.md | 832 - docs/src/getting-started/setup-quickstart.md | 178 - .../src/getting-started/setup-system-guide.md | 206 - docs/src/getting-started/setup.md | 663 - docs/src/getting-started/verification.md | 372 + docs/src/guides/README.md | 166 +- docs/src/guides/advanced-networking.md | 483 + .../guides/advanced-workflow-orchestration.md | 387 + docs/src/guides/ci-cd-integration.md | 90 + docs/src/guides/custom-extensions.md | 338 + docs/src/guides/customize-infrastructure.md | 846 - docs/src/guides/disaster-recovery.md | 390 + .../extension-development-quickstart.md | 437 - docs/src/guides/from-scratch.md | 1728 +- .../gitops-infrastructure-deployment.md | 512 + docs/src/guides/guide-system.md | 153 - docs/src/guides/hybrid-cloud-deployment.md | 424 + docs/src/guides/infrastructure-setup.md | 362 - .../src/guides/internationalization-system.md | 413 - docs/src/guides/managing-multiple-clouds.md | 139 + docs/src/guides/multi-cloud-deployment.md | 864 + docs/src/guides/multi-provider-deployment.md | 1280 - docs/src/guides/multi-provider-networking.md | 968 - docs/src/guides/production-readiness.md | 179 + docs/src/guides/provider-digitalocean.md | 784 - docs/src/guides/provider-hetzner.md | 780 - docs/src/guides/scaling-infrastructure.md | 88 + docs/src/guides/secrets-rotation-strategy.md | 426 + docs/src/guides/update-infrastructure.md | 842 - .../workspace-generation-quick-reference.md | 283 - docs/src/guides/workspace-management.md | 1 + docs/src/infrastructure/README.md | 212 + .../batch-workflow-multi-provider.md | 809 - .../infrastructure/batch-workflow-system.md | 93 - docs/src/infrastructure/batch-workflows.md | 388 + docs/src/infrastructure/cli-architecture.md | 136 - docs/src/infrastructure/cli-reference.md | 976 - docs/src/infrastructure/clusters.md | 290 + .../infrastructure/config-rendering-guide.md | 823 - .../infrastructure/configuration-system.md | 229 +- docs/src/infrastructure/configuration.md | 771 - .../infrastructure/dynamic-secrets-guide.md | 194 - .../infrastructure-from-code-guide.md | 677 - .../infrastructure-management.md | 1117 - docs/src/infrastructure/mode-system-guide.md | 496 - .../infrastructure/multi-tenancy-patterns.md | 376 + docs/src/infrastructure/nickel-guide.md | 650 + docs/src/infrastructure/provider-guides.md | 472 + docs/src/infrastructure/providers.md | 116 + docs/src/infrastructure/schemas-reference.md | 312 + .../src/infrastructure/task-services-guide.md | 462 + docs/src/infrastructure/task-services.md | 246 + docs/src/infrastructure/version-management.md | 293 + .../workspace-config-architecture.md | 412 - .../workspaces/workspace-config-commands.md | 308 - .../workspaces/workspace-enforcement-guide.md | 615 - .../workspaces/workspace-guide.md | 43 - .../workspaces/workspace-infra-reference.md | 449 - .../workspaces/workspace-setup.md | 277 - .../workspaces/workspace-switching-guide.md | 450 - .../workspaces/workspace-switching-system.md | 148 - .../integration/gitea-integration-guide.md | 721 - .../integration/integrations-quickstart.md | 622 - docs/src/integration/oci-registry-guide.md | 889 - docs/src/integration/oci-registry-platform.md | 159 - .../secrets-service-layer-complete.md | 966 - .../integration/service-mesh-ingress-guide.md | 1368 - docs/src/operations/README.md | 168 +- docs/src/operations/backup-recovery.md | 473 + docs/src/operations/backup-strategies.md | 119 + .../operations/break-glass-training-guide.md | 728 - docs/src/operations/capacity-planning.md | 100 + .../cedar-policies-production-guide.md | 865 - docs/src/operations/control-center.md | 281 - docs/src/operations/coredns-guide.md | 1283 - docs/src/operations/deployment-guide.md | 1361 - docs/src/operations/deployment-modes.md | 246 + docs/src/operations/deployment-strategies.md | 324 + .../incident-response-procedures.md | 363 + .../operations/incident-response-runbooks.md | 1652 - docs/src/operations/installer-system.md | 288 - docs/src/operations/installer.md | 182 - docs/src/operations/mfa-admin-setup-guide.md | 1370 - .../operations/monitoring-alerting-setup.md | 1149 - docs/src/operations/monitoring.md | 511 + docs/src/operations/orchestrator-system.md | 96 - docs/src/operations/orchestrator.md | 153 - .../operations/performance-optimization.md | 263 + docs/src/operations/platform-health.md | 507 + docs/src/operations/platform.md | 366 - .../production-readiness-checklist.md | 353 - docs/src/operations/provisioning-server.md | 220 - docs/src/operations/resource-management.md | 58 + .../operations/service-management-guide.md | 1430 - docs/src/operations/service-management.md | 533 + docs/src/operations/troubleshooting.md | 477 + docs/src/operations/upgrade.md | 560 + docs/src/quick-reference/README.md | 45 - docs/src/quick-reference/general.md | 343 - docs/src/quick-reference/justfile-recipes.md | 221 - docs/src/quick-reference/master.md | 35 - docs/src/quick-reference/oci.md | 439 - .../platform-operations-cheatsheet.md | 623 - .../quick-reference/sudo-password-handling.md | 161 - .../microservices-communication.svg | 101 + .../diagrams/architecture/system-overview.svg | 108 + .../examples/multi-cloud-deployment.svg | 101 + .../diagrams/features/cli-architecture.svg | 87 + .../diagrams/features/daemon-architecture.svg | 82 + .../infrastructure/configuration-loading.svg | 91 + .../infrastructure/nickel-validation-flow.svg | 88 + .../infrastructure/workspace-hierarchy.svg | 112 + .../operations/disaster-recovery-topology.svg | 264 + .../diagrams/operations/monitoring-stack.svg | 98 + .../diagrams/security/authentication-flow.svg | 131 + .../diagrams/security/authorization-cedar.svg | 144 + .../diagrams/security/encryption-layers.svg | 133 + .../diagrams/workflows/batch-workflow-dag.svg | 140 + .../workflows/deployment-pipeline.svg | 101 + docs/src/roadmap/README.md | 147 - docs/src/roadmap/ai-integration.md | 189 - docs/src/roadmap/native-plugins.md | 252 - docs/src/roadmap/nickel-workflows.md | 269 - docs/src/security/README.md | 273 + docs/src/security/audit-logging.md | 1 + .../security/authentication-layer-guide.md | 927 - docs/src/security/authentication.md | 539 + docs/src/security/authorization.md | 1 + docs/src/security/certificate-management.md | 1 + docs/src/security/compliance.md | 1 + docs/src/security/config-encryption-guide.md | 943 - docs/src/security/data-protection.md | 79 + docs/src/security/encryption.md | 1 + docs/src/security/incident-response.md | 59 + docs/src/security/kms-guide.md | 1 + docs/src/security/kms-service.md | 190 - docs/src/security/mfa.md | 1 + docs/src/security/nushell-plugins-guide.md | 1000 - docs/src/security/nushell-plugins-system.md | 77 - docs/src/security/plugin-integration-guide.md | 2191 - docs/src/security/plugin-usage-guide.md | 395 - docs/src/security/rustyvault-kms-guide.md | 547 - docs/src/security/secrets-management-guide.md | 532 - docs/src/security/secrets-management.md | 1 + docs/src/security/secretumvault-guide.md | 466 + docs/src/security/secretumvault-kms-guide.md | 647 - docs/src/security/secure-communication.md | 1 + docs/src/security/security-system.md | 171 - docs/src/security/security-testing.md | 1 + .../security/ssh-temporal-keys-user-guide.md | 615 - docs/src/setup/README.md | 228 + docs/src/setup/configuration.md | 541 + docs/src/setup/initial-configuration.md | 71 + docs/src/setup/initial-setup.md | 279 + docs/src/setup/workspace-setup.md | 433 + docs/src/testing/taskserv-validation-guide.md | 555 - docs/src/testing/test-environment-guide.md | 491 - docs/src/testing/test-environment-system.md | 187 - docs/src/troubleshooting/README.md | 151 + docs/src/troubleshooting/common-issues.md | 1 + docs/src/troubleshooting/debug-guide.md | 1 + docs/src/troubleshooting/deployment-errors.md | 125 + docs/src/troubleshooting/getting-help.md | 1 + docs/src/troubleshooting/logs-analysis.md | 1 + docs/src/troubleshooting/network-issues.md | 137 + .../src/troubleshooting/performance-issues.md | 142 + .../troubleshooting/troubleshooting-guide.md | 1088 - .../troubleshooting/ctrl-c-sudo-handling.md | 209 - justfiles/package.just | 69 +- schemas/config/environments/main.ncl | 85 + schemas/main.ncl | 5 +- tools/distribution/prepare-core-dist.nu | 15 +- tools/distribution/prepare-platform-dist.nu | 15 +- tools/package/build-containers.nu | 8 +- tools/package/package-binaries.nu | 3 +- 399 files changed, 71217 insertions(+), 200254 deletions(-) delete mode 100644 docs/README.md delete mode 100644 docs/book/CNAME delete mode 100644 docs/book/architecture/adr/ADR-009-security-system-complete.html delete mode 100644 docs/book/architecture/multi-repo-strategy.html delete mode 100644 docs/book/architecture/orchestrator-auth-integration.html delete mode 100644 docs/book/architecture/orchestrator-integration-model.html delete mode 100644 docs/book/development/distribution-process.html delete mode 100644 docs/book/development/implementation-guide.html delete mode 100644 docs/book/development/integration.html delete mode 100644 docs/book/development/project-structure.html delete mode 100644 docs/book/development/workflow.html delete mode 100644 docs/book/guides/customize-infrastructure.html delete mode 100644 docs/book/guides/update-infrastructure.html delete mode 100644 docs/examples/workspaces/cost-optimized/README.md delete mode 100644 docs/examples/workspaces/cost-optimized/index.html delete mode 100644 docs/examples/workspaces/multi-provider-web-app/README.md delete mode 100644 docs/examples/workspaces/multi-provider-web-app/index.html delete mode 100644 docs/examples/workspaces/multi-region-ha/README.md delete mode 100644 docs/examples/workspaces/multi-region-ha/index.html create mode 100644 docs/fix-markdown.nu delete mode 100644 docs/src/PROVISIONING.md delete mode 100644 docs/src/ai/ai-agents.md create mode 100644 docs/src/ai/ai-architecture.md delete mode 100644 docs/src/ai/ai-assisted-forms.md create mode 100644 docs/src/ai/ai-service-crate.md delete mode 100644 docs/src/ai/architecture.md delete mode 100644 docs/src/ai/config-generation.md delete mode 100644 docs/src/ai/configuration.md delete mode 100644 docs/src/ai/cost-management.md delete mode 100644 docs/src/ai/mcp-integration.md delete mode 100644 docs/src/ai/natural-language-config.md create mode 100644 docs/src/ai/natural-language-infrastructure.md create mode 100644 docs/src/ai/rag-and-knowledge.md delete mode 100644 docs/src/ai/rag-system.md delete mode 100644 docs/src/ai/security-policies.md delete mode 100644 docs/src/ai/troubleshooting-with-ai.md create mode 100644 docs/src/ai/typedialog-integration.md create mode 100644 docs/src/api-reference/cli-commands.md create mode 100644 docs/src/api-reference/control-center-api.md create mode 100644 docs/src/api-reference/control-center-endpoints.md create mode 100644 docs/src/api-reference/examples.md create mode 100644 docs/src/api-reference/extension-registry-api.md delete mode 100644 docs/src/api-reference/extensions.md delete mode 100644 docs/src/api-reference/integration-examples.md delete mode 100644 docs/src/api-reference/nushell-api.md create mode 100644 docs/src/api-reference/nushell-libraries.md create mode 100644 docs/src/api-reference/orchestrator-api.md create mode 100644 docs/src/api-reference/orchestrator-endpoints.md delete mode 100644 docs/src/api-reference/path-resolution.md delete mode 100644 docs/src/api-reference/provider-api.md delete mode 100644 docs/src/api-reference/schemas/openapi.yaml delete mode 100644 docs/src/api-reference/sdks.md delete mode 100644 docs/src/api-reference/websocket.md delete mode 100644 docs/src/architecture/adr/ADR-001-project-structure.md delete mode 100644 docs/src/architecture/adr/ADR-002-distribution-strategy.md delete mode 100644 docs/src/architecture/adr/ADR-003-workspace-isolation.md delete mode 100644 docs/src/architecture/adr/ADR-004-hybrid-architecture.md delete mode 100644 docs/src/architecture/adr/ADR-005-extension-framework.md delete mode 100644 docs/src/architecture/adr/ADR-006-provisioning-cli-refactoring.md delete mode 100644 docs/src/architecture/adr/ADR-007-kms-simplification.md delete mode 100644 docs/src/architecture/adr/ADR-008-cedar-authorization.md delete mode 100644 docs/src/architecture/adr/ADR-009-security-system-complete.md create mode 100644 docs/src/architecture/adr/adr-001-modular-cli.md create mode 100644 docs/src/architecture/adr/adr-002-workspace-first.md create mode 100644 docs/src/architecture/adr/adr-003-nickel-as-source-of-truth.md create mode 100644 docs/src/architecture/adr/adr-004-microservice-distribution.md create mode 100644 docs/src/architecture/adr/adr-005-service-communication.md create mode 100644 docs/src/architecture/adr/adr-006-post-quantum-cryptography.md create mode 100644 docs/src/architecture/adr/adr-007-data-encryption-strategy.md create mode 100644 docs/src/architecture/adr/adr-008-observability-and-monitoring.md create mode 100644 docs/src/architecture/adr/adr-009-slo-error-budgets.md delete mode 100644 docs/src/architecture/adr/adr-010-configuration-format-strategy.md create mode 100644 docs/src/architecture/adr/adr-010-incident-response-automation.md delete mode 100644 docs/src/architecture/adr/adr-011-nickel-migration.md delete mode 100644 docs/src/architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.md delete mode 100644 docs/src/architecture/adr/adr-013-typdialog-integration.md delete mode 100644 docs/src/architecture/adr/adr-014-secretumvault-integration.md delete mode 100644 docs/src/architecture/adr/adr-015-ai-integration-architecture.md delete mode 100644 docs/src/architecture/adr/adr-016-schema-driven-accessor-generation.md delete mode 100644 docs/src/architecture/adr/adr-017-plugin-wrapper-abstraction-framework.md delete mode 100644 docs/src/architecture/adr/adr-018-help-system-fluent-integration.md delete mode 100644 docs/src/architecture/adr/adr-019-configuration-loader-modularization.md delete mode 100644 docs/src/architecture/adr/adr-020-command-handler-domain-splitting.md delete mode 100644 docs/src/architecture/architecture-overview.md create mode 100644 docs/src/architecture/component-architecture.md delete mode 100644 docs/src/architecture/config-loading-architecture.md delete mode 100644 docs/src/architecture/database-and-config-architecture.md create mode 100644 docs/src/architecture/design-patterns.md delete mode 100644 docs/src/architecture/ecosystem-integration.md delete mode 100644 docs/src/architecture/multi-repo-architecture.md delete mode 100644 docs/src/architecture/multi-repo-strategy.md delete mode 100644 docs/src/architecture/nickel-executable-examples.md delete mode 100644 docs/src/architecture/nickel-vs-kcl-comparison.md delete mode 100644 docs/src/architecture/orchestrator-auth-integration.md delete mode 100644 docs/src/architecture/orchestrator-info.md delete mode 100644 docs/src/architecture/orchestrator-integration-model.md delete mode 100644 docs/src/architecture/package-and-loader-system.md delete mode 100644 docs/src/architecture/repo-dist-analysis.md delete mode 100644 docs/src/architecture/typedialog-nickel-integration.md delete mode 100644 docs/src/configuration/config-validation.md create mode 100644 docs/src/development/README.md create mode 100644 docs/src/development/api-guide.md delete mode 100644 docs/src/development/auth-metadata-guide.md delete mode 100644 docs/src/development/command-handler-guide.md delete mode 100644 docs/src/development/command-reference.md create mode 100644 docs/src/development/contributing.md delete mode 100644 docs/src/development/ctrl-c-implementation-notes.md create mode 100644 docs/src/development/custom-detector-development.md create mode 100644 docs/src/development/custom-provider-development.md create mode 100644 docs/src/development/custom-task-services.md delete mode 100644 docs/src/development/dev-configuration.md delete mode 100644 docs/src/development/dev-workspace-management.md delete mode 100644 docs/src/development/distribution-process.md create mode 100644 docs/src/development/extension-development.md delete mode 100644 docs/src/development/glossary.md delete mode 100644 docs/src/development/implementation-guide.md delete mode 100644 docs/src/development/infrastructure-specific-extensions.md delete mode 100644 docs/src/development/integration.md delete mode 100644 docs/src/development/kms-simplification.md delete mode 100644 docs/src/development/mcp-server.md create mode 100644 docs/src/development/nushell-script-best-practices.md create mode 100644 docs/src/development/plugin-development.md delete mode 100644 docs/src/development/project-structure.md create mode 100644 docs/src/development/provider-development.md delete mode 100644 docs/src/development/providers/provider-agnostic-architecture.md delete mode 100644 docs/src/development/providers/provider-comparison.md delete mode 100644 docs/src/development/providers/provider-development-guide.md delete mode 100644 docs/src/development/providers/provider-distribution-guide.md delete mode 100644 docs/src/development/providers/quick-provider-guide.md create mode 100644 docs/src/development/provisioning-daemon-internals.md delete mode 100644 docs/src/development/taskservs/taskserv-categorization.md delete mode 100644 docs/src/development/taskservs/taskserv-quick-guide.md create mode 100644 docs/src/development/testing-infrastructure.md create mode 100644 docs/src/development/testing.md delete mode 100644 docs/src/development/typedialog-platform-config-guide.md delete mode 100644 docs/src/development/workflow.md create mode 100644 docs/src/examples/README.md create mode 100644 docs/src/examples/basic-setup.md create mode 100644 docs/src/examples/batch-operations-example.md create mode 100644 docs/src/examples/cicd-pipeline-examples.md create mode 100644 docs/src/examples/compliance-and-audit-example.md create mode 100644 docs/src/examples/cost-optimization-examples.md create mode 100644 docs/src/examples/custom-workflows.md create mode 100644 docs/src/examples/database-examples.md create mode 100644 docs/src/examples/disaster-recovery-drills.md create mode 100644 docs/src/examples/encryption-examples.md create mode 100644 docs/src/examples/finops-cost-governance.md create mode 100644 docs/src/examples/high-availability-examples.md create mode 100644 docs/src/examples/iac-testing-strategies.md create mode 100644 docs/src/examples/kubernetes-deployment.md create mode 100644 docs/src/examples/legacy-system-migration.md create mode 100644 docs/src/examples/machine-learning-infrastructure.md create mode 100644 docs/src/examples/microservices-deployment.md create mode 100644 docs/src/examples/monitoring-examples.md create mode 100644 docs/src/examples/multi-cloud.md create mode 100644 docs/src/examples/nickel-config-examples.md create mode 100644 docs/src/examples/plugin-examples.md create mode 100644 docs/src/examples/provider-specific-aws.md create mode 100644 docs/src/examples/provider-specific-hetzner.md create mode 100644 docs/src/examples/provider-specific-upcloud.md create mode 100644 docs/src/examples/real-world-scenario.md create mode 100644 docs/src/examples/security-examples.md create mode 100644 docs/src/examples/terraform-migration.md create mode 100644 docs/src/examples/workspace-examples.md create mode 100644 docs/src/features/README.md create mode 100644 docs/src/features/batch-workflows.md create mode 100644 docs/src/features/cli-architecture.md create mode 100644 docs/src/features/configuration-system.md create mode 100644 docs/src/features/detector-system.md create mode 100644 docs/src/features/extension-registry.md create mode 100644 docs/src/features/installer.md create mode 100644 docs/src/features/interactive-guides.md create mode 100644 docs/src/features/multilingual-support.md create mode 100644 docs/src/features/orchestrator.md create mode 100644 docs/src/features/plugins.md create mode 100644 docs/src/features/provisioning-daemon.md create mode 100644 docs/src/features/security-system.md create mode 100644 docs/src/features/test-environment.md create mode 100644 docs/src/features/version-management.md create mode 100644 docs/src/features/workspace-management.md delete mode 100644 docs/src/getting-started/01-prerequisites.md delete mode 100644 docs/src/getting-started/02-installation.md delete mode 100644 docs/src/getting-started/03-first-deployment.md delete mode 100644 docs/src/getting-started/04-verification.md delete mode 100644 docs/src/getting-started/05-platform-configuration.md create mode 100644 docs/src/getting-started/README.md create mode 100644 docs/src/getting-started/first-deployment.md delete mode 100644 docs/src/getting-started/getting-started.md delete mode 100644 docs/src/getting-started/installation-guide.md delete mode 100644 docs/src/getting-started/installation-validation-guide.md create mode 100644 docs/src/getting-started/installation.md create mode 100644 docs/src/getting-started/prerequisites.md create mode 100644 docs/src/getting-started/quick-start.md delete mode 100644 docs/src/getting-started/quickstart-cheatsheet.md delete mode 100644 docs/src/getting-started/quickstart.md delete mode 100644 docs/src/getting-started/setup-profiles.md delete mode 100644 docs/src/getting-started/setup-quickstart.md delete mode 100644 docs/src/getting-started/setup-system-guide.md delete mode 100644 docs/src/getting-started/setup.md create mode 100644 docs/src/getting-started/verification.md create mode 100644 docs/src/guides/advanced-networking.md create mode 100644 docs/src/guides/advanced-workflow-orchestration.md create mode 100644 docs/src/guides/ci-cd-integration.md create mode 100644 docs/src/guides/custom-extensions.md delete mode 100644 docs/src/guides/customize-infrastructure.md create mode 100644 docs/src/guides/disaster-recovery.md delete mode 100644 docs/src/guides/extension-development-quickstart.md create mode 100644 docs/src/guides/gitops-infrastructure-deployment.md delete mode 100644 docs/src/guides/guide-system.md create mode 100644 docs/src/guides/hybrid-cloud-deployment.md delete mode 100644 docs/src/guides/infrastructure-setup.md delete mode 100644 docs/src/guides/internationalization-system.md create mode 100644 docs/src/guides/managing-multiple-clouds.md create mode 100644 docs/src/guides/multi-cloud-deployment.md delete mode 100644 docs/src/guides/multi-provider-deployment.md delete mode 100644 docs/src/guides/multi-provider-networking.md create mode 100644 docs/src/guides/production-readiness.md delete mode 100644 docs/src/guides/provider-digitalocean.md delete mode 100644 docs/src/guides/provider-hetzner.md create mode 100644 docs/src/guides/scaling-infrastructure.md create mode 100644 docs/src/guides/secrets-rotation-strategy.md delete mode 100644 docs/src/guides/update-infrastructure.md delete mode 100644 docs/src/guides/workspace-generation-quick-reference.md create mode 100644 docs/src/guides/workspace-management.md create mode 100644 docs/src/infrastructure/README.md delete mode 100644 docs/src/infrastructure/batch-workflow-multi-provider.md delete mode 100644 docs/src/infrastructure/batch-workflow-system.md create mode 100644 docs/src/infrastructure/batch-workflows.md delete mode 100644 docs/src/infrastructure/cli-architecture.md delete mode 100644 docs/src/infrastructure/cli-reference.md create mode 100644 docs/src/infrastructure/clusters.md delete mode 100644 docs/src/infrastructure/config-rendering-guide.md delete mode 100644 docs/src/infrastructure/configuration.md delete mode 100644 docs/src/infrastructure/dynamic-secrets-guide.md delete mode 100644 docs/src/infrastructure/infrastructure-from-code-guide.md delete mode 100644 docs/src/infrastructure/infrastructure-management.md delete mode 100644 docs/src/infrastructure/mode-system-guide.md create mode 100644 docs/src/infrastructure/multi-tenancy-patterns.md create mode 100644 docs/src/infrastructure/nickel-guide.md create mode 100644 docs/src/infrastructure/provider-guides.md create mode 100644 docs/src/infrastructure/providers.md create mode 100644 docs/src/infrastructure/schemas-reference.md create mode 100644 docs/src/infrastructure/task-services-guide.md create mode 100644 docs/src/infrastructure/task-services.md create mode 100644 docs/src/infrastructure/version-management.md delete mode 100644 docs/src/infrastructure/workspaces/workspace-config-architecture.md delete mode 100644 docs/src/infrastructure/workspaces/workspace-config-commands.md delete mode 100644 docs/src/infrastructure/workspaces/workspace-enforcement-guide.md delete mode 100644 docs/src/infrastructure/workspaces/workspace-guide.md delete mode 100644 docs/src/infrastructure/workspaces/workspace-infra-reference.md delete mode 100644 docs/src/infrastructure/workspaces/workspace-setup.md delete mode 100644 docs/src/infrastructure/workspaces/workspace-switching-guide.md delete mode 100644 docs/src/infrastructure/workspaces/workspace-switching-system.md delete mode 100644 docs/src/integration/gitea-integration-guide.md delete mode 100644 docs/src/integration/integrations-quickstart.md delete mode 100644 docs/src/integration/oci-registry-guide.md delete mode 100644 docs/src/integration/oci-registry-platform.md delete mode 100644 docs/src/integration/secrets-service-layer-complete.md delete mode 100644 docs/src/integration/service-mesh-ingress-guide.md create mode 100644 docs/src/operations/backup-recovery.md create mode 100644 docs/src/operations/backup-strategies.md delete mode 100644 docs/src/operations/break-glass-training-guide.md create mode 100644 docs/src/operations/capacity-planning.md delete mode 100644 docs/src/operations/cedar-policies-production-guide.md delete mode 100644 docs/src/operations/control-center.md delete mode 100644 docs/src/operations/coredns-guide.md delete mode 100644 docs/src/operations/deployment-guide.md create mode 100644 docs/src/operations/deployment-modes.md create mode 100644 docs/src/operations/deployment-strategies.md create mode 100644 docs/src/operations/incident-response-procedures.md delete mode 100644 docs/src/operations/incident-response-runbooks.md delete mode 100644 docs/src/operations/installer-system.md delete mode 100644 docs/src/operations/installer.md delete mode 100644 docs/src/operations/mfa-admin-setup-guide.md delete mode 100644 docs/src/operations/monitoring-alerting-setup.md create mode 100644 docs/src/operations/monitoring.md delete mode 100644 docs/src/operations/orchestrator-system.md delete mode 100644 docs/src/operations/orchestrator.md create mode 100644 docs/src/operations/performance-optimization.md create mode 100644 docs/src/operations/platform-health.md delete mode 100644 docs/src/operations/platform.md delete mode 100644 docs/src/operations/production-readiness-checklist.md delete mode 100644 docs/src/operations/provisioning-server.md create mode 100644 docs/src/operations/resource-management.md delete mode 100644 docs/src/operations/service-management-guide.md create mode 100644 docs/src/operations/service-management.md create mode 100644 docs/src/operations/troubleshooting.md create mode 100644 docs/src/operations/upgrade.md delete mode 100644 docs/src/quick-reference/README.md delete mode 100644 docs/src/quick-reference/general.md delete mode 100644 docs/src/quick-reference/justfile-recipes.md delete mode 100644 docs/src/quick-reference/master.md delete mode 100644 docs/src/quick-reference/oci.md delete mode 100644 docs/src/quick-reference/platform-operations-cheatsheet.md delete mode 100644 docs/src/quick-reference/sudo-password-handling.md create mode 100644 docs/src/resources/diagrams/architecture/microservices-communication.svg create mode 100644 docs/src/resources/diagrams/architecture/system-overview.svg create mode 100644 docs/src/resources/diagrams/examples/multi-cloud-deployment.svg create mode 100644 docs/src/resources/diagrams/features/cli-architecture.svg create mode 100644 docs/src/resources/diagrams/features/daemon-architecture.svg create mode 100644 docs/src/resources/diagrams/infrastructure/configuration-loading.svg create mode 100644 docs/src/resources/diagrams/infrastructure/nickel-validation-flow.svg create mode 100644 docs/src/resources/diagrams/infrastructure/workspace-hierarchy.svg create mode 100644 docs/src/resources/diagrams/operations/disaster-recovery-topology.svg create mode 100644 docs/src/resources/diagrams/operations/monitoring-stack.svg create mode 100644 docs/src/resources/diagrams/security/authentication-flow.svg create mode 100644 docs/src/resources/diagrams/security/authorization-cedar.svg create mode 100644 docs/src/resources/diagrams/security/encryption-layers.svg create mode 100644 docs/src/resources/diagrams/workflows/batch-workflow-dag.svg create mode 100644 docs/src/resources/diagrams/workflows/deployment-pipeline.svg delete mode 100644 docs/src/roadmap/README.md delete mode 100644 docs/src/roadmap/ai-integration.md delete mode 100644 docs/src/roadmap/native-plugins.md delete mode 100644 docs/src/roadmap/nickel-workflows.md create mode 100644 docs/src/security/README.md create mode 100644 docs/src/security/audit-logging.md delete mode 100644 docs/src/security/authentication-layer-guide.md create mode 100644 docs/src/security/authentication.md create mode 100644 docs/src/security/authorization.md create mode 100644 docs/src/security/certificate-management.md create mode 100644 docs/src/security/compliance.md delete mode 100644 docs/src/security/config-encryption-guide.md create mode 100644 docs/src/security/data-protection.md create mode 100644 docs/src/security/encryption.md create mode 100644 docs/src/security/incident-response.md create mode 100644 docs/src/security/kms-guide.md delete mode 100644 docs/src/security/kms-service.md create mode 100644 docs/src/security/mfa.md delete mode 100644 docs/src/security/nushell-plugins-guide.md delete mode 100644 docs/src/security/nushell-plugins-system.md delete mode 100644 docs/src/security/plugin-integration-guide.md delete mode 100644 docs/src/security/plugin-usage-guide.md delete mode 100644 docs/src/security/rustyvault-kms-guide.md delete mode 100644 docs/src/security/secrets-management-guide.md create mode 100644 docs/src/security/secrets-management.md create mode 100644 docs/src/security/secretumvault-guide.md delete mode 100644 docs/src/security/secretumvault-kms-guide.md create mode 100644 docs/src/security/secure-communication.md delete mode 100644 docs/src/security/security-system.md create mode 100644 docs/src/security/security-testing.md delete mode 100644 docs/src/security/ssh-temporal-keys-user-guide.md create mode 100644 docs/src/setup/README.md create mode 100644 docs/src/setup/configuration.md create mode 100644 docs/src/setup/initial-configuration.md create mode 100644 docs/src/setup/initial-setup.md create mode 100644 docs/src/setup/workspace-setup.md delete mode 100644 docs/src/testing/taskserv-validation-guide.md delete mode 100644 docs/src/testing/test-environment-guide.md delete mode 100644 docs/src/testing/test-environment-system.md create mode 100644 docs/src/troubleshooting/README.md create mode 100644 docs/src/troubleshooting/common-issues.md create mode 100644 docs/src/troubleshooting/debug-guide.md create mode 100644 docs/src/troubleshooting/deployment-errors.md create mode 100644 docs/src/troubleshooting/getting-help.md create mode 100644 docs/src/troubleshooting/logs-analysis.md create mode 100644 docs/src/troubleshooting/network-issues.md create mode 100644 docs/src/troubleshooting/performance-issues.md delete mode 100644 docs/src/troubleshooting/troubleshooting-guide.md delete mode 100644 docs/src/troubleshooting/troubleshooting/ctrl-c-sudo-handling.md create mode 100644 schemas/config/environments/main.ncl diff --git a/config/config.defaults.toml b/config/config.defaults.toml index d19d4c5..d9b03cd 100644 --- a/config/config.defaults.toml +++ b/config/config.defaults.toml @@ -81,6 +81,20 @@ enable_tls = false cert_path = "" key_path = "" +# Environment-Specific Configuration +# ⚠️ DEPRECATED: Environments are now defined in Nickel (ADR-003: Nickel as Source of Truth) +# Location: provisioning/schemas/config/environments/main.ncl +# The loader attempts to load from Nickel first, then falls back to this TOML section +# This section is kept for backward compatibility only - DO NOT USE for new configurations +# +# [environments] +# [environments.dev] +# debug_enabled = true +# debug_log_level = "debug" +# [environments.prod] +# debug_enabled = false +# debug_log_level = "warn" + # Configuration Notes # # 1. User Configuration Override diff --git a/core b/core index 08563bc..825d1f0 160000 --- a/core +++ b/core @@ -1 +1 @@ -Subproject commit 08563bc973423ea8ce4086c6f043ba47aac9a2f5 +Subproject commit 825d1f0e88eaa37186ca91eb2016d04fce12f807 diff --git a/docs/.markdownlint-cli2.jsonc b/docs/.markdownlint-cli2.jsonc index 829fb45..7866d7e 100644 --- a/docs/.markdownlint-cli2.jsonc +++ b/docs/.markdownlint-cli2.jsonc @@ -1,5 +1,5 @@ -// Markdownlint-cli2 Configuration for docs/ -// Product documentation - inherits from parent with MD040 disabled +// Markdownlint-cli2 Configuration +// Documentation quality enforcement aligned with CLAUDE.md guidelines // See: https://github.com/igorshubovych/markdownlint-cli2 { @@ -19,13 +19,11 @@ // Code blocks - fenced only "MD046": { "style": "fenced" }, // code-block-style - - // MD040 DISABLED FOR DOCS - // Product documentation has extensive code examples with context-dependent languages. - // Opening fence language detection is complex in large docs and would require - // intelligent parsing. Since core/ validates with proper languages, docs/ - // inherits that validated content and pre-commit hooks catch malformed closing fences. - "MD040": false, // fenced-code-language (DISABLED - pre-commit validates closing fences) + // CRITICAL: MD040 only checks for missing language on opening fence. + // It does NOT catch malformed closing fences with language specifiers (e.g., ```plaintext). + // This is a CommonMark violation that must be caught by custom pre-commit hook. + // Pre-commit hook: check-malformed-fences (provisioning/core/.pre-commit-config.yaml) + // Script: provisioning/scripts/check-malformed-fences.nu // Formatting - strict whitespace "MD009": true, // no-hard-tabs @@ -49,6 +47,7 @@ // Links and references "MD034": true, // no-bare-urls (links must be formatted) + "MD040": true, // fenced-code-language (code blocks need language) "MD042": true, // no-empty-links // HTML - allow for documentation formatting and images @@ -78,22 +77,27 @@ "MD032": false, // blanks-around-lists (flexible spacing) "MD035": false, // hr-style (consistent) "MD036": false, // no-emphasis-as-heading - "MD044": false // proper-names + "MD044": false, // proper-names + "MD060": true // table-column-style (enforce proper table formatting) }, // Documentation patterns "globs": [ - "**/*.md", - "!node_modules/**", - "!build/**" + "docs/**/*.md", + "!docs/node_modules/**", + "!docs/build/**" ], - // Ignore build artifacts and external content + // Ignore build artifacts, external content, and operational directories "ignores": [ "node_modules/**", "target/**", ".git/**", "build/**", - "dist/**" + "dist/**", + ".coder/**", + ".claude/**", + ".wrks/**", + ".vale/**" ] } diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 313fd9d..0000000 --- a/docs/README.md +++ /dev/null @@ -1,138 +0,0 @@ -# Provisioning Platform Documentation - -Complete documentation for the Provisioning Platform infrastructure automation system built with Nushell, -Nickel, and Rust. - -## 📖 Browse Documentation - -All documentation is **directly readable** as markdown files in Git/GitHub—mdBook is optional. - -- **[Table of Contents](src/SUMMARY.md)** – Complete documentation index (188+ pages) -- **[Browse src/ directory](src/)** – All markdown files organized by topic - ---- - -## 🚀 Quick Navigation - -### For Users & Operators - -- **[Getting Started](src/getting-started/)** – Installation, setup, and first deployment -- **[Operations Guide](src/operations/)** – Deployment, monitoring, orchestrator management -- **[Troubleshooting](src/troubleshooting/troubleshooting-guide.md)** – Common issues and solutions -- **[Security](src/security/)** – Authentication, encryption, secrets management - -### For Developers & Architects - -- **[Architecture Overview](src/architecture/)** – System design and integration patterns -- **[Infrastructure Guide](src/infrastructure/)** – CLI, configuration system, workspaces -- **[Development Guide](src/development/)** – Extensions, providers, taskservs, build system -- **[API Reference](src/api-reference/)** – REST API, WebSocket, SDKs, integration examples - -### For Advanced Users - -- **[Deployment Guides](src/guides/)** – Multi-provider setup, customization, infrastructure examples -- **[Integration Guides](src/integration/)** – Gitea, OCI, service mesh, secrets integration -- **[Testing](src/testing/)** – Test environment setup and validation - ---- - -## 📚 Documentation Structure - -```bash -provisioning/docs/ -├── README.md # This file – navigation hub -├── book.toml # mdBook configuration -├── src/ # Source markdown files (version-controlled) -│ ├── SUMMARY.md # Complete table of contents -│ ├── getting-started/ # Installation and setup -│ ├── architecture/ # System design and ADRs -│ ├── infrastructure/ # CLI, configuration, workspaces -│ ├── operations/ # Deployment, orchestrator, monitoring -│ ├── development/ # Extensions, providers, build system -│ ├── api-reference/ # APIs and SDKs -│ ├── security/ # Authentication, secrets, encryption -│ ├── integration/ # Third-party integrations -│ ├── guides/ # How-to guides and examples -│ ├── troubleshooting/ # Common issues -│ └── ... # 12 other sections -├── book/ # Generated HTML output (Git-ignored) -└── examples/ # Example workspace configurations -``` - -### Why `src/` subdirectory - -This is the **standard mdBook convention**: -- **Source (`src/`)**: Version-controlled markdown files, directly readable -- **Output (`book/`)**: Generated HTML/CSS/JS, Git-ignored (regenerated on build) - -This separation allows the same source files to generate multiple output formats (HTML, PDF, EPUB) without -cluttering the version-controlled repository. - ---- - -## 🔨 Building HTML with mdBook - -If you prefer a formatted HTML website with search, themes, and copy buttons, build with mdBook: - -### Prerequisites - -```bash -cargo install mdbook -``` - -### Build & Serve - -```bash -# Navigate to docs directory -cd provisioning/docs - -# Build HTML to book/ directory -mdbook build - -# Serve locally at http://localhost:3000 (with live reload) -mdbook serve -``` - -### Output - -Generated HTML is available in `provisioning/docs/book/` after building. - -**Note**: mdBook is entirely optional. The markdown files in `src/` work perfectly fine in any Git -viewer or text editor. - ---- - -## 📖 Reading Markdown Directly - -All documentation is standard GitHub Flavored Markdown. You can: - -- **GitHub/GitLab**: Click `provisioning/docs/src/` and browse directly -- **Local Git**: Clone the repo and open any `.md` file in your editor -- **Text Search**: Use `grep` or your editor's search to find topics across all markdown files -- **mdBook (optional)**: Build HTML for formatted reading with search and theming - ---- - -## 🔗 Key Reference Pages - -| Document | Purpose | -| ------------------------------------------------------------------------------ | --------------------------------- | -| [System Overview](src/architecture/system-overview.md) | High-level architecture | -| [Installation Guide](src/getting-started/installation-guide.md) | Step-by-step setup | -| [CLI Reference](src/infrastructure/cli-reference.md) | Command reference | -| [Configuration System](src/infrastructure/configuration-system.md) | Config management | -| [Security System](src/security/security-system.md) | Authentication & encryption | -| [Orchestrator](src/operations/orchestrator.md) | Service orchestration | -| [Workspace Guide](src/infrastructure/workspaces/workspace-guide.md) | Infrastructure workspaces | -| [ADRs](src/architecture/adr/) | Architecture Decision Records | - ---- - -## ❓ Questions - -- **Getting started** → Start with [Installation Guide](src/getting-started/installation-guide.md) -- **Having issues** → Check [Troubleshooting](src/troubleshooting/troubleshooting-guide.md) -- **Looking for API docs** → See [API Reference](src/api-reference/) -- **Want architecture details** → Read [Architecture Overview](src/architecture/architecture-overview.md) - -For complete navigation, see [Table of Contents](src/SUMMARY.md). diff --git a/docs/book.toml b/docs/book.toml index 64999fa..30bbee2 100644 --- a/docs/book.toml +++ b/docs/book.toml @@ -1,78 +1,48 @@ [book] -authors = ["Provisioning Platform Team"] -description = "Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust" +title = "Provisioning Platform Documentation" +authors = ["Provisioning Team"] language = "en" multilingual = false src = "src" -title = "Provisioning Platform Documentation" +description = "Enterprise-grade Infrastructure as Code platform - Complete documentation" [build] build-dir = "book" create-missing = true -[preprocessor.links] -# Enable link checking - [output.html] -# theme = "theme" # Commented out - using default mdbook theme -cname = "docs.provisioning.local" -copy-fonts = true -default-theme = "ayu" -edit-url-template = "https://github.com/provisioning/provisioning-platform/edit/main/provisioning/docs/{path}" -git-repository-icon = "fa-github" -git-repository-url = "https://github.com/provisioning/provisioning-platform" -mathjax-support = false -no-section-label = false +default-theme = "rust" preferred-dark-theme = "navy" -site-url = "/docs/" -smart-punctuation = true # Renamed from curly-quotes -# input-404 = "404.md" # Commented out - 404.md not created yet +smart-punctuation = true +mathjax-support = false +copy-fonts = true +no-section-label = false +git-repository-url = "https://github.com/your-org/provisioning" +git-repository-icon = "fa-github" +edit-url-template = "https://github.com/your-org/provisioning/edit/main/provisioning/docs/{path}" +site-url = "/provisioning/" - [output.html.print] - enable = true +[output.html.fold] +enable = true +level = 1 - [output.html.fold] - enable = true - level = 1 +[output.html.search] +enable = true +limit-results = 30 +teaser-word-count = 30 +use-boolean-and = true +boost-title = 2 +boost-hierarchy = 1 +boost-paragraph = 1 +expand = true - [output.html.playground] - copy-js = true - copyable = true - editable = false - line-numbers = true - runnable = false +[output.html.playground] +editable = true +copyable = true +copy-js = true +line-numbers = true +runnable = false - [output.html.search] - boost-hierarchy = 1 - boost-paragraph = 1 - boost-title = 2 - enable = true - expand = true - heading-split-level = 3 - limit-results = 30 - teaser-word-count = 30 - use-boolean-and = true +[preprocessor.links] - [output.html.code.highlightjs] - additional-languages = ["nushell", "toml", "yaml", "bash", "rust", "nickel"] - - [output.html.code] - hidelines = {} - - [[output.html.code.highlightjs.theme]] - dark = "ayu-dark" - light = "ayu-light" - - [output.html.redirect] - # Add redirects for moved pages if needed - -[rust] -edition = "2021" - -# Custom preprocessors for Nushell and KCL syntax highlighting -# Note: These preprocessors are not installed, commented out for now -# [preprocessor.nushell-highlighting] -# Enable custom highlighting for Nushell code blocks - -# [preprocessor.kcl-highlighting] -# Enable custom highlighting for KCL code blocks +[preprocessor.index] diff --git a/docs/book/404.html b/docs/book/404.html index e409d48..d54b4c4 100644 --- a/docs/book/404.html +++ b/docs/book/404.html @@ -1,15 +1,15 @@ - + Page not found - Provisioning Platform Documentation - + - + @@ -35,7 +35,7 @@ @@ -77,7 +77,7 @@ try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { } if (theme === null || theme === undefined) { theme = default_theme; } const html = document.documentElement; - html.classList.remove('ayu') + html.classList.remove('rust') html.classList.add(theme); html.classList.add("js"); @@ -141,7 +141,7 @@ - + @@ -190,13 +190,37 @@ + + + + + + + + diff --git a/docs/book/CNAME b/docs/book/CNAME deleted file mode 100644 index d9cc9a2..0000000 --- a/docs/book/CNAME +++ /dev/null @@ -1 +0,0 @@ -docs.provisioning.local diff --git a/docs/book/FontAwesome/fonts/fontawesome-webfont.svg b/docs/book/FontAwesome/fonts/fontawesome-webfont.svg index 52c0773..855c845 100644 --- a/docs/book/FontAwesome/fonts/fontawesome-webfont.svg +++ b/docs/book/FontAwesome/fonts/fontawesome-webfont.svg @@ -8,7 +8,7 @@ Copyright Dave Gandy 2016. All rights reserved. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/book/architecture/adr/ADR-009-security-system-complete.html b/docs/book/architecture/adr/ADR-009-security-system-complete.html deleted file mode 100644 index 014c026..0000000 --- a/docs/book/architecture/adr/ADR-009-security-system-complete.html +++ /dev/null @@ -1,780 +0,0 @@ - - - - - - ADR-009: Security System Complete - Provisioning Platform Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-

Keyboard shortcuts

-
-

Press or to navigate between chapters

-

Press S or / to search in the book

-

Press ? to show this help

-

Press Esc to hide this help

-
-
-
-
- - - - - - - - - - - - - -
- -
- - - - - - - - -
-
-

ADR-009: Complete Security System Implementation

-

Status: Implemented -Date: 2025-10-08 -Decision Makers: Architecture Team

-
-

Context

-

The Provisioning platform required a comprehensive, enterprise-grade security system covering authentication, authorization, secrets management, MFA, -compliance, and emergency access. The system needed to be production-ready, scalable, and compliant with GDPR, SOC2, and ISO 27001.

-
-

Decision

-

Implement a complete security architecture using 12 specialized components organized in 4 implementation groups.

-
-

Implementation Summary

-

Total Implementation

-
    -
  • 39,699 lines of production-ready code
  • -
  • 136 files created/modified
  • -
  • 350+ tests implemented
  • -
  • 83+ REST endpoints available
  • -
  • 111+ CLI commands ready
  • -
-
-

Architecture Components

-

Group 1: Foundation (13,485 lines)

-

1. JWT Authentication (1,626 lines)

-

Location: provisioning/platform/control-center/src/auth/

-

Features:

-
    -
  • RS256 asymmetric signing
  • -
  • Access tokens (15 min) + refresh tokens (7 d)
  • -
  • Token rotation and revocation
  • -
  • Argon2id password hashing
  • -
  • 5 user roles (Admin, Developer, Operator, Viewer, Auditor)
  • -
  • Thread-safe blacklist
  • -
-

API: 6 endpoints -CLI: 8 commands -Tests: 30+

-

2. Cedar Authorization (5,117 lines)

-

Location: provisioning/config/cedar-policies/, provisioning/platform/orchestrator/src/security/

-

Features:

-
    -
  • Cedar policy engine integration
  • -
  • 4 policy files (schema, production, development, admin)
  • -
  • Context-aware authorization (MFA, IP, time windows)
  • -
  • Hot reload without restart
  • -
  • Policy validation
  • -
-

API: 4 endpoints -CLI: 6 commands -Tests: 30+

-

3. Audit Logging (3,434 lines)

-

Location: provisioning/platform/orchestrator/src/audit/

-

Features:

-
    -
  • Structured JSON logging
  • -
  • 40+ action types
  • -
  • GDPR compliance (PII anonymization)
  • -
  • 5 export formats (JSON, CSV, Splunk, ECS, JSON Lines)
  • -
  • Query API with advanced filtering
  • -
-

API: 7 endpoints -CLI: 8 commands -Tests: 25

-

4. Config Encryption (3,308 lines)

-

Location: provisioning/core/nulib/lib_provisioning/config/encryption.nu

-

Features:

-
    -
  • SOPS integration
  • -
  • 4 KMS backends (Age, AWS KMS, Vault, Cosmian)
  • -
  • Transparent encryption/decryption
  • -
  • Memory-only decryption
  • -
  • Auto-detection
  • -
-

CLI: 10 commands -Tests: 7

-
-

Group 2: KMS Integration (9,331 lines)

-

5. KMS Service (2,483 lines)

-

Location: provisioning/platform/kms-service/

-

Features:

-
    -
  • HashiCorp Vault (Transit engine)
  • -
  • AWS KMS (Direct + envelope encryption)
  • -
  • Context-based encryption (AAD)
  • -
  • Key rotation support
  • -
  • Multi-region support
  • -
-

API: 8 endpoints -CLI: 15 commands -Tests: 20

-

6. Dynamic Secrets (4,141 lines)

-

Location: provisioning/platform/orchestrator/src/secrets/

-

Features:

-
    -
  • AWS STS temporary credentials (15 min-12 h)
  • -
  • SSH key pair generation (Ed25519)
  • -
  • UpCloud API subaccounts
  • -
  • TTL manager with auto-cleanup
  • -
  • Vault dynamic secrets integration
  • -
-

API: 7 endpoints -CLI: 10 commands -Tests: 15

-

7. SSH Temporal Keys (2,707 lines)

-

Location: provisioning/platform/orchestrator/src/ssh/

-

Features:

-
    -
  • Ed25519 key generation
  • -
  • Vault OTP (one-time passwords)
  • -
  • Vault CA (certificate authority signing)
  • -
  • Auto-deployment to authorized_keys
  • -
  • Background cleanup every 5 min
  • -
-

API: 7 endpoints -CLI: 10 commands -Tests: 31

-
-

Group 3: Security Features (8,948 lines)

-

8. MFA Implementation (3,229 lines)

-

Location: provisioning/platform/control-center/src/mfa/

-

Features:

-
    -
  • TOTP (RFC 6238, 6-digit codes, 30 s window)
  • -
  • WebAuthn/FIDO2 (YubiKey, Touch ID, Windows Hello)
  • -
  • QR code generation
  • -
  • 10 backup codes per user
  • -
  • Multiple devices per user
  • -
  • Rate limiting (5 attempts/5 min)
  • -
-

API: 13 endpoints -CLI: 15 commands -Tests: 85+

-

9. Orchestrator Auth Flow (2,540 lines)

-

Location: provisioning/platform/orchestrator/src/middleware/

-

Features:

-
    -
  • Complete middleware chain (5 layers)
  • -
  • Security context builder
  • -
  • Rate limiting (100 req/min per IP)
  • -
  • JWT authentication middleware
  • -
  • MFA verification middleware
  • -
  • Cedar authorization middleware
  • -
  • Audit logging middleware
  • -
-

Tests: 53

-

10. Control Center UI (3,179 lines)

-

Location: provisioning/platform/control-center/web/

-

Features:

-
    -
  • React/TypeScript UI
  • -
  • Login with MFA (2-step flow)
  • -
  • MFA setup (TOTP + WebAuthn wizards)
  • -
  • Device management
  • -
  • Audit log viewer with filtering
  • -
  • API token management
  • -
  • Security settings dashboard
  • -
-

Components: 12 React components -API Integration: 17 methods

-
-

Group 4: Advanced Features (7,935 lines)

-

11. Break-Glass Emergency Access (3,840 lines)

-

Location: provisioning/platform/orchestrator/src/break_glass/

-

Features:

-
    -
  • Multi-party approval (2+ approvers, different teams)
  • -
  • Emergency JWT tokens (4 h max, special claims)
  • -
  • Auto-revocation (expiration + inactivity)
  • -
  • Enhanced audit (7-year retention)
  • -
  • Real-time alerts
  • -
  • Background monitoring
  • -
-

API: 12 endpoints -CLI: 10 commands -Tests: 985 lines (unit + integration)

-

12. Compliance (4,095 lines)

-

Location: provisioning/platform/orchestrator/src/compliance/

-

Features:

-
    -
  • GDPR: Data export, deletion, rectification, portability, objection
  • -
  • SOC2: 9 Trust Service Criteria verification
  • -
  • ISO 27001: 14 Annex A control families
  • -
  • Incident Response: Complete lifecycle management
  • -
  • Data Protection: 4-level classification, encryption controls
  • -
  • Access Control: RBAC matrix with role verification
  • -
-

API: 35 endpoints -CLI: 23 commands -Tests: 11

-
-

Security Architecture Flow

-

End-to-End Request Flow

-
1. User Request
-   ↓
-2. Rate Limiting (100 req/min per IP)
-   ↓
-3. JWT Authentication (RS256, 15 min tokens)
-   ↓
-4. MFA Verification (TOTP/WebAuthn for sensitive ops)
-   ↓
-5. Cedar Authorization (context-aware policies)
-   ↓
-6. Dynamic Secrets (AWS STS, SSH keys, 1h TTL)
-   ↓
-7. Operation Execution (encrypted configs, KMS)
-   ↓
-8. Audit Logging (structured JSON, GDPR-compliant)
-   ↓
-9. Response
-
-

Emergency Access Flow

-
1. Emergency Request (reason + justification)
-   ↓
-2. Multi-Party Approval (2+ approvers, different teams)
-   ↓
-3. Session Activation (special JWT, 4h max)
-   ↓
-4. Enhanced Audit (7-year retention, immutable)
-   ↓
-5. Auto-Revocation (expiration/inactivity)
-
-
-

Technology Stack

-

Backend (Rust)

-
    -
  • axum: HTTP framework
  • -
  • jsonwebtoken: JWT handling (RS256)
  • -
  • cedar-policy: Authorization engine
  • -
  • totp-rs: TOTP implementation
  • -
  • webauthn-rs: WebAuthn/FIDO2
  • -
  • aws-sdk-kms: AWS KMS integration
  • -
  • argon2: Password hashing
  • -
  • tracing: Structured logging
  • -
-

Frontend (TypeScript/React)

-
    -
  • React 18: UI framework
  • -
  • Leptos: Rust WASM framework
  • -
  • @simplewebauthn/browser: WebAuthn client
  • -
  • qrcode.react: QR code generation
  • -
-

CLI (Nushell)

-
    -
  • Nushell 0.107: Shell and scripting
  • -
  • nu_plugin_kcl: KCL integration
  • -
-

Infrastructure

-
    -
  • HashiCorp Vault: Secrets management, KMS, SSH CA
  • -
  • AWS KMS: Key management service
  • -
  • PostgreSQL/SurrealDB: Data storage
  • -
  • SOPS: Config encryption
  • -
-
-

Security Guarantees

-

Authentication

-

✅ RS256 asymmetric signing (no shared secrets) -✅ Short-lived access tokens (15 min) -✅ Token revocation support -✅ Argon2id password hashing (memory-hard) -✅ MFA enforced for production operations

-

Authorization

-

✅ Fine-grained permissions (Cedar policies) -✅ Context-aware (MFA, IP, time windows) -✅ Hot reload policies (no downtime) -✅ Deny by default

-

Secrets Management

-

✅ No static credentials stored -✅ Time-limited secrets (1h default) -✅ Auto-revocation on expiry -✅ Encryption at rest (KMS) -✅ Memory-only decryption

-

Audit & Compliance

-

✅ Immutable audit logs -✅ GDPR-compliant (PII anonymization) -✅ SOC2 controls implemented -✅ ISO 27001 controls verified -✅ 7-year retention for break-glass

-

Emergency Access

-

✅ Multi-party approval required -✅ Time-limited sessions (4h max) -✅ Enhanced audit logging -✅ Auto-revocation -✅ Cannot be disabled

-
-

Performance Characteristics

-
- - - - - - -
ComponentLatencyThroughputMemory
JWT Auth<5 ms10,000/s~10 MB
Cedar Authz<10 ms5,000/s~50 MB
Audit Log<5 ms20,000/s~100 MB
KMS Encrypt<50 ms1,000/s~20 MB
Dynamic Secrets<100 ms500/s~50 MB
MFA Verify<50 ms2,000/s~30 MB
-
-

Total Overhead: ~10-20 ms per request -Memory Usage: ~260 MB total for all security components

-
-

Deployment Options

-

Development

-
# Start all services
-cd provisioning/platform/kms-service && cargo run &
-cd provisioning/platform/orchestrator && cargo run &
-cd provisioning/platform/control-center && cargo run &
-
-

Production

-
# Kubernetes deployment
-kubectl apply -f k8s/security-stack.yaml
-
-# Docker Compose
-docker-compose up -d kms orchestrator control-center
-
-# Systemd services
-systemctl start provisioning-kms
-systemctl start provisioning-orchestrator
-systemctl start provisioning-control-center
-
-
-

Configuration

-

Environment Variables

-
# JWT
-export JWT_ISSUER="control-center"
-export JWT_AUDIENCE="orchestrator,cli"
-export JWT_PRIVATE_KEY_PATH="/keys/private.pem"
-export JWT_PUBLIC_KEY_PATH="/keys/public.pem"
-
-# Cedar
-export CEDAR_POLICIES_PATH="/config/cedar-policies"
-export CEDAR_ENABLE_HOT_RELOAD=true
-
-# KMS
-export KMS_BACKEND="vault"
-export VAULT_ADDR="https://vault.example.com"
-export VAULT_TOKEN="..."
-
-# MFA
-export MFA_TOTP_ISSUER="Provisioning"
-export MFA_WEBAUTHN_RP_ID="provisioning.example.com"
-
-

Config Files

-
# provisioning/config/security.toml
-[jwt]
-issuer = "control-center"
-audience = ["orchestrator", "cli"]
-access_token_ttl = "15m"
-refresh_token_ttl = "7d"
-
-[cedar]
-policies_path = "config/cedar-policies"
-hot_reload = true
-reload_interval = "60s"
-
-[mfa]
-totp_issuer = "Provisioning"
-webauthn_rp_id = "provisioning.example.com"
-rate_limit = 5
-rate_limit_window = "5m"
-
-[kms]
-backend = "vault"
-vault_address = "https://vault.example.com"
-vault_mount_point = "transit"
-
-[audit]
-retention_days = 365
-retention_break_glass_days = 2555  # 7 years
-export_format = "json"
-pii_anonymization = true
-
-
-

Testing

-

Run All Tests

-
# Control Center (JWT, MFA)
-cd provisioning/platform/control-center
-cargo test
-
-# Orchestrator (Cedar, Audit, Secrets, SSH, Break-Glass, Compliance)
-cd provisioning/platform/orchestrator
-cargo test
-
-# KMS Service
-cd provisioning/platform/kms-service
-cargo test
-
-# Config Encryption (Nushell)
-nu provisioning/core/nulib/lib_provisioning/config/encryption_tests.nu
-
-

Integration Tests

-
# Full security flow
-cd provisioning/platform/orchestrator
-cargo test --test security_integration_tests
-cargo test --test break_glass_integration_tests
-
-
-

Monitoring & Alerts

-

Metrics to Monitor

-
    -
  • Authentication failures (rate, sources)
  • -
  • Authorization denials (policies, resources)
  • -
  • MFA failures (attempts, users)
  • -
  • Token revocations (rate, reasons)
  • -
  • Break-glass activations (frequency, duration)
  • -
  • Secrets generation (rate, types)
  • -
  • Audit log volume (events/sec)
  • -
-

Alerts to Configure

-
    -
  • Multiple failed auth attempts (5+ in 5 min)
  • -
  • Break-glass session created
  • -
  • Compliance report non-compliant
  • -
  • Incident severity critical/high
  • -
  • Token revocation spike
  • -
  • KMS errors
  • -
  • Audit log export failures
  • -
-
-

Maintenance

-

Daily

-
    -
  • Monitor audit logs for anomalies
  • -
  • Review failed authentication attempts
  • -
  • Check break-glass sessions (should be zero)
  • -
-

Weekly

-
    -
  • Review compliance reports
  • -
  • Check incident response status
  • -
  • Verify backup code usage
  • -
  • Review MFA device additions/removals
  • -
-

Monthly

-
    -
  • Rotate KMS keys
  • -
  • Review and update Cedar policies
  • -
  • Generate compliance reports (GDPR, SOC2, ISO)
  • -
  • Audit access control matrix
  • -
-

Quarterly

-
    -
  • Full security audit
  • -
  • Penetration testing
  • -
  • Compliance certification review
  • -
  • Update security documentation
  • -
-
-

Migration Path

-

From Existing System

-
    -
  1. -

    Phase 1: Deploy security infrastructure

    -
      -
    • KMS service
    • -
    • Orchestrator with auth middleware
    • -
    • Control Center
    • -
    -
  2. -
  3. -

    Phase 2: Migrate authentication

    -
      -
    • Enable JWT authentication
    • -
    • Migrate existing users
    • -
    • Disable old auth system
    • -
    -
  4. -
  5. -

    Phase 3: Enable MFA

    -
      -
    • Require MFA enrollment for admins
    • -
    • Gradual rollout to all users
    • -
    -
  6. -
  7. -

    Phase 4: Enable Cedar authorization

    -
      -
    • Deploy initial policies (permissive)
    • -
    • Monitor authorization decisions
    • -
    • Tighten policies incrementally
    • -
    -
  8. -
  9. -

    Phase 5: Enable advanced features

    -
      -
    • Break-glass procedures
    • -
    • Compliance reporting
    • -
    • Incident response
    • -
    -
  10. -
-
-

Future Enhancements

-

Planned (Not Implemented)

-
    -
  • Hardware Security Module (HSM) integration
  • -
  • OAuth2/OIDC federation
  • -
  • SAML SSO for enterprise
  • -
  • Risk-based authentication (IP reputation, device fingerprinting)
  • -
  • Behavioral analytics (anomaly detection)
  • -
  • Zero-Trust Network (service mesh integration)
  • -
-

Under Consideration

-
    -
  • Blockchain audit log (immutable append-only log)
  • -
  • Quantum-resistant cryptography (post-quantum algorithms)
  • -
  • Confidential computing (SGX/SEV enclaves)
  • -
  • Distributed break-glass (multi-region approval)
  • -
-
-

Consequences

-

Positive

-

Enterprise-grade security meeting GDPR, SOC2, ISO 27001 -✅ Zero static credentials (all dynamic, time-limited) -✅ Complete audit trail (immutable, GDPR-compliant) -✅ MFA-enforced for sensitive operations -✅ Emergency access with enhanced controls -✅ Fine-grained authorization (Cedar policies) -✅ Automated compliance (reports, incident response)

-

Negative

-

⚠️ Increased complexity (12 components to manage) -⚠️ Performance overhead (~10-20 ms per request) -⚠️ Memory footprint (~260 MB additional) -⚠️ Learning curve (Cedar policy language, MFA setup) -⚠️ Operational overhead (key rotation, policy updates)

-

Mitigations

-
    -
  • Comprehensive documentation (ADRs, guides, API docs)
  • -
  • CLI commands for all operations
  • -
  • Automated monitoring and alerting
  • -
  • Gradual rollout with feature flags
  • -
  • Training materials for operators
  • -
-
- -
    -
  • JWT Auth: docs/architecture/JWT_AUTH_IMPLEMENTATION.md
  • -
  • Cedar Authz: docs/architecture/CEDAR_AUTHORIZATION_IMPLEMENTATION.md
  • -
  • Audit Logging: docs/architecture/AUDIT_LOGGING_IMPLEMENTATION.md
  • -
  • MFA: docs/architecture/MFA_IMPLEMENTATION_SUMMARY.md
  • -
  • Break-Glass: docs/architecture/BREAK_GLASS_IMPLEMENTATION_SUMMARY.md
  • -
  • Compliance: docs/architecture/COMPLIANCE_IMPLEMENTATION_SUMMARY.md
  • -
  • Config Encryption: docs/user/CONFIG_ENCRYPTION_GUIDE.md
  • -
  • Dynamic Secrets: docs/user/DYNAMIC_SECRETS_QUICK_REFERENCE.md
  • -
  • SSH Keys: docs/user/SSH_TEMPORAL_KEYS_USER_GUIDE.md
  • -
-
-

Approval

-

Architecture Team: Approved -Security Team: Approved (pending penetration test) -Compliance Team: Approved (pending audit) -Engineering Team: Approved

-
-

Date: 2025-10-08 -Version: 1.0.0 -Status: Implemented and Production-Ready

- -
- - -
-
- - - -
- - - - - - - - - - - - - - - - - - -
- - diff --git a/docs/book/architecture/integration-patterns.html b/docs/book/architecture/integration-patterns.html index 3093fa5..eee60d9 100644 --- a/docs/book/architecture/integration-patterns.html +++ b/docs/book/architecture/integration-patterns.html @@ -1,5 +1,5 @@ - + @@ -8,7 +8,7 @@ - + @@ -34,7 +34,7 @@ @@ -76,7 +76,7 @@ try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { } if (theme === null || theme === undefined) { theme = default_theme; } const html = document.documentElement; - html.classList.remove('ayu') + html.classList.remove('rust') html.classList.add(theme); html.classList.add("js"); @@ -140,10 +140,10 @@ - + - + @@ -173,526 +173,61 @@

Integration Patterns

-

Overview

-

Provisioning implements sophisticated integration patterns to coordinate between its hybrid Rust/Nushell architecture, manage multi-provider -workflows, and enable extensible functionality. This document outlines the key integration patterns, their implementations, and best practices.

-

Core Integration Patterns

-

1. Hybrid Language Integration

-

Rust-to-Nushell Communication Pattern

-

Use Case: Orchestrator invoking business logic operations

-

Implementation:

-
use tokio::process::Command;
-use serde_json;
-
-pub async fn execute_nushell_workflow(
-    workflow: &str,
-    args: &[String]
-) -> Result<WorkflowResult, Error> {
-    let mut cmd = Command::new("nu");
-    cmd.arg("-c")
-       .arg(format!("use core/nulib/workflows/{}.nu *; {}", workflow, args.join(" ")));
-
-    let output = cmd.output().await?;
-    let result: WorkflowResult = serde_json::from_slice(&output.stdout)?;
-    Ok(result)
-}
-

Data Exchange Format:

-
{
-    "status": "success" | "error" | "partial",
-    "result": {
-        "operation": "server_create",
-        "resources": ["server-001", "server-002"],
-        "metadata": { ... }
-    },
-    "error": null | { "code": "ERR001", "message": "..." },
-    "context": { "workflow_id": "wf-123", "step": 2 }
-}
-
-

Nushell-to-Rust Communication Pattern

-

Use Case: Business logic submitting workflows to orchestrator

-

Implementation:

-
def submit-workflow [workflow: record] -> record {
-    let payload = $workflow | to json
-
-    http post "http://localhost:9090/workflows/submit" {
-        headers: { "Content-Type": "application/json" }
-        body: $payload
-    }
-    | from json
-}
-
-

API Contract:

-
{
-    "workflow_id": "wf-456",
-    "name": "multi_cloud_deployment",
-    "operations": [...],
-    "dependencies": { ... },
-    "configuration": { ... }
-}
-
-

2. Provider Abstraction Pattern

-

Standard Provider Interface

-

Purpose: Uniform API across different cloud providers

-

Interface Definition:

-
# Standard provider interface that all providers must implement
-export def list-servers [] -> table {
-    # Provider-specific implementation
-}
-
-export def create-server [config: record] -> record {
-    # Provider-specific implementation
-}
-
-export def delete-server [id: string] -> nothing {
-    # Provider-specific implementation
-}
-
-export def get-server [id: string] -> record {
-    # Provider-specific implementation
-}
-
-

Configuration Integration:

-
[providers.aws]
-region = "us-west-2"
-credentials_profile = "default"
-timeout = 300
-
-[providers.upcloud]
-zone = "de-fra1"
-api_endpoint = "https://api.upcloud.com"
-timeout = 180
-
-[providers.local]
-docker_socket = "/var/run/docker.sock"
-network_mode = "bridge"
-
-

Provider Discovery and Loading

-
def load-providers [] -> table {
-    let provider_dirs = glob "providers/*/nulib"
-
-    $provider_dirs
-    | each { |dir|
-        let provider_name = $dir | path basename | path dirname | path basename
-        let provider_config = get-provider-config $provider_name
-
-        {
-            name: $provider_name,
-            path: $dir,
-            config: $provider_config,
-            available: (test-provider-connectivity $provider_name)
-        }
-    }
-}
-
-

3. Configuration Resolution Pattern

-

Hierarchical Configuration Loading

-

Implementation:

-
def resolve-configuration [context: record] -> record {
-    let base_config = open config.defaults.toml
-    let user_config = if ("config.user.toml" | path exists) {
-        open config.user.toml
-    } else { {} }
-
-    let env_config = if ($env.PROVISIONING_ENV? | is-not-empty) {
-        let env_file = $"config.($env.PROVISIONING_ENV).toml"
-        if ($env_file | path exists) { open $env_file } else { {} }
-    } else { {} }
-
-    let merged_config = $base_config
-    | merge $user_config
-    | merge $env_config
-    | merge ($context.runtime_config? | default {})
-
-    interpolate-variables $merged_config
-}
-
-

Variable Interpolation Pattern

-
def interpolate-variables [config: record] -> record {
-    let interpolations = {
-        "{{paths.base}}": ($env.PWD),
-        "{{env.HOME}}": ($env.HOME),
-        "{{now.date}}": (date now | format date "%Y-%m-%d"),
-        "{{git.branch}}": (git branch --show-current | str trim)
-    }
-
-    $config
-    | to json
-    | str replace --all "{{paths.base}}" $interpolations."{{paths.base}}"
-    | str replace --all "{{env.HOME}}" $interpolations."{{env.HOME}}"
-    | str replace --all "{{now.date}}" $interpolations."{{now.date}}"
-    | str replace --all "{{git.branch}}" $interpolations."{{git.branch}}"
-    | from json
-}
-
-

4. Workflow Orchestration Patterns

-

Dependency Resolution Pattern

-

Use Case: Managing complex workflow dependencies

-

Implementation (Rust):

-
use petgraph::{Graph, Direction};
-use std::collections::HashMap;
-
-pub struct DependencyResolver {
-    graph: Graph<String, ()>,
-    node_map: HashMap<String, petgraph::graph::NodeIndex>,
-}
-
-impl DependencyResolver {
-    pub fn resolve_execution_order(&self) -> Result<Vec<String>, Error> {
-        let mut topo = petgraph::algo::toposort(&self.graph, None)
-            .map_err(|_| Error::CyclicDependency)?;
-
-        Ok(topo.into_iter()
-            .map(|idx| self.graph[idx].clone())
-            .collect())
-    }
-
-    pub fn add_dependency(&mut self, from: &str, to: &str) {
-        let from_idx = self.get_or_create_node(from);
-        let to_idx = self.get_or_create_node(to);
-        self.graph.add_edge(from_idx, to_idx, ());
-    }
-}
-

Parallel Execution Pattern

-
use tokio::task::JoinSet;
-use futures::stream::{FuturesUnordered, StreamExt};
-
-pub async fn execute_parallel_batch(
-    operations: Vec<Operation>,
-    parallelism_limit: usize
-) -> Result<Vec<OperationResult>, Error> {
-    let semaphore = tokio::sync::Semaphore::new(parallelism_limit);
-    let mut join_set = JoinSet::new();
-
-    for operation in operations {
-        let permit = semaphore.clone();
-        join_set.spawn(async move {
-            let _permit = permit.acquire().await?;
-            execute_operation(operation).await
-        });
-    }
-
-    let mut results = Vec::new();
-    while let Some(result) = join_set.join_next().await {
-        results.push(result??);
-    }
-
-    Ok(results)
-}
-

5. State Management Patterns

-

Checkpoint-Based Recovery Pattern

-

Use Case: Reliable state persistence and recovery

-

Implementation:

-
#[derive(Serialize, Deserialize)]
-pub struct WorkflowCheckpoint {
-    pub workflow_id: String,
-    pub step: usize,
-    pub completed_operations: Vec<String>,
-    pub current_state: serde_json::Value,
-    pub metadata: HashMap<String, String>,
-    pub timestamp: chrono::DateTime<chrono::Utc>,
-}
-
-pub struct CheckpointManager {
-    checkpoint_dir: PathBuf,
-}
-
-impl CheckpointManager {
-    pub fn save_checkpoint(&self, checkpoint: &WorkflowCheckpoint) -> Result<(), Error> {
-        let checkpoint_file = self.checkpoint_dir
-            .join(&checkpoint.workflow_id)
-            .with_extension("json");
-
-        let checkpoint_data = serde_json::to_string_pretty(checkpoint)?;
-        std::fs::write(checkpoint_file, checkpoint_data)?;
-        Ok(())
-    }
-
-    pub fn restore_checkpoint(&self, workflow_id: &str) -> Result<Option<WorkflowCheckpoint>, Error> {
-        let checkpoint_file = self.checkpoint_dir
-            .join(workflow_id)
-            .with_extension("json");
-
-        if checkpoint_file.exists() {
-            let checkpoint_data = std::fs::read_to_string(checkpoint_file)?;
-            let checkpoint = serde_json::from_str(&checkpoint_data)?;
-            Ok(Some(checkpoint))
-        } else {
-            Ok(None)
-        }
-    }
-}
-

Rollback Pattern

-
pub struct RollbackManager {
-    rollback_stack: Vec<RollbackAction>,
-}
-
-#[derive(Clone, Debug)]
-pub enum RollbackAction {
-    DeleteResource { provider: String, resource_id: String },
-    RestoreFile { path: PathBuf, content: String },
-    RevertConfiguration { key: String, value: serde_json::Value },
-    CustomAction { command: String, args: Vec<String> },
-}
-
-impl RollbackManager {
-    pub async fn execute_rollback(&self) -> Result<(), Error> {
-        // Execute rollback actions in reverse order
-        for action in self.rollback_stack.iter().rev() {
-            match action {
-                RollbackAction::DeleteResource { provider, resource_id } => {
-                    self.delete_resource(provider, resource_id).await?;
-                }
-                RollbackAction::RestoreFile { path, content } => {
-                    tokio::fs::write(path, content).await?;
-                }
-                // ... handle other rollback actions
-            }
-        }
-        Ok(())
-    }
-}
-

6. Event and Messaging Patterns

-

Event-Driven Architecture Pattern

-

Use Case: Decoupled communication between components

-

Event Definition:

-
#[derive(Serialize, Deserialize, Clone, Debug)]
-pub enum SystemEvent {
-    WorkflowStarted { workflow_id: String, name: String },
-    WorkflowCompleted { workflow_id: String, result: WorkflowResult },
-    WorkflowFailed { workflow_id: String, error: String },
-    ResourceCreated { provider: String, resource_type: String, resource_id: String },
-    ResourceDeleted { provider: String, resource_type: String, resource_id: String },
-    ConfigurationChanged { key: String, old_value: serde_json::Value, new_value: serde_json::Value },
-}
-

Event Bus Implementation:

-
use tokio::sync::broadcast;
-
-pub struct EventBus {
-    sender: broadcast::Sender<SystemEvent>,
-}
-
-impl EventBus {
-    pub fn new(capacity: usize) -> Self {
-        let (sender, _) = broadcast::channel(capacity);
-        Self { sender }
-    }
-
-    pub fn publish(&self, event: SystemEvent) -> Result<(), Error> {
-        self.sender.send(event)
-            .map_err(|_| Error::EventPublishFailed)?;
-        Ok(())
-    }
-
-    pub fn subscribe(&self) -> broadcast::Receiver<SystemEvent> {
-        self.sender.subscribe()
-    }
-}
-

7. Extension Integration Patterns

-

Extension Discovery and Loading

-
def discover-extensions [] -> table {
-    let extension_dirs = glob "extensions/*/extension.toml"
-
-    $extension_dirs
-    | each { |manifest_path|
-        let extension_dir = $manifest_path | path dirname
-        let manifest = open $manifest_path
-
-        {
-            name: $manifest.extension.name,
-            version: $manifest.extension.version,
-            type: $manifest.extension.type,
-            path: $extension_dir,
-            manifest: $manifest,
-            valid: (validate-extension $manifest),
-            compatible: (check-compatibility $manifest.compatibility)
-        }
-    }
-    | where valid and compatible
-}
-
-

Extension Interface Pattern

-
# Standard extension interface
-export def extension-info [] -> record {
-    {
-        name: "custom-provider",
-        version: "1.0.0",
-        type: "provider",
-        description: "Custom cloud provider integration",
-        entry_points: {
-            cli: "nulib/cli.nu",
-            provider: "nulib/provider.nu"
-        }
-    }
-}
-
-export def extension-validate [] -> bool {
-    # Validate extension configuration and dependencies
-    true
-}
-
-export def extension-activate [] -> nothing {
-    # Perform extension activation tasks
-}
-
-export def extension-deactivate [] -> nothing {
-    # Perform extension cleanup tasks
-}
-
-

8. API Design Patterns

-

REST API Standardization

-

Base API Structure:

-
use axum::{
-    extract::{Path, State},
-    response::Json,
-    routing::{get, post, delete},
-    Router,
-};
-
-pub fn create_api_router(state: AppState) -> Router {
-    Router::new()
-        .route("/health", get(health_check))
-        .route("/workflows", get(list_workflows).post(create_workflow))
-        .route("/workflows/:id", get(get_workflow).delete(delete_workflow))
-        .route("/workflows/:id/status", get(workflow_status))
-        .route("/workflows/:id/logs", get(workflow_logs))
-        .with_state(state)
-}
-

Standard Response Format:

-
{
-    "status": "success" | "error" | "pending",
-    "data": { ... },
-    "metadata": {
-        "timestamp": "2025-09-26T12:00:00Z",
-        "request_id": "req-123",
-        "version": "3.1.0"
-    },
-    "error": null | {
-        "code": "ERR001",
-        "message": "Human readable error",
-        "details": { ... }
-    }
-}
-
-

Error Handling Patterns

-

Structured Error Pattern

-
#[derive(thiserror::Error, Debug)]
-pub enum ProvisioningError {
-    #[error("Configuration error: {message}")]
-    Configuration { message: String },
-
-    #[error("Provider error [{provider}]: {message}")]
-    Provider { provider: String, message: String },
-
-    #[error("Workflow error [{workflow_id}]: {message}")]
-    Workflow { workflow_id: String, message: String },
-
-    #[error("Resource error [{resource_type}/{resource_id}]: {message}")]
-    Resource { resource_type: String, resource_id: String, message: String },
-}
-

Error Recovery Pattern

-
def with-retry [operation: closure, max_attempts: int = 3] {
-    mut attempts = 0
-    mut last_error = null
-
-    while $attempts < $max_attempts {
-        try {
-            return (do $operation)
-        } catch { |error|
-            $attempts = $attempts + 1
-            $last_error = $error
-
-            if $attempts < $max_attempts {
-                let delay = (2 ** ($attempts - 1)) * 1000  # Exponential backoff
-                sleep $"($delay)ms"
-            }
-        }
-    }
-
-    error make { msg: $"Operation failed after ($max_attempts) attempts: ($last_error)" }
-}
-
-

Performance Optimization Patterns

-

Caching Strategy Pattern

-
use std::sync::Arc;
-use tokio::sync::RwLock;
-use std::collections::HashMap;
-use chrono::{DateTime, Utc, Duration};
-
-#[derive(Clone)]
-pub struct CacheEntry<T> {
-    pub value: T,
-    pub expires_at: DateTime<Utc>,
-}
-
-pub struct Cache<T> {
-    store: Arc<RwLock<HashMap<String, CacheEntry<T>>>>,
-    default_ttl: Duration,
-}
-
-impl<T: Clone> Cache<T> {
-    pub async fn get(&self, key: &str) -> Option<T> {
-        let store = self.store.read().await;
-        if let Some(entry) = store.get(key) {
-            if entry.expires_at > Utc::now() {
-                Some(entry.value.clone())
-            } else {
-                None
-            }
-        } else {
-            None
-        }
-    }
-
-    pub async fn set(&self, key: String, value: T) {
-        let expires_at = Utc::now() + self.default_ttl;
-        let entry = CacheEntry { value, expires_at };
-
-        let mut store = self.store.write().await;
-        store.insert(key, entry);
-    }
-}
-

Streaming Pattern for Large Data

-
def process-large-dataset [source: string] -> nothing {
-    # Stream processing instead of loading entire dataset
-    open $source
-    | lines
-    | each { |line|
-        # Process line individually
-        $line | process-record
-    }
-    | save output.json
-}
-
-

Testing Integration Patterns

-

Integration Test Pattern

-
#[cfg(test)]
-mod integration_tests {
-    use super::*;
-    use tokio_test;
-
-    #[tokio::test]
-    async fn test_workflow_execution() {
-        let orchestrator = setup_test_orchestrator().await;
-        let workflow = create_test_workflow();
-
-        let result = orchestrator.execute_workflow(workflow).await;
-
-        assert!(result.is_ok());
-        assert_eq!(result.unwrap().status, WorkflowStatus::Completed);
-    }
-}
-

These integration patterns provide the foundation for the system’s sophisticated multi-component architecture, enabling reliable, scalable, and -maintainable infrastructure automation.

+

Design patterns for extending and integrating with Provisioning.

+

1. Provider Integration Pattern

+

Pattern: Add a new cloud provider to Provisioning.

+

2. Task Service Integration Pattern

+

Pattern: Add infrastructure component.

+

3. Cluster Template Pattern

+

Pattern: Create pre-configured cluster template.

+

4. Batch Workflow Pattern

+

Pattern: Create automation workflow for complex operations.

+

5. Custom Extension Pattern

+

Pattern: Create custom Nushell library.

+

6. Authorization Policy Pattern

+

Pattern: Define fine-grained access control via Cedar.

+

7. Webhook Integration

+

Pattern: Trigger Provisioning from external systems.

+

8. Monitoring Integration

+

Pattern: Export metrics and logs to monitoring systems.

+

9. CI/CD Integration

+

Pattern: Use Provisioning in automated pipelines.

+

10. MCP Tool Integration

+

Pattern: Add AI-powered tool via MCP.

+

Integration Scenarios

+

Multi-Cloud Deployment

+

Deploy across UpCloud, AWS, and Hetzner in single workflow.

+

GitOps Workflow

+

Git changes trigger infrastructure updates via webhooks.

+

Self-Service Deployment

+

Non-technical users request infrastructure via natural language.

+

Best Practices

+
    +
  1. Use type-safe Nickel schemas
  2. +
  3. Implement proper error handling
  4. +
  5. Log all operations for audit trails
  6. +
  7. Test extensions before production
  8. +
  9. Document configuration & usage
  10. +
  11. Version extensions independently
  12. +
  13. Support backward compatibility
  14. +
  15. Validate inputs & encrypt credentials
  16. +
+ +
+ + + + + + + + diff --git a/docs/book/architecture/multi-repo-strategy.html b/docs/book/architecture/multi-repo-strategy.html deleted file mode 100644 index bb69e96..0000000 --- a/docs/book/architecture/multi-repo-strategy.html +++ /dev/null @@ -1,1109 +0,0 @@ - - - - - - Multi-Repo Strategy - Provisioning Platform Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-

Keyboard shortcuts

-
-

Press or to navigate between chapters

-

Press S or / to search in the book

-

Press ? to show this help

-

Press Esc to hide this help

-
-
-
-
- - - - - - - - - - - - - -
- -
- - - - - - - - -
-
-

Multi-Repository Strategy Analysis

-

Date: 2025-10-01 -Status: Strategic Analysis -Related: Repository Distribution Analysis

-

Executive Summary

-

This document analyzes a multi-repository strategy as an alternative to the monorepo approach. After careful consideration of the provisioning -system’s architecture, a hybrid approach with 4 core repositories is recommended, avoiding submodules in favor of a cleaner package-based -dependency model.

-
-

Repository Architecture Options

-

Option A: Pure Monorepo (Original Recommendation)

-

Single repository: provisioning

-

Pros:

-
    -
  • Simplest development workflow
  • -
  • Atomic cross-component changes
  • -
  • Single version number
  • -
  • One CI/CD pipeline
  • -
-

Cons:

-
    -
  • Large repository size
  • -
  • Mixed language tooling (Rust + Nushell)
  • -
  • All-or-nothing updates
  • -
  • Unclear ownership boundaries
  • -
- -

Repositories:

-
    -
  • provisioning-core (main, contains submodules)
  • -
  • provisioning-platform (submodule)
  • -
  • provisioning-extensions (submodule)
  • -
  • provisioning-workspace (submodule)
  • -
-

Why Not Recommended:

-
    -
  • Submodule hell: complex, error-prone workflows
  • -
  • Detached HEAD issues
  • -
  • Update synchronization nightmares
  • -
  • Clone complexity for users
  • -
  • Difficult to maintain version compatibility
  • -
  • Poor developer experience
  • -
- -

Independent repositories with package-based integration:

-
    -
  • provisioning-core - Nushell libraries and Nickel schemas
  • -
  • provisioning-platform - Rust services (orchestrator, control-center, MCP)
  • -
  • provisioning-extensions - Extension marketplace/catalog
  • -
  • provisioning-workspace - Project templates and examples
  • -
  • provisioning-distribution - Release automation and packaging
  • -
-

Why Recommended:

-
    -
  • Clean separation of concerns
  • -
  • Independent versioning and release cycles
  • -
  • Language-specific tooling and workflows
  • -
  • Clear ownership boundaries
  • -
  • Package-based dependencies (no submodules)
  • -
  • Easier community contributions
  • -
-
- -

Repository 1: provisioning-core

-

Purpose: Core Nushell infrastructure automation engine

-

Contents:

-
provisioning-core/
-├── nulib/                   # Nushell libraries
-│   ├── lib_provisioning/    # Core library functions
-│   ├── servers/             # Server management
-│   ├── taskservs/           # Task service management
-│   ├── clusters/            # Cluster management
-│   └── workflows/           # Workflow orchestration
-├── cli/                     # CLI entry point
-│   └── provisioning         # Pure Nushell CLI
-├── schemas/                 # Nickel schemas
-│   ├── main.ncl
-│   ├── settings.ncl
-│   ├── server.ncl
-│   ├── cluster.ncl
-│   └── workflows.ncl
-├── config/                  # Default configurations
-│   └── config.defaults.toml
-├── templates/               # Core templates
-├── tools/                   # Build and packaging tools
-├── tests/                   # Core tests
-├── docs/                    # Core documentation
-├── LICENSE
-├── README.md
-├── CHANGELOG.md
-└── version.toml             # Core version file
-
-

Technology: Nushell, Nickel -Primary Language: Nushell -Release Frequency: Monthly (stable) -Ownership: Core team -Dependencies: None (foundation)

-

Package Output:

-
    -
  • provisioning-core-{version}.tar.gz - Installable package
  • -
  • Published to package registry
  • -
-

Installation Path:

-
/usr/local/
-├── bin/provisioning
-├── lib/provisioning/
-└── share/provisioning/
-
-
-

Repository 2: provisioning-platform

-

Purpose: High-performance Rust platform services

-

Contents:

-
provisioning-platform/
-├── orchestrator/            # Rust orchestrator
-│   ├── src/
-│   ├── tests/
-│   ├── benches/
-│   └── Cargo.toml
-├── control-center/          # Web control center (Leptos)
-│   ├── src/
-│   ├── tests/
-│   └── Cargo.toml
-├── mcp-server/              # Model Context Protocol server
-│   ├── src/
-│   ├── tests/
-│   └── Cargo.toml
-├── api-gateway/             # REST API gateway
-│   ├── src/
-│   ├── tests/
-│   └── Cargo.toml
-├── shared/                  # Shared Rust libraries
-│   ├── types/
-│   └── utils/
-├── docs/                    # Platform documentation
-├── Cargo.toml               # Workspace root
-├── Cargo.lock
-├── LICENSE
-├── README.md
-└── CHANGELOG.md
-
-

Technology: Rust, WebAssembly -Primary Language: Rust -Release Frequency: Bi-weekly (fast iteration) -Ownership: Platform team -Dependencies:

-
    -
  • provisioning-core (runtime integration, loose coupling)
  • -
-

Package Output:

-
    -
  • provisioning-platform-{version}.tar.gz - Binaries
  • -
  • Binaries for: Linux (x86_64, arm64), macOS (x86_64, arm64)
  • -
-

Installation Path:

-
/usr/local/
-├── bin/
-│   ├── provisioning-orchestrator
-│   └── provisioning-control-center
-└── share/provisioning/platform/
-
-

Integration with Core:

-
    -
  • Platform services call provisioning CLI via subprocess
  • -
  • No direct code dependencies
  • -
  • Communication via REST API and file-based queues
  • -
  • Core and Platform can be deployed independently
  • -
-
-

Repository 3: provisioning-extensions

-

Purpose: Extension marketplace and community modules

-

Contents:

-
provisioning-extensions/
-├── registry/                # Extension registry
-│   ├── index.json          # Searchable index
-│   └── catalog/            # Extension metadata
-├── providers/               # Additional cloud providers
-│   ├── azure/
-│   ├── gcp/
-│   ├── digitalocean/
-│   └── hetzner/
-├── taskservs/               # Community task services
-│   ├── databases/
-│   │   ├── mongodb/
-│   │   ├── redis/
-│   │   └── cassandra/
-│   ├── development/
-│   │   ├── gitlab/
-│   │   ├── jenkins/
-│   │   └── sonarqube/
-│   └── observability/
-│       ├── prometheus/
-│       ├── grafana/
-│       └── loki/
-├── clusters/                # Cluster templates
-│   ├── ml-platform/
-│   ├── data-pipeline/
-│   └── gaming-backend/
-├── workflows/               # Workflow templates
-├── tools/                   # Extension development tools
-├── docs/                    # Extension development guide
-├── LICENSE
-└── README.md
-
-

Technology: Nushell, Nickel -Primary Language: Nushell -Release Frequency: Continuous (per-extension) -Ownership: Community + Core team -Dependencies:

-
    -
  • provisioning-core (extends core functionality)
  • -
-

Package Output:

-
    -
  • Individual extension packages: provisioning-ext-{name}-{version}.tar.gz
  • -
  • Registry index for discovery
  • -
-

Installation:

-
# Install extension via core CLI
-provisioning extension install mongodb
-provisioning extension install azure-provider
-
-

Extension Structure: -Each extension is self-contained:

-
mongodb/
-├── manifest.toml           # Extension metadata
-├── taskserv.nu             # Implementation
-├── templates/              # Templates
-├── schemas/                # Nickel schemas
-├── tests/                  # Tests
-└── README.md
-
-
-

Repository 4: provisioning-workspace

-

Purpose: Project templates and starter kits

-

Contents:

-
provisioning-workspace/
-├── templates/               # Workspace templates
-│   ├── minimal/            # Minimal starter
-│   ├── kubernetes/         # Full K8s cluster
-│   ├── multi-cloud/        # Multi-cloud setup
-│   ├── microservices/      # Microservices platform
-│   ├── data-platform/      # Data engineering
-│   └── ml-ops/             # MLOps platform
-├── examples/               # Complete examples
-│   ├── blog-deployment/
-│   ├── e-commerce/
-│   └── saas-platform/
-├── blueprints/             # Architecture blueprints
-├── docs/                   # Template documentation
-├── tools/                  # Template scaffolding
-│   └── create-workspace.nu
-├── LICENSE
-└── README.md
-
-

Technology: Configuration files, Nickel -Primary Language: TOML, Nickel, YAML -Release Frequency: Quarterly (stable templates) -Ownership: Community + Documentation team -Dependencies:

-
    -
  • provisioning-core (templates use core)
  • -
  • provisioning-extensions (may reference extensions)
  • -
-

Package Output:

-
    -
  • provisioning-templates-{version}.tar.gz
  • -
-

Usage:

-
# Create workspace from template
-provisioning workspace init my-project --template kubernetes
-
-# Or use separate tool
-gh repo create my-project --template provisioning-workspace
-cd my-project
-provisioning workspace init
-
-
-

Repository 5: provisioning-distribution

-

Purpose: Release automation, packaging, and distribution infrastructure

-

Contents:

-
provisioning-distribution/
-├── release-automation/      # Automated release workflows
-│   ├── build-all.nu        # Build all packages
-│   ├── publish.nu          # Publish to registries
-│   └── validate.nu         # Validation suite
-├── installers/             # Installation scripts
-│   ├── install.nu          # Nushell installer
-│   ├── install.sh          # Bash installer
-│   └── install.ps1         # PowerShell installer
-├── packaging/              # Package builders
-│   ├── core/
-│   ├── platform/
-│   └── extensions/
-├── registry/               # Package registry backend
-│   ├── api/               # Registry REST API
-│   └── storage/           # Package storage
-├── ci-cd/                  # CI/CD configurations
-│   ├── github/            # GitHub Actions
-│   ├── gitlab/            # GitLab CI
-│   └── jenkins/           # Jenkins pipelines
-├── version-management/     # Cross-repo version coordination
-│   ├── versions.toml      # Version matrix
-│   └── compatibility.toml  # Compatibility matrix
-├── docs/                   # Distribution documentation
-│   ├── release-process.md
-│   └── packaging-guide.md
-├── LICENSE
-└── README.md
-
-

Technology: Nushell, Bash, CI/CD -Primary Language: Nushell, YAML -Release Frequency: As needed -Ownership: Release engineering team -Dependencies: All repositories (orchestrates releases)

-

Responsibilities:

-
    -
  • Build packages from all repositories
  • -
  • Coordinate multi-repo releases
  • -
  • Publish to package registries
  • -
  • Manage version compatibility
  • -
  • Generate release notes
  • -
  • Host package registry
  • -
-
-

Dependency and Integration Model

-

Package-Based Dependencies (Not Submodules)

-
┌─────────────────────────────────────────────────────────────┐
-│                  provisioning-distribution                   │
-│              (Release orchestration & registry)              │
-└──────────────────────────┬──────────────────────────────────┘
-                           │ publishes packages
-                           ↓
-                    ┌──────────────┐
-                    │   Registry   │
-                    └──────┬───────┘
-                           │
-        ┌──────────────────┼──────────────────┐
-        ↓                  ↓                  ↓
-┌───────────────┐  ┌──────────────┐  ┌──────────────┐
-│  provisioning │  │ provisioning │  │ provisioning │
-│     -core     │  │  -platform   │  │  -extensions │
-└───────┬───────┘  └──────┬───────┘  └──────┬───────┘
-        │                 │                  │
-        │                 │ depends on       │ extends
-        │                 └─────────┐        │
-        │                           ↓        │
-        └───────────────────────────────────→┘
-                    runtime integration
-
-

Integration Mechanisms

-

1. Core ↔ Platform Integration

-

Method: Loose coupling via CLI + REST API

-
# Platform calls Core CLI (subprocess)
-def create-server [name: string] {
-    # Orchestrator executes Core CLI
-    ^provisioning server create $name --infra production
-}
-
-# Core calls Platform API (HTTP)
-def submit-workflow [workflow: record] {
-    http post http://localhost:9090/workflows/submit $workflow
-}
-
-

Version Compatibility:

-
# platform/Cargo.toml
-[package.metadata.provisioning]
-core-version = "^3.0"  # Compatible with core 3.x
-
-

2. Core ↔ Extensions Integration

-

Method: Plugin/module system

-
# Extension manifest
-# extensions/mongodb/manifest.toml
-[extension]
-name = "mongodb"
-version = "1.0.0"
-type = "taskserv"
-core-version = "^3.0"
-
-[dependencies]
-provisioning-core = "^3.0"
-
-# Extension installation
-# Core downloads and validates extension
-provisioning extension install mongodb
-# → Downloads from registry
-# → Validates compatibility
-# → Installs to ~/.provisioning/extensions/mongodb
-
-

3. Workspace Templates

-

Method: Git templates or package templates

-
# Option 1: GitHub template repository
-gh repo create my-infra --template provisioning-workspace
-cd my-infra
-provisioning workspace init
-
-# Option 2: Template package
-provisioning workspace create my-infra --template kubernetes
-# → Downloads template package
-# → Scaffolds workspace
-# → Initializes configuration
-
-
-

Version Management Strategy

-

Semantic Versioning Per Repository

-

Each repository maintains independent semantic versioning:

-
provisioning-core:       3.2.1
-provisioning-platform:   2.5.3
-provisioning-extensions: (per-extension versioning)
-provisioning-workspace:  1.4.0
-
-

Compatibility Matrix

-

provisioning-distribution/version-management/versions.toml:

-
# Version compatibility matrix
-[compatibility]
-
-# Core versions and compatible platform versions
-[compatibility.core]
-"3.2.1" = { platform = "^2.5", extensions = "^1.0", workspace = "^1.0" }
-"3.2.0" = { platform = "^2.4", extensions = "^1.0", workspace = "^1.0" }
-"3.1.0" = { platform = "^2.3", extensions = "^0.9", workspace = "^1.0" }
-
-# Platform versions and compatible core versions
-[compatibility.platform]
-"2.5.3" = { core = "^3.2", min-core = "3.2.0" }
-"2.5.0" = { core = "^3.1", min-core = "3.1.0" }
-
-# Release bundles (tested combinations)
-[bundles]
-
-[bundles.stable-3.2]
-name = "Stable 3.2 Bundle"
-release-date = "2025-10-15"
-core = "3.2.1"
-platform = "2.5.3"
-extensions = ["mongodb@1.2.0", "redis@1.1.0", "azure@2.0.0"]
-workspace = "1.4.0"
-
-[bundles.lts-3.1]
-name = "LTS 3.1 Bundle"
-release-date = "2025-09-01"
-lts-until = "2026-09-01"
-core = "3.1.5"
-platform = "2.4.8"
-workspace = "1.3.0"
-
-

Release Coordination

-

Coordinated releases for major versions:

-
# Major release: All repos release together
-provisioning-core:     3.0.0
-provisioning-platform: 2.0.0
-provisioning-workspace: 1.0.0
-
-# Minor/patch releases: Independent
-provisioning-core:     3.1.0 (adds features, platform stays 2.0.x)
-provisioning-platform: 2.1.0 (improves orchestrator, core stays 3.1.x)
-
-
-

Development Workflow

-

Working on Single Repository

-
# Developer working on core only
-git clone https://github.com/yourorg/provisioning-core
-cd provisioning-core
-
-# Install dependencies
-just install-deps
-
-# Development
-just dev-check
-just test
-
-# Build package
-just build
-
-# Test installation locally
-just install-dev
-
-

Working Across Repositories

-
# Scenario: Adding new feature requiring core + platform changes
-
-# 1. Clone both repositories
-git clone https://github.com/yourorg/provisioning-core
-git clone https://github.com/yourorg/provisioning-platform
-
-# 2. Create feature branches
-cd provisioning-core
-git checkout -b feat/batch-workflow-v2
-
-cd ../provisioning-platform
-git checkout -b feat/batch-workflow-v2
-
-# 3. Develop with local linking
-cd provisioning-core
-just install-dev  # Installs to /usr/local/bin/provisioning
-
-cd ../provisioning-platform
-# Platform uses system provisioning CLI (local dev version)
-cargo run
-
-# 4. Test integration
-cd ../provisioning-core
-just test-integration
-
-cd ../provisioning-platform
-cargo test
-
-# 5. Create PRs in both repositories
-# PR #123 in provisioning-core
-# PR #456 in provisioning-platform (references core PR)
-
-# 6. Coordinate merge
-# Merge core PR first, cut release 3.3.0
-# Update platform dependency to core 3.3.0
-# Merge platform PR, cut release 2.6.0
-
-

Testing Cross-Repo Integration

-
# Integration tests in provisioning-distribution
-cd provisioning-distribution
-
-# Test specific version combination
-just test-integration \
-    --core 3.3.0 \
-    --platform 2.6.0
-
-# Test bundle
-just test-bundle stable-3.3
-
-
-

Distribution Strategy

-

Individual Repository Releases

-

Each repository releases independently:

-
# Core release
-cd provisioning-core
-git tag v3.2.1
-git push --tags
-# → GitHub Actions builds package
-# → Publishes to package registry
-
-# Platform release
-cd provisioning-platform
-git tag v2.5.3
-git push --tags
-# → GitHub Actions builds binaries
-# → Publishes to package registry
-
-

Bundle Releases (Coordinated)

-

Distribution repository creates tested bundles:

-
cd provisioning-distribution
-
-# Create bundle
-just create-bundle stable-3.2 \
-    --core 3.2.1 \
-    --platform 2.5.3 \
-    --workspace 1.4.0
-
-# Test bundle
-just test-bundle stable-3.2
-
-# Publish bundle
-just publish-bundle stable-3.2
-# → Creates meta-package with all components
-# → Publishes bundle to registry
-# → Updates documentation
-
-

User Installation Options

- -
# Install stable bundle (easiest)
-curl -fsSL https://get.provisioning.io | sh
-
-# Installs:
-# - provisioning-core 3.2.1
-# - provisioning-platform 2.5.3
-# - provisioning-workspace 1.4.0
-
-

Option 2: Individual Component Installation

-
# Install only core (minimal)
-curl -fsSL https://get.provisioning.io/core | sh
-
-# Add platform later
-provisioning install platform
-
-# Add extensions
-provisioning extension install mongodb
-
-

Option 3: Custom Combination

-
# Install specific versions
-provisioning install core@3.1.0
-provisioning install platform@2.4.0
-
-
-

Repository Ownership and Contribution Model

-

Core Team Ownership

-
- - - - - -
RepositoryPrimary OwnerContribution Model
provisioning-coreCore TeamStrict review, stable API
provisioning-platformPlatform TeamFast iteration, performance focus
provisioning-extensionsCommunity + CoreOpen contributions, moderated
provisioning-workspaceDocs TeamTemplate contributions welcome
provisioning-distributionRelease EngineeringCore team only
-
-

Contribution Workflow

-

For Core:

-
    -
  1. Create issue in provisioning-core
  2. -
  3. Discuss design
  4. -
  5. Submit PR with tests
  6. -
  7. Strict code review
  8. -
  9. Merge to main
  10. -
  11. Release when ready
  12. -
-

For Extensions:

-
    -
  1. Create extension in provisioning-extensions
  2. -
  3. Follow extension guidelines
  4. -
  5. Submit PR
  6. -
  7. Community review
  8. -
  9. Merge and publish to registry
  10. -
  11. Independent versioning
  12. -
-

For Platform:

-
    -
  1. Create issue in provisioning-platform
  2. -
  3. Implement with benchmarks
  4. -
  5. Submit PR
  6. -
  7. Performance review
  8. -
  9. Merge and release
  10. -
-
-

CI/CD Strategy

-

Per-Repository CI/CD

-

Core CI (provisioning-core/.github/workflows/ci.yml):

-
name: Core CI
-
-on: [push, pull_request]
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Install Nushell
-        run: cargo install nu
-      - name: Run tests
-        run: just test
-      - name: Validate Nickel schemas
-        run: just validate-nickel
-
-  package:
-    runs-on: ubuntu-latest
-    if: startsWith(github.ref, 'refs/tags/v')
-    steps:
-      - uses: actions/checkout@v3
-      - name: Build package
-        run: just build
-      - name: Publish to registry
-        run: just publish
-        env:
-          REGISTRY_TOKEN: ${{ secrets.REGISTRY_TOKEN }}
-
-

Platform CI (provisioning-platform/.github/workflows/ci.yml):

-
name: Platform CI
-
-on: [push, pull_request]
-
-jobs:
-  test:
-    strategy:
-      matrix:
-        os: [ubuntu-latest, macos-latest]
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v3
-      - name: Build
-        run: cargo build --release
-      - name: Test
-        run: cargo test --workspace
-      - name: Benchmark
-        run: cargo bench
-
-  cross-compile:
-    runs-on: ubuntu-latest
-    if: startsWith(github.ref, 'refs/tags/v')
-    steps:
-      - uses: actions/checkout@v3
-      - name: Build for Linux x86_64
-        run: cargo build --release --target x86_64-unknown-linux-gnu
-      - name: Build for Linux arm64
-        run: cargo build --release --target aarch64-unknown-linux-gnu
-      - name: Publish binaries
-        run: just publish-binaries
-
-

Integration Testing (Distribution Repo)

-

Distribution CI (provisioning-distribution/.github/workflows/integration.yml):

-
name: Integration Tests
-
-on:
-  schedule:
-    - cron: '0 0 * * *'  # Daily
-  workflow_dispatch:
-
-jobs:
-  test-bundle:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Install bundle
-        run: |
-          nu release-automation/install-bundle.nu stable-3.2
-
-      - name: Run integration tests
-        run: |
-          nu tests/integration/test-all.nu
-
-      - name: Test upgrade path
-        run: |
-          nu tests/integration/test-upgrade.nu 3.1.0 3.2.1
-
-
-

File and Directory Structure Comparison

-

Monorepo Structure

-
provisioning/                          (One repo, ~500 MB)
-├── core/                             (Nushell)
-├── platform/                         (Rust)
-├── extensions/                       (Community)
-├── workspace/                        (Templates)
-└── distribution/                     (Build)
-
-

Multi-Repo Structure

-
provisioning-core/                     (Repo 1, ~50 MB)
-├── nulib/
-├── cli/
-├── schemas/
-└── tools/
-
-provisioning-platform/                 (Repo 2, ~150 MB with target/)
-├── orchestrator/
-├── control-center/
-├── mcp-server/
-└── Cargo.toml
-
-provisioning-extensions/               (Repo 3, ~100 MB)
-├── registry/
-├── providers/
-├── taskservs/
-└── clusters/
-
-provisioning-workspace/                (Repo 4, ~20 MB)
-├── templates/
-├── examples/
-└── blueprints/
-
-provisioning-distribution/             (Repo 5, ~30 MB)
-├── release-automation/
-├── installers/
-├── packaging/
-└── registry/
-
-
-

Decision Matrix

-
- - - - - - - - - - - - -
CriterionMonorepoMulti-Repo
Development ComplexitySimpleModerate
Clone SizeLarge (~500 MB)Small (50-150 MB each)
Cross-Component ChangesEasy (atomic)Moderate (coordinated)
Independent ReleasesDifficultEasy
Language-Specific ToolingMixedClean
Community ContributionsHarder (big repo)Easier (focused repos)
Version ManagementSimple (one version)Complex (matrix)
CI/CD ComplexitySimple (one pipeline)Moderate (multiple)
Ownership ClarityUnclearClear
Extension EcosystemMonolithicModular
Build TimeLong (build all)Short (build one)
Testing IsolationDifficultEasy
-
-
- -

Why Multi-Repo Wins for This Project

-
    -
  1. -

    Clear Separation of Concerns

    -
      -
    • Nushell core vs Rust platform are different domains
    • -
    • Different teams can own different repos
    • -
    • Different release cadences make sense
    • -
    -
  2. -
  3. -

    Language-Specific Tooling

    -
      -
    • provisioning-core: Nushell-focused, simple testing
    • -
    • provisioning-platform: Rust workspace, Cargo tooling
    • -
    • No mixed tooling confusion
    • -
    -
  4. -
  5. -

    Community Contributions

    -
      -
    • Extensions repo is easier to contribute to
    • -
    • Don’t need to clone entire monorepo
    • -
    • Clearer contribution guidelines per repo
    • -
    -
  6. -
  7. -

    Independent Versioning

    -
      -
    • Core can stay stable (3.x for months)
    • -
    • Platform can iterate fast (2.x weekly)
    • -
    • Extensions have own lifecycles
    • -
    -
  8. -
  9. -

    Build Performance

    -
      -
    • Only build what changed
    • -
    • Faster CI/CD per repo
    • -
    • Parallel builds across repos
    • -
    -
  10. -
  11. -

    Extension Ecosystem

    -
      -
    • Extensions repo becomes marketplace
    • -
    • Third-party extensions can live separately
    • -
    • Registry becomes discovery mechanism
    • -
    -
  12. -
-

Implementation Strategy

-

Phase 1: Split Repositories (Week 1-2)

-
    -
  1. Create 5 new repositories
  2. -
  3. Extract code from monorepo
  4. -
  5. Set up CI/CD for each
  6. -
  7. Create initial packages
  8. -
-

Phase 2: Package Integration (Week 3)

-
    -
  1. Implement package registry
  2. -
  3. Create installers
  4. -
  5. Set up version compatibility matrix
  6. -
  7. Test cross-repo integration
  8. -
-

Phase 3: Distribution System (Week 4)

-
    -
  1. Implement bundle system
  2. -
  3. Create release automation
  4. -
  5. Set up package hosting
  6. -
  7. Document release process
  8. -
-

Phase 4: Migration (Week 5)

-
    -
  1. Migrate existing users
  2. -
  3. Update documentation
  4. -
  5. Archive monorepo
  6. -
  7. Announce new structure
  8. -
-
-

Conclusion

-

Recommendation: Multi-Repository Architecture with Package-Based Integration

-

The multi-repo approach provides:

-
    -
  • ✅ Clear separation between Nushell core and Rust platform
  • -
  • ✅ Independent release cycles for different components
  • -
  • ✅ Better community contribution experience
  • -
  • ✅ Language-specific tooling and workflows
  • -
  • ✅ Modular extension ecosystem
  • -
  • ✅ Faster builds and CI/CD
  • -
  • ✅ Clear ownership boundaries
  • -
-

Avoid: Submodules (complexity nightmare)

-

Use: Package-based dependencies with version compatibility matrix

-

This architecture scales better for your project’s growth, supports a community extension ecosystem, and provides professional-grade separation of -concerns while maintaining integration through a well-designed package system.

-
-

Next Steps

-
    -
  1. Approve multi-repo strategy
  2. -
  3. Create repository split plan
  4. -
  5. Set up GitHub organizations/teams
  6. -
  7. Implement package registry
  8. -
  9. Begin repository extraction
  10. -
-

Would you like me to create a detailed repository split implementation plan next?

- -
- - -
-
- - - -
- - - - - - - - - - - - - - - - - - -
- - diff --git a/docs/book/architecture/orchestrator-auth-integration.html b/docs/book/architecture/orchestrator-auth-integration.html deleted file mode 100644 index 51b4c98..0000000 --- a/docs/book/architecture/orchestrator-auth-integration.html +++ /dev/null @@ -1,756 +0,0 @@ - - - - - - Orchestrator Auth Integration - Provisioning Platform Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-

Keyboard shortcuts

-
-

Press or to navigate between chapters

-

Press S or / to search in the book

-

Press ? to show this help

-

Press Esc to hide this help

-
-
-
-
- - - - - - - - - - - - - -
- -
- - - - - - - - -
-
-

Orchestrator Authentication & Authorization Integration

-

Version: 1.0.0 -Date: 2025-10-08 -Status: Implemented

-

Overview

-

Complete authentication and authorization flow integration for the Provisioning Orchestrator, connecting all security components (JWT validation, MFA -verification, Cedar authorization, rate limiting, and audit logging) into a cohesive security middleware chain.

-

Architecture

-

Security Middleware Chain

-

The middleware chain is applied in this specific order to ensure proper security:

-
┌─────────────────────────────────────────────────────────────────┐
-│                    Incoming HTTP Request                        │
-└────────────────────────┬────────────────────────────────────────┘
-                         │
-                         ▼
-        ┌────────────────────────────────┐
-        │  1. Rate Limiting Middleware   │
-        │  - Per-IP request limits       │
-        │  - Sliding window              │
-        │  - Exempt IPs                  │
-        └────────────┬───────────────────┘
-                     │ (429 if exceeded)
-                     ▼
-        ┌────────────────────────────────┐
-        │  2. Authentication Middleware  │
-        │  - Extract Bearer token        │
-        │  - Validate JWT signature      │
-        │  - Check expiry, issuer, aud   │
-        │  - Check revocation            │
-        └────────────┬───────────────────┘
-                     │ (401 if invalid)
-                     ▼
-        ┌────────────────────────────────┐
-        │  3. MFA Verification           │
-        │  - Check MFA status in token   │
-        │  - Enforce for sensitive ops   │
-        │  - Production deployments      │
-        │  - All DELETE operations       │
-        └────────────┬───────────────────┘
-                     │ (403 if required but missing)
-                     ▼
-        ┌────────────────────────────────┐
-        │  4. Authorization Middleware   │
-        │  - Build Cedar request         │
-        │  - Evaluate policies           │
-        │  - Check permissions           │
-        │  - Log decision                │
-        └────────────┬───────────────────┘
-                     │ (403 if denied)
-                     ▼
-        ┌────────────────────────────────┐
-        │  5. Audit Logging Middleware   │
-        │  - Log complete request        │
-        │  - User, action, resource      │
-        │  - Authorization decision      │
-        │  - Response status             │
-        └────────────┬───────────────────┘
-                     │
-                     ▼
-        ┌────────────────────────────────┐
-        │      Protected Handler         │
-        │  - Access security context     │
-        │  - Execute business logic      │
-        └────────────────────────────────┘
-
-

Implementation Details

-

1. Security Context Builder (middleware/security_context.rs)

-

Purpose: Build complete security context from authenticated requests.

-

Key Features:

-
    -
  • Extracts JWT token claims
  • -
  • Determines MFA verification status
  • -
  • Extracts IP address (X-Forwarded-For, X-Real-IP)
  • -
  • Extracts user agent and session info
  • -
  • Provides permission checking methods
  • -
-

Lines of Code: 275

-

Example:

-
pub struct SecurityContext {
-    pub user_id: String,
-    pub token: ValidatedToken,
-    pub mfa_verified: bool,
-    pub ip_address: IpAddr,
-    pub user_agent: Option<String>,
-    pub permissions: Vec<String>,
-    pub workspace: String,
-    pub request_id: String,
-    pub session_id: Option<String>,
-}
-
-impl SecurityContext {
-    pub fn has_permission(&self, permission: &str) -> bool { ... }
-    pub fn has_any_permission(&self, permissions: &[&str]) -> bool { ... }
-    pub fn has_all_permissions(&self, permissions: &[&str]) -> bool { ... }
-}
-

2. Enhanced Authentication Middleware (middleware/auth.rs)

-

Purpose: JWT token validation with revocation checking.

-

Key Features:

-
    -
  • Bearer token extraction
  • -
  • JWT signature validation (RS256)
  • -
  • Expiry, issuer, audience checks
  • -
  • Token revocation status
  • -
  • Security context injection
  • -
-

Lines of Code: 245

-

Flow:

-
    -
  1. Extract Authorization: Bearer <token> header
  2. -
  3. Validate JWT with TokenValidator
  4. -
  5. Build SecurityContext
  6. -
  7. Inject into request extensions
  8. -
  9. Continue to next middleware or return 401
  10. -
-

Error Responses:

-
    -
  • 401 Unauthorized: Missing/invalid token, expired, revoked
  • -
  • 403 Forbidden: Insufficient permissions
  • -
-

3. MFA Verification Middleware (middleware/mfa.rs)

-

Purpose: Enforce MFA for sensitive operations.

-

Key Features:

-
    -
  • Path-based MFA requirements
  • -
  • Method-based enforcement (all DELETEs)
  • -
  • Production environment protection
  • -
  • Clear error messages
  • -
-

Lines of Code: 290

-

MFA Required For:

-
    -
  • Production deployments (/production/, /prod/)
  • -
  • All DELETE operations
  • -
  • Server operations (POST, PUT, DELETE)
  • -
  • Cluster operations (POST, PUT, DELETE)
  • -
  • Batch submissions
  • -
  • Rollback operations
  • -
  • Configuration changes (POST, PUT, DELETE)
  • -
  • Secret management
  • -
  • User/role management
  • -
-

Example:

-
fn requires_mfa(method: &str, path: &str) -> bool {
-    if path.contains("/production/") { return true; }
-    if method == "DELETE" { return true; }
-    if path.contains("/deploy") { return true; }
-    // ...
-}
-

4. Enhanced Authorization Middleware (middleware/authz.rs)

-

Purpose: Cedar policy evaluation with audit logging.

-

Key Features:

-
    -
  • Builds Cedar authorization request from HTTP request
  • -
  • Maps HTTP methods to Cedar actions (GET→Read, POST→Create, etc.)
  • -
  • Extracts resource types from paths
  • -
  • Evaluates Cedar policies with context (MFA, IP, time, workspace)
  • -
  • Logs all authorization decisions to audit log
  • -
  • Non-blocking audit logging (tokio::spawn)
  • -
-

Lines of Code: 380

-

Resource Mapping:

-
/api/v1/servers/srv-123    → Resource::Server("srv-123")
-/api/v1/taskserv/kubernetes → Resource::TaskService("kubernetes")
-/api/v1/cluster/prod        → Resource::Cluster("prod")
-/api/v1/config/settings     → Resource::Config("settings")
-

Action Mapping:

-
GET    → Action::Read
-POST   → Action::Create
-PUT    → Action::Update
-DELETE → Action::Delete
-

5. Rate Limiting Middleware (middleware/rate_limit.rs)

-

Purpose: Prevent API abuse with per-IP rate limiting.

-

Key Features:

-
    -
  • Sliding window rate limiting
  • -
  • Per-IP request tracking
  • -
  • Configurable limits and windows
  • -
  • Exempt IP support
  • -
  • Automatic cleanup of old entries
  • -
  • Statistics tracking
  • -
-

Lines of Code: 420

-

Configuration:

-
pub struct RateLimitConfig {
-    pub max_requests: u32,          // for example, 100
-    pub window_duration: Duration,  // for example, 60 seconds
-    pub exempt_ips: Vec<IpAddr>,    // for example, internal services
-    pub enabled: bool,
-}
-
-// Default: 100 requests per minute
-

Statistics:

-
pub struct RateLimitStats {
-    pub total_ips: usize,      // Number of tracked IPs
-    pub total_requests: u32,   // Total requests made
-    pub limited_ips: usize,    // IPs that hit the limit
-    pub config: RateLimitConfig,
-}
-

6. Security Integration Module (security_integration.rs)

-

Purpose: Helper module to integrate all security components.

-

Key Features:

-
    -
  • SecurityComponents struct grouping all middleware
  • -
  • SecurityConfig for configuration
  • -
  • initialize() method to set up all components
  • -
  • disabled() method for development mode
  • -
  • apply_security_middleware() helper for router setup
  • -
-

Lines of Code: 265

-

Usage Example:

-
use provisioning_orchestrator::security_integration::{
-    SecurityComponents, SecurityConfig
-};
-
-// Initialize security
-let config = SecurityConfig {
-    public_key_path: PathBuf::from("keys/public.pem"),
-    jwt_issuer: "control-center".to_string(),
-    jwt_audience: "orchestrator".to_string(),
-    cedar_policies_path: PathBuf::from("policies"),
-    auth_enabled: true,
-    authz_enabled: true,
-    mfa_enabled: true,
-    rate_limit_config: RateLimitConfig::new(100, 60),
-};
-
-let security = SecurityComponents::initialize(config, audit_logger).await?;
-
-// Apply to router
-let app = Router::new()
-    .route("/api/v1/servers", post(create_server))
-    .route("/api/v1/servers/:id", delete(delete_server));
-
-let secured_app = apply_security_middleware(app, &security);
-

Integration with AppState

-

Updated AppState Structure

-
pub struct AppState {
-    // Existing fields
-    pub task_storage: Arc<dyn TaskStorage>,
-    pub batch_coordinator: BatchCoordinator,
-    pub dependency_resolver: DependencyResolver,
-    pub state_manager: Arc<WorkflowStateManager>,
-    pub monitoring_system: Arc<MonitoringSystem>,
-    pub progress_tracker: Arc<ProgressTracker>,
-    pub rollback_system: Arc<RollbackSystem>,
-    pub test_orchestrator: Arc<TestOrchestrator>,
-    pub dns_manager: Arc<DnsManager>,
-    pub extension_manager: Arc<ExtensionManager>,
-    pub oci_manager: Arc<OciManager>,
-    pub service_orchestrator: Arc<ServiceOrchestrator>,
-    pub audit_logger: Arc<AuditLogger>,
-    pub args: Args,
-
-    // NEW: Security components
-    pub security: SecurityComponents,
-}
-

Initialization in main.rs

-
#[tokio::main]
-async fn main() -> Result<()> {
-    let args = Args::parse();
-
-    // Initialize AppState (creates audit_logger)
-    let state = Arc::new(AppState::new(args).await?);
-
-    // Initialize security components
-    let security_config = SecurityConfig {
-        public_key_path: PathBuf::from("keys/public.pem"),
-        jwt_issuer: env::var("JWT_ISSUER").unwrap_or("control-center".to_string()),
-        jwt_audience: "orchestrator".to_string(),
-        cedar_policies_path: PathBuf::from("policies"),
-        auth_enabled: env::var("AUTH_ENABLED").unwrap_or("true".to_string()) == "true",
-        authz_enabled: env::var("AUTHZ_ENABLED").unwrap_or("true".to_string()) == "true",
-        mfa_enabled: env::var("MFA_ENABLED").unwrap_or("true".to_string()) == "true",
-        rate_limit_config: RateLimitConfig::new(
-            env::var("RATE_LIMIT_MAX").unwrap_or("100".to_string()).parse().unwrap(),
-            env::var("RATE_LIMIT_WINDOW").unwrap_or("60".to_string()).parse().unwrap(),
-        ),
-    };
-
-    let security = SecurityComponents::initialize(
-        security_config,
-        state.audit_logger.clone()
-    ).await?;
-
-    // Public routes (no auth)
-    let public_routes = Router::new()
-        .route("/health", get(health_check));
-
-    // Protected routes (full security chain)
-    let protected_routes = Router::new()
-        .route("/api/v1/servers", post(create_server))
-        .route("/api/v1/servers/:id", delete(delete_server))
-        .route("/api/v1/taskserv", post(create_taskserv))
-        .route("/api/v1/cluster", post(create_cluster))
-        // ... more routes
-        ;
-
-    // Apply security middleware to protected routes
-    let secured_routes = apply_security_middleware(protected_routes, &security)
-        .with_state(state.clone());
-
-    // Combine routes
-    let app = Router::new()
-        .merge(public_routes)
-        .merge(secured_routes)
-        .layer(CorsLayer::permissive());
-
-    // Start server
-    let listener = tokio::net::TcpListener::bind("0.0.0.0:9090").await?;
-    axum::serve(listener, app).await?;
-
-    Ok(())
-}
-

Protected Endpoints

-

Endpoint Categories

-
- - - - - - - - - - - -
CategoryExample EndpointsAuth RequiredMFA RequiredCedar Policy
Health/health
Read-OnlyGET /api/v1/servers
Server MgmtPOST /api/v1/servers
Server DeleteDELETE /api/v1/servers/:id
Taskserv MgmtPOST /api/v1/taskserv
Cluster MgmtPOST /api/v1/cluster
ProductionPOST /api/v1/production/*
Batch OpsPOST /api/v1/batch/submit
RollbackPOST /api/v1/rollback
Config WritePOST /api/v1/config
SecretsGET /api/v1/secret/*
-
-

Complete Authentication Flow

-

Step-by-Step Flow

-
1. CLIENT REQUEST
-   ├─ Headers:
-   │  ├─ Authorization: Bearer <jwt_token>
-   │  ├─ X-Forwarded-For: 192.168.1.100
-   │  ├─ User-Agent: MyClient/1.0
-   │  └─ X-MFA-Verified: true
-   └─ Path: DELETE /api/v1/servers/prod-srv-01
-
-2. RATE LIMITING MIDDLEWARE
-   ├─ Extract IP: 192.168.1.100
-   ├─ Check limit: 45/100 requests in window
-   ├─ Decision: ALLOW (under limit)
-   └─ Continue →
-
-3. AUTHENTICATION MIDDLEWARE
-   ├─ Extract Bearer token
-   ├─ Validate JWT:
-   │  ├─ Signature: ✅ Valid (RS256)
-   │  ├─ Expiry: ✅ Valid until 2025-10-09 10:00:00
-   │  ├─ Issuer: ✅ control-center
-   │  ├─ Audience: ✅ orchestrator
-   │  └─ Revoked: ✅ Not revoked
-   ├─ Build SecurityContext:
-   │  ├─ user_id: "user-456"
-   │  ├─ workspace: "production"
-   │  ├─ permissions: ["read", "write", "delete"]
-   │  ├─ mfa_verified: true
-   │  └─ ip_address: 192.168.1.100
-   ├─ Decision: ALLOW (valid token)
-   └─ Continue →
-
-4. MFA VERIFICATION MIDDLEWARE
-   ├─ Check endpoint: DELETE /api/v1/servers/prod-srv-01
-   ├─ Requires MFA: ✅ YES (DELETE operation)
-   ├─ MFA status: ✅ Verified
-   ├─ Decision: ALLOW (MFA verified)
-   └─ Continue →
-
-5. AUTHORIZATION MIDDLEWARE
-   ├─ Build Cedar request:
-   │  ├─ Principal: User("user-456")
-   │  ├─ Action: Delete
-   │  ├─ Resource: Server("prod-srv-01")
-   │  └─ Context:
-   │     ├─ mfa_verified: true
-   │     ├─ ip_address: "192.168.1.100"
-   │     ├─ time: 2025-10-08T14:30:00Z
-   │     └─ workspace: "production"
-   ├─ Evaluate Cedar policies:
-   │  ├─ Policy 1: Allow if user.role == "admin" ✅
-   │  ├─ Policy 2: Allow if mfa_verified == true ✅
-   │  └─ Policy 3: Deny if not business_hours ❌
-   ├─ Decision: ALLOW (2 allow, 1 deny = allow)
-   ├─ Log to audit: Authorization GRANTED
-   └─ Continue →
-
-6. AUDIT LOGGING MIDDLEWARE
-   ├─ Record:
-   │  ├─ User: user-456 (IP: 192.168.1.100)
-   │  ├─ Action: ServerDelete
-   │  ├─ Resource: prod-srv-01
-   │  ├─ Authorization: GRANTED
-   │  ├─ MFA: Verified
-   │  └─ Timestamp: 2025-10-08T14:30:00Z
-   └─ Continue →
-
-7. PROTECTED HANDLER
-   ├─ Execute business logic
-   ├─ Delete server prod-srv-01
-   └─ Return: 200 OK
-
-8. AUDIT LOGGING (Response)
-   ├─ Update event:
-   │  ├─ Status: 200 OK
-   │  ├─ Duration: 1.234s
-   │  └─ Result: SUCCESS
-   └─ Write to audit log
-
-9. CLIENT RESPONSE
-   └─ 200 OK: Server deleted successfully
-
-

Configuration

-

Environment Variables

-
# JWT Configuration
-JWT_ISSUER=control-center
-JWT_AUDIENCE=orchestrator
-PUBLIC_KEY_PATH=/path/to/keys/public.pem
-
-# Cedar Policies
-CEDAR_POLICIES_PATH=/path/to/policies
-
-# Security Toggles
-AUTH_ENABLED=true
-AUTHZ_ENABLED=true
-MFA_ENABLED=true
-
-# Rate Limiting
-RATE_LIMIT_MAX=100
-RATE_LIMIT_WINDOW=60
-RATE_LIMIT_EXEMPT_IPS=10.0.0.1,10.0.0.2
-
-# Audit Logging
-AUDIT_ENABLED=true
-AUDIT_RETENTION_DAYS=365
-
-

Development Mode

-

For development/testing, all security can be disabled:

-
// In main.rs
-let security = if env::var("DEVELOPMENT_MODE").unwrap_or("false".to_string()) == "true" {
-    SecurityComponents::disabled(audit_logger.clone())
-} else {
-    SecurityComponents::initialize(security_config, audit_logger.clone()).await?
-};
-

Testing

-

Integration Tests

-

Location: provisioning/platform/orchestrator/tests/security_integration_tests.rs

-

Test Coverage:

-
    -
  • ✅ Rate limiting enforcement
  • -
  • ✅ Rate limit statistics
  • -
  • ✅ Exempt IP handling
  • -
  • ✅ Authentication missing token
  • -
  • ✅ MFA verification for sensitive operations
  • -
  • ✅ Cedar policy evaluation
  • -
  • ✅ Complete security flow
  • -
  • ✅ Security components initialization
  • -
  • ✅ Configuration defaults
  • -
-

Lines of Code: 340

-

Run Tests:

-
cd provisioning/platform/orchestrator
-cargo test security_integration_tests
-
-

File Summary

-
- - - - - - - - - -
FilePurposeLinesTests
middleware/security_context.rsSecurity context builder2758
middleware/auth.rsJWT authentication2455
middleware/mfa.rsMFA verification29015
middleware/authz.rsCedar authorization3804
middleware/rate_limit.rsRate limiting4208
middleware/mod.rsModule exports250
security_integration.rsIntegration helpers2652
tests/security_integration_tests.rsIntegration tests34011
Total2,24053
-
-

Benefits

-

Security

-
    -
  • ✅ Complete authentication flow with JWT validation
  • -
  • ✅ MFA enforcement for sensitive operations
  • -
  • ✅ Fine-grained authorization with Cedar policies
  • -
  • ✅ Rate limiting prevents API abuse
  • -
  • ✅ Complete audit trail for compliance
  • -
-

Architecture

-
    -
  • ✅ Modular middleware design
  • -
  • ✅ Clear separation of concerns
  • -
  • ✅ Reusable security components
  • -
  • ✅ Easy to test and maintain
  • -
  • ✅ Configuration-driven behavior
  • -
-

Operations

-
    -
  • ✅ Can enable/disable features independently
  • -
  • ✅ Development mode for testing
  • -
  • ✅ Comprehensive error messages
  • -
  • ✅ Real-time statistics and monitoring
  • -
  • ✅ Non-blocking audit logging
  • -
-

Future Enhancements

-
    -
  1. Token Refresh: Automatic token refresh before expiry
  2. -
  3. IP Whitelisting: Additional IP-based access control
  4. -
  5. Geolocation: Block requests from specific countries
  6. -
  7. Advanced Rate Limiting: Per-user, per-endpoint limits
  8. -
  9. Session Management: Track active sessions, force logout
  10. -
  11. 2FA Integration: Direct integration with TOTP/SMS providers
  12. -
  13. Policy Hot Reload: Update Cedar policies without restart
  14. -
  15. Metrics Dashboard: Real-time security metrics visualization
  16. -
- -
    -
  • Cedar Policy Language
  • -
  • JWT Token Management
  • -
  • MFA Setup Guide
  • -
  • Audit Log Format
  • -
  • Rate Limiting Best Practices
  • -
-

Version History

-
- -
VersionDateChanges
1.0.02025-10-08Initial implementation
-
-
-

Maintained By: Security Team -Review Cycle: Quarterly -Last Reviewed: 2025-10-08

- -
- - -
-
- - - -
- - - - - - - - - - - - - - - - - - -
- - diff --git a/docs/book/architecture/orchestrator-integration-model.html b/docs/book/architecture/orchestrator-integration-model.html deleted file mode 100644 index 2cb20bf..0000000 --- a/docs/book/architecture/orchestrator-integration-model.html +++ /dev/null @@ -1,917 +0,0 @@ - - - - - - Orchestrator Integration Model - Provisioning Platform Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-

Keyboard shortcuts

-
-

Press or to navigate between chapters

-

Press S or / to search in the book

-

Press ? to show this help

-

Press Esc to hide this help

-
-
-
-
- - - - - - - - - - - - - -
- -
- - - - - - - - -
-
-

Orchestrator Integration Model - Deep Dive

-

Date: 2025-10-01 -Status: Clarification Document -Related: Multi-Repo Strategy, Hybrid Orchestrator v3.0

-

Executive Summary

-

This document clarifies how the Rust orchestrator integrates with Nushell core in both monorepo and multi-repo architectures. The orchestrator is -a critical performance layer that coordinates Nushell business logic execution, solving deep call stack limitations while preserving all existing -functionality.

-
-

Current Architecture (Hybrid Orchestrator v3.0)

-

The Problem Being Solved

-

Original Issue:

-
Deep call stack in Nushell (template.nu:71)
-→ "Type not supported" errors
-→ Cannot handle complex nested workflows
-→ Performance bottlenecks with recursive calls
-
-

Solution: Rust orchestrator provides:

-
    -
  1. Task queue management (file-based, reliable)
  2. -
  3. Priority scheduling (intelligent task ordering)
  4. -
  5. Deep call stack elimination (Rust handles recursion)
  6. -
  7. Performance optimization (async/await, parallel execution)
  8. -
  9. State management (workflow checkpointing)
  10. -
-

How It Works Today (Monorepo)

-
┌─────────────────────────────────────────────────────────────┐
-│                        User                                  │
-└───────────────────────────┬─────────────────────────────────┘
-                            │ calls
-                            ↓
-                    ┌───────────────┐
-                    │ provisioning  │ (Nushell CLI)
-                    │      CLI      │
-                    └───────┬───────┘
-                            │
-        ┌───────────────────┼───────────────────┐
-        │                   │                   │
-        ↓                   ↓                   ↓
-┌───────────────┐   ┌───────────────┐   ┌──────────────┐
-│ Direct Mode   │   │Orchestrated   │   │ Workflow     │
-│ (Simple ops)  │   │ Mode          │   │ Mode         │
-└───────────────┘   └───────┬───────┘   └──────┬───────┘
-                            │                   │
-                            ↓                   ↓
-                    ┌────────────────────────────────┐
-                    │   Rust Orchestrator Service    │
-                    │   (Background daemon)           │
-                    │                                 │
-                    │ • Task Queue (file-based)      │
-                    │ • Priority Scheduler           │
-                    │ • Workflow Engine              │
-                    │ • REST API Server              │
-                    └────────┬───────────────────────┘
-                            │ spawns
-                            ↓
-                    ┌────────────────┐
-                    │ Nushell        │
-                    │ Business Logic │
-                    │                │
-                    │ • servers.nu   │
-                    │ • taskservs.nu │
-                    │ • clusters.nu  │
-                    └────────────────┘
-
-

Three Execution Modes

-

Mode 1: Direct Mode (Simple Operations)

-
# No orchestrator needed
-provisioning server list
-provisioning env
-provisioning help
-
-# Direct Nushell execution
-provisioning (CLI) → Nushell scripts → Result
-
-

Mode 2: Orchestrated Mode (Complex Operations)

-
# Uses orchestrator for coordination
-provisioning server create --orchestrated
-
-# Flow:
-provisioning CLI → Orchestrator API → Task Queue → Nushell executor
-                                                 ↓
-                                            Result back to user
-
-

Mode 3: Workflow Mode (Batch Operations)

-
# Complex workflows with dependencies
-provisioning workflow submit server-cluster.ncl
-
-# Flow:
-provisioning CLI → Orchestrator Workflow Engine → Dependency Graph
-                                                 ↓
-                                            Parallel task execution
-                                                 ↓
-                                            Nushell scripts for each task
-                                                 ↓
-                                            Checkpoint state
-
-
-

Integration Patterns

-

Pattern 1: CLI Submits Tasks to Orchestrator

-

Current Implementation:

-

Nushell CLI (core/nulib/workflows/server_create.nu):

-
# Submit server creation workflow to orchestrator
-export def server_create_workflow [
-    infra_name: string
-    --orchestrated
-] {
-    if $orchestrated {
-        # Submit task to orchestrator
-        let task = {
-            type: "server_create"
-            infra: $infra_name
-            params: { ... }
-        }
-
-        # POST to orchestrator REST API
-        http post http://localhost:9090/workflows/servers/create $task
-    } else {
-        # Direct execution (old way)
-        do-server-create $infra_name
-    }
-}
-
-

Rust Orchestrator (platform/orchestrator/src/api/workflows.rs):

-
// Receive workflow submission from Nushell CLI
-#[axum::debug_handler]
-async fn create_server_workflow(
-    State(state): State<Arc<AppState>>,
-    Json(request): Json<ServerCreateRequest>,
-) -> Result<Json<WorkflowResponse>, ApiError> {
-    // Create task
-    let task = Task {
-        id: Uuid::new_v4(),
-        task_type: TaskType::ServerCreate,
-        payload: serde_json::to_value(&request)?,
-        priority: Priority::Normal,
-        status: TaskStatus::Pending,
-        created_at: Utc::now(),
-    };
-
-    // Queue task
-    state.task_queue.enqueue(task).await?;
-
-    // Return immediately (async execution)
-    Ok(Json(WorkflowResponse {
-        workflow_id: task.id,
-        status: "queued",
-    }))
-}
-

Flow:

-
User → provisioning server create --orchestrated
-     ↓
-Nushell CLI prepares task
-     ↓
-HTTP POST to orchestrator (localhost:9090)
-     ↓
-Orchestrator queues task
-     ↓
-Returns workflow ID immediately
-     ↓
-User can monitor: provisioning workflow monitor <id>
-
-

Pattern 2: Orchestrator Executes Nushell Scripts

-

Orchestrator Task Executor (platform/orchestrator/src/executor.rs):

-
// Orchestrator spawns Nushell to execute business logic
-pub async fn execute_task(task: Task) -> Result<TaskResult> {
-    match task.task_type {
-        TaskType::ServerCreate => {
-            // Orchestrator calls Nushell script via subprocess
-            let output = Command::new("nu")
-                .arg("-c")
-                .arg(format!(
-                    "use {}/servers/create.nu; create-server '{}'",
-                    PROVISIONING_LIB_PATH,
-                    task.payload.infra_name
-                ))
-                .output()
-                .await?;
-
-            // Parse Nushell output
-            let result = parse_nushell_output(&output)?;
-
-            Ok(TaskResult {
-                task_id: task.id,
-                status: if result.success { "completed" } else { "failed" },
-                output: result.data,
-            })
-        }
-        // Other task types...
-    }
-}
-

Flow:

-
Orchestrator task queue has pending task
-     ↓
-Executor picks up task
-     ↓
-Spawns Nushell subprocess: nu -c "use servers/create.nu; create-server 'wuji'"
-     ↓
-Nushell executes business logic
-     ↓
-Returns result to orchestrator
-     ↓
-Orchestrator updates task status
-     ↓
-User monitors via: provisioning workflow status <id>
-
-

Pattern 3: Bidirectional Communication

-

Nushell Calls Orchestrator API:

-
# Nushell script checks orchestrator status during execution
-export def check-orchestrator-health [] {
-    let response = (http get http://localhost:9090/health)
-
-    if $response.status != "healthy" {
-        error make { msg: "Orchestrator not available" }
-    }
-
-    $response
-}
-
-# Nushell script reports progress to orchestrator
-export def report-progress [task_id: string, progress: int] {
-    http post http://localhost:9090/tasks/$task_id/progress {
-        progress: $progress
-        status: "in_progress"
-    }
-}
-
-

Orchestrator Monitors Nushell Execution:

-
// Orchestrator tracks Nushell subprocess
-pub async fn execute_with_monitoring(task: Task) -> Result<TaskResult> {
-    let mut child = Command::new("nu")
-        .arg("-c")
-        .arg(&task.script)
-        .stdout(Stdio::piped())
-        .stderr(Stdio::piped())
-        .spawn()?;
-
-    // Monitor stdout/stderr in real-time
-    let stdout = child.stdout.take().unwrap();
-    tokio::spawn(async move {
-        let reader = BufReader::new(stdout);
-        let mut lines = reader.lines();
-
-        while let Some(line) = lines.next_line().await.unwrap() {
-            // Parse progress updates from Nushell
-            if line.contains("PROGRESS:") {
-                update_task_progress(&line);
-            }
-        }
-    });
-
-    // Wait for completion with timeout
-    let result = tokio::time::timeout(
-        Duration::from_secs(3600),
-        child.wait()
-    ).await??;
-
-    Ok(TaskResult::from_exit_status(result))
-}
-
-

Multi-Repo Architecture Impact

-

Repository Split Doesn’t Change Integration Model

-

In Multi-Repo Setup:

-

Repository: provisioning-core

-
    -
  • Contains: Nushell business logic
  • -
  • Installs to: /usr/local/lib/provisioning/
  • -
  • Package: provisioning-core-3.2.1.tar.gz
  • -
-

Repository: provisioning-platform

-
    -
  • Contains: Rust orchestrator
  • -
  • Installs to: /usr/local/bin/provisioning-orchestrator
  • -
  • Package: provisioning-platform-2.5.3.tar.gz
  • -
-

Runtime Integration (Same as Monorepo):

-
User installs both packages:
-  provisioning-core-3.2.1     → /usr/local/lib/provisioning/
-  provisioning-platform-2.5.3 → /usr/local/bin/provisioning-orchestrator
-
-Orchestrator expects core at:  /usr/local/lib/provisioning/
-Core expects orchestrator at:  http://localhost:9090/
-
-No code dependencies, just runtime coordination!
-
-

Configuration-Based Integration

-

Core Package (provisioning-core) config:

-
# /usr/local/share/provisioning/config/config.defaults.toml
-
-[orchestrator]
-enabled = true
-endpoint = "http://localhost:9090"
-timeout = 60
-auto_start = true  # Start orchestrator if not running
-
-[execution]
-default_mode = "orchestrated"  # Use orchestrator by default
-fallback_to_direct = true      # Fall back if orchestrator down
-
-

Platform Package (provisioning-platform) config:

-
# /usr/local/share/provisioning/platform/config.toml
-
-[orchestrator]
-host = "127.0.0.1"
-port = 8080
-data_dir = "/var/lib/provisioning/orchestrator"
-
-[executor]
-nushell_binary = "nu"  # Expects nu in PATH
-provisioning_lib = "/usr/local/lib/provisioning"
-max_concurrent_tasks = 10
-task_timeout_seconds = 3600
-
-

Version Compatibility

-

Compatibility Matrix (provisioning-distribution/versions.toml):

-
[compatibility.platform."2.5.3"]
-core = "^3.2"  # Platform 2.5.3 compatible with core 3.2.x
-min-core = "3.2.0"
-api-version = "v1"
-
-[compatibility.core."3.2.1"]
-platform = "^2.5"  # Core 3.2.1 compatible with platform 2.5.x
-min-platform = "2.5.0"
-orchestrator-api = "v1"
-
-
-

Execution Flow Examples

-

Example 1: Simple Server Creation (Direct Mode)

-

No Orchestrator Needed:

-
provisioning server list
-
-# Flow:
-CLI → servers/list.nu → Query state → Return results
-(Orchestrator not involved)
-
-

Example 2: Server Creation with Orchestrator

-

Using Orchestrator:

-
provisioning server create --orchestrated --infra wuji
-
-# Detailed Flow:
-1. User executes command
-   ↓
-2. Nushell CLI (provisioning binary)
-   ↓
-3. Reads config: orchestrator.enabled = true
-   ↓
-4. Prepares task payload:
-   {
-     type: "server_create",
-     infra: "wuji",
-     params: { ... }
-   }
-   ↓
-5. HTTP POST → http://localhost:9090/workflows/servers/create
-   ↓
-6. Orchestrator receives request
-   ↓
-7. Creates task with UUID
-   ↓
-8. Enqueues to task queue (file-based: /var/lib/provisioning/queue/)
-   ↓
-9. Returns immediately: { workflow_id: "abc-123", status: "queued" }
-   ↓
-10. User sees: "Workflow submitted: abc-123"
-   ↓
-11. Orchestrator executor picks up task
-   ↓
-12. Spawns Nushell subprocess:
-    nu -c "use /usr/local/lib/provisioning/servers/create.nu; create-server 'wuji'"
-   ↓
-13. Nushell executes business logic:
-    - Reads Nickel config
-    - Calls provider API (UpCloud/AWS)
-    - Creates server
-    - Returns result
-   ↓
-14. Orchestrator captures output
-   ↓
-15. Updates task status: "completed"
-   ↓
-16. User monitors: provisioning workflow status abc-123
-    → Shows: "Server wuji created successfully"
-
-

Example 3: Batch Workflow with Dependencies

-

Complex Workflow:

-
provisioning batch submit multi-cloud-deployment.ncl
-
-# Workflow contains:
-- Create 5 servers (parallel)
-- Install Kubernetes on servers (depends on server creation)
-- Deploy applications (depends on Kubernetes)
-
-# Detailed Flow:
-1. CLI submits Nickel workflow to orchestrator
-   ↓
-2. Orchestrator parses workflow
-   ↓
-3. Builds dependency graph using petgraph (Rust)
-   ↓
-4. Topological sort determines execution order
-   ↓
-5. Creates tasks for each operation
-   ↓
-6. Executes in parallel where possible:
-
-   [Server 1] [Server 2] [Server 3] [Server 4] [Server 5]
-       ↓          ↓          ↓          ↓          ↓
-   (All execute in parallel via Nushell subprocesses)
-       ↓          ↓          ↓          ↓          ↓
-       └──────────┴──────────┴──────────┴──────────┘
-                           │
-                           ↓
-                    [All servers ready]
-                           ↓
-                  [Install Kubernetes]
-                  (Nushell subprocess)
-                           ↓
-                  [Kubernetes ready]
-                           ↓
-                  [Deploy applications]
-                  (Nushell subprocess)
-                           ↓
-                       [Complete]
-
-7. Orchestrator checkpoints state at each step
-   ↓
-8. If failure occurs, can retry from checkpoint
-   ↓
-9. User monitors real-time: provisioning batch monitor <id>
-
-
-

Why This Architecture

-

Orchestrator Benefits

-
    -
  1. -

    Eliminates Deep Call Stack Issues

    -
    
    -Without Orchestrator:
    -template.nu → calls → cluster.nu → calls → taskserv.nu → calls → provider.nu
    -(Deep nesting causes "Type not supported" errors)
    -
    -With Orchestrator:
    -Orchestrator → spawns → Nushell subprocess (flat execution)
    -(No deep nesting, fresh Nushell context for each task)
    -
    -
    -
  2. -
  3. -

    Performance Optimization

    -
    // Orchestrator executes tasks in parallel
    -let tasks = vec![task1, task2, task3, task4, task5];
    -
    -let results = futures::future::join_all(
    -    tasks.iter().map(|t| execute_task(t))
    -).await;
    -
    -// 5 Nushell subprocesses run concurrently
    -
  4. -
  5. -

    Reliable State Management

    -
  6. -
-
   Orchestrator maintains:
-   - Task queue (survives crashes)
-   - Workflow checkpoints (resume on failure)
-   - Progress tracking (real-time monitoring)
-   - Retry logic (automatic recovery)
-
-
    -
  1. Clean Separation
  2. -
-
   Orchestrator (Rust):     Performance, concurrency, state
-   Business Logic (Nushell): Providers, taskservs, workflows
-
-   Each does what it's best at!
-
-

Why NOT Pure Rust

-

Question: Why not implement everything in Rust?

-

Answer:

-
    -
  1. -

    Nushell is perfect for infrastructure automation:

    -
      -
    • Shell-like scripting for system operations
    • -
    • Built-in structured data handling
    • -
    • Easy template rendering
    • -
    • Readable business logic
    • -
    -
  2. -
  3. -

    Rapid iteration:

    -
      -
    • Change Nushell scripts without recompiling
    • -
    • Community can contribute Nushell modules
    • -
    • Template-based configuration generation
    • -
    -
  4. -
  5. -

    Best of both worlds:

    -
      -
    • Rust: Performance, type safety, concurrency
    • -
    • Nushell: Flexibility, readability, ease of use
    • -
    -
  6. -
-
-

Multi-Repo Integration Example

-

Installation

-

User installs bundle:

-
curl -fsSL https://get.provisioning.io | sh
-
-# Installs:
-1. provisioning-core-3.2.1.tar.gz
-   → /usr/local/bin/provisioning (Nushell CLI)
-   → /usr/local/lib/provisioning/ (Nushell libraries)
-   → /usr/local/share/provisioning/ (configs, templates)
-
-2. provisioning-platform-2.5.3.tar.gz
-   → /usr/local/bin/provisioning-orchestrator (Rust binary)
-   → /usr/local/share/provisioning/platform/ (platform configs)
-
-3. Sets up systemd/launchd service for orchestrator
-
-

Runtime Coordination

-

Core package expects orchestrator:

-
# core/nulib/lib_provisioning/orchestrator/client.nu
-
-# Check if orchestrator is running
-export def orchestrator-available [] {
-    let config = (load-config)
-    let endpoint = $config.orchestrator.endpoint
-
-    try {
-        let response = (http get $"($endpoint)/health")
-        $response.status == "healthy"
-    } catch {
-        false
-    }
-}
-
-# Auto-start orchestrator if needed
-export def ensure-orchestrator [] {
-    if not (orchestrator-available) {
-        if (load-config).orchestrator.auto_start {
-            print "Starting orchestrator..."
-            ^provisioning-orchestrator --daemon
-            sleep 2sec
-        }
-    }
-}
-
-

Platform package executes core scripts:

-
// platform/orchestrator/src/executor/nushell.rs
-
-pub struct NushellExecutor {
-    provisioning_lib: PathBuf,  // /usr/local/lib/provisioning
-    nu_binary: PathBuf,          // nu (from PATH)
-}
-
-impl NushellExecutor {
-    pub async fn execute_script(&self, script: &str) -> Result<Output> {
-        Command::new(&self.nu_binary)
-            .env("NU_LIB_DIRS", &self.provisioning_lib)
-            .arg("-c")
-            .arg(script)
-            .output()
-            .await
-    }
-
-    pub async fn execute_module_function(
-        &self,
-        module: &str,
-        function: &str,
-        args: &[String],
-    ) -> Result<Output> {
-        let script = format!(
-            "use {}/{}; {} {}",
-            self.provisioning_lib.display(),
-            module,
-            function,
-            args.join(" ")
-        );
-
-        self.execute_script(&script).await
-    }
-}
-
-

Configuration Examples

-

Core Package Config

-

/usr/local/share/provisioning/config/config.defaults.toml:

-
[orchestrator]
-enabled = true
-endpoint = "http://localhost:9090"
-timeout_seconds = 60
-auto_start = true
-fallback_to_direct = true
-
-[execution]
-# Modes: "direct", "orchestrated", "auto"
-default_mode = "auto"  # Auto-detect based on complexity
-
-# Operations that always use orchestrator
-force_orchestrated = [
-    "server.create",
-    "cluster.create",
-    "batch.*",
-    "workflow.*"
-]
-
-# Operations that always run direct
-force_direct = [
-    "*.list",
-    "*.show",
-    "help",
-    "version"
-]
-
-

Platform Package Config

-

/usr/local/share/provisioning/platform/config.toml:

-
[server]
-host = "127.0.0.1"
-port = 8080
-
-[storage]
-backend = "filesystem"  # or "surrealdb"
-data_dir = "/var/lib/provisioning/orchestrator"
-
-[executor]
-max_concurrent_tasks = 10
-task_timeout_seconds = 3600
-checkpoint_interval_seconds = 30
-
-[nushell]
-binary = "nu"  # Expects nu in PATH
-provisioning_lib = "/usr/local/lib/provisioning"
-env_vars = { NU_LIB_DIRS = "/usr/local/lib/provisioning" }
-
-
-

Key Takeaways

-

1. Orchestrator is Essential

-
    -
  • Solves deep call stack problems
  • -
  • Provides performance optimization
  • -
  • Enables complex workflows
  • -
  • NOT optional for production use
  • -
-

2. Integration is Loose but Coordinated

-
    -
  • No code dependencies between repos
  • -
  • Runtime integration via CLI + REST API
  • -
  • Configuration-driven coordination
  • -
  • Works in both monorepo and multi-repo
  • -
-

3. Best of Both Worlds

-
    -
  • Rust: High-performance coordination
  • -
  • Nushell: Flexible business logic
  • -
  • Clean separation of concerns
  • -
  • Each technology does what it’s best at
  • -
-

4. Multi-Repo Doesn’t Change Integration

-
    -
  • Same runtime model as monorepo
  • -
  • Package installation sets up paths
  • -
  • Configuration enables discovery
  • -
  • Versioning ensures compatibility
  • -
-
-

Conclusion

-

The confusing example in the multi-repo doc was oversimplified. The real architecture is:

-
✅ Orchestrator IS USED and IS ESSENTIAL
-✅ Platform (Rust) coordinates Core (Nushell) execution
-✅ Loose coupling via CLI + REST API (not code dependencies)
-✅ Works identically in monorepo and multi-repo
-✅ Configuration-based integration (no hardcoded paths)
-
-

The orchestrator provides:

-
    -
  • Performance layer (async, parallel execution)
  • -
  • Workflow engine (complex dependencies)
  • -
  • State management (checkpoints, recovery)
  • -
  • Task queue (reliable execution)
  • -
-

While Nushell provides:

-
    -
  • Business logic (providers, taskservs, clusters)
  • -
  • Template rendering (Jinja2 via nu_plugin_tera)
  • -
  • Configuration management (KCL integration)
  • -
  • User-facing scripting
  • -
-

Multi-repo just splits WHERE the code lives, not HOW it works together.

- -
- - -
-
- - - -
- - - - - - - - - - - - - - - - - - -
- - diff --git a/docs/book/clipboard.min.js b/docs/book/clipboard.min.js index 99561a0..02c549e 100644 --- a/docs/book/clipboard.min.js +++ b/docs/book/clipboard.min.js @@ -1,7 +1,7 @@ /*! * clipboard.js v2.0.4 * https://zenorocha.github.io/clipboard.js - * + * * Licensed MIT © Zeno Rocha */ -!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return function(n){var o={};function r(t){if(o[t])return o[t].exports;var e=o[t]={i:t,l:!1,exports:{}};return n[t].call(e.exports,e,e.exports,r),e.l=!0,e.exports}return r.m=n,r.c=o,r.d=function(t,e,n){r.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:n})},r.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return r.d(e,"a",e),e},r.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},r.p="",r(r.s=0)}([function(t,e,n){"use strict";var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},i=function(){function o(t,e){for(var n=0;n - + @@ -8,7 +8,7 @@ - + @@ -34,7 +34,7 @@ @@ -76,7 +76,7 @@ try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { } if (theme === null || theme === undefined) { theme = default_theme; } const html = document.documentElement; - html.classList.remove('ayu') + html.classList.remove('rust') html.classList.add(theme); html.classList.add("js"); @@ -140,10 +140,10 @@ - + - + @@ -172,869 +172,374 @@
-

Build System Documentation

-

This document provides comprehensive documentation for the provisioning project’s build system, including the complete Makefile reference with 40+ -targets, build tools, compilation instructions, and troubleshooting.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Quick Start
  4. -
  5. Makefile Reference
  6. -
  7. Build Tools
  8. -
  9. Cross-Platform Compilation
  10. -
  11. Dependency Management
  12. -
  13. Troubleshooting
  14. -
  15. CI/CD Integration
  16. -
-

Overview

-

The build system is a comprehensive, Makefile-based solution that orchestrates:

-
    -
  • Rust compilation: Platform binaries (orchestrator, control-center, etc.)
  • -
  • Nushell bundling: Core libraries and CLI tools
  • -
  • Nickel validation: Configuration schema validation
  • -
  • Distribution generation: Multi-platform packages
  • -
  • Release management: Automated release pipelines
  • -
  • Documentation generation: API and user documentation
  • -
-

Location: /src/tools/ -Main entry point: /src/tools/Makefile

-

Quick Start

-
# Navigate to build system
-cd src/tools
-
-# View all available targets
-make help
-
-# Complete build and package
-make all
-
-# Development build (quick)
-make dev-build
-
-# Build for specific platform
-make linux
-make macos
-make windows
-
-# Clean everything
-make clean
-
-# Check build system status
-make status
-
-

Makefile Reference

-

Build Configuration

-

Variables:

-
# Project metadata
-PROJECT_NAME := provisioning
-VERSION := $(git describe --tags --always --dirty)
-BUILD_TIME := $(date -u +"%Y-%m-%dT%H:%M:%SZ")
-
-# Build configuration
-RUST_TARGET := x86_64-unknown-linux-gnu
-BUILD_MODE := release
-PLATFORMS := linux-amd64,macos-amd64,windows-amd64
-VARIANTS := complete,minimal
-
-# Flags
-VERBOSE := false
-DRY_RUN := false
-PARALLEL := true
-
-

Build Targets

-

Primary Build Targets

-

make all - Complete build, package, and test

-
    -
  • Runs: clean build-all package-all test-dist
  • -
  • Use for: Production releases, complete validation
  • -
-

make build-all - Build all components

-
    -
  • Runs: build-platform build-core validate-nickel
  • -
  • Use for: Complete system compilation
  • -
-

make build-platform - Build platform binaries for all targets

-
make build-platform
-# Equivalent to:
-nu tools/build/compile-platform.nu \
-    --target x86_64-unknown-linux-gnu \
-    --release \
-    --output-dir dist/platform \
-    --verbose=false
-
-

make build-core - Bundle core Nushell libraries

-
make build-core
-# Equivalent to:
-nu tools/build/bundle-core.nu \
-    --output-dir dist/core \
-    --config-dir dist/config \
-    --validate \
-    --exclude-dev
-
-

make validate-nickel - Validate and compile Nickel schemas

-
make validate-nickel
-# Equivalent to:
-nu tools/build/validate-nickel.nu \
-    --output-dir dist/schemas \
-    --format-code \
-    --check-dependencies
-
-

make build-cross - Cross-compile for multiple platforms

-
    -
  • Builds for all platforms in PLATFORMS variable
  • -
  • Parallel execution support
  • -
  • Failure handling for each platform
  • -
-

Package Targets

-

make package-all - Create all distribution packages

-
    -
  • Runs: dist-generate package-binaries package-containers
  • -
-

make dist-generate - Generate complete distributions

-
make dist-generate
-# Advanced usage:
-make dist-generate PLATFORMS=linux-amd64,macos-amd64 VARIANTS=complete
-
-

make package-binaries - Package binaries for distribution

-
    -
  • Creates platform-specific archives
  • -
  • Strips debug symbols
  • -
  • Generates checksums
  • -
-

make package-containers - Build container images

-
    -
  • Multi-platform container builds
  • -
  • Optimized layers and caching
  • -
  • Version tagging
  • -
-

make create-archives - Create distribution archives

-
    -
  • TAR and ZIP formats
  • -
  • Platform-specific and universal archives
  • -
  • Compression and checksums
  • -
-

make create-installers - Create installation packages

-
    -
  • Shell script installers
  • -
  • Platform-specific packages (DEB, RPM, MSI)
  • -
  • Uninstaller creation
  • -
-

Release Targets

-

make release - Create a complete release (requires VERSION)

-
make release VERSION=2.1.0
-
-

Features:

-
    -
  • Automated changelog generation
  • -
  • Git tag creation and push
  • -
  • Artifact upload
  • -
  • Comprehensive validation
  • -
-

make release-draft - Create a draft release

-
    -
  • Create without publishing
  • -
  • Review artifacts before release
  • -
  • Manual approval workflow
  • -
-

make upload-artifacts - Upload release artifacts

-
    -
  • GitHub Releases
  • -
  • Container registries
  • -
  • Package repositories
  • -
  • Verification and validation
  • -
-

make notify-release - Send release notifications

-
    -
  • Slack notifications
  • -
  • Discord announcements
  • -
  • Email notifications
  • -
  • Custom webhook support
  • -
-

make update-registry - Update package manager registries

-
    -
  • Homebrew formula updates
  • -
  • APT repository updates
  • -
  • Custom registry support
  • -
-

Development and Testing Targets

-

make dev-build - Quick development build

-
make dev-build
-# Fast build with minimal validation
-
-

make test-build - Test build system

-
    -
  • Validates build process
  • -
  • Runs with test configuration
  • -
  • Comprehensive logging
  • -
-

make test-dist - Test generated distributions

-
    -
  • Validates distribution integrity
  • -
  • Tests installation process
  • -
  • Platform compatibility checks
  • -
-

make validate-all - Validate all components

-
    -
  • Nickel schema validation
  • -
  • Package validation
  • -
  • Configuration validation
  • -
-

make benchmark - Run build benchmarks

-
    -
  • Times build process
  • -
  • Performance analysis
  • -
  • Resource usage monitoring
  • -
-

Documentation Targets

-

make docs - Generate documentation

-
make docs
-# Generates API docs, user guides, and examples
-
-

make docs-serve - Generate and serve documentation locally

-
    -
  • Starts local HTTP server on port 8000
  • -
  • Live documentation browsing
  • -
  • Development documentation workflow
  • -
-

Utility Targets

-

make clean - Clean all build artifacts

-
make clean
-# Removes all build, distribution, and package directories
-
-

make clean-dist - Clean only distribution artifacts

-
    -
  • Preserves build cache
  • -
  • Removes distribution packages
  • -
  • Faster cleanup option
  • -
-

make install - Install the built system locally

-
    -
  • Requires distribution to be built
  • -
  • Installs to system directories
  • -
  • Creates uninstaller
  • -
-

make uninstall - Uninstall the system

-
    -
  • Removes system installation
  • -
  • Cleans configuration
  • -
  • Removes service files
  • -
-

make status - Show build system status

-
make status
-# Output:
-# Build System Status
-# ===================
-# Project: provisioning
-# Version: v2.1.0-5-g1234567
-# Git Commit: 1234567890abcdef
-# Build Time: 2025-09-25T14:30:22Z
-#
-# Directories:
-#   Source: /Users/user/repo-cnz/src
-#   Tools: /Users/user/repo-cnz/src/tools
-#   Build: /Users/user/repo-cnz/src/target
-#   Distribution: /Users/user/repo-cnz/src/dist
-#   Packages: /Users/user/repo-cnz/src/packages
-
-

make info - Show detailed system information

-
    -
  • OS and architecture details
  • -
  • Tool versions (Nushell, Rust, Docker, Git)
  • -
  • Environment information
  • -
  • Build prerequisites
  • -
-

CI/CD Integration Targets

-

make ci-build - CI build pipeline

-
    -
  • Complete validation build
  • -
  • Suitable for automated CI systems
  • -
  • Comprehensive testing
  • -
-

make ci-test - CI test pipeline

-
    -
  • Validation and testing only
  • -
  • Fast feedback for pull requests
  • -
  • Quality assurance
  • -
-

make ci-release - CI release pipeline

-
    -
  • Build and packaging for releases
  • -
  • Artifact preparation
  • -
  • Release candidate creation
  • -
-

make cd-deploy - CD deployment pipeline

-
    -
  • Complete release and deployment
  • -
  • Artifact upload and distribution
  • -
  • User notifications
  • -
-

Platform-Specific Targets

-

make linux - Build for Linux only

-
make linux
-# Sets PLATFORMS=linux-amd64
-
-

make macos - Build for macOS only

-
make macos
-# Sets PLATFORMS=macos-amd64
-
-

make windows - Build for Windows only

-
make windows
-# Sets PLATFORMS=windows-amd64
-
-

Debugging Targets

-

make debug - Build with debug information

-
make debug
-# Sets BUILD_MODE=debug VERBOSE=true
-
-

make debug-info - Show debug information

-
    -
  • Make variables and environment
  • -
  • Build system diagnostics
  • -
  • Troubleshooting information
  • -
+

Build System

+

Building, testing, and packaging the Provisioning platform and extensions with Cargo, Just, and Nickel.

Build Tools

-

Core Build Scripts

-

All build tools are implemented as Nushell scripts with comprehensive parameter validation and error handling.

-

/src/tools/build/compile-platform.nu

-

Purpose: Compiles all Rust components for distribution

-

Components Compiled:

-
    -
  • orchestratorprovisioning-orchestrator binary
  • -
  • control-centercontrol-center binary
  • -
  • control-center-ui → Web UI assets
  • -
  • mcp-server-rust → MCP integration binary
  • -
-

Usage:

-
nu compile-platform.nu [options]
+
+ + + + +
ToolPurposeVersion Required
CargoRust compilation and testingLatest stable
JustTask runner for common operationsLatest
NickelSchema validation and type checking1.15.1+
NushellScript execution and testing0.109.0+
+
+

Building Platform Services

+

Build All Services

+
# Build all Rust services in release mode
+cd provisioning/platform
+cargo build --release --workspace
 
-Options:
-  --target STRING          Target platform (default: x86_64-unknown-linux-gnu)
-  --release                Build in release mode
-  --features STRING        Comma-separated features to enable
-  --output-dir STRING      Output directory (default: dist/platform)
-  --verbose                Enable verbose logging
-  --clean                  Clean before building
+# Or using just task runner
+just build-platform
 
-

Example:

-
nu compile-platform.nu \
-    --target x86_64-apple-darwin \
-    --release \
-    --features "surrealdb,telemetry" \
-    --output-dir dist/macos \
-    --verbose
+

Binary outputs in target/release/:

+
    +
  • provisioning-orchestrator
  • +
  • provisioning-control-center
  • +
  • provisioning-vault-service
  • +
  • provisioning-installer
  • +
+

Build Individual Service

+
# Orchestrator service
+cd provisioning/platform/crates/orchestrator
+cargo build --release
+
+# Control Center service
+cd provisioning/platform/crates/control-center
+cargo build --release
+
+# Development build (faster compilation)
+cargo build
 
-

/src/tools/build/bundle-core.nu

-

Purpose: Bundles Nushell core libraries and CLI for distribution

-

Components Bundled:

-
    -
  • Nushell provisioning CLI wrapper
  • -
  • Core Nushell libraries (lib_provisioning)
  • -
  • Configuration system
  • -
  • Template system
  • -
  • Extensions and plugins
  • -
-

Usage:

-
nu bundle-core.nu [options]
+

Testing

+

Run All Tests

+
# Rust unit and integration tests
+cargo test --workspace
 
-Options:
-  --output-dir STRING      Output directory (default: dist/core)
-  --config-dir STRING      Configuration directory (default: dist/config)
-  --validate               Validate Nushell syntax
-  --compress               Compress bundle with gzip
-  --exclude-dev            Exclude development files (default: true)
-  --verbose                Enable verbose logging
+# Nushell script tests
+just test-nushell
+
+# Complete test suite
+just test-all
 
-

Validation Features:

-
    -
  • Syntax validation of all Nushell files
  • -
  • Import dependency checking
  • -
  • Function signature validation
  • -
  • Test execution (if tests present)
  • -
-

/src/tools/build/validate-nickel.nu

-

Purpose: Validates and compiles Nickel schemas

-

Validation Process:

-
    -
  1. Syntax validation of all .ncl files
  2. -
  3. Schema dependency checking
  4. -
  5. Type constraint validation
  6. -
  7. Example validation against schemas
  8. -
  9. Documentation generation
  10. -
-

Usage:

-
nu validate-nickel.nu [options]
+

Test Specific Component

+
# Test orchestrator crate
+cargo test -p provisioning-orchestrator
 
-Options:
-  --output-dir STRING      Output directory (default: dist/schemas)
-  --format-code            Format Nickel code during validation
-  --check-dependencies     Validate schema dependencies
-  --verbose                Enable verbose logging
+# Test with output visible
+cargo test -p provisioning-orchestrator -- --nocapture
+
+# Test specific function
+cargo test -p provisioning-orchestrator test_workflow_creation
+
+# Run tests matching pattern
+cargo test workflow
 
-

/src/tools/build/test-distribution.nu

-

Purpose: Tests generated distributions for correctness

-

Test Types:

-
    -
  • Basic: Installation test, CLI help, version check
  • -
  • Integration: Server creation, configuration validation
  • -
  • Complete: Full workflow testing including cluster operations
  • -
-

Usage:

-
nu test-distribution.nu [options]
+

Security Tests

+
# Run 350+ security test cases
+cargo test -p security --test '*'
 
-Options:
-  --dist-dir STRING        Distribution directory (default: dist)
-  --test-types STRING      Test types: basic,integration,complete
-  --platform STRING        Target platform for testing
-  --cleanup                Remove test files after completion
-  --verbose                Enable verbose logging
+# Specific security component
+cargo test -p security authentication
+cargo test -p security authorization
+cargo test -p security kms
 
-

/src/tools/build/clean-build.nu

-

Purpose: Intelligent build artifact cleanup

-

Cleanup Scopes:

-
    -
  • all: Complete cleanup (build, dist, packages, cache)
  • -
  • dist: Distribution artifacts only
  • -
  • cache: Build cache and temporary files
  • -
  • old: Files older than specified age
  • -
-

Usage:

-
nu clean-build.nu [options]
+

Code Quality

+

Formatting

+
# Format all Rust code
+cargo fmt --all
 
-Options:
-  --scope STRING           Cleanup scope: all,dist,cache,old
-  --age DURATION          Age threshold for 'old' scope (default: 7d)
-  --force                  Force cleanup without confirmation
-  --dry-run               Show what would be cleaned without doing it
-  --verbose               Enable verbose logging
+# Check formatting without modifying
+cargo fmt --all -- --check
+
+# Format Nickel schemas
+nickel fmt provisioning/schemas/**/*.ncl
 
-

Distribution Tools

-

/src/tools/distribution/generate-distribution.nu

-

Purpose: Main distribution generator orchestrating the complete process

-

Generation Process:

-
    -
  1. Platform binary compilation
  2. -
  3. Core library bundling
  4. -
  5. Nickel schema validation and packaging
  6. -
  7. Configuration system preparation
  8. -
  9. Documentation generation
  10. -
  11. Archive creation and compression
  12. -
  13. Installer generation
  14. -
  15. Validation and testing
  16. -
-

Usage:

-
nu generate-distribution.nu [command] [options]
+

Linting

+
# Run Clippy linter
+cargo clippy --all -- -D warnings
 
-Commands:
-  <default>                Generate complete distribution
-  quick                    Quick development distribution
-  status                   Show generation status
+# Auto-fix Clippy warnings
+cargo clippy --all --fix
 
-Options:
-  --version STRING         Version to build (default: auto-detect)
-  --platforms STRING       Comma-separated platforms
-  --variants STRING        Variants: complete,minimal
-  --output-dir STRING      Output directory (default: dist)
-  --compress               Enable compression
-  --generate-docs          Generate documentation
-  --parallel-builds        Enable parallel builds
-  --validate-output        Validate generated output
-  --verbose                Enable verbose logging
+# Clippy with all features enabled
+cargo clippy --all --all-features -- -D warnings
 
-

Advanced Examples:

-
# Complete multi-platform release
-nu generate-distribution.nu \
-    --version 2.1.0 \
-    --platforms linux-amd64,macos-amd64,windows-amd64 \
-    --variants complete,minimal \
-    --compress \
-    --generate-docs \
-    --parallel-builds \
-    --validate-output
+

Nickel Validation

+
# Type check Nickel schemas
+nickel typecheck provisioning/schemas/main.ncl
 
-# Quick development build
-nu generate-distribution.nu quick \
-    --platform linux \
-    --variant minimal
+# Evaluate schema
+nickel eval provisioning/schemas/main.ncl
 
-# Status check
-nu generate-distribution.nu status
+# Format Nickel files
+nickel fmt provisioning/schemas/**/*.ncl
 
-

/src/tools/distribution/create-installer.nu

-

Purpose: Creates platform-specific installers

-

Installer Types:

-
    -
  • shell: Shell script installer (cross-platform)
  • -
  • package: Platform packages (DEB, RPM, MSI, PKG)
  • -
  • container: Container image with provisioning
  • -
  • source: Source distribution with build instructions
  • -
-

Usage:

-
nu create-installer.nu DISTRIBUTION_DIR [options]
+

Continuous Integration

+

The platform uses automated CI workflows for quality assurance.

+

GitHub Actions Pipeline

+

Key CI jobs:

+
1. Rust Build and Test
+   - cargo build --release --workspace
+   - cargo test --workspace
+   - cargo clippy --all -- -D warnings
 
-Options:
-  --output-dir STRING      Installer output directory
-  --installer-types STRING Installer types: shell,package,container,source
-  --platforms STRING       Target platforms
-  --include-services       Include systemd/launchd service files
-  --create-uninstaller     Generate uninstaller
-  --validate-installer     Test installer functionality
-  --verbose                Enable verbose logging
-
-

Package Tools

-

/src/tools/package/package-binaries.nu

-

Purpose: Packages compiled binaries for distribution

-

Package Formats:

-
    -
  • archive: TAR.GZ and ZIP archives
  • -
  • standalone: Single binary with embedded resources
  • -
  • installer: Platform-specific installer packages
  • -
-

Features:

-
    -
  • Binary stripping for size reduction
  • -
  • Compression optimization
  • -
  • Checksum generation (SHA256, MD5)
  • -
  • Digital signing (if configured)
  • -
-

/src/tools/package/build-containers.nu

-

Purpose: Builds optimized container images

-

Container Features:

-
    -
  • Multi-stage builds for minimal image size
  • -
  • Security scanning integration
  • -
  • Multi-platform image generation
  • -
  • Layer caching optimization
  • -
  • Runtime environment configuration
  • -
-

Release Tools

-

/src/tools/release/create-release.nu

-

Purpose: Automated release creation and management

-

Release Process:

-
    -
  1. Version validation and tagging
  2. -
  3. Changelog generation from git history
  4. -
  5. Asset building and validation
  6. -
  7. Release creation (GitHub, GitLab, etc.)
  8. -
  9. Asset upload and verification
  10. -
  11. Release announcement preparation
  12. -
-

Usage:

-
nu create-release.nu [options]
+2. Nushell Validation
+   - nu --check core/cli/provisioning
+   - Run Nushell test suite
 
-Options:
-  --version STRING         Release version (required)
-  --asset-dir STRING       Directory containing release assets
-  --draft                  Create draft release
-  --prerelease             Mark as pre-release
-  --generate-changelog     Auto-generate changelog
-  --push-tag               Push git tag
-  --auto-upload            Upload assets automatically
-  --verbose                Enable verbose logging
+3. Nickel Schema Validation
+   - nickel typecheck schemas/main.ncl
+   - Validate all schema files
+
+4. Security Tests
+   - Run 350+ security test cases
+   - Vulnerability scanning
+
+5. Documentation Build
+   - mdbook build docs
+   - Markdown linting
+
+

Packaging and Distribution

+

Create Release Package

+
# Build optimized binaries
+cargo build --release --workspace
+
+# Strip debug symbols (reduce binary size)
+strip target/release/provisioning-orchestrator
+strip target/release/provisioning-control-center
+
+# Create distribution archive
+just package
+
+

Package Structure

+
provisioning-5.0.0-linux-x86_64.tar.gz
+├── bin/
+│   ├── provisioning                    # Main CLI
+│   ├── provisioning-orchestrator       # Orchestrator service
+│   ├── provisioning-control-center     # Control Center
+│   ├── provisioning-vault-service      # Vault service
+│   └── provisioning-installer          # Platform installer
+├── lib/
+│   └── nulib/                          # Nushell libraries
+├── schemas/                            # Nickel schemas
+├── config/
+│   └── config.defaults.toml            # Default configuration
+├── systemd/
+│   └── *.service                       # Systemd unit files
+└── README.md
+
+

Cross-Platform Builds

+

Supported Targets

+
# Linux x86_64 (primary platform)
+cargo build --release --target x86_64-unknown-linux-gnu
+
+# Linux ARM64 (Raspberry Pi, cloud ARM instances)
+cargo build --release --target aarch64-unknown-linux-gnu
+
+# macOS x86_64
+cargo build --release --target x86_64-apple-darwin
+
+# macOS ARM64 (Apple Silicon)
+cargo build --release --target aarch64-apple-darwin
 
-

Cross-Platform Compilation

-

Supported Platforms

-

Primary Platforms:

-
    -
  • linux-amd64 (x86_64-unknown-linux-gnu)
  • -
  • macos-amd64 (x86_64-apple-darwin)
  • -
  • windows-amd64 (x86_64-pc-windows-gnu)
  • -
-

Additional Platforms:

-
    -
  • linux-arm64 (aarch64-unknown-linux-gnu)
  • -
  • macos-arm64 (aarch64-apple-darwin)
  • -
  • freebsd-amd64 (x86_64-unknown-freebsd)
  • -

Cross-Compilation Setup

-

Install Rust Targets:

-
# Install additional targets
-rustup target add x86_64-apple-darwin
-rustup target add x86_64-pc-windows-gnu
-rustup target add aarch64-unknown-linux-gnu
-rustup target add aarch64-apple-darwin
-
-

Platform-Specific Dependencies:

-

macOS Cross-Compilation:

-
# Install osxcross toolchain
-brew install FiloSottile/musl-cross/musl-cross
-brew install mingw-w64
-
-

Windows Cross-Compilation:

-
# Install Windows dependencies
-brew install mingw-w64
-# or on Linux:
-sudo apt-get install gcc-mingw-w64
-
-

Cross-Compilation Usage

-

Single Platform:

-
# Build for macOS from Linux
-make build-platform RUST_TARGET=x86_64-apple-darwin
-
-# Build for Windows
-make build-platform RUST_TARGET=x86_64-pc-windows-gnu
-
-

Multiple Platforms:

-
# Build for all configured platforms
-make build-cross
-
-# Specify platforms
-make build-cross PLATFORMS=linux-amd64,macos-amd64,windows-amd64
-
-

Platform-Specific Targets:

-
# Quick platform builds
-make linux      # Linux AMD64
-make macos      # macOS AMD64
-make windows    # Windows AMD64
-
-

Dependency Management

-

Build Dependencies

-

Required Tools:

-
    -
  • Nushell 0.107.1+: Core shell and scripting
  • -
  • Rust 1.70+: Platform binary compilation
  • -
  • Cargo: Rust package management
  • -
  • KCL 0.11.2+: Configuration language
  • -
  • Git: Version control and tagging
  • -
-

Optional Tools:

-
    -
  • Docker: Container image building
  • -
  • Cross: Simplified cross-compilation
  • -
  • SOPS: Secrets management
  • -
  • Age: Encryption for secrets
  • -
-

Dependency Validation

-

Check Dependencies:

-
make info
-# Shows versions of all required tools
-
-# Output example:
-# Tool Versions:
-#   Nushell: 0.107.1
-#   Rust: rustc 1.75.0
-#   Docker: Docker version 24.0.6
-#   Git: git version 2.42.0
-
-

Install Missing Dependencies:

-
# Install Nushell
-cargo install nu
-
-# Install Nickel
-cargo install nickel
-
-# Install Cross (for cross-compilation)
-cargo install cross
-
-

Dependency Caching

-

Rust Dependencies:

-
    -
  • Cargo cache: ~/.cargo/registry
  • -
  • Target cache: target/ directory
  • -
  • Cross-compilation cache: ~/.cache/cross
  • -
-

Build Cache Management:

-
# Clean Cargo cache
-cargo clean
-
-# Clean cross-compilation cache
-cross clean
-
-# Clean all caches
-make clean SCOPE=cache
-
-

Troubleshooting

-

Common Build Issues

-

Rust Compilation Errors

-

Error: linker 'cc' not found

-
# Solution: Install build essentials
-sudo apt-get install build-essential  # Linux
-xcode-select --install                 # macOS
-
-

Error: target not found

-
# Solution: Install target
+
# Add target architectures
 rustup target add x86_64-unknown-linux-gnu
-
-

Error: Cross-compilation linking errors

-
# Solution: Use cross instead of cargo
-cargo install cross
-make build-platform CROSS=true
-
-

Nushell Script Errors

-

Error: command not found

-
# Solution: Ensure Nushell is in PATH
-which nu
-export PATH="$HOME/.cargo/bin:$PATH"
-
-

Error: Permission denied

-
# Solution: Make scripts executable
-chmod +x src/tools/build/*.nu
-
-

Error: Module not found

-
# Solution: Check working directory
-cd src/tools
-nu build/compile-platform.nu --help
-
-

Nickel Validation Errors

-

Error: nickel command not found

-
# Solution: Install Nickel
-cargo install nickel
-# or
-brew install nickel
-
-

Error: Schema validation failed

-
# Solution: Check Nickel syntax
-nickel fmt schemas/
-nickel check schemas/
-
-

Build Performance Issues

-

Slow Compilation

-

Optimizations:

-
# Enable parallel builds
-make build-all PARALLEL=true
+rustup target add aarch64-unknown-linux-gnu
 
-# Use faster linker
+# Install cross-compilation tool
+cargo install cross
+
+# Cross-compile with Docker
+cross build --release --target aarch64-unknown-linux-gnu
+
+

Just Task Runner

+

Common build tasks in justfile:

+
# Build all components
+build-all: build-platform build-plugins
+
+# Build platform services
+build-platform:
+    cd platform && cargo build --release --workspace
+
+# Run all tests
+test: test-rust test-nushell test-integration
+
+# Test Rust code
+test-rust:
+    cargo test --workspace
+
+# Test Nushell scripts
+test-nushell:
+    nu scripts/test/test_all.nu
+
+# Format all code
+fmt:
+    cargo fmt --all
+    nickel fmt schemas/**/*.ncl
+
+# Lint all code
+lint:
+    cargo clippy --all -- -D warnings
+    nickel typecheck schemas/main.ncl
+
+# Create release package
+package:
+    ./scripts/package.nu
+
+# Clean build artifacts
+clean:
+    cargo clean
+    rm -rf target/
+
+

Usage examples:

+
just build-all     # Build everything
+just test          # Run all tests
+just fmt           # Format code
+just lint          # Run linters
+just package       # Create distribution
+just clean         # Remove artifacts
+
+

Performance Optimization

+

Release Builds

+
# Cargo.toml
+[profile.release]
+opt-level = 3              # Maximum optimization
+lto = "fat"                # Link-time optimization
+codegen-units = 1          # Better optimization, slower compile
+strip = true               # Strip debug symbols
+panic = "abort"            # Smaller binary size
+
+

Build Time Optimization

+
# Cargo.toml
+[profile.dev]
+opt-level = 1              # Basic optimization
+incremental = true         # Faster recompilation
+
+

Speed up compilation:

+
# Use faster linker (Linux)
+sudo apt install lld
 export RUSTFLAGS="-C link-arg=-fuse-ld=lld"
 
-# Increase build jobs
-export CARGO_BUILD_JOBS=8
-
-

Cargo Configuration (~/.cargo/config.toml):

-
[build]
-jobs = 8
+# Parallel compilation
+cargo build -j 8
 
-[target.x86_64-unknown-linux-gnu]
-linker = "lld"
+# Use cargo-watch for auto-rebuild
+cargo install cargo-watch
+cargo watch -x build
 
-

Memory Issues

-

Solutions:

+

Development Workflow

+ +
# 1. Start development
+just clean
+just build-all
+
+# 2. Make changes to code
+
+# 3. Test changes quickly
+cargo check                # Fast syntax check
+cargo test <specific-test> # Test specific functionality
+
+# 4. Full validation before commit
+just fmt
+just lint
+just test
+
+# 5. Create package for testing
+just package
+
+

Hot Reload Development

+
# Auto-rebuild on file changes
+cargo watch -x build
+
+# Auto-test on changes
+cargo watch -x test
+
+# Run service with auto-reload
+cargo watch -x 'run --bin provisioning-orchestrator'
+
+

Debugging Builds

+

Debug Information

+
# Build with full debug info
+cargo build
+
+# Build with debug info in release mode
+cargo build --release --profile release-with-debug
+
+# Run with backtraces
+RUST_BACKTRACE=1 cargo run
+RUST_BACKTRACE=full cargo run
+
+

Build Verbosity

+
# Verbose build output
+cargo build -vv
+
+# Show build commands
+cargo build -vvv
+
+# Show timing information
+cargo build --timings
+
+

Dependency Tree

+
# View dependency tree
+cargo tree
+
+# Duplicate dependencies
+cargo tree --duplicates
+
+# Build graph visualization
+cargo depgraph | dot -Tpng > deps.png
+
+

Best Practices

+
    +
  • Always run just test before committing
  • +
  • Use cargo fmt and cargo clippy for code quality
  • +
  • Test on multiple platforms before release
  • +
  • Strip binaries for production distributions
  • +
  • Version binaries with semantic versioning
  • +
  • Cache dependencies in CI/CD
  • +
  • Use release profile for production builds
  • +
  • Document build requirements in README
  • +
  • Automate common tasks with Just
  • +
  • Keep build times reasonable (<5 min)
  • +
+

Troubleshooting

+

Common Build Issues

+

Compilation fails with linker error:

+
# Install build dependencies
+sudo apt install build-essential pkg-config libssl-dev
+
+

Out of memory during build:

# Reduce parallel jobs
-export CARGO_BUILD_JOBS=2
+cargo build -j 2
 
-# Use debug build for development
-make dev-build BUILD_MODE=debug
-
-# Clean up between builds
-make clean-dist
+# Use more swap space
+sudo fallocate -l 8G /swapfile
+sudo mkswap /swapfile
+sudo swapon /swapfile
 
-

Distribution Issues

-

Missing Assets

-

Validation:

-
# Test distribution
-make test-dist
+

Clippy warnings:

+
# Fix automatically where possible
+cargo clippy --all --fix
 
-# Detailed validation
-nu src/tools/package/validate-package.nu dist/
+# Allow specific lints temporarily
+#[allow(clippy::too_many_arguments)]
 
-

Size Optimization

-

Optimizations:

-
# Strip binaries
-make package-binaries STRIP=true
-
-# Enable compression
-make dist-generate COMPRESS=true
-
-# Use minimal variant
-make dist-generate VARIANTS=minimal
-
-

Debug Mode

-

Enable Debug Logging:

-
# Set environment
-export PROVISIONING_DEBUG=true
-export RUST_LOG=debug
-
-# Run with debug
-make debug
-
-# Verbose make output
-make build-all VERBOSE=true
-
-

Debug Information:

-
# Show debug information
-make debug-info
-
-# Build system status
-make status
-
-# Tool information
-make info
-
-

CI/CD Integration

-

GitHub Actions

-

Example Workflow (.github/workflows/build.yml):

-
name: Build and Test
-on: [push, pull_request]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Setup Nushell
-        uses: hustcer/setup-nu@v3.5
-
-      - name: Setup Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-
-      - name: CI Build
-        run: |
-          cd src/tools
-          make ci-build
-
-      - name: Upload Artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: build-artifacts
-          path: src/dist/
-
-

Release Automation

-

Release Workflow:

-
name: Release
-on:
-  push:
-    tags: ['v*']
-
-jobs:
-  release:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Build Release
-        run: |
-          cd src/tools
-          make ci-release VERSION=${{ github.ref_name }}
-
-      - name: Create Release
-        run: |
-          cd src/tools
-          make release VERSION=${{ github.ref_name }}
-
-

Local CI Testing

-

Test CI Pipeline Locally:

-
# Run CI build pipeline
-make ci-build
-
-# Run CI test pipeline
-make ci-test
-
-# Full CI/CD pipeline
-make ci-release
-
-

This build system provides a comprehensive, maintainable foundation for the provisioning project’s development lifecycle, from local development to -production releases.

+ +
    +
  • Testing - Testing strategies and procedures
  • +
  • Contributing - Contribution guidelines including build requirements
  • +
+ + + + + + + + diff --git a/docs/book/development/distribution-process.html b/docs/book/development/distribution-process.html deleted file mode 100644 index 1b57c0e..0000000 --- a/docs/book/development/distribution-process.html +++ /dev/null @@ -1,1046 +0,0 @@ - - - - - - Distribution Process - Provisioning Platform Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-

Keyboard shortcuts

-
-

Press or to navigate between chapters

-

Press S or / to search in the book

-

Press ? to show this help

-

Press Esc to hide this help

-
-
-
-
- - - - - - - - - - - - - -
- -
- - - - - - - - -
-
-

Distribution Process Documentation

-

This document provides comprehensive documentation for the provisioning project’s distribution process, covering release workflows, package -generation, multi-platform distribution, and rollback procedures.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Distribution Architecture
  4. -
  5. Release Process
  6. -
  7. Package Generation
  8. -
  9. Multi-Platform Distribution
  10. -
  11. Validation and Testing
  12. -
  13. Release Management
  14. -
  15. Rollback Procedures
  16. -
  17. CI/CD Integration
  18. -
  19. Troubleshooting
  20. -
-

Overview

-

The distribution system provides a comprehensive solution for creating, packaging, and distributing provisioning across multiple platforms with -automated release management.

-

Key Features:

-
    -
  • Multi-Platform Support: Linux, macOS, Windows with multiple architectures
  • -
  • Multiple Distribution Variants: Complete and minimal distributions
  • -
  • Automated Release Pipeline: From development to production deployment
  • -
  • Package Management: Binary packages, container images, and installers
  • -
  • Validation Framework: Comprehensive testing and validation
  • -
  • Rollback Capabilities: Safe rollback and recovery procedures
  • -
-

Location: /src/tools/ -Main Tool: /src/tools/Makefile and associated Nushell scripts

-

Distribution Architecture

-

Distribution Components

-
Distribution Ecosystem
-├── Core Components
-│   ├── Platform Binaries      # Rust-compiled binaries
-│   ├── Core Libraries         # Nushell libraries and CLI
-│   ├── Configuration System   # TOML configuration files
-│   └── Documentation         # User and API documentation
-├── Platform Packages
-│   ├── Archives              # TAR.GZ and ZIP files
-│   ├── Installers            # Platform-specific installers
-│   └── Container Images      # Docker/OCI images
-├── Distribution Variants
-│   ├── Complete              # Full-featured distribution
-│   └── Minimal               # Lightweight distribution
-└── Release Artifacts
-    ├── Checksums             # SHA256/MD5 verification
-    ├── Signatures            # Digital signatures
-    └── Metadata              # Release information
-
-

Build Pipeline

-
Build Pipeline Flow
-┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
-│   Source Code   │ -> │   Build Stage   │ -> │  Package Stage  │
-│                 │    │                 │    │                 │
-│ - Rust code     │    │ - compile-      │    │ - create-       │
-│ - Nushell libs  │    │   platform      │    │   archives      │
-│ - Nickel schemas│    │ - bundle-core   │    │ - build-        │
-│ - Config files  │    │ - validate-nickel│   │   containers    │
-└─────────────────┘    └─────────────────┘    └─────────────────┘
-                                |
-                                v
-┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
-│ Release Stage   │ <- │ Validate Stage  │ <- │ Distribute Stage│
-│                 │    │                 │    │                 │
-│ - create-       │    │ - test-dist     │    │ - generate-     │
-│   release       │    │ - validate-     │    │   distribution  │
-│ - upload-       │    │   package       │    │ - create-       │
-│   artifacts     │    │ - integration   │    │   installers    │
-└─────────────────┘    └─────────────────┘    └─────────────────┘
-
-

Distribution Variants

-

Complete Distribution:

-
    -
  • All Rust binaries (orchestrator, control-center, MCP server)
  • -
  • Full Nushell library suite
  • -
  • All providers, taskservs, and clusters
  • -
  • Complete documentation and examples
  • -
  • Development tools and templates
  • -
-

Minimal Distribution:

-
    -
  • Essential binaries only
  • -
  • Core Nushell libraries
  • -
  • Basic provider support
  • -
  • Essential task services
  • -
  • Minimal documentation
  • -
-

Release Process

-

Release Types

-

Release Classifications:

-
    -
  • Major Release (x.0.0): Breaking changes, new major features
  • -
  • Minor Release (x.y.0): New features, backward compatible
  • -
  • Patch Release (x.y.z): Bug fixes, security updates
  • -
  • Pre-Release (x.y.z-alpha/beta/rc): Development/testing releases
  • -
-

Step-by-Step Release Process

-

1. Preparation Phase

-

Pre-Release Checklist:

-
# Update dependencies and security
-cargo update
-cargo audit
-
-# Run comprehensive tests
-make ci-test
-
-# Update documentation
-make docs
-
-# Validate all configurations
-make validate-all
-
-

Version Planning:

-
# Check current version
-git describe --tags --always
-
-# Plan next version
-make status | grep Version
-
-# Validate version bump
-nu src/tools/release/create-release.nu --dry-run --version 2.1.0
-
-

2. Build Phase

-

Complete Build:

-
# Clean build environment
-make clean
-
-# Build all platforms and variants
-make all
-
-# Validate build output
-make test-dist
-
-

Build with Specific Parameters:

-
# Build for specific platforms
-make all PLATFORMS=linux-amd64,macos-amd64 VARIANTS=complete
-
-# Build with custom version
-make all VERSION=2.1.0-rc1
-
-# Parallel build for speed
-make all PARALLEL=true
-
-

3. Package Generation

-

Create Distribution Packages:

-
# Generate complete distributions
-make dist-generate
-
-# Create binary packages
-make package-binaries
-
-# Build container images
-make package-containers
-
-# Create installers
-make create-installers
-
-

Package Validation:

-
# Validate packages
-make test-dist
-
-# Check package contents
-nu src/tools/package/validate-package.nu packages/
-
-# Test installation
-make install
-make uninstall
-
-

4. Release Creation

-

Automated Release:

-
# Create complete release
-make release VERSION=2.1.0
-
-# Create draft release for review
-make release-draft VERSION=2.1.0
-
-# Manual release creation
-nu src/tools/release/create-release.nu \
-    --version 2.1.0 \
-    --generate-changelog \
-    --push-tag \
-    --auto-upload
-
-

Release Options:

-
    -
  • --pre-release: Mark as pre-release
  • -
  • --draft: Create draft release
  • -
  • --generate-changelog: Auto-generate changelog from commits
  • -
  • --push-tag: Push git tag to remote
  • -
  • --auto-upload: Upload assets automatically
  • -
-

5. Distribution and Notification

-

Upload Artifacts:

-
# Upload to GitHub Releases
-make upload-artifacts
-
-# Update package registries
-make update-registry
-
-# Send notifications
-make notify-release
-
-

Registry Updates:

-
# Update Homebrew formula
-nu src/tools/release/update-registry.nu \
-    --registries homebrew \
-    --version 2.1.0 \
-    --auto-commit
-
-# Custom registry updates
-nu src/tools/release/update-registry.nu \
-    --registries custom \
-    --registry-url https://packages.company.com \
-    --credentials-file ~/.registry-creds
-
-

Release Automation

-

Complete Automated Release:

-
# Full release pipeline
-make cd-deploy VERSION=2.1.0
-
-# Equivalent manual steps:
-make clean
-make all VERSION=2.1.0
-make create-archives
-make create-installers
-make release VERSION=2.1.0
-make upload-artifacts
-make update-registry
-make notify-release
-
-

Package Generation

-

Binary Packages

-

Package Types:

-
    -
  • Standalone Archives: TAR.GZ and ZIP with all dependencies
  • -
  • Platform Packages: DEB, RPM, MSI, PKG with system integration
  • -
  • Portable Packages: Single-directory distributions
  • -
  • Source Packages: Source code with build instructions
  • -
-

Create Binary Packages:

-
# Standard binary packages
-make package-binaries
-
-# Custom package creation
-nu src/tools/package/package-binaries.nu \
-    --source-dir dist/platform \
-    --output-dir packages/binaries \
-    --platforms linux-amd64,macos-amd64 \
-    --format archive \
-    --compress \
-    --strip \
-    --checksum
-
-

Package Features:

-
    -
  • Binary Stripping: Removes debug symbols for smaller size
  • -
  • Compression: GZIP, LZMA, and Brotli compression
  • -
  • Checksums: SHA256 and MD5 verification
  • -
  • Signatures: GPG and code signing support
  • -
-

Container Images

-

Container Build Process:

-
# Build container images
-make package-containers
-
-# Advanced container build
-nu src/tools/package/build-containers.nu \
-    --dist-dir dist \
-    --tag-prefix provisioning \
-    --version 2.1.0 \
-    --platforms "linux/amd64,linux/arm64" \
-    --optimize-size \
-    --security-scan \
-    --multi-stage
-
-

Container Features:

-
    -
  • Multi-Stage Builds: Minimal runtime images
  • -
  • Security Scanning: Vulnerability detection
  • -
  • Multi-Platform: AMD64, ARM64 support
  • -
  • Layer Optimization: Efficient layer caching
  • -
  • Runtime Configuration: Environment-based configuration
  • -
-

Container Registry Support:

-
    -
  • Docker Hub
  • -
  • GitHub Container Registry
  • -
  • Amazon ECR
  • -
  • Google Container Registry
  • -
  • Azure Container Registry
  • -
  • Private registries
  • -
-

Installers

-

Installer Types:

-
    -
  • Shell Script Installer: Universal Unix/Linux installer
  • -
  • Package Installers: DEB, RPM, MSI, PKG
  • -
  • Container Installer: Docker/Podman setup
  • -
  • Source Installer: Build-from-source installer
  • -
-

Create Installers:

-
# Generate all installer types
-make create-installers
-
-# Custom installer creation
-nu src/tools/distribution/create-installer.nu \
-    dist/provisioning-2.1.0-linux-amd64-complete \
-    --output-dir packages/installers \
-    --installer-types shell,package \
-    --platforms linux,macos \
-    --include-services \
-    --create-uninstaller \
-    --validate-installer
-
-

Installer Features:

-
    -
  • System Integration: Systemd/Launchd service files
  • -
  • Path Configuration: Automatic PATH updates
  • -
  • User/System Install: Support for both user and system-wide installation
  • -
  • Uninstaller: Clean removal capability
  • -
  • Dependency Management: Automatic dependency resolution
  • -
  • Configuration Setup: Initial configuration creation
  • -
-

Multi-Platform Distribution

-

Supported Platforms

-

Primary Platforms:

-
    -
  • Linux AMD64 (x86_64-unknown-linux-gnu)
  • -
  • Linux ARM64 (aarch64-unknown-linux-gnu)
  • -
  • macOS AMD64 (x86_64-apple-darwin)
  • -
  • macOS ARM64 (aarch64-apple-darwin)
  • -
  • Windows AMD64 (x86_64-pc-windows-gnu)
  • -
  • FreeBSD AMD64 (x86_64-unknown-freebsd)
  • -
-

Platform-Specific Features:

-
    -
  • Linux: SystemD integration, package manager support
  • -
  • macOS: LaunchAgent services, Homebrew packages
  • -
  • Windows: Windows Service support, MSI installers
  • -
  • FreeBSD: RC scripts, pkg packages
  • -
-

Cross-Platform Build

-

Cross-Compilation Setup:

-
# Install cross-compilation targets
-rustup target add aarch64-unknown-linux-gnu
-rustup target add x86_64-apple-darwin
-rustup target add aarch64-apple-darwin
-rustup target add x86_64-pc-windows-gnu
-
-# Install cross-compilation tools
-cargo install cross
-
-

Platform-Specific Builds:

-
# Build for specific platform
-make build-platform RUST_TARGET=aarch64-apple-darwin
-
-# Build for multiple platforms
-make build-cross PLATFORMS=linux-amd64,macos-arm64,windows-amd64
-
-# Platform-specific distributions
-make linux
-make macos
-make windows
-
-

Distribution Matrix

-

Generated Distributions:

-
Distribution Matrix:
-provisioning-{version}-{platform}-{variant}.{format}
-
-Examples:
-- provisioning-2.1.0-linux-amd64-complete.tar.gz
-- provisioning-2.1.0-macos-arm64-minimal.tar.gz
-- provisioning-2.1.0-windows-amd64-complete.zip
-- provisioning-2.1.0-freebsd-amd64-minimal.tar.xz
-
-

Platform Considerations:

-
    -
  • File Permissions: Executable permissions on Unix systems
  • -
  • Path Separators: Platform-specific path handling
  • -
  • Service Integration: Platform-specific service management
  • -
  • Package Formats: TAR.GZ for Unix, ZIP for Windows
  • -
  • Line Endings: CRLF for Windows, LF for Unix
  • -
-

Validation and Testing

-

Distribution Validation

-

Validation Pipeline:

-
# Complete validation
-make test-dist
-
-# Custom validation
-nu src/tools/build/test-distribution.nu \
-    --dist-dir dist \
-    --test-types basic,integration,complete \
-    --platform linux \
-    --cleanup \
-    --verbose
-
-

Validation Types:

-
    -
  • Basic: Installation test, CLI help, version check
  • -
  • Integration: Server creation, configuration validation
  • -
  • Complete: Full workflow testing including cluster operations
  • -
-

Testing Framework

-

Test Categories:

-
    -
  • Unit Tests: Component-specific testing
  • -
  • Integration Tests: Cross-component testing
  • -
  • End-to-End Tests: Complete workflow testing
  • -
  • Performance Tests: Load and performance validation
  • -
  • Security Tests: Security scanning and validation
  • -
-

Test Execution:

-
# Run all tests
-make ci-test
-
-# Specific test types
-nu src/tools/build/test-distribution.nu --test-types basic
-nu src/tools/build/test-distribution.nu --test-types integration
-nu src/tools/build/test-distribution.nu --test-types complete
-
-

Package Validation

-

Package Integrity:

-
# Validate package structure
-nu src/tools/package/validate-package.nu dist/
-
-# Check checksums
-sha256sum -c packages/checksums.sha256
-
-# Verify signatures
-gpg --verify packages/provisioning-2.1.0.tar.gz.sig
-
-

Installation Testing:

-
# Test installation process
-./packages/installers/install-provisioning-2.1.0.sh --dry-run
-
-# Test uninstallation
-./packages/installers/uninstall-provisioning.sh --dry-run
-
-# Container testing
-docker run --rm provisioning:2.1.0 provisioning --version
-
-

Release Management

-

Release Workflow

-

GitHub Release Integration:

-
# Create GitHub release
-nu src/tools/release/create-release.nu \
-    --version 2.1.0 \
-    --asset-dir packages \
-    --generate-changelog \
-    --push-tag \
-    --auto-upload
-
-

Release Features:

-
    -
  • Automated Changelog: Generated from git commit history
  • -
  • Asset Management: Automatic upload of all distribution artifacts
  • -
  • Tag Management: Semantic version tagging
  • -
  • Release Notes: Formatted release notes with change summaries
  • -
-

Versioning Strategy

-

Semantic Versioning:

-
    -
  • MAJOR.MINOR.PATCH format (for example, 2.1.0)
  • -
  • Pre-release suffixes (for example, 2.1.0-alpha.1, 2.1.0-rc.2)
  • -
  • Build metadata (for example, 2.1.0+20250925.abcdef)
  • -
-

Version Detection:

-
# Auto-detect next version
-nu src/tools/release/create-release.nu --release-type minor
-
-# Manual version specification
-nu src/tools/release/create-release.nu --version 2.1.0
-
-# Pre-release versioning
-nu src/tools/release/create-release.nu --version 2.1.0-rc.1 --pre-release
-
-

Artifact Management

-

Artifact Types:

-
    -
  • Source Archives: Complete source code distributions
  • -
  • Binary Archives: Compiled binary distributions
  • -
  • Container Images: OCI-compliant container images
  • -
  • Installers: Platform-specific installation packages
  • -
  • Documentation: Generated documentation packages
  • -
-

Upload and Distribution:

-
# Upload to GitHub Releases
-make upload-artifacts
-
-# Upload to container registries
-docker push provisioning:2.1.0
-
-# Update package repositories
-make update-registry
-
-

Rollback Procedures

-

Rollback Scenarios

-

Common Rollback Triggers:

-
    -
  • Critical bugs discovered post-release
  • -
  • Security vulnerabilities identified
  • -
  • Performance regression
  • -
  • Compatibility issues
  • -
  • Infrastructure failures
  • -
-

Rollback Process

-

Automated Rollback:

-
# Rollback latest release
-nu src/tools/release/rollback-release.nu --version 2.1.0
-
-# Rollback with specific target
-nu src/tools/release/rollback-release.nu \
-    --from-version 2.1.0 \
-    --to-version 2.0.5 \
-    --update-registries \
-    --notify-users
-
-

Manual Rollback Steps:

-
# 1. Identify target version
-git tag -l | grep -v 2.1.0 | tail -5
-
-# 2. Create rollback release
-nu src/tools/release/create-release.nu \
-    --version 2.0.6 \
-    --rollback-from 2.1.0 \
-    --urgent
-
-# 3. Update package managers
-nu src/tools/release/update-registry.nu \
-    --version 2.0.6 \
-    --rollback-notice "Critical fix for 2.1.0 issues"
-
-# 4. Notify users
-nu src/tools/release/notify-users.nu \
-    --channels slack,discord,email \
-    --message-type rollback \
-    --urgent
-
-

Rollback Safety

-

Pre-Rollback Validation:

-
    -
  • Validate target version integrity
  • -
  • Check compatibility matrix
  • -
  • Verify rollback procedure testing
  • -
  • Confirm communication plan
  • -
-

Rollback Testing:

-
# Test rollback in staging
-nu src/tools/release/rollback-release.nu \
-    --version 2.1.0 \
-    --target-version 2.0.5 \
-    --dry-run \
-    --staging-environment
-
-# Validate rollback success
-make test-dist DIST_VERSION=2.0.5
-
-

Emergency Procedures

-

Critical Security Rollback:

-
# Emergency rollback (bypasses normal procedures)
-nu src/tools/release/rollback-release.nu \
-    --version 2.1.0 \
-    --emergency \
-    --security-issue \
-    --immediate-notify
-
-

Infrastructure Failure Recovery:

-
# Failover to backup infrastructure
-nu src/tools/release/rollback-release.nu \
-    --infrastructure-failover \
-    --backup-registry \
-    --mirror-sync
-
-

CI/CD Integration

-

GitHub Actions Integration

-

Build Workflow (.github/workflows/build.yml):

-
name: Build and Distribute
-on:
-  push:
-    branches: [main]
-  pull_request:
-    branches: [main]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        platform: [linux, macos, windows]
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Setup Nushell
-        uses: hustcer/setup-nu@v3.5
-
-      - name: Setup Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-
-      - name: CI Build
-        run: |
-          cd src/tools
-          make ci-build
-
-      - name: Upload Build Artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: build-${{ matrix.platform }}
-          path: src/dist/
-
-

Release Workflow (.github/workflows/release.yml):

-
name: Release
-on:
-  push:
-    tags: ['v*']
-
-jobs:
-  release:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Build Release
-        run: |
-          cd src/tools
-          make ci-release VERSION=${{ github.ref_name }}
-
-      - name: Create Release
-        run: |
-          cd src/tools
-          make release VERSION=${{ github.ref_name }}
-
-      - name: Update Registries
-        run: |
-          cd src/tools
-          make update-registry VERSION=${{ github.ref_name }}
-
-

GitLab CI Integration

-

GitLab CI Configuration (.gitlab-ci.yml):

-
stages:
-  - build
-  - package
-  - test
-  - release
-
-build:
-  stage: build
-  script:
-    - cd src/tools
-    - make ci-build
-  artifacts:
-    paths:
-      - src/dist/
-    expire_in: 1 hour
-
-package:
-  stage: package
-  script:
-    - cd src/tools
-    - make package-all
-  artifacts:
-    paths:
-      - src/packages/
-    expire_in: 1 day
-
-release:
-  stage: release
-  script:
-    - cd src/tools
-    - make cd-deploy VERSION=${CI_COMMIT_TAG}
-  only:
-    - tags
-
-

Jenkins Integration

-

Jenkinsfile:

-
pipeline {
-    agent any
-
-    stages {
-        stage('Build') {
-            steps {
-                dir('src/tools') {
-                    sh 'make ci-build'
-                }
-            }
-        }
-
-        stage('Package') {
-            steps {
-                dir('src/tools') {
-                    sh 'make package-all'
-                }
-            }
-        }
-
-        stage('Release') {
-            when {
-                tag '*'
-            }
-            steps {
-                dir('src/tools') {
-                    sh "make cd-deploy VERSION=${env.TAG_NAME}"
-                }
-            }
-        }
-    }
-}
-
-

Troubleshooting

-

Common Issues

-

Build Failures

-

Rust Compilation Errors:

-
# Solution: Clean and rebuild
-make clean
-cargo clean
-make build-platform
-
-# Check Rust toolchain
-rustup show
-rustup update
-
-

Cross-Compilation Issues:

-
# Solution: Install missing targets
-rustup target list --installed
-rustup target add x86_64-apple-darwin
-
-# Use cross for problematic targets
-cargo install cross
-make build-platform CROSS=true
-
-

Package Generation Issues

-

Missing Dependencies:

-
# Solution: Install build tools
-sudo apt-get install build-essential
-brew install gnu-tar
-
-# Check tool availability
-make info
-
-

Permission Errors:

-
# Solution: Fix permissions
-chmod +x src/tools/build/*.nu
-chmod +x src/tools/distribution/*.nu
-chmod +x src/tools/package/*.nu
-
-

Distribution Validation Failures

-

Package Integrity Issues:

-
# Solution: Regenerate packages
-make clean-dist
-make package-all
-
-# Verify manually
-sha256sum packages/*.tar.gz
-
-

Installation Test Failures:

-
# Solution: Test in clean environment
-docker run --rm -v $(pwd):/work ubuntu:latest /work/packages/installers/install.sh
-
-# Debug installation
-./packages/installers/install.sh --dry-run --verbose
-
-

Release Issues

-

Upload Failures

-

Network Issues:

-
# Solution: Retry with backoff
-nu src/tools/release/upload-artifacts.nu \
-    --retry-count 5 \
-    --backoff-delay 30
-
-# Manual upload
-gh release upload v2.1.0 packages/*.tar.gz
-
-

Authentication Failures:

-
# Solution: Refresh tokens
-gh auth refresh
-docker login ghcr.io
-
-# Check credentials
-gh auth status
-docker system info
-
-

Registry Update Issues

-

Homebrew Formula Issues:

-
# Solution: Manual PR creation
-git clone https://github.com/Homebrew/homebrew-core
-cd homebrew-core
-# Edit formula
-git add Formula/provisioning.rb
-git commit -m "provisioning 2.1.0"
-
-

Debug and Monitoring

-

Debug Mode:

-
# Enable debug logging
-export PROVISIONING_DEBUG=true
-export RUST_LOG=debug
-
-# Run with verbose output
-make all VERBOSE=true
-
-# Debug specific components
-nu src/tools/distribution/generate-distribution.nu \
-    --verbose \
-    --dry-run
-
-

Monitoring Build Progress:

-
# Monitor build logs
-tail -f src/tools/build.log
-
-# Check build status
-make status
-
-# Resource monitoring
-top
-df -h
-
-

This distribution process provides a robust, automated pipeline for creating, validating, and distributing provisioning across multiple platforms -while maintaining high quality and reliability standards.

- -
- - -
-
- - - -
- - - - - - - - - - - - - - - - - - -
- - diff --git a/docs/book/development/implementation-guide.html b/docs/book/development/implementation-guide.html deleted file mode 100644 index 762f91d..0000000 --- a/docs/book/development/implementation-guide.html +++ /dev/null @@ -1,1020 +0,0 @@ - - - - - - Implementation Guide - Provisioning Platform Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-

Keyboard shortcuts

-
-

Press or to navigate between chapters

-

Press S or / to search in the book

-

Press ? to show this help

-

Press Esc to hide this help

-
-
-
-
- - - - - - - - - - - - - -
- -
- - - - - - - - -
-
-

Repository Restructuring - Implementation Guide

-

Status: Ready for Implementation -Estimated Time: 12-16 days -Priority: High -Related: Architecture Analysis

-

Overview

-

This guide provides step-by-step instructions for implementing the repository restructuring and distribution system improvements. Each phase includes -specific commands, validation steps, and rollback procedures.

-
-

Prerequisites

-

Required Tools

-
    -
  • Nushell 0.107.1+
  • -
  • Rust toolchain (for platform builds)
  • -
  • Git
  • -
  • tar/gzip
  • -
  • curl or wget
  • -
- -
    -
  • Just (task runner)
  • -
  • ripgrep (for code searches)
  • -
  • fd (for file finding)
  • -
-

Before Starting

-
    -
  1. Create full backup
  2. -
  3. Notify team members
  4. -
  5. Create implementation branch
  6. -
  7. Set aside dedicated time
  8. -
-
-

Phase 1: Repository Restructuring (Days 1-4)

-

Day 1: Backup and Analysis

-

Step 1.1: Create Complete Backup

-
# Create timestamped backup
-BACKUP_DIR="/Users/Akasha/project-provisioning-backup-$(date +%Y%m%d)"
-cp -r /Users/Akasha/project-provisioning "$BACKUP_DIR"
-
-# Verify backup
-ls -lh "$BACKUP_DIR"
-du -sh "$BACKUP_DIR"
-
-# Create backup manifest
-find "$BACKUP_DIR" -type f > "$BACKUP_DIR/manifest.txt"
-echo "✅ Backup created: $BACKUP_DIR"
-
-

Step 1.2: Analyze Current State

-
cd /Users/Akasha/project-provisioning
-
-# Count workspace directories
-echo "=== Workspace Directories ==="
-fd workspace -t d
-
-# Analyze workspace contents
-echo "=== Active Workspace ==="
-du -sh workspace/
-
-echo "=== Backup Workspaces ==="
-du -sh _workspace/ backup-workspace/ workspace-librecloud/
-
-# Find obsolete directories
-echo "=== Build Artifacts ==="
-du -sh target/ wrks/ NO/
-
-# Save analysis
-{
-    echo "# Current State Analysis - $(date)"
-    echo ""
-    echo "## Workspace Directories"
-    fd workspace -t d
-    echo ""
-    echo "## Directory Sizes"
-    du -sh workspace/ _workspace/ backup-workspace/ workspace-librecloud/ 2>/dev/null
-    echo ""
-    echo "## Build Artifacts"
-    du -sh target/ wrks/ NO/ 2>/dev/null
-} > docs/development/current-state-analysis.txt
-
-echo "✅ Analysis complete: docs/development/current-state-analysis.txt"
-
-

Step 1.3: Identify Dependencies

-
# Find all hardcoded paths
-echo "=== Hardcoded Paths in Nushell Scripts ==="
-rg -t nu "workspace/|_workspace/|backup-workspace/" provisioning/core/nulib/ | tee hardcoded-paths.txt
-
-# Find ENV references (legacy)
-echo "=== ENV References ==="
-rg "PROVISIONING_" provisioning/core/nulib/ | wc -l
-
-# Find workspace references in configs
-echo "=== Config References ==="
-rg "workspace" provisioning/config/
-
-echo "✅ Dependencies mapped"
-
-

Step 1.4: Create Implementation Branch

-
# Create and switch to implementation branch
-git checkout -b feat/repo-restructure
-
-# Commit analysis
-git add docs/development/current-state-analysis.txt
-git commit -m "docs: add current state analysis for restructuring"
-
-echo "✅ Implementation branch created: feat/repo-restructure"
-
-

Validation:

-
    -
  • ✅ Backup exists and is complete
  • -
  • ✅ Analysis document created
  • -
  • ✅ Dependencies mapped
  • -
  • ✅ Implementation branch ready
  • -
-
-

Day 2: Directory Restructuring

-

Step 2.1: Create New Directory Structure

-
cd /Users/Akasha/project-provisioning
-
-# Create distribution directory structure
-mkdir -p distribution/{packages,installers,registry}
-echo "✅ Created distribution/"
-
-# Create workspace structure (keep tracked templates)
-mkdir -p workspace/{infra,config,extensions,runtime}/{.gitkeep}
-mkdir -p workspace/templates/{minimal,kubernetes,multi-cloud}
-echo "✅ Created workspace/"
-
-# Verify
-tree -L 2 distribution/ workspace/
-
-

Step 2.2: Move Build Artifacts

-
# Move Rust build artifacts
-if [ -d "target" ]; then
-    mv target distribution/target
-    echo "✅ Moved target/ to distribution/"
-fi
-
-# Move KCL packages
-if [ -d "provisioning/tools/dist" ]; then
-    mv provisioning/tools/dist/* distribution/packages/ 2>/dev/null || true
-    echo "✅ Moved packages to distribution/"
-fi
-
-# Move any existing packages
-find . -name "*.tar.gz" -o -name "*.zip" | grep -v node_modules | while read pkg; do
-    mv "$pkg" distribution/packages/
-    echo "  Moved: $pkg"
-done
-
-

Step 2.3: Consolidate Workspaces

-
# Identify active workspace
-echo "=== Current Workspace Status ==="
-ls -la workspace/ _workspace/ backup-workspace/ 2>/dev/null
-
-# Interactive workspace consolidation
-read -p "Which workspace is currently active? (workspace/_workspace/backup-workspace): " ACTIVE_WS
-
-if [ "$ACTIVE_WS" != "workspace" ]; then
-    echo "Consolidating $ACTIVE_WS to workspace/"
-
-    # Merge infra configs
-    if [ -d "$ACTIVE_WS/infra" ]; then
-        cp -r "$ACTIVE_WS/infra/"* workspace/infra/
-    fi
-
-    # Merge configs
-    if [ -d "$ACTIVE_WS/config" ]; then
-        cp -r "$ACTIVE_WS/config/"* workspace/config/
-    fi
-
-    # Merge extensions
-    if [ -d "$ACTIVE_WS/extensions" ]; then
-        cp -r "$ACTIVE_WS/extensions/"* workspace/extensions/
-    fi
-
-    echo "✅ Consolidated workspace"
-fi
-
-# Archive old workspace directories
-mkdir -p .archived-workspaces
-for ws in _workspace backup-workspace workspace-librecloud; do
-    if [ -d "$ws" ] && [ "$ws" != "$ACTIVE_WS" ]; then
-        mv "$ws" ".archived-workspaces/$(basename $ws)-$(date +%Y%m%d)"
-        echo "  Archived: $ws"
-    fi
-done
-
-echo "✅ Workspaces consolidated"
-
-

Step 2.4: Remove Obsolete Directories

-
# Remove build artifacts (already moved)
-rm -rf wrks/
-echo "✅ Removed wrks/"
-
-# Remove test/scratch directories
-rm -rf NO/
-echo "✅ Removed NO/"
-
-# Archive presentations (optional)
-if [ -d "presentations" ]; then
-    read -p "Archive presentations directory? (y/N): " ARCHIVE_PRES
-    if [ "$ARCHIVE_PRES" = "y" ]; then
-        tar czf presentations-archive-$(date +%Y%m%d).tar.gz presentations/
-        rm -rf presentations/
-        echo "✅ Archived and removed presentations/"
-    fi
-fi
-
-# Remove empty directories
-find . -type d -empty -delete 2>/dev/null || true
-
-echo "✅ Cleanup complete"
-
-

Step 2.5: Update .gitignore

-
# Backup existing .gitignore
-cp .gitignore .gitignore.backup
-
-# Update .gitignore
-cat >> .gitignore << 'EOF'
-
-# ============================================================================
-# Repository Restructure (2025-10-01)
-# ============================================================================
-
-# Workspace runtime data (user-specific)
-/workspace/infra/
-/workspace/config/
-/workspace/extensions/
-/workspace/runtime/
-
-# Distribution artifacts
-/distribution/packages/
-/distribution/target/
-
-# Build artifacts
-/target/
-/provisioning/platform/target/
-/provisioning/platform/*/target/
-
-# Rust artifacts
-**/*.rs.bk
-Cargo.lock
-
-# Archived directories
-/.archived-workspaces/
-
-# Temporary files
-*.tmp
-*.temp
-/tmp/
-/wrks/
-/NO/
-
-# Logs
-*.log
-/workspace/runtime/logs/
-
-# Cache
-.cache/
-/workspace/runtime/cache/
-
-# IDE
-.vscode/
-.idea/
-*.swp
-*.swo
-*~
-
-# OS
-.DS_Store
-Thumbs.db
-
-# Backup files
-*.backup
-*.bak
-
-EOF
-
-echo "✅ Updated .gitignore"
-
-

Step 2.6: Commit Restructuring

-
# Stage changes
-git add -A
-
-# Show what's being committed
-git status
-
-# Commit
-git commit -m "refactor: restructure repository for clean distribution
-
-- Consolidate workspace directories to single workspace/
-- Move build artifacts to distribution/
-- Remove obsolete directories (wrks/, NO/)
-- Update .gitignore for new structure
-- Archive old workspace variants
-
-This is part of Phase 1 of the repository restructuring plan.
-
-Related: docs/architecture/repo-dist-analysis.md"
-
-echo "✅ Restructuring committed"
-
-

Validation:

-
    -
  • ✅ Single workspace/ directory exists
  • -
  • ✅ Build artifacts in distribution/
  • -
  • ✅ No wrks/, NO/ directories
  • -
  • .gitignore updated
  • -
  • ✅ Changes committed
  • -
-
-

Day 3: Update Path References

-

Step 3.1: Create Path Update Script

-
# Create migration script
-cat > provisioning/tools/migration/update-paths.nu << 'EOF'
-#!/usr/bin/env nu
-# Path update script for repository restructuring
-
-# Find and replace path references
-export def main [] {
-    print "🔧 Updating path references..."
-
-    let replacements = [
-        ["_workspace/" "workspace/"]
-        ["backup-workspace/" "workspace/"]
-        ["workspace-librecloud/" "workspace/"]
-        ["wrks/" "distribution/"]
-        ["NO/" "distribution/"]
-    ]
-
-    let files = (fd -e nu -e toml -e md . provisioning/)
-
-    mut updated_count = 0
-
-    for file in $files {
-        mut content = (open $file)
-        mut modified = false
-
-        for replacement in $replacements {
-            let old = $replacement.0
-            let new = $replacement.1
-
-            if ($content | str contains $old) {
-                $content = ($content | str replace -a $old $new)
-                $modified = true
-            }
-        }
-
-        if $modified {
-            $content | save -f $file
-            $updated_count = $updated_count + 1
-            print $"  ✓ Updated: ($file)"
-        }
-    }
-
-    print $"✅ Updated ($updated_count) files"
-}
-EOF
-
-chmod +x provisioning/tools/migration/update-paths.nu
-
-

Step 3.2: Run Path Updates

-
# Create backup before updates
-git stash
-git checkout -b feat/path-updates
-
-# Run update script
-nu provisioning/tools/migration/update-paths.nu
-
-# Review changes
-git diff
-
-# Test a sample file
-nu -c "use provisioning/core/nulib/servers/create.nu; print 'OK'"
-
-

Step 3.3: Update CLAUDE.md

-
# Update CLAUDE.md with new paths
-cat > CLAUDE.md.new << 'EOF'
-# CLAUDE.md
-
-[Keep existing content, update paths section...]
-
-## Updated Path Structure (2025-10-01)
-
-### Core System
-- **Main CLI**: `provisioning/core/cli/provisioning`
-- **Libraries**: `provisioning/core/nulib/`
-- **Extensions**: `provisioning/extensions/`
-- **Platform**: `provisioning/platform/`
-
-### User Workspace
-- **Active Workspace**: `workspace/` (gitignored runtime data)
-- **Templates**: `workspace/templates/` (tracked)
-- **Infrastructure**: `workspace/infra/` (user configs, gitignored)
-
-### Build System
-- **Distribution**: `distribution/` (gitignored artifacts)
-- **Packages**: `distribution/packages/`
-- **Installers**: `distribution/installers/`
-
-[Continue with rest of content...]
-EOF
-
-# Review changes
-diff CLAUDE.md CLAUDE.md.new
-
-# Apply if satisfied
-mv CLAUDE.md.new CLAUDE.md
-
-

Step 3.4: Update Documentation

-
# Find all documentation files
-fd -e md . docs/
-
-# Update each doc with new paths
-# This is semi-automated - review each file
-
-# Create list of docs to update
-fd -e md . docs/ > docs-to-update.txt
-
-# Manual review and update
-echo "Review and update each documentation file with new paths"
-echo "Files listed in: docs-to-update.txt"
-
-

Step 3.5: Commit Path Updates

-
git add -A
-git commit -m "refactor: update all path references for new structure
-
-- Update Nushell scripts to use workspace/ instead of variants
-- Update CLAUDE.md with new path structure
-- Update documentation references
-- Add migration script for future path changes
-
-Phase 1.3 of repository restructuring."
-
-echo "✅ Path updates committed"
-
-

Validation:

-
    -
  • ✅ All Nushell scripts reference correct paths
  • -
  • ✅ CLAUDE.md updated
  • -
  • ✅ Documentation updated
  • -
  • ✅ No references to old paths remain
  • -
-
-

Day 4: Validation and Testing

-

Step 4.1: Automated Validation

-
# Create validation script
-cat > provisioning/tools/validation/validate-structure.nu << 'EOF'
-#!/usr/bin/env nu
-# Repository structure validation
-
-export def main [] {
-    print "🔍 Validating repository structure..."
-
-    mut passed = 0
-    mut failed = 0
-
-    # Check required directories exist
-    let required_dirs = [
-        "provisioning/core"
-        "provisioning/extensions"
-        "provisioning/platform"
-        "provisioning/schemas"
-        "workspace"
-        "workspace/templates"
-        "distribution"
-        "docs"
-        "tests"
-    ]
-
-    for dir in $required_dirs {
-        if ($dir | path exists) {
-            print $"  ✓ ($dir)"
-            $passed = $passed + 1
-        } else {
-            print $"  ✗ ($dir) MISSING"
-            $failed = $failed + 1
-        }
-    }
-
-    # Check obsolete directories don't exist
-    let obsolete_dirs = [
-        "_workspace"
-        "backup-workspace"
-        "workspace-librecloud"
-        "wrks"
-        "NO"
-    ]
-
-    for dir in $obsolete_dirs {
-        if not ($dir | path exists) {
-            print $"  ✓ ($dir) removed"
-            $passed = $passed + 1
-        } else {
-            print $"  ✗ ($dir) still exists"
-            $failed = $failed + 1
-        }
-    }
-
-    # Check no old path references
-    let old_paths = ["_workspace/" "backup-workspace/" "wrks/"]
-    for path in $old_paths {
-        let results = (rg -l $path provisioning/ --iglob "!*.md" 2>/dev/null | lines)
-        if ($results | is-empty) {
-            print $"  ✓ No references to ($path)"
-            $passed = $passed + 1
-        } else {
-            print $"  ✗ Found references to ($path):"
-            $results | each { |f| print $"    - ($f)" }
-            $failed = $failed + 1
-        }
-    }
-
-    print ""
-    print $"Results: ($passed) passed, ($failed) failed"
-
-    if $failed > 0 {
-        error make { msg: "Validation failed" }
-    }
-
-    print "✅ Validation passed"
-}
-EOF
-
-chmod +x provisioning/tools/validation/validate-structure.nu
-
-# Run validation
-nu provisioning/tools/validation/validate-structure.nu
-
-

Step 4.2: Functional Testing

-
# Test core commands
-echo "=== Testing Core Commands ==="
-
-# Version
-provisioning/core/cli/provisioning version
-echo "✓ version command"
-
-# Help
-provisioning/core/cli/provisioning help
-echo "✓ help command"
-
-# List
-provisioning/core/cli/provisioning list servers
-echo "✓ list command"
-
-# Environment
-provisioning/core/cli/provisioning env
-echo "✓ env command"
-
-# Validate config
-provisioning/core/cli/provisioning validate config
-echo "✓ validate command"
-
-echo "✅ Functional tests passed"
-
-

Step 4.3: Integration Testing

-
# Test workflow system
-echo "=== Testing Workflow System ==="
-
-# List workflows
-nu -c "use provisioning/core/nulib/workflows/management.nu *; workflow list"
-echo "✓ workflow list"
-
-# Test workspace commands
-echo "=== Testing Workspace Commands ==="
-
-# Workspace info
-provisioning/core/cli/provisioning workspace info
-echo "✓ workspace info"
-
-echo "✅ Integration tests passed"
-
-

Step 4.4: Create Test Report

-
{
-    echo "# Repository Restructuring - Validation Report"
-    echo "Date: $(date)"
-    echo ""
-    echo "## Structure Validation"
-    nu provisioning/tools/validation/validate-structure.nu 2>&1
-    echo ""
-    echo "## Functional Tests"
-    echo "✓ version command"
-    echo "✓ help command"
-    echo "✓ list command"
-    echo "✓ env command"
-    echo "✓ validate command"
-    echo ""
-    echo "## Integration Tests"
-    echo "✓ workflow list"
-    echo "✓ workspace info"
-    echo ""
-    echo "## Conclusion"
-    echo "✅ Phase 1 validation complete"
-} > docs/development/phase1-validation-report.md
-
-echo "✅ Test report created: docs/development/phase1-validation-report.md"
-
-

Step 4.5: Update README

-
# Update main README with new structure
-# This is manual - review and update README.md
-
-echo "📝 Please review and update README.md with new structure"
-echo "   - Update directory structure diagram"
-echo "   - Update installation instructions"
-echo "   - Update quick start guide"
-
-

Step 4.6: Finalize Phase 1

-
# Commit validation and reports
-git add -A
-git commit -m "test: add validation for repository restructuring
-
-- Add structure validation script
-- Add functional tests
-- Add integration tests
-- Create validation report
-- Document Phase 1 completion
-
-Phase 1 complete: Repository restructuring validated."
-
-# Merge to implementation branch
-git checkout feat/repo-restructure
-git merge feat/path-updates
-
-echo "✅ Phase 1 complete and merged"
-
-

Validation:

-
    -
  • ✅ All validation tests pass
  • -
  • ✅ Functional tests pass
  • -
  • ✅ Integration tests pass
  • -
  • ✅ Validation report created
  • -
  • ✅ README updated
  • -
  • ✅ Phase 1 changes merged
  • -
-
-

Phase 2: Build System Implementation (Days 5-8)

-

Day 5: Build System Core

-

Step 5.1: Create Build Tools Directory

-
mkdir -p provisioning/tools/build
-cd provisioning/tools/build
-
-# Create directory structure
-mkdir -p {core,platform,extensions,validation,distribution}
-
-echo "✅ Build tools directory created"
-
-

Step 5.2: Implement Core Build System

-
# Create main build orchestrator
-# See full implementation in repo-dist-analysis.md
-# Copy build-system.nu from the analysis document
-
-# Test build system
-nu build-system.nu status
-
-

Step 5.3: Implement Core Packaging

-
# Create package-core.nu
-# This packages Nushell libraries, KCL schemas, templates
-
-# Test core packaging
-nu build-system.nu build-core --version dev
-
-

Step 5.4: Create Justfile

-
# Create Justfile in project root
-# See full Justfile in repo-dist-analysis.md
-
-# Test Justfile
-just --list
-just status
-
-

Validation:

-
    -
  • ✅ Build system structure exists
  • -
  • ✅ Core build orchestrator works
  • -
  • ✅ Core packaging works
  • -
  • ✅ Justfile functional
  • -
-

Day 6-8: Continue with Platform, Extensions, and Validation

-

[Follow similar pattern for remaining build system components]

-
-

Phase 3: Installation System (Days 9-11)

-

Day 9: Nushell Installer

-

Step 9.1: Create install.nu

-
mkdir -p distribution/installers
-
-# Create install.nu
-# See full implementation in repo-dist-analysis.md
-
-

Step 9.2: Test Installation

-
# Test installation to /tmp
-nu distribution/installers/install.nu --prefix /tmp/provisioning-test
-
-# Verify
-ls -lh /tmp/provisioning-test/
-
-# Test uninstallation
-nu distribution/installers/install.nu uninstall --prefix /tmp/provisioning-test
-
-

Validation:

-
    -
  • ✅ Installer works
  • -
  • ✅ Files installed to correct locations
  • -
  • ✅ Uninstaller works
  • -
  • ✅ No files left after uninstall
  • -
-
-

Rollback Procedures

-

If Phase 1 Fails

-
# Restore from backup
-rm -rf /Users/Akasha/project-provisioning
-cp -r "$BACKUP_DIR" /Users/Akasha/project-provisioning
-
-# Return to main branch
-cd /Users/Akasha/project-provisioning
-git checkout main
-git branch -D feat/repo-restructure
-
-

If Build System Fails

-
# Revert build system commits
-git checkout feat/repo-restructure
-git revert <commit-hash>
-
-

If Installation Fails

-
# Clean up test installation
-rm -rf /tmp/provisioning-test
-sudo rm -rf /usr/local/lib/provisioning
-sudo rm -rf /usr/local/share/provisioning
-
-
-

Checklist

-

Phase 1: Repository Restructuring

-
    -
  • -Day 1: Backup and analysis complete
  • -
  • -Day 2: Directory restructuring complete
  • -
  • -Day 3: Path references updated
  • -
  • -Day 4: Validation passed
  • -
-

Phase 2: Build System

-
    -
  • -Day 5: Core build system implemented
  • -
  • -Day 6: Platform/extensions packaging
  • -
  • -Day 7: Package validation
  • -
  • -Day 8: Build system tested
  • -
-

Phase 3: Installation

-
    -
  • -Day 9: Nushell installer created
  • -
  • -Day 10: Bash installer and CLI
  • -
  • -Day 11: Multi-OS testing
  • -
-

Phase 4: Registry (Optional)

-
    -
  • -Day 12: Registry system
  • -
  • -Day 13: Registry commands
  • -
  • -Day 14: Registry hosting
  • -
-

Phase 5: Documentation

-
    -
  • -Day 15: Documentation updated
  • -
  • -Day 16: Release prepared
  • -
-
-

Notes

-
    -
  • Take breaks between phases - Don’t rush
  • -
  • Test thoroughly - Each phase builds on previous
  • -
  • Commit frequently - Small, atomic commits
  • -
  • Document issues - Track any problems encountered
  • -
  • Ask for review - Get feedback at phase boundaries
  • -
-
-

Support

-

If you encounter issues:

-
    -
  1. Check the validation reports
  2. -
  3. Review the rollback procedures
  4. -
  5. Consult the architecture analysis
  6. -
  7. Create an issue in the tracker
  8. -
- -
- - -
-
- - - -
- - - - - - - - - - - - - - - - - - -
- - diff --git a/docs/book/development/integration.html b/docs/book/development/integration.html deleted file mode 100644 index 687c368..0000000 --- a/docs/book/development/integration.html +++ /dev/null @@ -1,1302 +0,0 @@ - - - - - - Integration - Provisioning Platform Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-

Keyboard shortcuts

-
-

Press or to navigate between chapters

-

Press S or / to search in the book

-

Press ? to show this help

-

Press Esc to hide this help

-
-
-
-
- - - - - - - - - - - - - -
- -
- - - - - - - - -
-
-

Integration Guide

-

This document explains how the new project structure integrates with existing systems, API compatibility and versioning, database migration -strategies, deployment considerations, and monitoring and observability.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Existing System Integration
  4. -
  5. API Compatibility and Versioning
  6. -
  7. Database Migration Strategies
  8. -
  9. Deployment Considerations
  10. -
  11. Monitoring and Observability
  12. -
  13. Legacy System Bridge
  14. -
  15. Migration Pathways
  16. -
  17. Troubleshooting Integration Issues
  18. -
-

Overview

-

Provisioning has been designed with integration as a core principle, ensuring seamless compatibility between new development-focused components and -existing production systems while providing clear migration pathways.

-

Integration Principles:

-
    -
  • Backward Compatibility: All existing APIs and interfaces remain functional
  • -
  • Gradual Migration: Systems can be migrated incrementally without disruption
  • -
  • Dual Operation: New and legacy systems operate side-by-side during transition
  • -
  • Zero Downtime: Migrations occur without service interruption
  • -
  • Data Integrity: All data migrations are atomic and reversible
  • -
-

Integration Architecture:

-
Integration Ecosystem
-┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
-│   Legacy Core   │ ←→ │  Bridge Layer   │ ←→ │   New Systems   │
-│                 │    │                 │    │                 │
-│ - ENV config    │    │ - Compatibility │    │ - TOML config   │
-│ - Direct calls  │    │ - Translation   │    │ - Orchestrator  │
-│ - File-based    │    │ - Monitoring    │    │ - Workflows     │
-│ - Simple logging│    │ - Validation    │    │ - REST APIs     │
-└─────────────────┘    └─────────────────┘    └─────────────────┘
-
-

Existing System Integration

-

Command-Line Interface Integration

-

Seamless CLI Compatibility:

-
# All existing commands continue to work unchanged
-./core/nulib/provisioning server create web-01 2xCPU-4 GB
-./core/nulib/provisioning taskserv install kubernetes
-./core/nulib/provisioning cluster create buildkit
-
-# New commands available alongside existing ones
-./src/core/nulib/provisioning server create web-01 2xCPU-4 GB --orchestrated
-nu workspace/tools/workspace.nu health --detailed
-
-

Path Resolution Integration:

-
# Automatic path resolution between systems
-use workspace/lib/path-resolver.nu
-
-# Resolves to workspace path if available, falls back to core
-let config_path = (path-resolver resolve_path "config" "user" --fallback-to-core)
-
-# Seamless extension discovery
-let provider_path = (path-resolver resolve_extension "providers" "upcloud")
-
-

Configuration System Bridge

-

Dual Configuration Support:

-
# Configuration bridge supports both ENV and TOML
-def get-config-value-bridge [key: string, default: string = ""] -> string {
-    # Try new TOML configuration first
-    let toml_value = try {
-        get-config-value $key
-    } catch { null }
-
-    if $toml_value != null {
-        return $toml_value
-    }
-
-    # Fall back to ENV variable (legacy support)
-    let env_key = ($key | str replace "." "_" | str upcase | $"PROVISIONING_($in)")
-    let env_value = ($env | get $env_key | default null)
-
-    if $env_value != null {
-        return $env_value
-    }
-
-    # Use default if provided
-    if $default != "" {
-        return $default
-    }
-
-    # Error with helpful migration message
-    error make {
-        msg: $"Configuration not found: ($key)",
-        help: $"Migrate from ($env_key) environment variable to ($key) in config file"
-    }
-}
-
-

Data Integration

-

Shared Data Access:

-
# Unified data access across old and new systems
-def get-server-info [server_name: string] -> record {
-    # Try new orchestrator data store first
-    let orchestrator_data = try {
-        get-orchestrator-server-data $server_name
-    } catch { null }
-
-    if $orchestrator_data != null {
-        return $orchestrator_data
-    }
-
-    # Fall back to legacy file-based storage
-    let legacy_data = try {
-        get-legacy-server-data $server_name
-    } catch { null }
-
-    if $legacy_data != null {
-        return ($legacy_data | migrate-to-new-format)
-    }
-
-    error make {msg: $"Server not found: ($server_name)"}
-}
-
-

Process Integration

-

Hybrid Process Management:

-
# Orchestrator-aware process management
-def create-server-integrated [
-    name: string,
-    plan: string,
-    --orchestrated: bool = false
-] -> record {
-    if $orchestrated and (check-orchestrator-available) {
-        # Use new orchestrator workflow
-        return (create-server-workflow $name $plan)
-    } else {
-        # Use legacy direct creation
-        return (create-server-direct $name $plan)
-    }
-}
-
-def check-orchestrator-available [] -> bool {
-    try {
-        http get "http://localhost:9090/health" | get status == "ok"
-    } catch {
-        false
-    }
-}
-
-

API Compatibility and Versioning

-

REST API Versioning

-

API Version Strategy:

-
    -
  • v1: Legacy compatibility API (existing functionality)
  • -
  • v2: Enhanced API with orchestrator features
  • -
  • v3: Full workflow and batch operation support
  • -
-

Version Header Support:

-
# API calls with version specification
-curl -H "API-Version: v1" http://localhost:9090/servers
-curl -H "API-Version: v2" http://localhost:9090/workflows/servers/create
-curl -H "API-Version: v3" http://localhost:9090/workflows/batch/submit
-
-

API Compatibility Layer

-

Backward Compatible Endpoints:

-
// Rust API compatibility layer
-#[derive(Debug, Serialize, Deserialize)]
-struct ApiRequest {
-    version: Option<String>,
-    #[serde(flatten)]
-    payload: serde_json::Value,
-}
-
-async fn handle_versioned_request(
-    headers: HeaderMap,
-    req: ApiRequest,
-) -> Result<ApiResponse, ApiError> {
-    let api_version = headers
-        .get("API-Version")
-        .and_then(|v| v.to_str().ok())
-        .unwrap_or("v1");
-
-    match api_version {
-        "v1" => handle_v1_request(req.payload).await,
-        "v2" => handle_v2_request(req.payload).await,
-        "v3" => handle_v3_request(req.payload).await,
-        _ => Err(ApiError::UnsupportedVersion(api_version.to_string())),
-    }
-}
-
-// V1 compatibility endpoint
-async fn handle_v1_request(payload: serde_json::Value) -> Result<ApiResponse, ApiError> {
-    // Transform request to legacy format
-    let legacy_request = transform_to_legacy_format(payload)?;
-
-    // Execute using legacy system
-    let result = execute_legacy_operation(legacy_request).await?;
-
-    // Transform response to v1 format
-    Ok(transform_to_v1_response(result))
-}
-

Schema Evolution

-

Backward Compatible Schema Changes:

-
# API schema with version support
-let ServerCreateRequest = {
-    # V1 fields (always supported)
-    name | string,
-    plan | string,
-    zone | string | default = "auto",
-
-    # V2 additions (optional for backward compatibility)
-    orchestrated | bool | default = false,
-    workflow_options | { } | optional,
-
-    # V3 additions
-    batch_options | { } | optional,
-    dependencies | array | default = [],
-
-    # Version constraints
-    api_version | string | default = "v1",
-} in
-ServerCreateRequest
-
-# Conditional validation based on API version
-let WorkflowOptions = {
-    wait_for_completion | bool | default = true,
-    timeout_seconds | number | default = 300,
-    retry_count | number | default = 3,
-} in
-WorkflowOptions
-
-

Client SDK Compatibility

-

Multi-Version Client Support:

-
# Nushell client with version support
-def "client create-server" [
-    name: string,
-    plan: string,
-    --api-version: string = "v1",
-    --orchestrated: bool = false
-] -> record {
-    let endpoint = match $api_version {
-        "v1" => "/servers",
-        "v2" => "/workflows/servers/create",
-        "v3" => "/workflows/batch/submit",
-        _ => (error make {msg: $"Unsupported API version: ($api_version)"})
-    }
-
-    let request_body = match $api_version {
-        "v1" => {name: $name, plan: $plan},
-        "v2" => {name: $name, plan: $plan, orchestrated: $orchestrated},
-        "v3" => {
-            operations: [{
-                id: "create_server",
-                type: "server_create",
-                config: {name: $name, plan: $plan}
-            }]
-        },
-        _ => (error make {msg: $"Unsupported API version: ($api_version)"})
-    }
-
-    http post $"http://localhost:9090($endpoint)" $request_body
-        --headers {
-            "Content-Type": "application/json",
-            "API-Version": $api_version
-        }
-}
-
-

Database Migration Strategies

-

Database Architecture Evolution

-

Migration Strategy:

-
Database Evolution Path
-┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
-│  File-based     │ → │   SQLite        │ → │   SurrealDB     │
-│  Storage        │    │   Migration     │    │   Full Schema   │
-│                 │    │                 │    │                 │
-│ - JSON files    │    │ - Structured    │    │ - Graph DB      │
-│ - Text logs     │    │ - Transactions  │    │ - Real-time     │
-│ - Simple state  │    │ - Backup/restore│    │ - Clustering    │
-└─────────────────┘    └─────────────────┘    └─────────────────┘
-
-

Migration Scripts

-

Automated Database Migration:

-
# Database migration orchestration
-def migrate-database [
-    --from: string = "filesystem",
-    --to: string = "surrealdb",
-    --backup-first: bool = true,
-    --verify: bool = true
-] -> record {
-    if $backup_first {
-        print "Creating backup before migration..."
-        let backup_result = (create-database-backup $from)
-        print $"Backup created: ($backup_result.path)"
-    }
-
-    print $"Migrating from ($from) to ($to)..."
-
-    match [$from, $to] {
-        ["filesystem", "sqlite"] => migrate_filesystem_to_sqlite,
-        ["filesystem", "surrealdb"] => migrate_filesystem_to_surrealdb,
-        ["sqlite", "surrealdb"] => migrate_sqlite_to_surrealdb,
-        _ => (error make {msg: $"Unsupported migration path: ($from) → ($to)"})
-    }
-
-    if $verify {
-        print "Verifying migration integrity..."
-        let verification = (verify-migration $from $to)
-        if not $verification.success {
-            error make {
-                msg: $"Migration verification failed: ($verification.errors)",
-                help: "Restore from backup and retry migration"
-            }
-        }
-    }
-
-    print $"Migration from ($from) to ($to) completed successfully"
-    {from: $from, to: $to, status: "completed", migrated_at: (date now)}
-}
-
-

File System to SurrealDB Migration:

-
def migrate_filesystem_to_surrealdb [] -> record {
-    # Initialize SurrealDB connection
-    let db = (connect-surrealdb)
-
-    # Migrate server data
-    let server_files = (ls data/servers/*.json)
-    let migrated_servers = []
-
-    for server_file in $server_files {
-        let server_data = (open $server_file.name | from json)
-
-        # Transform to new schema
-        let server_record = {
-            id: $server_data.id,
-            name: $server_data.name,
-            plan: $server_data.plan,
-            zone: ($server_data.zone? | default "unknown"),
-            status: $server_data.status,
-            ip_address: $server_data.ip_address?,
-            created_at: $server_data.created_at,
-            updated_at: (date now),
-            metadata: ($server_data.metadata? | default {}),
-            tags: ($server_data.tags? | default [])
-        }
-
-        # Insert into SurrealDB
-        let insert_result = try {
-            query-surrealdb $"CREATE servers:($server_record.id) CONTENT ($server_record | to json)"
-        } catch { |e|
-            print $"Warning: Failed to migrate server ($server_data.name): ($e.msg)"
-        }
-
-        $migrated_servers = ($migrated_servers | append $server_record.id)
-    }
-
-    # Migrate workflow data
-    migrate_workflows_to_surrealdb $db
-
-    # Migrate state data
-    migrate_state_to_surrealdb $db
-
-    {
-        migrated_servers: ($migrated_servers | length),
-        migrated_workflows: (migrate_workflows_to_surrealdb $db).count,
-        status: "completed"
-    }
-}
-
-

Data Integrity Verification

-

Migration Verification:

-
def verify-migration [from: string, to: string] -> record {
-    print "Verifying data integrity..."
-
-    let source_data = (read-source-data $from)
-    let target_data = (read-target-data $to)
-
-    let errors = []
-
-    # Verify record counts
-    if $source_data.servers.count != $target_data.servers.count {
-        $errors = ($errors | append "Server count mismatch")
-    }
-
-    # Verify key records
-    for server in $source_data.servers {
-        let target_server = ($target_data.servers | where id == $server.id | first)
-
-        if ($target_server | is-empty) {
-            $errors = ($errors | append $"Missing server: ($server.id)")
-        } else {
-            # Verify critical fields
-            if $target_server.name != $server.name {
-                $errors = ($errors | append $"Name mismatch for server ($server.id)")
-            }
-
-            if $target_server.status != $server.status {
-                $errors = ($errors | append $"Status mismatch for server ($server.id)")
-            }
-        }
-    }
-
-    {
-        success: ($errors | length) == 0,
-        errors: $errors,
-        verified_at: (date now)
-    }
-}
-
-

Deployment Considerations

-

Deployment Architecture

-

Hybrid Deployment Model:

-
Deployment Architecture
-┌─────────────────────────────────────────────────────────────────┐
-│                    Load Balancer / Reverse Proxy               │
-└─────────────────────┬───────────────────────────────────────────┘
-                      │
-    ┌─────────────────┼─────────────────┐
-    │                 │                 │
-┌───▼────┐      ┌─────▼─────┐      ┌───▼────┐
-│Legacy  │      │Orchestrator│      │New     │
-│System  │ ←→   │Bridge      │  ←→  │Systems │
-│        │      │            │      │        │
-│- CLI   │      │- API Gate  │      │- REST  │
-│- Files │      │- Compat    │      │- DB    │
-│- Logs  │      │- Monitor   │      │- Queue │
-└────────┘      └────────────┘      └────────┘
-
-

Deployment Strategies

-

Blue-Green Deployment:

-
# Blue-Green deployment with integration bridge
-# Phase 1: Deploy new system alongside existing (Green environment)
-cd src/tools
-make all
-make create-installers
-
-# Install new system without disrupting existing
-./packages/installers/install-provisioning-2.0.0.sh \
-    --install-path /opt/provisioning-v2 \
-    --no-replace-existing \
-    --enable-bridge-mode
-
-# Phase 2: Start orchestrator and validate integration
-/opt/provisioning-v2/bin/orchestrator start --bridge-mode --legacy-path /opt/provisioning-v1
-
-# Phase 3: Gradual traffic shift
-# Route 10% traffic to new system
-nginx-traffic-split --new-backend 10%
-
-# Validate metrics and gradually increase
-nginx-traffic-split --new-backend 50%
-nginx-traffic-split --new-backend 90%
-
-# Phase 4: Complete cutover
-nginx-traffic-split --new-backend 100%
-/opt/provisioning-v1/bin/orchestrator stop
-
-

Rolling Update:

-
def rolling-deployment [
-    --target-version: string,
-    --batch-size: int = 3,
-    --health-check-interval: duration = 30sec
-] -> record {
-    let nodes = (get-deployment-nodes)
-    let batches = ($nodes | group_by --chunk-size $batch_size)
-
-    let deployment_results = []
-
-    for batch in $batches {
-        print $"Deploying to batch: ($batch | get name | str join ', ')"
-
-        # Deploy to batch
-        for node in $batch {
-            deploy-to-node $node $target_version
-        }
-
-        # Wait for health checks
-        sleep $health_check_interval
-
-        # Verify batch health
-        let batch_health = ($batch | each { |node| check-node-health $node })
-        let healthy_nodes = ($batch_health | where healthy == true | length)
-
-        if $healthy_nodes != ($batch | length) {
-            # Rollback batch on failure
-            print $"Health check failed, rolling back batch"
-            for node in $batch {
-                rollback-node $node
-            }
-            error make {msg: "Rolling deployment failed at batch"}
-        }
-
-        print $"Batch deployed successfully"
-        $deployment_results = ($deployment_results | append {
-            batch: $batch,
-            status: "success",
-            deployed_at: (date now)
-        })
-    }
-
-    {
-        strategy: "rolling",
-        target_version: $target_version,
-        batches: ($deployment_results | length),
-        status: "completed",
-        completed_at: (date now)
-    }
-}
-
-

Configuration Deployment

-

Environment-Specific Deployment:

-
# Development deployment
-PROVISIONING_ENV=dev ./deploy.sh \
-    --config-source config.dev.toml \
-    --enable-debug \
-    --enable-hot-reload
-
-# Staging deployment
-PROVISIONING_ENV=staging ./deploy.sh \
-    --config-source config.staging.toml \
-    --enable-monitoring \
-    --backup-before-deploy
-
-# Production deployment
-PROVISIONING_ENV=prod ./deploy.sh \
-    --config-source config.prod.toml \
-    --zero-downtime \
-    --enable-all-monitoring \
-    --backup-before-deploy \
-    --health-check-timeout 5m
-
-

Container Integration

-

Docker Deployment with Bridge:

-
# Multi-stage Docker build supporting both systems
-FROM rust:1.70 as builder
-WORKDIR /app
-COPY . .
-RUN cargo build --release
-
-FROM ubuntu:22.04 as runtime
-WORKDIR /app
-
-# Install both legacy and new systems
-COPY --from=builder /app/target/release/orchestrator /app/bin/
-COPY legacy-provisioning/ /app/legacy/
-COPY config/ /app/config/
-
-# Bridge script for dual operation
-COPY bridge-start.sh /app/bin/
-
-ENV PROVISIONING_BRIDGE_MODE=true
-ENV PROVISIONING_LEGACY_PATH=/app/legacy
-ENV PROVISIONING_NEW_PATH=/app/bin
-
-EXPOSE 8080
-CMD ["/app/bin/bridge-start.sh"]
-
-

Kubernetes Integration:

-
# Kubernetes deployment with bridge sidecar
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: provisioning-system
-spec:
-  replicas: 3
-  template:
-    spec:
-      containers:
-      - name: orchestrator
-        image: provisioning-system:2.0.0
-        ports:
-        - containerPort: 8080
-        env:
-        - name: PROVISIONING_BRIDGE_MODE
-          value: "true"
-        volumeMounts:
-        - name: config
-          mountPath: /app/config
-        - name: legacy-data
-          mountPath: /app/legacy/data
-
-      - name: legacy-bridge
-        image: provisioning-legacy:1.0.0
-        env:
-        - name: BRIDGE_ORCHESTRATOR_URL
-          value: "http://localhost:9090"
-        volumeMounts:
-        - name: legacy-data
-          mountPath: /data
-
-      volumes:
-      - name: config
-        configMap:
-          name: provisioning-config
-      - name: legacy-data
-        persistentVolumeClaim:
-          claimName: provisioning-data
-
-

Monitoring and Observability

-

Integrated Monitoring Architecture

-

Monitoring Stack Integration:

-
Observability Architecture
-┌─────────────────────────────────────────────────────────────────┐
-│                    Monitoring Dashboard                         │
-│  ┌─────────────┐  ┌─────────────┐  ┌─────────────┐           │
-│  │   Grafana   │  │  Jaeger     │  │  AlertMgr   │           │
-│  └─────────────┘  └─────────────┘  └─────────────┘           │
-└─────────────┬───────────────┬───────────────┬─────────────────┘
-              │               │               │
-   ┌──────────▼──────────┐   │   ┌───────────▼───────────┐
-   │     Prometheus      │   │   │      Jaeger           │
-   │   (Metrics)         │   │   │    (Tracing)          │
-   └──────────┬──────────┘   │   └───────────┬───────────┘
-              │               │               │
-┌─────────────▼─────────────┐ │ ┌─────────────▼─────────────┐
-│        Legacy             │ │ │        New System         │
-│      Monitoring           │ │ │       Monitoring          │
-│                           │ │ │                           │
-│ - File-based logs        │ │ │ - Structured logs         │
-│ - Simple metrics         │ │ │ - Prometheus metrics      │
-│ - Basic health checks    │ │ │ - Distributed tracing     │
-└───────────────────────────┘ │ └───────────────────────────┘
-                              │
-                    ┌─────────▼─────────┐
-                    │   Bridge Monitor  │
-                    │                   │
-                    │ - Integration     │
-                    │ - Compatibility   │
-                    │ - Migration       │
-                    └───────────────────┘
-
-

Metrics Integration

-

Unified Metrics Collection:

-
# Metrics bridge for legacy and new systems
-def collect-system-metrics [] -> record {
-    let legacy_metrics = collect-legacy-metrics
-    let new_metrics = collect-new-metrics
-    let bridge_metrics = collect-bridge-metrics
-
-    {
-        timestamp: (date now),
-        legacy: $legacy_metrics,
-        new: $new_metrics,
-        bridge: $bridge_metrics,
-        integration: {
-            compatibility_rate: (calculate-compatibility-rate $bridge_metrics),
-            migration_progress: (calculate-migration-progress),
-            system_health: (assess-overall-health $legacy_metrics $new_metrics)
-        }
-    }
-}
-
-def collect-legacy-metrics [] -> record {
-    let log_files = (ls logs/*.log)
-    let process_stats = (get-process-stats "legacy-provisioning")
-
-    {
-        active_processes: $process_stats.count,
-        log_file_sizes: ($log_files | get size | math sum),
-        last_activity: (get-last-log-timestamp),
-        error_count: (count-log-errors "last 1h"),
-        performance: {
-            avg_response_time: (calculate-avg-response-time),
-            throughput: (calculate-throughput)
-        }
-    }
-}
-
-def collect-new-metrics [] -> record {
-    let orchestrator_stats = try {
-        http get "http://localhost:9090/metrics"
-    } catch {
-        {status: "unavailable"}
-    }
-
-    {
-        orchestrator: $orchestrator_stats,
-        workflow_stats: (get-workflow-metrics),
-        api_stats: (get-api-metrics),
-        database_stats: (get-database-metrics)
-    }
-}
-
-

Logging Integration

-

Unified Logging Strategy:

-
# Structured logging bridge
-def log-integrated [
-    level: string,
-    message: string,
-    --component: string = "bridge",
-    --legacy-compat: bool = true
-] {
-    let log_entry = {
-        timestamp: (date now | format date "%Y-%m-%d %H:%M:%S%.3f"),
-        level: $level,
-        component: $component,
-        message: $message,
-        system: "integrated",
-        correlation_id: (generate-correlation-id)
-    }
-
-    # Write to structured log (new system)
-    $log_entry | to json | save --append logs/integrated.jsonl
-
-    if $legacy_compat {
-        # Write to legacy log format
-        let legacy_entry = $"[($log_entry.timestamp)] [($level)] ($component): ($message)"
-        $legacy_entry | save --append logs/legacy.log
-    }
-
-    # Send to monitoring system
-    send-to-monitoring $log_entry
-}
-
-

Health Check Integration

-

Comprehensive Health Monitoring:

-
def health-check-integrated [] -> record {
-    let health_checks = [
-        {name: "legacy-system", check: (check-legacy-health)},
-        {name: "orchestrator", check: (check-orchestrator-health)},
-        {name: "database", check: (check-database-health)},
-        {name: "bridge-compatibility", check: (check-bridge-health)},
-        {name: "configuration", check: (check-config-health)}
-    ]
-
-    let results = ($health_checks | each { |check|
-        let result = try {
-            do $check.check
-        } catch { |e|
-            {status: "unhealthy", error: $e.msg}
-        }
-
-        {name: $check.name, result: $result}
-    })
-
-    let healthy_count = ($results | where result.status == "healthy" | length)
-    let total_count = ($results | length)
-
-    {
-        overall_status: (if $healthy_count == $total_count { "healthy" } else { "degraded" }),
-        healthy_services: $healthy_count,
-        total_services: $total_count,
-        services: $results,
-        checked_at: (date now)
-    }
-}
-
-

Legacy System Bridge

-

Bridge Architecture

-

Bridge Component Design:

-
# Legacy system bridge module
-export module bridge {
-    # Bridge state management
-    export def init-bridge [] -> record {
-        let bridge_config = get-config-section "bridge"
-
-        {
-            legacy_path: ($bridge_config.legacy_path? | default "/opt/provisioning-v1"),
-            new_path: ($bridge_config.new_path? | default "/opt/provisioning-v2"),
-            mode: ($bridge_config.mode? | default "compatibility"),
-            monitoring_enabled: ($bridge_config.monitoring? | default true),
-            initialized_at: (date now)
-        }
-    }
-
-    # Command translation layer
-    export def translate-command [
-        legacy_command: list<string>
-    ] -> list<string> {
-        match $legacy_command {
-            ["provisioning", "server", "create", $name, $plan, ...$args] => {
-                let new_args = ($args | each { |arg|
-                    match $arg {
-                        "--dry-run" => "--dry-run",
-                        "--wait" => "--wait",
-                        $zone if ($zone | str starts-with "--zone=") => $zone,
-                        _ => $arg
-                    }
-                })
-
-                ["provisioning", "server", "create", $name, $plan] ++ $new_args ++ ["--orchestrated"]
-            },
-            _ => $legacy_command  # Pass through unchanged
-        }
-    }
-
-    # Data format translation
-    export def translate-response [
-        legacy_response: record,
-        target_format: string = "v2"
-    ] -> record {
-        match $target_format {
-            "v2" => {
-                id: ($legacy_response.id? | default (generate-uuid)),
-                name: $legacy_response.name,
-                status: $legacy_response.status,
-                created_at: ($legacy_response.created_at? | default (date now)),
-                metadata: ($legacy_response | reject name status created_at),
-                version: "v2-compat"
-            },
-            _ => $legacy_response
-        }
-    }
-}
-
-

Bridge Operation Modes

-

Compatibility Mode:

-
# Full compatibility with legacy system
-def run-compatibility-mode [] {
-    print "Starting bridge in compatibility mode..."
-
-    # Intercept legacy commands
-    let legacy_commands = monitor-legacy-commands
-
-    for command in $legacy_commands {
-        let translated = (bridge translate-command $command)
-
-        try {
-            let result = (execute-new-system $translated)
-            let legacy_result = (bridge translate-response $result "v1")
-            respond-to-legacy $legacy_result
-        } catch { |e|
-            # Fall back to legacy system on error
-            let fallback_result = (execute-legacy-system $command)
-            respond-to-legacy $fallback_result
-        }
-    }
-}
-
-

Migration Mode:

-
# Gradual migration with traffic splitting
-def run-migration-mode [
-    --new-system-percentage: int = 50
-] {
-    print $"Starting bridge in migration mode (($new_system_percentage)% new system)"
-
-    let commands = monitor-all-commands
-
-    for command in $commands {
-        let route_to_new = ((random integer 1..100) <= $new_system_percentage)
-
-        if $route_to_new {
-            try {
-                execute-new-system $command
-            } catch {
-                # Fall back to legacy on failure
-                execute-legacy-system $command
-            }
-        } else {
-            execute-legacy-system $command
-        }
-    }
-}
-
-

Migration Pathways

-

Migration Phases

-

Phase 1: Parallel Deployment

-
    -
  • Deploy new system alongside existing
  • -
  • Enable bridge for compatibility
  • -
  • Begin data synchronization
  • -
  • Monitor integration health
  • -
-

Phase 2: Gradual Migration

-
    -
  • Route increasing traffic to new system
  • -
  • Migrate data in background
  • -
  • Validate consistency
  • -
  • Address integration issues
  • -
-

Phase 3: Full Migration

-
    -
  • Complete traffic cutover
  • -
  • Decommission legacy system
  • -
  • Clean up bridge components
  • -
  • Finalize data migration
  • -
-

Migration Automation

-

Automated Migration Orchestration:

-
def execute-migration-plan [
-    migration_plan: string,
-    --dry-run: bool = false,
-    --skip-backup: bool = false
-] -> record {
-    let plan = (open $migration_plan | from yaml)
-
-    if not $skip_backup {
-        create-pre-migration-backup
-    }
-
-    let migration_results = []
-
-    for phase in $plan.phases {
-        print $"Executing migration phase: ($phase.name)"
-
-        if $dry_run {
-            print $"[DRY RUN] Would execute phase: ($phase)"
-            continue
-        }
-
-        let phase_result = try {
-            execute-migration-phase $phase
-        } catch { |e|
-            print $"Migration phase failed: ($e.msg)"
-
-            if $phase.rollback_on_failure? | default false {
-                print "Rolling back migration phase..."
-                rollback-migration-phase $phase
-            }
-
-            error make {msg: $"Migration failed at phase ($phase.name): ($e.msg)"}
-        }
-
-        $migration_results = ($migration_results | append $phase_result)
-
-        # Wait between phases if specified
-        if "wait_seconds" in $phase {
-            sleep ($phase.wait_seconds * 1sec)
-        }
-    }
-
-    {
-        migration_plan: $migration_plan,
-        phases_completed: ($migration_results | length),
-        status: "completed",
-        completed_at: (date now),
-        results: $migration_results
-    }
-}
-
-

Migration Validation:

-
def validate-migration-readiness [] -> record {
-    let checks = [
-        {name: "backup-available", check: (check-backup-exists)},
-        {name: "new-system-healthy", check: (check-new-system-health)},
-        {name: "database-accessible", check: (check-database-connectivity)},
-        {name: "configuration-valid", check: (validate-migration-config)},
-        {name: "resources-available", check: (check-system-resources)},
-        {name: "network-connectivity", check: (check-network-health)}
-    ]
-
-    let results = ($checks | each { |check|
-        {
-            name: $check.name,
-            result: (do $check.check),
-            timestamp: (date now)
-        }
-    })
-
-    let failed_checks = ($results | where result.status != "ready")
-
-    {
-        ready_for_migration: ($failed_checks | length) == 0,
-        checks: $results,
-        failed_checks: $failed_checks,
-        validated_at: (date now)
-    }
-}
-
-

Troubleshooting Integration Issues

-

Common Integration Problems

-

API Compatibility Issues

-

Problem: Version mismatch between client and server

-
# Diagnosis
-curl -H "API-Version: v1" http://localhost:9090/health
-curl -H "API-Version: v2" http://localhost:9090/health
-
-# Solution: Check supported versions
-curl http://localhost:9090/api/versions
-
-# Update client API version
-export PROVISIONING_API_VERSION=v2
-
-

Configuration Bridge Issues

-

Problem: Configuration not found in either system

-
# Diagnosis
-def diagnose-config-issue [key: string] -> record {
-    let toml_result = try {
-        get-config-value $key
-    } catch { |e| {status: "failed", error: $e.msg} }
-
-    let env_key = ($key | str replace "." "_" | str upcase | $"PROVISIONING_($in)")
-    let env_result = try {
-        $env | get $env_key
-    } catch { |e| {status: "failed", error: $e.msg} }
-
-    {
-        key: $key,
-        toml_config: $toml_result,
-        env_config: $env_result,
-        migration_needed: ($toml_result.status == "failed" and $env_result.status != "failed")
-    }
-}
-
-# Solution: Migrate configuration
-def migrate-single-config [key: string] {
-    let diagnosis = (diagnose-config-issue $key)
-
-    if $diagnosis.migration_needed {
-        let env_value = $diagnosis.env_config
-        set-config-value $key $env_value
-        print $"Migrated ($key) from environment variable"
-    }
-}
-
-

Database Integration Issues

-

Problem: Data inconsistency between systems

-
# Diagnosis and repair
-def repair-data-consistency [] -> record {
-    let legacy_data = (read-legacy-data)
-    let new_data = (read-new-data)
-
-    let inconsistencies = []
-
-    # Check server records
-    for server in $legacy_data.servers {
-        let new_server = ($new_data.servers | where id == $server.id | first)
-
-        if ($new_server | is-empty) {
-            print $"Missing server in new system: ($server.id)"
-            create-server-record $server
-            $inconsistencies = ($inconsistencies | append {type: "missing", id: $server.id})
-        } else if $new_server != $server {
-            print $"Inconsistent server data: ($server.id)"
-            update-server-record $server
-            $inconsistencies = ($inconsistencies | append {type: "inconsistent", id: $server.id})
-        }
-    }
-
-    {
-        inconsistencies_found: ($inconsistencies | length),
-        repairs_applied: ($inconsistencies | length),
-        repaired_at: (date now)
-    }
-}
-
-

Debug Tools

-

Integration Debug Mode:

-
# Enable comprehensive debugging
-export PROVISIONING_DEBUG=true
-export PROVISIONING_LOG_LEVEL=debug
-export PROVISIONING_BRIDGE_DEBUG=true
-export PROVISIONING_INTEGRATION_TRACE=true
-
-# Run with integration debugging
-provisioning server create test-server 2xCPU-4 GB --debug-integration
-
-

Health Check Debugging:

-
def debug-integration-health [] -> record {
-    print "=== Integration Health Debug ==="
-
-    # Check all integration points
-    let legacy_health = try {
-        check-legacy-system
-    } catch { |e| {status: "error", error: $e.msg} }
-
-    let orchestrator_health = try {
-        http get "http://localhost:9090/health"
-    } catch { |e| {status: "error", error: $e.msg} }
-
-    let bridge_health = try {
-        check-bridge-status
-    } catch { |e| {status: "error", error: $e.msg} }
-
-    let config_health = try {
-        validate-config-integration
-    } catch { |e| {status: "error", error: $e.msg} }
-
-    print $"Legacy System: ($legacy_health.status)"
-    print $"Orchestrator: ($orchestrator_health.status)"
-    print $"Bridge: ($bridge_health.status)"
-    print $"Configuration: ($config_health.status)"
-
-    {
-        legacy: $legacy_health,
-        orchestrator: $orchestrator_health,
-        bridge: $bridge_health,
-        configuration: $config_health,
-        debug_timestamp: (date now)
-    }
-}
-
-

This integration guide provides a comprehensive framework for seamlessly integrating new development components with existing production systems while -maintaining reliability, compatibility, and clear migration pathways.

- -
- - -
-
- - - -
- - - - - - - - - - - - - - - - - - -
- - diff --git a/docs/book/development/project-structure.html b/docs/book/development/project-structure.html deleted file mode 100644 index a20ec22..0000000 --- a/docs/book/development/project-structure.html +++ /dev/null @@ -1,558 +0,0 @@ - - - - - - Project Structure - Provisioning Platform Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-

Keyboard shortcuts

-
-

Press or to navigate between chapters

-

Press S or / to search in the book

-

Press ? to show this help

-

Press Esc to hide this help

-
-
-
-
- - - - - - - - - - - - - -
- -
- - - - - - - - -
-
-

Project Structure Guide

-

This document provides a comprehensive overview of the provisioning project’s structure after the major reorganization, explaining both the new -development-focused organization and the preserved existing functionality.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. New Structure vs Legacy
  4. -
  5. Core Directories
  6. -
  7. Development Workspace
  8. -
  9. File Naming Conventions
  10. -
  11. Navigation Guide
  12. -
  13. Migration Path
  14. -
-

Overview

-

The provisioning project has been restructured to support a dual-organization approach:

-
    -
  • src/: Development-focused structure with build tools, distribution system, and core components
  • -
  • Legacy directories: Preserved in their original locations for backward compatibility
  • -
  • workspace/: Development workspace with tools and runtime management
  • -
-

This reorganization enables efficient development workflows while maintaining full backward compatibility with existing deployments.

-

New Structure vs Legacy

-

New Development Structure (/src/)

-
src/
-├── config/                      # System configuration
-├── control-center/              # Control center application
-├── control-center-ui/           # Web UI for control center
-├── core/                        # Core system libraries
-├── docs/                        # Documentation (new)
-├── extensions/                  # Extension framework
-├── generators/                  # Code generation tools
-├── schemas/                     # Nickel configuration schemas (migrated from kcl/)
-├── orchestrator/               # Hybrid Rust/Nushell orchestrator
-├── platform/                   # Platform-specific code
-├── provisioning/               # Main provisioning
-├── templates/                   # Template files
-├── tools/                      # Build and development tools
-└── utils/                      # Utility scripts
-
-

Legacy Structure (Preserved)

-
repo-cnz/
-├── cluster/                     # Cluster configurations (preserved)
-├── core/                        # Core system (preserved)
-├── generate/                    # Generation scripts (preserved)
-├── schemas/                     # Nickel schemas (migrated from kcl/)
-├── klab/                       # Development lab (preserved)
-├── nushell-plugins/            # Plugin development (preserved)
-├── providers/                  # Cloud providers (preserved)
-├── taskservs/                  # Task services (preserved)
-└── templates/                  # Template files (preserved)
-
-

Development Workspace (/workspace/)

-
workspace/
-├── config/                     # Development configuration
-├── extensions/                 # Extension development
-├── infra/                      # Development infrastructure
-├── lib/                        # Workspace libraries
-├── runtime/                    # Runtime data
-└── tools/                      # Workspace management tools
-
-

Core Directories

-

/src/core/ - Core Development Libraries

-

Purpose: Development-focused core libraries and entry points

-

Key Files:

-
    -
  • nulib/provisioning - Main CLI entry point (symlinks to legacy location)
  • -
  • nulib/lib_provisioning/ - Core provisioning libraries
  • -
  • nulib/workflows/ - Workflow management (orchestrator integration)
  • -
-

Relationship to Legacy: Preserves original core/ functionality while adding development enhancements

-

/src/tools/ - Build and Development Tools

-

Purpose: Complete build system for the provisioning project

-

Key Components:

-
tools/
-├── build/                      # Build tools
-│   ├── compile-platform.nu     # Platform-specific compilation
-│   ├── bundle-core.nu          # Core library bundling
-│   ├── validate-nickel.nu      # Nickel schema validation
-│   ├── clean-build.nu          # Build cleanup
-│   └── test-distribution.nu    # Distribution testing
-├── distribution/               # Distribution tools
-│   ├── generate-distribution.nu # Main distribution generator
-│   ├── prepare-platform-dist.nu # Platform-specific distribution
-│   ├── prepare-core-dist.nu    # Core distribution
-│   ├── create-installer.nu     # Installer creation
-│   └── generate-docs.nu        # Documentation generation
-├── package/                    # Packaging tools
-│   ├── package-binaries.nu     # Binary packaging
-│   ├── build-containers.nu     # Container image building
-│   ├── create-tarball.nu       # Archive creation
-│   └── validate-package.nu     # Package validation
-├── release/                    # Release management
-│   ├── create-release.nu       # Release creation
-│   ├── upload-artifacts.nu     # Artifact upload
-│   ├── rollback-release.nu     # Release rollback
-│   ├── notify-users.nu         # Release notifications
-│   └── update-registry.nu      # Package registry updates
-└── Makefile                    # Main build system (40+ targets)
-
-

/src/orchestrator/ - Hybrid Orchestrator

-

Purpose: Rust/Nushell hybrid orchestrator for solving deep call stack limitations

-

Key Components:

-
    -
  • src/ - Rust orchestrator implementation
  • -
  • scripts/ - Orchestrator management scripts
  • -
  • data/ - File-based task queue and persistence
  • -
-

Integration: Provides REST API and workflow management while preserving all Nushell business logic

-

/src/provisioning/ - Enhanced Provisioning

-

Purpose: Enhanced version of the main provisioning with additional features

-

Key Features:

-
    -
  • Batch workflow system (v3.1.0)
  • -
  • Provider-agnostic design
  • -
  • Configuration-driven architecture (v2.0.0)
  • -
-

/workspace/ - Development Workspace

-

Purpose: Complete development environment with tools and runtime management

-

Key Components:

-
    -
  • tools/workspace.nu - Unified workspace management interface
  • -
  • lib/path-resolver.nu - Smart path resolution system
  • -
  • config/ - Environment-specific development configurations
  • -
  • extensions/ - Extension development templates and examples
  • -
  • infra/ - Development infrastructure examples
  • -
  • runtime/ - Isolated runtime data per user
  • -
-

Development Workspace

-

Workspace Management

-

The workspace provides a sophisticated development environment:

-

Initialization:

-
cd workspace/tools
-nu workspace.nu init --user-name developer --infra-name my-infra
-
-

Health Monitoring:

-
nu workspace.nu health --detailed --fix-issues
-
-

Path Resolution:

-
use lib/path-resolver.nu
-let config = (path-resolver resolve_config "user" --workspace-user "john")
-
-

Extension Development

-

The workspace provides templates for developing:

-
    -
  • Providers: Custom cloud provider implementations
  • -
  • Task Services: Infrastructure service components
  • -
  • Clusters: Complete deployment solutions
  • -
-

Templates are available in workspace/extensions/{type}/template/

-

Configuration Hierarchy

-

The workspace implements a sophisticated configuration cascade:

-
    -
  1. Workspace user configuration (workspace/config/{user}.toml)
  2. -
  3. Environment-specific defaults (workspace/config/{env}-defaults.toml)
  4. -
  5. Workspace defaults (workspace/config/dev-defaults.toml)
  6. -
  7. Core system defaults (config.defaults.toml)
  8. -
-

File Naming Conventions

-

Nushell Files (.nu)

-
    -
  • Commands: kebab-case - create-server.nu, validate-config.nu
  • -
  • Modules: snake_case - lib_provisioning, path_resolver
  • -
  • Scripts: kebab-case - workspace-health.nu, runtime-manager.nu
  • -
-

Configuration Files

-
    -
  • TOML: kebab-case.toml - config-defaults.toml, user-settings.toml
  • -
  • Environment: {env}-defaults.toml - dev-defaults.toml, prod-defaults.toml
  • -
  • Examples: *.toml.example - local-overrides.toml.example
  • -
-

Nickel Files (.ncl)

-
    -
  • Schemas: kebab-case.ncl - server-config.ncl, workflow-schema.ncl
  • -
  • Configuration: manifest.toml - Package metadata
  • -
  • Structure: Organized in schemas/ directories per extension
  • -
-

Build and Distribution

-
    -
  • Scripts: kebab-case.nu - compile-platform.nu, generate-distribution.nu
  • -
  • Makefiles: Makefile - Standard naming
  • -
  • Archives: {project}-{version}-{platform}-{variant}.{ext}
  • -
- -

Finding Components

-

Core System Entry Points:

-
# Main CLI (development version)
-/src/core/nulib/provisioning
-
-# Legacy CLI (production version)
-/core/nulib/provisioning
-
-# Workspace management
-/workspace/tools/workspace.nu
-
-

Build System:

-
# Main build system
-cd /src/tools && make help
-
-# Quick development build
-make dev-build
-
-# Complete distribution
-make all
-
-

Configuration Files:

-
# System defaults
-/config.defaults.toml
-
-# User configuration (workspace)
-/workspace/config/{user}.toml
-
-# Environment-specific
-/workspace/config/{env}-defaults.toml
-
-

Extension Development:

-
# Provider template
-/workspace/extensions/providers/template/
-
-# Task service template
-/workspace/extensions/taskservs/template/
-
-# Cluster template
-/workspace/extensions/clusters/template/
-
-

Common Workflows

-

1. Development Setup:

-
# Initialize workspace
-cd workspace/tools
-nu workspace.nu init --user-name $USER
-
-# Check health
-nu workspace.nu health --detailed
-
-

2. Building Distribution:

-
# Complete build
-cd src/tools
-make all
-
-# Platform-specific build
-make linux
-make macos
-make windows
-
-

3. Extension Development:

-
# Create new provider
-cp -r workspace/extensions/providers/template workspace/extensions/providers/my-provider
-
-# Test extension
-nu workspace/extensions/providers/my-provider/nulib/provider.nu test
-
-

Legacy Compatibility

-

Existing Commands Still Work:

-
# All existing commands preserved
-./core/nulib/provisioning server create
-./core/nulib/provisioning taskserv install kubernetes
-./core/nulib/provisioning cluster create buildkit
-
-

Configuration Migration:

-
    -
  • ENV variables still supported as fallbacks
  • -
  • New configuration system provides better defaults
  • -
  • Migration tools available in src/tools/migration/
  • -
-

Migration Path

-

For Users

-

No Changes Required:

-
    -
  • All existing commands continue to work
  • -
  • Configuration files remain compatible
  • -
  • Existing infrastructure deployments unaffected
  • -
-

Optional Enhancements:

-
    -
  • Migrate to new configuration system for better defaults
  • -
  • Use workspace for development environments
  • -
  • Leverage new build system for custom distributions
  • -
-

For Developers

-

Development Environment:

-
    -
  1. Initialize development workspace: nu workspace/tools/workspace.nu init
  2. -
  3. Use new build system: cd src/tools && make dev-build
  4. -
  5. Leverage extension templates for custom development
  6. -
-

Build System:

-
    -
  1. Use new Makefile for comprehensive build management
  2. -
  3. Leverage distribution tools for packaging
  4. -
  5. Use release management for version control
  6. -
-

Orchestrator Integration:

-
    -
  1. Start orchestrator for workflow management: cd src/orchestrator && ./scripts/start-orchestrator.nu
  2. -
  3. Use workflow APIs for complex operations
  4. -
  5. Leverage batch operations for efficiency
  6. -
-

Migration Tools

-

Available Migration Scripts:

-
    -
  • src/tools/migration/config-migration.nu - Configuration migration
  • -
  • src/tools/migration/workspace-setup.nu - Workspace initialization
  • -
  • src/tools/migration/path-resolver.nu - Path resolution migration
  • -
-

Validation Tools:

-
    -
  • src/tools/validation/system-health.nu - System health validation
  • -
  • src/tools/validation/compatibility-check.nu - Compatibility verification
  • -
  • src/tools/validation/migration-status.nu - Migration status tracking
  • -
-

Architecture Benefits

-

Development Efficiency

-
    -
  • Build System: Comprehensive 40+ target Makefile system
  • -
  • Workspace Isolation: Per-user development environments
  • -
  • Extension Framework: Template-based extension development
  • -
-

Production Reliability

-
    -
  • Backward Compatibility: All existing functionality preserved
  • -
  • Configuration Migration: Gradual migration from ENV to config-driven
  • -
  • Orchestrator Architecture: Hybrid Rust/Nushell for performance and flexibility
  • -
  • Workflow Management: Batch operations with rollback capabilities
  • -
-

Maintenance Benefits

-
    -
  • Clean Separation: Development tools separate from production code
  • -
  • Organized Structure: Logical grouping of related functionality
  • -
  • Documentation: Comprehensive documentation and examples
  • -
  • Testing Framework: Built-in testing and validation tools
  • -
-

This structure represents a significant evolution in the project’s organization while maintaining complete backward compatibility and providing -powerful new development capabilities.

- -
- - -
-
- - - -
- - - - - - - - - - - - - - - - - - -
- - diff --git a/docs/book/development/workflow.html b/docs/book/development/workflow.html deleted file mode 100644 index d5f21c4..0000000 --- a/docs/book/development/workflow.html +++ /dev/null @@ -1,1099 +0,0 @@ - - - - - - Workflow - Provisioning Platform Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-

Keyboard shortcuts

-
-

Press or to navigate between chapters

-

Press S or / to search in the book

-

Press ? to show this help

-

Press Esc to hide this help

-
-
-
-
- - - - - - - - - - - - - -
- -
- - - - - - - - -
-
-

Development Workflow Guide

-

This document outlines the recommended development workflows, coding practices, testing strategies, and debugging techniques for the provisioning -project.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Development Setup
  4. -
  5. Daily Development Workflow
  6. -
  7. Code Organization
  8. -
  9. Testing Strategies
  10. -
  11. Debugging Techniques
  12. -
  13. Integration Workflows
  14. -
  15. Collaboration Guidelines
  16. -
  17. Quality Assurance
  18. -
  19. Best Practices
  20. -
-

Overview

-

The provisioning project employs a multi-language, multi-component architecture requiring specific development workflows to maintain consistency, -quality, and efficiency.

-

Key Technologies:

-
    -
  • Nushell: Primary scripting and automation language
  • -
  • Rust: High-performance system components
  • -
  • KCL: Configuration language and schemas
  • -
  • TOML: Configuration files
  • -
  • Jinja2: Template engine
  • -
-

Development Principles:

-
    -
  • Configuration-Driven: Never hardcode, always configure
  • -
  • Hybrid Architecture: Rust for performance, Nushell for flexibility
  • -
  • Test-First: Comprehensive testing at all levels
  • -
  • Documentation-Driven: Code and APIs are self-documenting
  • -
-

Development Setup

-

Initial Environment Setup

-

1. Clone and Navigate:

-
# Clone repository
-git clone https://github.com/company/provisioning-system.git
-cd provisioning-system
-
-# Navigate to workspace
-cd workspace/tools
-
-

2. Initialize Workspace:

-
# Initialize development workspace
-nu workspace.nu init --user-name $USER --infra-name dev-env
-
-# Check workspace health
-nu workspace.nu health --detailed --fix-issues
-
-

3. Configure Development Environment:

-
# Create user configuration
-cp workspace/config/local-overrides.toml.example workspace/config/$USER.toml
-
-# Edit configuration for development
-$EDITOR workspace/config/$USER.toml
-
-

4. Set Up Build System:

-
# Navigate to build tools
-cd src/tools
-
-# Check build prerequisites
-make info
-
-# Perform initial build
-make dev-build
-
-

Tool Installation

-

Required Tools:

-
# Install Nushell
-cargo install nu
-
-# Install Nickel
-cargo install nickel
-
-# Install additional tools
-cargo install cross          # Cross-compilation
-cargo install cargo-audit    # Security auditing
-cargo install cargo-watch    # File watching
-
-

Optional Development Tools:

-
# Install development enhancers
-cargo install nu_plugin_tera    # Template plugin
-cargo install sops              # Secrets management
-brew install k9s                # Kubernetes management
-
-

IDE Configuration

-

VS Code Setup (.vscode/settings.json):

-
{
-  "files.associations": {
-    "*.nu": "shellscript",
-    "*.ncl": "nickel",
-    "*.toml": "toml"
-  },
-  "nushell.shellPath": "/usr/local/bin/nu",
-  "rust-analyzer.cargo.features": "all",
-  "editor.formatOnSave": true,
-  "editor.rulers": [100],
-  "files.trimTrailingWhitespace": true
-}
-
-

Recommended Extensions:

-
    -
  • Nushell Language Support
  • -
  • Rust Analyzer
  • -
  • Nickel Language Support
  • -
  • TOML Language Support
  • -
  • Better TOML
  • -
-

Daily Development Workflow

-

Morning Routine

-

1. Sync and Update:

-
# Sync with upstream
-git pull origin main
-
-# Update workspace
-cd workspace/tools
-nu workspace.nu health --fix-issues
-
-# Check for updates
-nu workspace.nu status --detailed
-
-

2. Review Current State:

-
# Check current infrastructure
-provisioning show servers
-provisioning show settings
-
-# Review workspace status
-nu workspace.nu status
-
-

Development Cycle

-

1. Feature Development:

-
# Create feature branch
-git checkout -b feature/new-provider-support
-
-# Start development environment
-cd workspace/tools
-nu workspace.nu init --workspace-type development
-
-# Begin development
-$EDITOR workspace/extensions/providers/new-provider/nulib/provider.nu
-
-

2. Incremental Testing:

-
# Test syntax during development
-nu --check workspace/extensions/providers/new-provider/nulib/provider.nu
-
-# Run unit tests
-nu workspace/extensions/providers/new-provider/tests/unit/basic-test.nu
-
-# Integration testing
-nu workspace.nu tools test-extension providers/new-provider
-
-

3. Build and Validate:

-
# Quick development build
-cd src/tools
-make dev-build
-
-# Validate changes
-make validate-all
-
-# Test distribution
-make test-dist
-
-

Testing During Development

-

Unit Testing:

-
# Add test examples to functions
-def create-server [name: string] -> record {
-    # @test: "test-server" -> {name: "test-server", status: "created"}
-    # Implementation here
-}
-
-

Integration Testing:

-
# Test with real infrastructure
-nu workspace/extensions/providers/new-provider/nulib/provider.nu \
-    create-server test-server --dry-run
-
-# Test with workspace isolation
-PROVISIONING_WORKSPACE_USER=$USER provisioning server create test-server --check
-
-

End-of-Day Routine

-

1. Commit Progress:

-
# Stage changes
-git add .
-
-# Commit with descriptive message
-git commit -m "feat(provider): add new cloud provider support
-
-- Implement basic server creation
-- Add configuration schema
-- Include unit tests
-- Update documentation"
-
-# Push to feature branch
-git push origin feature/new-provider-support
-
-

2. Workspace Maintenance:

-
# Clean up development data
-nu workspace.nu cleanup --type cache --age 1d
-
-# Backup current state
-nu workspace.nu backup --auto-name --components config,extensions
-
-# Check workspace health
-nu workspace.nu health
-
-

Code Organization

-

Nushell Code Structure

-

File Organization:

-
Extension Structure:
-├── nulib/
-│   ├── main.nu              # Main entry point
-│   ├── core/                # Core functionality
-│   │   ├── api.nu           # API interactions
-│   │   ├── config.nu        # Configuration handling
-│   │   └── utils.nu         # Utility functions
-│   ├── commands/            # User commands
-│   │   ├── create.nu        # Create operations
-│   │   ├── delete.nu        # Delete operations
-│   │   └── list.nu          # List operations
-│   └── tests/               # Test files
-│       ├── unit/            # Unit tests
-│       └── integration/     # Integration tests
-└── templates/               # Template files
-    ├── config.j2            # Configuration templates
-    └── manifest.j2          # Manifest templates
-
-

Function Naming Conventions:

-
# Use kebab-case for commands
-def create-server [name: string] -> record { ... }
-def validate-config [config: record] -> bool { ... }
-
-# Use snake_case for internal functions
-def get_api_client [] -> record { ... }
-def parse_config_file [path: string] -> record { ... }
-
-# Use descriptive prefixes
-def check-server-status [server: string] -> string { ... }
-def get-server-info [server: string] -> record { ... }
-def list-available-zones [] -> list<string> { ... }
-
-

Error Handling Pattern:

-
def create-server [
-    name: string
-    --dry-run: bool = false
-] -> record {
-    # 1. Validate inputs
-    if ($name | str length) == 0 {
-        error make {
-            msg: "Server name cannot be empty"
-            label: {
-                text: "empty name provided"
-                span: (metadata $name).span
-            }
-        }
-    }
-
-    # 2. Check prerequisites
-    let config = try {
-        get-provider-config
-    } catch {
-        error make {msg: "Failed to load provider configuration"}
-    }
-
-    # 3. Perform operation
-    if $dry_run {
-        return {action: "create", server: $name, status: "dry-run"}
-    }
-
-    # 4. Return result
-    {server: $name, status: "created", id: (generate-id)}
-}
-
-

Rust Code Structure

-

Project Organization:

-
src/
-├── lib.rs                   # Library root
-├── main.rs                  # Binary entry point
-├── config/                  # Configuration handling
-│   ├── mod.rs
-│   ├── loader.rs            # Config loading
-│   └── validation.rs        # Config validation
-├── api/                     # HTTP API
-│   ├── mod.rs
-│   ├── handlers.rs          # Request handlers
-│   └── middleware.rs        # Middleware components
-└── orchestrator/            # Orchestration logic
-    ├── mod.rs
-    ├── workflow.rs          # Workflow management
-    └── task_queue.rs        # Task queue management
-
-

Error Handling:

-
use anyhow::{Context, Result};
-use thiserror::Error;
-
-#[derive(Error, Debug)]
-pub enum ProvisioningError {
-    #[error("Configuration error: {message}")]
-    Config { message: String },
-
-    #[error("Network error: {source}")]
-    Network {
-        #[from]
-        source: reqwest::Error,
-    },
-
-    #[error("Validation failed: {field}")]
-    Validation { field: String },
-}
-
-pub fn create_server(name: &str) -> Result<ServerInfo> {
-    let config = load_config()
-        .context("Failed to load configuration")?;
-
-    validate_server_name(name)
-        .context("Server name validation failed")?;
-
-    let server = provision_server(name, &config)
-        .context("Failed to provision server")?;
-
-    Ok(server)
-}
-

Nickel Schema Organization

-

Schema Structure:

-
# Base schema definitions
-let ServerConfig = {
-    name | string,
-    plan | string,
-    zone | string,
-    tags | { } | default = {},
-} in
-ServerConfig
-
-# Provider-specific extensions
-let UpCloudServerConfig = {
-    template | string | default = "Ubuntu Server 22.04 LTS (Jammy Jellyfish)",
-    storage | number | default = 25,
-} in
-UpCloudServerConfig
-
-# Composition schemas
-let InfrastructureConfig = {
-    servers | array,
-    networks | array | default = [],
-    load_balancers | array | default = [],
-} in
-InfrastructureConfig
-
-

Testing Strategies

-

Test-Driven Development

-

TDD Workflow:

-
    -
  1. Write Test First: Define expected behavior
  2. -
  3. Run Test (Fail): Confirm test fails as expected
  4. -
  5. Write Code: Implement minimal code to pass
  6. -
  7. Run Test (Pass): Confirm test now passes
  8. -
  9. Refactor: Improve code while keeping tests green
  10. -
-

Nushell Testing

-

Unit Test Pattern:

-
# Function with embedded test
-def validate-server-name [name: string] -> bool {
-    # @test: "valid-name" -> true
-    # @test: "" -> false
-    # @test: "name-with-spaces" -> false
-
-    if ($name | str length) == 0 {
-        return false
-    }
-
-    if ($name | str contains " ") {
-        return false
-    }
-
-    true
-}
-
-# Separate test file
-# tests/unit/server-validation-test.nu
-def test_validate_server_name [] {
-    # Valid cases
-    assert (validate-server-name "valid-name")
-    assert (validate-server-name "server123")
-
-    # Invalid cases
-    assert not (validate-server-name "")
-    assert not (validate-server-name "name with spaces")
-    assert not (validate-server-name "name@with!special")
-
-    print "✅ validate-server-name tests passed"
-}
-
-

Integration Test Pattern:

-
# tests/integration/server-lifecycle-test.nu
-def test_complete_server_lifecycle [] {
-    # Setup
-    let test_server = "test-server-" + (date now | format date "%Y%m%d%H%M%S")
-
-    try {
-        # Test creation
-        let create_result = (create-server $test_server --dry-run)
-        assert ($create_result.status == "dry-run")
-
-        # Test validation
-        let validate_result = (validate-server-config $test_server)
-        assert $validate_result
-
-        print $"✅ Server lifecycle test passed for ($test_server)"
-    } catch { |e|
-        print $"❌ Server lifecycle test failed: ($e.msg)"
-        exit 1
-    }
-}
-
-

Rust Testing

-

Unit Testing:

-
#[cfg(test)]
-mod tests {
-    use super::*;
-    use tokio_test;
-
-    #[test]
-    fn test_validate_server_name() {
-        assert!(validate_server_name("valid-name"));
-        assert!(validate_server_name("server123"));
-
-        assert!(!validate_server_name(""));
-        assert!(!validate_server_name("name with spaces"));
-        assert!(!validate_server_name("name@special"));
-    }
-
-    #[tokio::test]
-    async fn test_server_creation() {
-        let config = test_config();
-        let result = create_server("test-server", &config).await;
-
-        assert!(result.is_ok());
-        let server = result.unwrap();
-        assert_eq!(server.name, "test-server");
-        assert_eq!(server.status, "created");
-    }
-}
-

Integration Testing:

-
#[cfg(test)]
-mod integration_tests {
-    use super::*;
-    use testcontainers::*;
-
-    #[tokio::test]
-    async fn test_full_workflow() {
-        // Setup test environment
-        let docker = clients::Cli::default();
-        let postgres = docker.run(images::postgres::Postgres::default());
-
-        let config = TestConfig {
-            database_url: format!("postgresql://localhost:{}/test",
-                                 postgres.get_host_port_ipv4(5432))
-        };
-
-        // Test complete workflow
-        let workflow = create_workflow(&config).await.unwrap();
-        let result = execute_workflow(workflow).await.unwrap();
-
-        assert_eq!(result.status, WorkflowStatus::Completed);
-    }
-}
-

Nickel Testing

-

Schema Validation Testing:

-
# Test Nickel schemas
-nickel check schemas/
-
-# Validate specific schemas
-nickel typecheck schemas/server.ncl
-
-# Test with examples
-nickel eval schemas/server.ncl
-
-

Test Automation

-

Continuous Testing:

-
# Watch for changes and run tests
-cargo watch -x test -x check
-
-# Watch Nushell files
-find . -name "*.nu" | entr -r nu tests/run-all-tests.nu
-
-# Automated testing in workspace
-nu workspace.nu tools test-all --watch
-
-

Debugging Techniques

-

Debug Configuration

-

Enable Debug Mode:

-
# Environment variables
-export PROVISIONING_DEBUG=true
-export PROVISIONING_LOG_LEVEL=debug
-export RUST_LOG=debug
-export RUST_BACKTRACE=1
-
-# Workspace debug
-export PROVISIONING_WORKSPACE_USER=$USER
-
-

Nushell Debugging

-

Debug Techniques:

-
# Debug prints
-def debug-server-creation [name: string] {
-    print $"🐛 Creating server: ($name)"
-
-    let config = get-provider-config
-    print $"🐛 Config loaded: ($config | to json)"
-
-    let result = try {
-        create-server-api $name $config
-    } catch { |e|
-        print $"🐛 API call failed: ($e.msg)"
-        $e
-    }
-
-    print $"🐛 Result: ($result | to json)"
-    $result
-}
-
-# Conditional debugging
-def create-server [name: string] {
-    if $env.PROVISIONING_DEBUG? == "true" {
-        print $"Debug: Creating server ($name)"
-    }
-
-    # Implementation
-}
-
-# Interactive debugging
-def debug-interactive [] {
-    print "🐛 Entering debug mode..."
-    print "Available commands: $env.PATH"
-    print "Current config: " (get-config | to json)
-
-    # Drop into interactive shell
-    nu --interactive
-}
-
-

Error Investigation:

-
# Comprehensive error handling
-def safe-server-creation [name: string] {
-    try {
-        create-server $name
-    } catch { |e|
-        # Log error details
-        {
-            timestamp: (date now | format date "%Y-%m-%d %H:%M:%S"),
-            operation: "create-server",
-            input: $name,
-            error: $e.msg,
-            debug: $e.debug?,
-            env: {
-                user: $env.USER,
-                workspace: $env.PROVISIONING_WORKSPACE_USER?,
-                debug: $env.PROVISIONING_DEBUG?
-            }
-        } | save --append logs/error-debug.json
-
-        # Re-throw with context
-        error make {
-            msg: $"Server creation failed: ($e.msg)",
-            label: {text: "failed here", span: $e.span?}
-        }
-    }
-}
-
-

Rust Debugging

-

Debug Logging:

-
use tracing::{debug, info, warn, error, instrument};
-
-#[instrument]
-pub async fn create_server(name: &str) -> Result<ServerInfo> {
-    debug!("Starting server creation for: {}", name);
-
-    let config = load_config()
-        .map_err(|e| {
-            error!("Failed to load config: {:?}", e);
-            e
-        })?;
-
-    info!("Configuration loaded successfully");
-    debug!("Config details: {:?}", config);
-
-    let server = provision_server(name, &config).await
-        .map_err(|e| {
-            error!("Provisioning failed for {}: {:?}", name, e);
-            e
-        })?;
-
-    info!("Server {} created successfully", name);
-    Ok(server)
-}
-

Interactive Debugging:

-
// Use debugger breakpoints
-#[cfg(debug_assertions)]
-{
-    println!("Debug: server creation starting");
-    dbg!(&config);
-    // Add breakpoint here in IDE
-}
-

Log Analysis

-

Log Monitoring:

-
# Follow all logs
-tail -f workspace/runtime/logs/$USER/*.log
-
-# Filter for errors
-grep -i error workspace/runtime/logs/$USER/*.log
-
-# Monitor specific component
-tail -f workspace/runtime/logs/$USER/orchestrator.log | grep -i workflow
-
-# Structured log analysis
-jq '.level == "ERROR"' workspace/runtime/logs/$USER/structured.jsonl
-
-

Debug Log Levels:

-
# Different verbosity levels
-PROVISIONING_LOG_LEVEL=trace provisioning server create test
-PROVISIONING_LOG_LEVEL=debug provisioning server create test
-PROVISIONING_LOG_LEVEL=info provisioning server create test
-
-

Integration Workflows

-

Existing System Integration

-

Working with Legacy Components:

-
# Test integration with existing system
-provisioning --version                    # Legacy system
-src/core/nulib/provisioning --version    # New system
-
-# Test workspace integration
-PROVISIONING_WORKSPACE_USER=$USER provisioning server list
-
-# Validate configuration compatibility
-provisioning validate config
-nu workspace.nu config validate
-
-

API Integration Testing

-

REST API Testing:

-
# Test orchestrator API
-curl -X GET http://localhost:9090/health
-curl -X GET http://localhost:9090/tasks
-
-# Test workflow creation
-curl -X POST http://localhost:9090/workflows/servers/create \
-  -H "Content-Type: application/json" \
-  -d '{"name": "test-server", "plan": "2xCPU-4 GB"}'
-
-# Monitor workflow
-curl -X GET http://localhost:9090/workflows/batch/status/workflow-id
-
-

Database Integration

-

SurrealDB Integration:

-
# Test database connectivity
-use core/nulib/lib_provisioning/database/surreal.nu
-let db = (connect-database)
-(test-connection $db)
-
-# Workflow state testing
-let workflow_id = (create-workflow-record "test-workflow")
-let status = (get-workflow-status $workflow_id)
-assert ($status.status == "pending")
-
-

External Tool Integration

-

Container Integration:

-
# Test with Docker
-docker run --rm -v $(pwd):/work provisioning:dev provisioning --version
-
-# Test with Kubernetes
-kubectl apply -f manifests/test-pod.yaml
-kubectl logs test-pod
-
-# Validate in different environments
-make test-dist PLATFORM=docker
-make test-dist PLATFORM=kubernetes
-
-

Collaboration Guidelines

-

Branch Strategy

-

Branch Naming:

-
    -
  • feature/description - New features
  • -
  • fix/description - Bug fixes
  • -
  • docs/description - Documentation updates
  • -
  • refactor/description - Code refactoring
  • -
  • test/description - Test improvements
  • -
-

Workflow:

-
# Start new feature
-git checkout main
-git pull origin main
-git checkout -b feature/new-provider-support
-
-# Regular commits
-git add .
-git commit -m "feat(provider): implement server creation API"
-
-# Push and create PR
-git push origin feature/new-provider-support
-gh pr create --title "Add new provider support" --body "..."
-
-

Code Review Process

-

Review Checklist:

-
    -
  • -Code follows project conventions
  • -
  • -Tests are included and passing
  • -
  • -Documentation is updated
  • -
  • -No hardcoded values
  • -
  • -Error handling is comprehensive
  • -
  • -Performance considerations addressed
  • -
-

Review Commands:

-
# Test PR locally
-gh pr checkout 123
-cd src/tools && make ci-test
-
-# Run specific tests
-nu workspace/extensions/providers/new-provider/tests/run-all.nu
-
-# Check code quality
-cargo clippy -- -D warnings
-nu --check $(find . -name "*.nu")
-
-

Documentation Requirements

-

Code Documentation:

-
# Function documentation
-def create-server [
-    name: string        # Server name (must be unique)
-    plan: string        # Server plan (for example, "2xCPU-4 GB")
-    --dry-run: bool     # Show what would be created without doing it
-] -> record {           # Returns server creation result
-    # Creates a new server with the specified configuration
-    #
-    # Examples:
-    #   create-server "web-01" "2xCPU-4 GB"
-    #   create-server "test" "1xCPU-2 GB" --dry-run
-
-    # Implementation
-}
-
-

Communication

-

Progress Updates:

-
    -
  • Daily standup participation
  • -
  • Weekly architecture reviews
  • -
  • PR descriptions with context
  • -
  • Issue tracking with details
  • -
-

Knowledge Sharing:

-
    -
  • Technical blog posts
  • -
  • Architecture decision records
  • -
  • Code review discussions
  • -
  • Team documentation updates
  • -
-

Quality Assurance

-

Code Quality Checks

-

Automated Quality Gates:

-
# Pre-commit hooks
-pre-commit install
-
-# Manual quality check
-cd src/tools
-make validate-all
-
-# Security audit
-cargo audit
-
-

Quality Metrics:

-
    -
  • Code coverage > 80%
  • -
  • No critical security vulnerabilities
  • -
  • All tests passing
  • -
  • Documentation coverage complete
  • -
  • Performance benchmarks met
  • -
-

Performance Monitoring

-

Performance Testing:

-
# Benchmark builds
-make benchmark
-
-# Performance profiling
-cargo flamegraph --bin provisioning-orchestrator
-
-# Load testing
-ab -n 1000 -c 10 http://localhost:9090/health
-
-

Resource Monitoring:

-
# Monitor during development
-nu workspace/tools/runtime-manager.nu monitor --duration 5m
-
-# Check resource usage
-du -sh workspace/runtime/
-df -h
-
-

Best Practices

-

Configuration Management

-

Never Hardcode:

-
# Bad
-def get-api-url [] { "https://api.upcloud.com" }
-
-# Good
-def get-api-url [] {
-    get-config-value "providers.upcloud.api_url" "https://api.upcloud.com"
-}
-
-

Error Handling

-

Comprehensive Error Context:

-
def create-server [name: string] {
-    try {
-        validate-server-name $name
-    } catch { |e|
-        error make {
-            msg: $"Invalid server name '($name)': ($e.msg)",
-            label: {text: "server name validation failed", span: $e.span?}
-        }
-    }
-
-    try {
-        provision-server $name
-    } catch { |e|
-        error make {
-            msg: $"Server provisioning failed for '($name)': ($e.msg)",
-            help: "Check provider credentials and quota limits"
-        }
-    }
-}
-
-

Resource Management

-

Clean Up Resources:

-
def with-temporary-server [name: string, action: closure] {
-    let server = (create-server $name)
-
-    try {
-        do $action $server
-    } catch { |e|
-        # Clean up on error
-        delete-server $name
-        $e
-    }
-
-    # Clean up on success
-    delete-server $name
-}
-
-

Testing Best Practices

-

Test Isolation:

-
def test-with-isolation [test_name: string, test_action: closure] {
-    let test_workspace = $"test-($test_name)-(date now | format date '%Y%m%d%H%M%S')"
-
-    try {
-        # Set up isolated environment
-        $env.PROVISIONING_WORKSPACE_USER = $test_workspace
-        nu workspace.nu init --user-name $test_workspace
-
-        # Run test
-        do $test_action
-
-        print $"✅ Test ($test_name) passed"
-    } catch { |e|
-        print $"❌ Test ($test_name) failed: ($e.msg)"
-        exit 1
-    } finally {
-        # Clean up test environment
-        nu workspace.nu cleanup --user-name $test_workspace --type all --force
-    }
-}
-
-

This development workflow provides a comprehensive framework for efficient, quality-focused development while maintaining the project’s architectural -principles and ensuring smooth collaboration across the team.

- -
- - -
-
- - - -
- - - - - - - - - - - - - - - - - - -
- - diff --git a/docs/book/elasticlunr.min.js b/docs/book/elasticlunr.min.js index 06cc9b3..94b20dd 100644 --- a/docs/book/elasticlunr.min.js +++ b/docs/book/elasticlunr.min.js @@ -7,4 +7,4 @@ * MIT Licensed * @license */ -!function(){function e(e){if(null===e||"object"!=typeof e)return e;var t=e.constructor();for(var n in e)e.hasOwnProperty(n)&&(t[n]=e[n]);return t}var t=function(e){var n=new t.Index;return n.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),e&&e.call(n,n),n};t.version="0.9.5",lunr=t,t.utils={},t.utils.warn=function(e){return function(t){e.console&&console.warn&&console.warn(t)}}(this),t.utils.toString=function(e){return void 0===e||null===e?"":e.toString()},t.EventEmitter=function(){this.events={}},t.EventEmitter.prototype.addListener=function(){var e=Array.prototype.slice.call(arguments),t=e.pop(),n=e;if("function"!=typeof t)throw new TypeError("last argument must be a function");n.forEach(function(e){this.hasHandler(e)||(this.events[e]=[]),this.events[e].push(t)},this)},t.EventEmitter.prototype.removeListener=function(e,t){if(this.hasHandler(e)){var n=this.events[e].indexOf(t);-1!==n&&(this.events[e].splice(n,1),0==this.events[e].length&&delete this.events[e])}},t.EventEmitter.prototype.emit=function(e){if(this.hasHandler(e)){var t=Array.prototype.slice.call(arguments,1);this.events[e].forEach(function(e){e.apply(void 0,t)},this)}},t.EventEmitter.prototype.hasHandler=function(e){return e in this.events},t.tokenizer=function(e){if(!arguments.length||null===e||void 0===e)return[];if(Array.isArray(e)){var n=e.filter(function(e){return null===e||void 0===e?!1:!0});n=n.map(function(e){return t.utils.toString(e).toLowerCase()});var i=[];return n.forEach(function(e){var n=e.split(t.tokenizer.seperator);i=i.concat(n)},this),i}return e.toString().trim().toLowerCase().split(t.tokenizer.seperator)},t.tokenizer.defaultSeperator=/[\s\-]+/,t.tokenizer.seperator=t.tokenizer.defaultSeperator,t.tokenizer.setSeperator=function(e){null!==e&&void 0!==e&&"object"==typeof e&&(t.tokenizer.seperator=e)},t.tokenizer.resetSeperator=function(){t.tokenizer.seperator=t.tokenizer.defaultSeperator},t.tokenizer.getSeperator=function(){return t.tokenizer.seperator},t.Pipeline=function(){this._queue=[]},t.Pipeline.registeredFunctions={},t.Pipeline.registerFunction=function(e,n){n in t.Pipeline.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+n),e.label=n,t.Pipeline.registeredFunctions[n]=e},t.Pipeline.getRegisteredFunction=function(e){return e in t.Pipeline.registeredFunctions!=!0?null:t.Pipeline.registeredFunctions[e]},t.Pipeline.warnIfFunctionNotRegistered=function(e){var n=e.label&&e.label in this.registeredFunctions;n||t.utils.warn("Function is not registered with pipeline. This may cause problems when serialising the index.\n",e)},t.Pipeline.load=function(e){var n=new t.Pipeline;return e.forEach(function(e){var i=t.Pipeline.getRegisteredFunction(e);if(!i)throw new Error("Cannot load un-registered function: "+e);n.add(i)}),n},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(e){t.Pipeline.warnIfFunctionNotRegistered(e),this._queue.push(e)},this)},t.Pipeline.prototype.after=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i+1,0,n)},t.Pipeline.prototype.before=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i,0,n)},t.Pipeline.prototype.remove=function(e){var t=this._queue.indexOf(e);-1!==t&&this._queue.splice(t,1)},t.Pipeline.prototype.run=function(e){for(var t=[],n=e.length,i=this._queue.length,o=0;n>o;o++){for(var r=e[o],s=0;i>s&&(r=this._queue[s](r,o,e),void 0!==r&&null!==r);s++);void 0!==r&&null!==r&&t.push(r)}return t},t.Pipeline.prototype.reset=function(){this._queue=[]},t.Pipeline.prototype.get=function(){return this._queue},t.Pipeline.prototype.toJSON=function(){return this._queue.map(function(e){return t.Pipeline.warnIfFunctionNotRegistered(e),e.label})},t.Index=function(){this._fields=[],this._ref="id",this.pipeline=new t.Pipeline,this.documentStore=new t.DocumentStore,this.index={},this.eventEmitter=new t.EventEmitter,this._idfCache={},this.on("add","remove","update",function(){this._idfCache={}}.bind(this))},t.Index.prototype.on=function(){var e=Array.prototype.slice.call(arguments);return this.eventEmitter.addListener.apply(this.eventEmitter,e)},t.Index.prototype.off=function(e,t){return this.eventEmitter.removeListener(e,t)},t.Index.load=function(e){e.version!==t.version&&t.utils.warn("version mismatch: current "+t.version+" importing "+e.version);var n=new this;n._fields=e.fields,n._ref=e.ref,n.documentStore=t.DocumentStore.load(e.documentStore),n.pipeline=t.Pipeline.load(e.pipeline),n.index={};for(var i in e.index)n.index[i]=t.InvertedIndex.load(e.index[i]);return n},t.Index.prototype.addField=function(e){return this._fields.push(e),this.index[e]=new t.InvertedIndex,this},t.Index.prototype.setRef=function(e){return this._ref=e,this},t.Index.prototype.saveDocument=function(e){return this.documentStore=new t.DocumentStore(e),this},t.Index.prototype.addDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.addDoc(i,e),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));this.documentStore.addFieldLength(i,n,o.length);var r={};o.forEach(function(e){e in r?r[e]+=1:r[e]=1},this);for(var s in r){var u=r[s];u=Math.sqrt(u),this.index[n].addToken(s,{ref:i,tf:u})}},this),n&&this.eventEmitter.emit("add",e,this)}},t.Index.prototype.removeDocByRef=function(e){if(e&&this.documentStore.isDocStored()!==!1&&this.documentStore.hasDoc(e)){var t=this.documentStore.getDoc(e);this.removeDoc(t,!1)}},t.Index.prototype.removeDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.hasDoc(i)&&(this.documentStore.removeDoc(i),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));o.forEach(function(e){this.index[n].removeToken(e,i)},this)},this),n&&this.eventEmitter.emit("remove",e,this))}},t.Index.prototype.updateDoc=function(e,t){var t=void 0===t?!0:t;this.removeDocByRef(e[this._ref],!1),this.addDoc(e,!1),t&&this.eventEmitter.emit("update",e,this)},t.Index.prototype.idf=function(e,t){var n="@"+t+"/"+e;if(Object.prototype.hasOwnProperty.call(this._idfCache,n))return this._idfCache[n];var i=this.index[t].getDocFreq(e),o=1+Math.log(this.documentStore.length/(i+1));return this._idfCache[n]=o,o},t.Index.prototype.getFields=function(){return this._fields.slice()},t.Index.prototype.search=function(e,n){if(!e)return[];e="string"==typeof e?{any:e}:JSON.parse(JSON.stringify(e));var i=null;null!=n&&(i=JSON.stringify(n));for(var o=new t.Configuration(i,this.getFields()).get(),r={},s=Object.keys(e),u=0;u0&&t.push(e);for(var i in n)"docs"!==i&&"df"!==i&&this.expandToken(e+i,t,n[i]);return t},t.InvertedIndex.prototype.toJSON=function(){return{root:this.root}},t.Configuration=function(e,n){var e=e||"";if(void 0==n||null==n)throw new Error("fields should not be null");this.config={};var i;try{i=JSON.parse(e),this.buildUserConfig(i,n)}catch(o){t.utils.warn("user configuration parse failed, will use default configuration"),this.buildDefaultConfig(n)}},t.Configuration.prototype.buildDefaultConfig=function(e){this.reset(),e.forEach(function(e){this.config[e]={boost:1,bool:"OR",expand:!1}},this)},t.Configuration.prototype.buildUserConfig=function(e,n){var i="OR",o=!1;if(this.reset(),"bool"in e&&(i=e.bool||i),"expand"in e&&(o=e.expand||o),"fields"in e)for(var r in e.fields)if(n.indexOf(r)>-1){var s=e.fields[r],u=o;void 0!=s.expand&&(u=s.expand),this.config[r]={boost:s.boost||0===s.boost?s.boost:1,bool:s.bool||i,expand:u}}else t.utils.warn("field name in user configuration not found in index instance fields");else this.addAllFields2UserConfig(i,o,n)},t.Configuration.prototype.addAllFields2UserConfig=function(e,t,n){n.forEach(function(n){this.config[n]={boost:1,bool:e,expand:t}},this)},t.Configuration.prototype.get=function(){return this.config},t.Configuration.prototype.reset=function(){this.config={}},lunr.SortedSet=function(){this.length=0,this.elements=[]},lunr.SortedSet.load=function(e){var t=new this;return t.elements=e,t.length=e.length,t},lunr.SortedSet.prototype.add=function(){var e,t;for(e=0;e1;){if(r===e)return o;e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o]}return r===e?o:-1},lunr.SortedSet.prototype.locationFor=function(e){for(var t=0,n=this.elements.length,i=n-t,o=t+Math.floor(i/2),r=this.elements[o];i>1;)e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o];return r>e?o:e>r?o+1:void 0},lunr.SortedSet.prototype.intersect=function(e){for(var t=new lunr.SortedSet,n=0,i=0,o=this.length,r=e.length,s=this.elements,u=e.elements;;){if(n>o-1||i>r-1)break;s[n]!==u[i]?s[n]u[i]&&i++:(t.add(s[n]),n++,i++)}return t},lunr.SortedSet.prototype.clone=function(){var e=new lunr.SortedSet;return e.elements=this.toArray(),e.length=e.elements.length,e},lunr.SortedSet.prototype.union=function(e){var t,n,i;this.length>=e.length?(t=this,n=e):(t=e,n=this),i=t.clone();for(var o=0,r=n.toArray();oo;o++){for(var r=e[o],s=0;i>s&&(r=this._queue[s](r,o,e),void 0!==r&&null!==r);s++);void 0!==r&&null!==r&&t.push(r)}return t},t.Pipeline.prototype.reset=function(){this._queue=[]},t.Pipeline.prototype.get=function(){return this._queue},t.Pipeline.prototype.toJSON=function(){return this._queue.map(function(e){return t.Pipeline.warnIfFunctionNotRegistered(e),e.label})},t.Index=function(){this._fields=[],this._ref="id",this.pipeline=new t.Pipeline,this.documentStore=new t.DocumentStore,this.index={},this.eventEmitter=new t.EventEmitter,this._idfCache={},this.on("add","remove","update",function(){this._idfCache={}}.bind(this))},t.Index.prototype.on=function(){var e=Array.prototype.slice.call(arguments);return this.eventEmitter.addListener.apply(this.eventEmitter,e)},t.Index.prototype.off=function(e,t){return this.eventEmitter.removeListener(e,t)},t.Index.load=function(e){e.version!==t.version&&t.utils.warn("version mismatch: current "+t.version+" importing "+e.version);var n=new this;n._fields=e.fields,n._ref=e.ref,n.documentStore=t.DocumentStore.load(e.documentStore),n.pipeline=t.Pipeline.load(e.pipeline),n.index={};for(var i in e.index)n.index[i]=t.InvertedIndex.load(e.index[i]);return n},t.Index.prototype.addField=function(e){return this._fields.push(e),this.index[e]=new t.InvertedIndex,this},t.Index.prototype.setRef=function(e){return this._ref=e,this},t.Index.prototype.saveDocument=function(e){return this.documentStore=new t.DocumentStore(e),this},t.Index.prototype.addDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.addDoc(i,e),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));this.documentStore.addFieldLength(i,n,o.length);var r={};o.forEach(function(e){e in r?r[e]+=1:r[e]=1},this);for(var s in r){var u=r[s];u=Math.sqrt(u),this.index[n].addToken(s,{ref:i,tf:u})}},this),n&&this.eventEmitter.emit("add",e,this)}},t.Index.prototype.removeDocByRef=function(e){if(e&&this.documentStore.isDocStored()!==!1&&this.documentStore.hasDoc(e)){var t=this.documentStore.getDoc(e);this.removeDoc(t,!1)}},t.Index.prototype.removeDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.hasDoc(i)&&(this.documentStore.removeDoc(i),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));o.forEach(function(e){this.index[n].removeToken(e,i)},this)},this),n&&this.eventEmitter.emit("remove",e,this))}},t.Index.prototype.updateDoc=function(e,t){var t=void 0===t?!0:t;this.removeDocByRef(e[this._ref],!1),this.addDoc(e,!1),t&&this.eventEmitter.emit("update",e,this)},t.Index.prototype.idf=function(e,t){var n="@"+t+"/"+e;if(Object.prototype.hasOwnProperty.call(this._idfCache,n))return this._idfCache[n];var i=this.index[t].getDocFreq(e),o=1+Math.log(this.documentStore.length/(i+1));return this._idfCache[n]=o,o},t.Index.prototype.getFields=function(){return this._fields.slice()},t.Index.prototype.search=function(e,n){if(!e)return[];e="string"==typeof e?{any:e}:JSON.parse(JSON.stringify(e));var i=null;null!=n&&(i=JSON.stringify(n));for(var o=new t.Configuration(i,this.getFields()).get(),r={},s=Object.keys(e),u=0;u0&&t.push(e);for(var i in n)"docs"!==i&&"df"!==i&&this.expandToken(e+i,t,n[i]);return t},t.InvertedIndex.prototype.toJSON=function(){return{root:this.root}},t.Configuration=function(e,n){var e=e||"";if(void 0==n||null==n)throw new Error("fields should not be null");this.config={};var i;try{i=JSON.parse(e),this.buildUserConfig(i,n)}catch(o){t.utils.warn("user configuration parse failed, will use default configuration"),this.buildDefaultConfig(n)}},t.Configuration.prototype.buildDefaultConfig=function(e){this.reset(),e.forEach(function(e){this.config[e]={boost:1,bool:"OR",expand:!1}},this)},t.Configuration.prototype.buildUserConfig=function(e,n){var i="OR",o=!1;if(this.reset(),"bool"in e&&(i=e.bool||i),"expand"in e&&(o=e.expand||o),"fields"in e)for(var r in e.fields)if(n.indexOf(r)>-1){var s=e.fields[r],u=o;void 0!=s.expand&&(u=s.expand),this.config[r]={boost:s.boost||0===s.boost?s.boost:1,bool:s.bool||i,expand:u}}else t.utils.warn("field name in user configuration not found in index instance fields");else this.addAllFields2UserConfig(i,o,n)},t.Configuration.prototype.addAllFields2UserConfig=function(e,t,n){n.forEach(function(n){this.config[n]={boost:1,bool:e,expand:t}},this)},t.Configuration.prototype.get=function(){return this.config},t.Configuration.prototype.reset=function(){this.config={}},lunr.SortedSet=function(){this.length=0,this.elements=[]},lunr.SortedSet.load=function(e){var t=new this;return t.elements=e,t.length=e.length,t},lunr.SortedSet.prototype.add=function(){var e,t;for(e=0;e1;){if(r===e)return o;e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o]}return r===e?o:-1},lunr.SortedSet.prototype.locationFor=function(e){for(var t=0,n=this.elements.length,i=n-t,o=t+Math.floor(i/2),r=this.elements[o];i>1;)e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o];return r>e?o:e>r?o+1:void 0},lunr.SortedSet.prototype.intersect=function(e){for(var t=new lunr.SortedSet,n=0,i=0,o=this.length,r=e.length,s=this.elements,u=e.elements;;){if(n>o-1||i>r-1)break;s[n]!==u[i]?s[n]u[i]&&i++:(t.add(s[n]),n++,i++)}return t},lunr.SortedSet.prototype.clone=function(){var e=new lunr.SortedSet;return e.elements=this.toArray(),e.length=e.elements.length,e},lunr.SortedSet.prototype.union=function(e){var t,n,i;this.length>=e.length?(t=this,n=e):(t=e,n=this),i=t.clone();for(var o=0,r=n.toArray();o - - - - - Customize Infrastructure - Provisioning Platform Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-

Keyboard shortcuts

-
-

Press or to navigate between chapters

-

Press S or / to search in the book

-

Press ? to show this help

-

Press Esc to hide this help

-
-
-
-
- - - - - - - - - - - - - -
- -
- - - - - - - - -
-
-

Customize Infrastructure

-

Goal: Customize infrastructure using layers, templates, and configuration patterns -Time: 20-40 minutes -Difficulty: Intermediate to Advanced

-

Overview

-

This guide covers:

-
    -
  1. Understanding the layer system
  2. -
  3. Using templates
  4. -
  5. Creating custom modules
  6. -
  7. Configuration inheritance
  8. -
  9. Advanced customization patterns
  10. -
-

The Layer System

-

Understanding Layers

-

The provisioning system uses a 3-layer architecture for configuration inheritance:

-
┌─────────────────────────────────────┐
-│  Infrastructure Layer (Priority 300)│  ← Highest priority
-│  workspace/infra/{name}/            │
-│  • Project-specific configs         │
-│  • Environment customizations       │
-│  • Local overrides                  │
-└─────────────────────────────────────┘
-              ↓ overrides
-┌─────────────────────────────────────┐
-│  Workspace Layer (Priority 200)     │
-│  provisioning/workspace/templates/  │
-│  • Reusable patterns                │
-│  • Organization standards           │
-│  • Team conventions                 │
-└─────────────────────────────────────┘
-              ↓ overrides
-┌─────────────────────────────────────┐
-│  Core Layer (Priority 100)          │  ← Lowest priority
-│  provisioning/extensions/           │
-│  • System defaults                  │
-│  • Provider implementations         │
-│  • Default taskserv configs         │
-└─────────────────────────────────────┘
-
-

Resolution Order: Infrastructure (300) → Workspace (200) → Core (100)

-

Higher numbers override lower numbers.

-

View Layer Resolution

-
# Explain layer concept
-provisioning lyr explain
-
-

Expected Output:

-
📚 LAYER SYSTEM EXPLAINED
-
-The layer system provides configuration inheritance across 3 levels:
-
-🔵 CORE LAYER (100) - System Defaults
-   Location: provisioning/extensions/
-   • Base taskserv configurations
-   • Default provider settings
-   • Standard cluster templates
-   • Built-in extensions
-
-🟢 WORKSPACE LAYER (200) - Shared Templates
-   Location: provisioning/workspace/templates/
-   • Organization-wide patterns
-   • Reusable configurations
-   • Team standards
-   • Custom extensions
-
-🔴 INFRASTRUCTURE LAYER (300) - Project Specific
-   Location: workspace/infra/{project}/
-   • Project-specific overrides
-   • Environment customizations
-   • Local modifications
-   • Runtime settings
-
-Resolution: Infrastructure → Workspace → Core
-Higher priority layers override lower ones.
-
-
# Show layer resolution for your project
-provisioning lyr show my-production
-
-

Expected Output:

-
📊 Layer Resolution for my-production:
-
-LAYER            PRIORITY  SOURCE                              FILES
-Infrastructure   300       workspace/infra/my-production/      4 files
-                           • servers.ncl (overrides)
-                           • taskservs.ncl (overrides)
-                           • clusters.ncl (custom)
-                           • providers.ncl (overrides)
-
-Workspace        200       provisioning/workspace/templates/   2 files
-                           • production.ncl (used)
-                           • kubernetes.ncl (used)
-
-Core             100       provisioning/extensions/            15 files
-                           • taskservs/* (base configs)
-                           • providers/* (default settings)
-                           • clusters/* (templates)
-
-Resolution Order: Infrastructure → Workspace → Core
-Status: ✅ All layers resolved successfully
-
-

Test Layer Resolution

-
# Test how a specific module resolves
-provisioning lyr test kubernetes my-production
-
-

Expected Output:

-
🔍 Layer Resolution Test: kubernetes → my-production
-
-Resolving kubernetes configuration...
-
-🔴 Infrastructure Layer (300):
-   ✅ Found: workspace/infra/my-production/taskservs/kubernetes.ncl
-   Provides:
-     • version = "1.30.0" (overrides)
-     • control_plane_servers = ["web-01"] (overrides)
-     • worker_servers = ["web-02"] (overrides)
-
-🟢 Workspace Layer (200):
-   ✅ Found: provisioning/workspace/templates/production-kubernetes.ncl
-   Provides:
-     • security_policies (inherited)
-     • network_policies (inherited)
-     • resource_quotas (inherited)
-
-🔵 Core Layer (100):
-   ✅ Found: provisioning/extensions/taskservs/kubernetes/main.ncl
-   Provides:
-     • default_version = "1.29.0" (base)
-     • default_features (base)
-     • default_plugins (base)
-
-Final Configuration (after merging all layers):
-  version: "1.30.0" (from Infrastructure)
-  control_plane_servers: ["web-01"] (from Infrastructure)
-  worker_servers: ["web-02"] (from Infrastructure)
-  security_policies: {...} (from Workspace)
-  network_policies: {...} (from Workspace)
-  resource_quotas: {...} (from Workspace)
-  default_features: {...} (from Core)
-  default_plugins: {...} (from Core)
-
-Resolution: ✅ Success
-
-

Using Templates

-

List Available Templates

-
# List all templates
-provisioning tpl list
-
-

Expected Output:

-
📋 Available Templates:
-
-TASKSERVS:
-  • production-kubernetes    - Production-ready Kubernetes setup
-  • production-postgres      - Production PostgreSQL with replication
-  • production-redis         - Redis cluster with sentinel
-  • development-kubernetes   - Development Kubernetes (minimal)
-  • ci-cd-pipeline          - Complete CI/CD pipeline
-
-PROVIDERS:
-  • upcloud-production      - UpCloud production settings
-  • upcloud-development     - UpCloud development settings
-  • aws-production          - AWS production VPC setup
-  • aws-development         - AWS development environment
-  • local-docker            - Local Docker-based setup
-
-CLUSTERS:
-  • buildkit-cluster        - BuildKit for container builds
-  • monitoring-stack        - Prometheus + Grafana + Loki
-  • security-stack          - Security monitoring tools
-
-Total: 13 templates
-
-
# List templates by type
-provisioning tpl list --type taskservs
-provisioning tpl list --type providers
-provisioning tpl list --type clusters
-
-

View Template Details

-
# Show template details
-provisioning tpl show production-kubernetes
-
-

Expected Output:

-
📄 Template: production-kubernetes
-
-Description: Production-ready Kubernetes configuration with
-             security hardening, network policies, and monitoring
-
-Category: taskservs
-Version: 1.0.0
-
-Configuration Provided:
-  • Kubernetes version: 1.30.0
-  • Security policies: Pod Security Standards (restricted)
-  • Network policies: Default deny + allow rules
-  • Resource quotas: Per-namespace limits
-  • Monitoring: Prometheus integration
-  • Logging: Loki integration
-  • Backup: Velero configuration
-
-Requirements:
-  • Minimum 2 servers
-  • 4 GB RAM per server
-  • Network plugin (Cilium recommended)
-
-Location: provisioning/workspace/templates/production-kubernetes.ncl
-
-Example Usage:
-  provisioning tpl apply production-kubernetes my-production
-
-

Apply Template

-
# Apply template to your infrastructure
-provisioning tpl apply production-kubernetes my-production
-
-

Expected Output:

-
🚀 Applying template: production-kubernetes → my-production
-
-Checking compatibility... ⏳
-✅ Infrastructure compatible with template
-
-Merging configuration... ⏳
-✅ Configuration merged
-
-Files created/updated:
-  • workspace/infra/my-production/taskservs/kubernetes.ncl (updated)
-  • workspace/infra/my-production/policies/security.ncl (created)
-  • workspace/infra/my-production/policies/network.ncl (created)
-  • workspace/infra/my-production/monitoring/prometheus.ncl (created)
-
-🎉 Template applied successfully!
-
-Next steps:
-  1. Review generated configuration
-  2. Adjust as needed
-  3. Deploy: provisioning t create kubernetes --infra my-production
-
-

Validate Template Usage

-
# Validate template was applied correctly
-provisioning tpl validate my-production
-
-

Expected Output:

-
✅ Template Validation: my-production
-
-Templates Applied:
-  ✅ production-kubernetes (v1.0.0)
-  ✅ production-postgres (v1.0.0)
-
-Configuration Status:
-  ✅ All required fields present
-  ✅ No conflicting settings
-  ✅ Dependencies satisfied
-
-Compliance:
-  ✅ Security policies configured
-  ✅ Network policies configured
-  ✅ Resource quotas set
-  ✅ Monitoring enabled
-
-Status: ✅ Valid
-
-

Creating Custom Templates

-

Step 1: Create Template Structure

-
# Create custom template directory
-mkdir -p provisioning/workspace/templates/my-custom-template
-
-

Step 2: Write Template Configuration

-

File: provisioning/workspace/templates/my-custom-template/main.ncl

-
# Custom Kubernetes template with specific settings
-let kubernetes_config = {
-  # Version
-  version = "1.30.0",
-
-  # Custom feature gates
-  feature_gates = {
-    "GracefulNodeShutdown" = true,
-    "SeccompDefault" = true,
-    "StatefulSetAutoDeletePVC" = true,
-  },
-
-  # Custom kubelet configuration
-  kubelet_config = {
-    max_pods = 110,
-    pod_pids_limit = 4096,
-    container_log_max_size = "10Mi",
-    container_log_max_files = 5,
-  },
-
-  # Custom API server flags
-  apiserver_extra_args = {
-    "enable-admission-plugins" = "NodeRestriction,PodSecurity,LimitRanger",
-    "audit-log-maxage" = "30",
-    "audit-log-maxbackup" = "10",
-  },
-
-  # Custom scheduler configuration
-  scheduler_config = {
-    profiles = [
-      {
-        name = "high-availability",
-        plugins = {
-          score = {
-            enabled = [
-              {name = "NodeResourcesBalancedAllocation", weight = 2},
-              {name = "NodeResourcesLeastAllocated", weight = 1},
-            ],
-          },
-        },
-      },
-    ],
-  },
-
-  # Network configuration
-  network = {
-    service_cidr = "10.96.0.0/12",
-    pod_cidr = "10.244.0.0/16",
-    dns_domain = "cluster.local",
-  },
-
-  # Security configuration
-  security = {
-    pod_security_standard = "restricted",
-    encrypt_etcd = true,
-    rotate_certificates = true,
-  },
-} in
-kubernetes_config
-
-

Step 3: Create Template Metadata

-

File: provisioning/workspace/templates/my-custom-template/metadata.toml

-
[template]
-name = "my-custom-template"
-version = "1.0.0"
-description = "Custom Kubernetes template with enhanced security"
-category = "taskservs"
-author = "Your Name"
-
-[requirements]
-min_servers = 2
-min_memory_gb = 4
-required_taskservs = ["containerd", "cilium"]
-
-[tags]
-environment = ["production", "staging"]
-features = ["security", "monitoring", "high-availability"]
-
-

Step 4: Test Custom Template

-
# List templates (should include your custom template)
-provisioning tpl list
-
-# Show your template
-provisioning tpl show my-custom-template
-
-# Apply to test infrastructure
-provisioning tpl apply my-custom-template my-test
-
-

Configuration Inheritance Examples

-

Example 1: Override Single Value

-

Core Layer (provisioning/extensions/taskservs/postgres/main.ncl):

-
let postgres_config = {
-  version = "15.5",
-  port = 5432,
-  max_connections = 100,
-} in
-postgres_config
-
-

Infrastructure Layer (workspace/infra/my-production/taskservs/postgres.ncl):

-
let postgres_config = {
-  max_connections = 500,  # Override only max_connections
-} in
-postgres_config
-
-

Result (after layer resolution):

-
let postgres_config = {
-  version = "15.5",          # From Core
-  port = 5432,               # From Core
-  max_connections = 500,     # From Infrastructure (overridden)
-} in
-postgres_config
-
-

Example 2: Add Custom Configuration

-

Workspace Layer (provisioning/workspace/templates/production-postgres.ncl):

-
let postgres_config = {
-  replication = {
-    enabled = true,
-    replicas = 2,
-    sync_mode = "async",
-  },
-} in
-postgres_config
-
-

Infrastructure Layer (workspace/infra/my-production/taskservs/postgres.ncl):

-
let postgres_config = {
-  replication = {
-    sync_mode = "sync",  # Override sync mode
-  },
-  custom_extensions = ["pgvector", "timescaledb"],  # Add custom config
-} in
-postgres_config
-
-

Result:

-
let postgres_config = {
-  version = "15.5",          # From Core
-  port = 5432,               # From Core
-  max_connections = 100,     # From Core
-  replication = {
-    enabled = true,          # From Workspace
-    replicas = 2,            # From Workspace
-    sync_mode = "sync",      # From Infrastructure (overridden)
-  },
-  custom_extensions = ["pgvector", "timescaledb"],  # From Infrastructure (added)
-} in
-postgres_config
-
-

Example 3: Environment-Specific Configuration

-

Workspace Layer (provisioning/workspace/templates/base-kubernetes.ncl):

-
let kubernetes_config = {
-  version = "1.30.0",
-  control_plane_count = 3,
-  worker_count = 5,
-  resources = {
-    control_plane = {cpu = "4", memory = "8Gi"},
-    worker = {cpu = "8", memory = "16Gi"},
-  },
-} in
-kubernetes_config
-
-

Development Infrastructure (workspace/infra/my-dev/taskservs/kubernetes.ncl):

-
let kubernetes_config = {
-  control_plane_count = 1,  # Smaller for dev
-  worker_count = 2,
-  resources = {
-    control_plane = {cpu = "2", memory = "4Gi"},
-    worker = {cpu = "2", memory = "4Gi"},
-  },
-} in
-kubernetes_config
-
-

Production Infrastructure (workspace/infra/my-prod/taskservs/kubernetes.ncl):

-
let kubernetes_config = {
-  control_plane_count = 5,  # Larger for prod
-  worker_count = 10,
-  resources = {
-    control_plane = {cpu = "8", memory = "16Gi"},
-    worker = {cpu = "16", memory = "32Gi"},
-  },
-} in
-kubernetes_config
-
-

Advanced Customization Patterns

-

Pattern 1: Multi-Environment Setup

-

Create different configurations for each environment:

-
# Create environments
-provisioning ws init my-app-dev
-provisioning ws init my-app-staging
-provisioning ws init my-app-prod
-
-# Apply environment-specific templates
-provisioning tpl apply development-kubernetes my-app-dev
-provisioning tpl apply staging-kubernetes my-app-staging
-provisioning tpl apply production-kubernetes my-app-prod
-
-# Customize each environment
-# Edit: workspace/infra/my-app-dev/...
-# Edit: workspace/infra/my-app-staging/...
-# Edit: workspace/infra/my-app-prod/...
-
-

Pattern 2: Shared Configuration Library

-

Create reusable configuration fragments:

-

File: provisioning/workspace/templates/shared/security-policies.ncl

-
let security_policies = {
-  pod_security = {
-    enforce = "restricted",
-    audit = "restricted",
-    warn = "restricted",
-  },
-  network_policies = [
-    {
-      name = "deny-all",
-      pod_selector = {},
-      policy_types = ["Ingress", "Egress"],
-    },
-    {
-      name = "allow-dns",
-      pod_selector = {},
-      egress = [
-        {
-          to = [{namespace_selector = {name = "kube-system"}}],
-          ports = [{protocol = "UDP", port = 53}],
-        },
-      ],
-    },
-  ],
-} in
-security_policies
-
-

Import in your infrastructure:

-
let security_policies = (import "../../../provisioning/workspace/templates/shared/security-policies.ncl") in
-
-let kubernetes_config = {
-  version = "1.30.0",
-  image_repo = "k8s.gcr.io",
-  security = security_policies,  # Import shared policies
-} in
-kubernetes_config
-
-

Pattern 3: Dynamic Configuration

-

Use Nickel features for dynamic configuration:

-
# Calculate resources based on server count
-let server_count = 5 in
-let replicas_per_server = 2 in
-let total_replicas = server_count * replicas_per_server in
-
-let postgres_config = {
-  version = "16.1",
-  max_connections = total_replicas * 50,  # Dynamic calculation
-  shared_buffers = "1024 MB",
-} in
-postgres_config
-
-

Pattern 4: Conditional Configuration

-
let environment = "production" in  # or "development"
-
-let kubernetes_config = {
-  version = "1.30.0",
-  control_plane_count = if environment == "production" then 3 else 1,
-  worker_count = if environment == "production" then 5 else 2,
-  monitoring = {
-    enabled = environment == "production",
-    retention = if environment == "production" then "30d" else "7d",
-  },
-} in
-kubernetes_config
-
-

Layer Statistics

-
# Show layer system statistics
-provisioning lyr stats
-
-

Expected Output:

-
📊 Layer System Statistics:
-
-Infrastructure Layer:
-  • Projects: 3
-  • Total files: 15
-  • Average overrides per project: 5
-
-Workspace Layer:
-  • Templates: 13
-  • Most used: production-kubernetes (5 projects)
-  • Custom templates: 2
-
-Core Layer:
-  • Taskservs: 15
-  • Providers: 3
-  • Clusters: 3
-
-Resolution Performance:
-  • Average resolution time: 45 ms
-  • Cache hit rate: 87%
-  • Total resolutions: 1,250
-
-

Customization Workflow

-

Complete Customization Example

-
# 1. Create new infrastructure
-provisioning ws init my-custom-app
-
-# 2. Understand layer system
-provisioning lyr explain
-
-# 3. Discover templates
-provisioning tpl list --type taskservs
-
-# 4. Apply base template
-provisioning tpl apply production-kubernetes my-custom-app
-
-# 5. View applied configuration
-provisioning lyr show my-custom-app
-
-# 6. Customize (edit files)
-provisioning sops workspace/infra/my-custom-app/taskservs/kubernetes.ncl
-
-# 7. Test layer resolution
-provisioning lyr test kubernetes my-custom-app
-
-# 8. Validate configuration
-provisioning tpl validate my-custom-app
-provisioning val config --infra my-custom-app
-
-# 9. Deploy customized infrastructure
-provisioning s create --infra my-custom-app --check
-provisioning s create --infra my-custom-app
-provisioning t create kubernetes --infra my-custom-app
-
-

Best Practices

-

1. Use Layers Correctly

-
    -
  • Core Layer: Only modify for system-wide changes
  • -
  • Workspace Layer: Use for organization-wide templates
  • -
  • Infrastructure Layer: Use for project-specific customizations
  • -
-

2. Template Organization

-
provisioning/workspace/templates/
-├── shared/           # Shared configuration fragments
-│   ├── security-policies.ncl
-│   ├── network-policies.ncl
-│   └── monitoring.ncl
-├── production/       # Production templates
-│   ├── kubernetes.ncl
-│   ├── postgres.ncl
-│   └── redis.ncl
-└── development/      # Development templates
-    ├── kubernetes.ncl
-    └── postgres.ncl
-
-

3. Documentation

-

Document your customizations:

-

File: workspace/infra/my-production/README.md

-
# My Production Infrastructure
-
-## Customizations
-
-- Kubernetes: Using production template with 5 control plane nodes
-- PostgreSQL: Configured with streaming replication
-- Cilium: Native routing mode enabled
-
-## Layer Overrides
-
-- `taskservs/kubernetes.ncl`: Control plane count (3 → 5)
-- `taskservs/postgres.ncl`: Replication mode (async → sync)
-- `network/cilium.ncl`: Routing mode (tunnel → native)
-
-

4. Version Control

-

Keep templates and configurations in version control:

-
cd provisioning/workspace/templates/
-git add .
-git commit -m "Add production Kubernetes template with enhanced security"
-
-cd workspace/infra/my-production/
-git add .
-git commit -m "Configure production environment for my-production"
-
-

Troubleshooting Customizations

-

Issue: Configuration not applied

-
# Check layer resolution
-provisioning lyr show my-production
-
-# Verify file exists
-ls -la workspace/infra/my-production/taskservs/
-
-# Test specific resolution
-provisioning lyr test kubernetes my-production
-
-

Issue: Conflicting configurations

-
# Validate configuration
-provisioning val config --infra my-production
-
-# Show configuration merge result
-provisioning show config kubernetes --infra my-production
-
-

Issue: Template not found

-
# List available templates
-provisioning tpl list
-
-# Check template path
-ls -la provisioning/workspace/templates/
-
-# Refresh template cache
-provisioning tpl refresh
-
-

Next Steps

- -

Quick Reference

-
# Layer system
-provisioning lyr explain              # Explain layers
-provisioning lyr show <project>       # Show layer resolution
-provisioning lyr test <module> <project>  # Test resolution
-provisioning lyr stats                # Layer statistics
-
-# Templates
-provisioning tpl list                 # List all templates
-provisioning tpl list --type <type>   # Filter by type
-provisioning tpl show <template>      # Show template details
-provisioning tpl apply <template> <project>  # Apply template
-provisioning tpl validate <project>   # Validate template usage
-
-
-

This guide is part of the provisioning project documentation. Last updated: 2025-09-30

- -
- - -
-
- - - -
- - - - - - - - - - - - - - - - - - -
- - diff --git a/docs/book/guides/from-scratch.html b/docs/book/guides/from-scratch.html index 370d50a..9cafdd8 100644 --- a/docs/book/guides/from-scratch.html +++ b/docs/book/guides/from-scratch.html @@ -1,14 +1,14 @@ - + - From Scratch - Provisioning Platform Documentation + From Scratch Guide - Provisioning Platform Documentation - + @@ -34,7 +34,7 @@ @@ -76,7 +76,7 @@ try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { } if (theme === null || theme === undefined) { theme = default_theme; } const html = document.documentElement; - html.classList.remove('ayu') + html.classList.remove('rust') html.classList.add(theme); html.classList.add("js"); @@ -140,10 +140,10 @@ - + - + @@ -172,917 +172,754 @@
-

Complete Deployment Guide: From Scratch to Production

-

Version: 3.5.0 -Last Updated: 2025-10-09 -Estimated Time: 30-60 minutes -Difficulty: Beginner to Intermediate

-
-

Table of Contents

-
    -
  1. Prerequisites
  2. -
  3. Step 1: Install Nushell
  4. -
  5. Step 2: Install Nushell Plugins (Recommended)
  6. -
  7. Step 3: Install Required Tools
  8. -
  9. Step 4: Clone and Setup Project
  10. -
  11. Step 5: Initialize Workspace
  12. -
  13. Step 6: Configure Environment
  14. -
  15. Step 7: Discover and Load Modules
  16. -
  17. Step 8: Validate Configuration
  18. -
  19. Step 9: Deploy Servers
  20. -
  21. Step 10: Install Task Services
  22. -
  23. Step 11: Create Clusters
  24. -
  25. Step 12: Verify Deployment
  26. -
  27. Step 13: Post-Deployment
  28. -
  29. Troubleshooting
  30. -
  31. Next Steps
  32. -
-
-

Prerequisites

-

Before starting, ensure you have:

+

From Scratch Guide

+

Complete walkthrough from zero to production-ready infrastructure deployment using the Provisioning platform. This guide covers installation, configuration, +workspace setup, infrastructure definition, and deployment workflows.

+

Overview

+

This guide walks you through:

    -
  • Operating System: macOS, Linux, or Windows (WSL2 recommended)
  • -
  • Administrator Access: Ability to install software and configure system
  • -
  • Internet Connection: For downloading dependencies and accessing cloud providers
  • -
  • Cloud Provider Credentials: UpCloud, Hetzner, AWS, or local development environment
  • -
  • Basic Terminal Knowledge: Comfortable running shell commands
  • -
  • Text Editor: vim, nano, Zed, VSCode, or your preferred editor
  • +
  • Installing prerequisites and the Provisioning platform
  • +
  • Configuring cloud provider credentials
  • +
  • Creating your first workspace
  • +
  • Defining infrastructure using Nickel
  • +
  • Deploying servers and task services
  • +
  • Setting up Kubernetes clusters
  • +
  • Implementing security best practices
  • +
  • Monitoring and maintaining infrastructure
- -
    -
  • CPU: 2+ cores
  • -
  • RAM: 8 GB minimum, 16 GB recommended
  • -
  • Disk: 20 GB free space minimum
  • -
-
-

Step 1: Install Nushell

-

Nushell 0.109.1+ is the primary shell and scripting language for the provisioning platform.

-

macOS (via Homebrew)

-
# Install Nushell
+

Time commitment: 2-3 hours for complete setup +Prerequisites: Linux or macOS, terminal access, cloud provider account (optional)

+

Phase 1: Installation

+

System Prerequisites

+

Ensure your system meets minimum requirements:

+
# Check OS (Linux or macOS)
+uname -s
+
+# Verify available disk space (minimum 10GB recommended)
+df -h ~
+
+# Check internet connectivity
+ping -c 3 github.com
+
+

Install Required Tools

+

Nushell (Required)

+
# macOS
 brew install nushell
 
-# Verify installation
-nu --version
-# Expected: 0.109.1 or higher
-
-

Linux (via Package Manager)

-

Ubuntu/Debian:

-
# Add Nushell repository
-curl -fsSL https://starship.rs/install.sh | bash
-
-# Install Nushell
-sudo apt update
-sudo apt install nushell
+# Linux
+cargo install nu
 
 # Verify installation
-nu --version
+nu --version  # Expected: 0.109.1+
 
-

Fedora:

-
sudo dnf install nushell
-nu --version
-
-

Arch Linux:

-
sudo pacman -S nushell
-nu --version
-
-

Linux/macOS (via Cargo)

-
# Install Rust (if not already installed)
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-source $HOME/.cargo/env
+

Nickel (Required)

+
# macOS
+brew install nickel
 
-# Install Nushell
-cargo install nu --locked
+# Linux
+cargo install nickel-lang-cli
 
 # Verify installation
-nu --version
+nickel --version  # Expected: 1.15.1+
 
-

Windows (via Winget)

-
# Install Nushell
-winget install nushell
+

Additional Tools

+
# SOPS for secrets management
+brew install sops  # macOS
+# or download from  [https://github.com/getsops/sops/releases](https://github.com/getsops/sops/releases)
 
-# Verify installation
-nu --version
-
-

Configure Nushell

-
# Start Nushell
-nu
+# Age for encryption
+brew install age  # macOS
+cargo install age  # Linux
 
-# Configure (creates default config if not exists)
-config nu
+# K9s for Kubernetes management (optional)
+brew install derailed/k9s/k9s
+
+# Verify installations
+sops --version    # Expected: 3.10.2+
+age --version     # Expected: 1.2.1+
+k9s version       # Expected: 0.50.6+
 
-
- -

Native plugins provide 10-50x performance improvement for authentication, KMS, and orchestrator operations.

-

Why Install Plugins

-

Performance Gains:

+

Install Provisioning Platform

+ +
# Download and run installer
+INSTALL_URL="https://raw.githubusercontent.com/yourusername/provisioning/main/install.sh"
+curl -sSL "$INSTALL_URL" | bash
+
+# Follow prompts to configure installation directory and path
+# Default: ~/.local/bin/provisioning
+
+

Installer performs:

    -
  • 🚀 KMS operations: ~5 ms vs ~50 ms (10x faster)
  • -
  • 🚀 Orchestrator queries: ~1 ms vs ~30 ms (30x faster)
  • -
  • 🚀 Batch encryption: 100 files in 0.5s vs 5s (10x faster)
  • +
  • Downloads latest platform binaries
  • +
  • Installs CLI to system PATH
  • +
  • Creates default configuration structure
  • +
  • Validates dependencies
  • +
  • Runs health check
-

Benefits:

-
    -
  • ✅ Native Nushell integration (pipelines, data structures)
  • -
  • ✅ OS keyring for secure token storage
  • -
  • ✅ Offline capability (Age encryption, local orchestrator)
  • -
  • ✅ Graceful fallback to HTTP if not installed
  • -
-

Prerequisites for Building Plugins

-
# Install Rust toolchain (if not already installed)
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-source $HOME/.cargo/env
-rustc --version
-# Expected: rustc 1.75+ or higher
+

Option 2: Build from Source

+
# Clone repository
+git clone  [https://github.com/yourusername/provisioning.git](https://github.com/yourusername/provisioning.git)
+cd provisioning
 
-# Linux only: Install development packages
-sudo apt install libssl-dev pkg-config  # Ubuntu/Debian
-sudo dnf install openssl-devel          # Fedora
+# Build core CLI
+cd provisioning/core
+cargo build --release
 
-# Linux only: Install keyring service (required for auth plugin)
-sudo apt install gnome-keyring          # Ubuntu/Debian (GNOME)
-sudo apt install kwalletmanager         # Ubuntu/Debian (KDE)
-
-

Build Plugins

-
# Navigate to plugins directory
-cd provisioning/core/plugins/nushell-plugins
+# Install to local bin
+cp target/release/provisioning ~/.local/bin/
 
-# Build all three plugins in release mode (optimized)
-cargo build --release --all
+# Add to PATH (add to ~/.bashrc or ~/.zshrc)
+export PATH="$HOME/.local/bin:$PATH"
 
-# Expected output:
-#    Compiling nu_plugin_auth v0.1.0
-#    Compiling nu_plugin_kms v0.1.0
-#    Compiling nu_plugin_orchestrator v0.1.0
-#     Finished release [optimized] target(s) in 2m 15s
-
-

Build time: ~2-5 minutes depending on hardware

-

Register Plugins with Nushell

-
# Register all three plugins (full paths recommended)
-plugin add $PWD/target/release/nu_plugin_auth
-plugin add $PWD/target/release/nu_plugin_kms
-plugin add $PWD/target/release/nu_plugin_orchestrator
-
-# Alternative (from plugins directory)
-plugin add target/release/nu_plugin_auth
-plugin add target/release/nu_plugin_kms
-plugin add target/release/nu_plugin_orchestrator
-
-

Verify Plugin Installation

-
# List registered plugins
-plugin list | where name =~ "auth|kms|orch"
-
-# Expected output:
-# ╭───┬─────────────────────────┬─────────┬───────────────────────────────────╮
-# │ # │          name           │ version │           filename                │
-# ├───┼─────────────────────────┼─────────┼───────────────────────────────────┤
-# │ 0 │ nu_plugin_auth          │ 0.1.0   │ .../nu_plugin_auth                │
-# │ 1 │ nu_plugin_kms           │ 0.1.0   │ .../nu_plugin_kms                 │
-# │ 2 │ nu_plugin_orchestrator  │ 0.1.0   │ .../nu_plugin_orchestrator        │
-# ╰───┴─────────────────────────┴─────────┴───────────────────────────────────╯
-
-# Test each plugin
-auth --help       # Should show auth commands
-kms --help        # Should show kms commands
-orch --help       # Should show orch commands
-
-

Configure Plugin Environments

-
# Add to ~/.config/nushell/env.nu
-$env.CONTROL_CENTER_URL = "http://localhost:3000"
-$env.RUSTYVAULT_ADDR = "http://localhost:8200"
-$env.RUSTYVAULT_TOKEN = "your-vault-token-here"
-$env.ORCHESTRATOR_DATA_DIR = "provisioning/platform/orchestrator/data"
-
-# For Age encryption (local development)
-$env.AGE_IDENTITY = $"($env.HOME)/.age/key.txt"
-$env.AGE_RECIPIENT = "age1xxxxxxxxx"  # Replace with your public key
-
-

Test Plugins (Quick Smoke Test)

-
# Test KMS plugin (requires backend configured)
-kms status
-# Expected: { backend: "rustyvault", status: "healthy", ... }
-# Or: Error if backend not configured (OK for now)
-
-# Test orchestrator plugin (reads local files)
-orch status
-# Expected: { active_tasks: 0, completed_tasks: 0, health: "healthy" }
-# Or: Error if orchestrator not started yet (OK for now)
-
-# Test auth plugin (requires control center)
-auth verify
-# Expected: { active: false }
-# Or: Error if control center not running (OK for now)
-
-

Note: It’s OK if plugins show errors at this stage. We’ll configure backends and services later.

- -

If you want to skip plugin installation for now:

-
    -
  • ✅ All features work via HTTP API (slower but functional)
  • -
  • ⚠️ You’ll miss 10-50x performance improvements
  • -
  • ⚠️ No offline capability for KMS/orchestrator
  • -
  • ℹ️ You can install plugins later anytime
  • -
-

To use HTTP fallback:

-
# System automatically uses HTTP if plugins not available
-# No configuration changes needed
-
-
-

Step 3: Install Required Tools

-

Essential Tools

-

SOPS (Secrets Management)

-
# macOS
-brew install sops
-
-# Linux
-wget https://github.com/mozilla/sops/releases/download/v3.10.2/sops-v3.10.2.linux.amd64
-sudo mv sops-v3.10.2.linux.amd64 /usr/local/bin/sops
-sudo chmod +x /usr/local/bin/sops
-
-# Verify
-sops --version
-# Expected: 3.10.2 or higher
-
-

Age (Encryption Tool)

-
# macOS
-brew install age
-
-# Linux
-sudo apt install age  # Ubuntu/Debian
-sudo dnf install age  # Fedora
-
-# Or from source
-go install filippo.io/age/cmd/...@latest
-
-# Verify
-age --version
-# Expected: 1.2.1 or higher
-
-# Generate Age key (for local encryption)
-age-keygen -o ~/.age/key.txt
-cat ~/.age/key.txt
-# Save the public key (age1...) for later
-
- -

K9s (Kubernetes Management)

-
# macOS
-brew install k9s
-
-# Linux
-curl -sS https://webinstall.dev/k9s | bash
-
-# Verify
-k9s version
-# Expected: 0.50.6 or higher
-
-

glow (Markdown Renderer)

-
# macOS
-brew install glow
-
-# Linux
-sudo apt install glow  # Ubuntu/Debian
-sudo dnf install glow  # Fedora
-
-# Verify
-glow --version
-
-
-

Step 4: Clone and Setup Project

-

Clone Repository

-
# Clone project
-git clone https://github.com/your-org/project-provisioning.git
-cd project-provisioning
-
-# Or if already cloned, update to latest
-git pull origin main
-
-

Add CLI to PATH (Optional)

-
# Add to ~/.bashrc or ~/.zshrc
-export PATH="$PATH:/Users/Akasha/project-provisioning/provisioning/core/cli"
-
-# Or create symlink
-sudo ln -s /Users/Akasha/project-provisioning/provisioning/core/cli/provisioning /usr/local/bin/provisioning
-
-# Verify
+# Verify installation
 provisioning version
-# Expected: 3.5.0
 
-
-

Step 5: Initialize Workspace

-

A workspace is a self-contained environment for managing infrastructure.

-

Create New Workspace

-
# Initialize new workspace
-provisioning workspace init --name production
+

Platform Health Check

+
# Verify installation
+provisioning setup check
 
-# Or use interactive mode
-provisioning workspace init
-# Name: production
-# Description: Production infrastructure
-# Provider: upcloud
+# Expected output:
+# ✓ Nushell 0.109.1 installed
+# ✓ Nickel 1.15.1 installed
+# ✓ SOPS 3.10.2 installed
+# ✓ Age 1.2.1 installed
+# ✓ Provisioning CLI installed
+# ✓ Configuration directory created
+# Platform ready for use
 
-

What this creates:

-

The new workspace initialization now generates Nickel configuration files for type-safe, schema-validated infrastructure definitions:

-
workspace/
-├── config/
-│   ├── config.ncl               # Master Nickel configuration (type-safe)
-│   ├── providers/
-│   │   └── upcloud.toml         # Provider-specific settings
-│   ├── platform/                # Platform service configs
-│   └── kms.toml                 # Key management settings
-├── infra/
-│   └── default/
-│       ├── main.ncl             # Infrastructure entry point
-│       └── servers.ncl          # Server definitions
-├── docs/                        # Auto-generated guides
-└── workspace.nu                 # Workspace utility scripts
+

Phase 2: Initial Configuration

+

Generate User Configuration

+
# Create user configuration directory
+mkdir -p ~/.config/provisioning
+
+# Generate default user config
+provisioning setup init-user-config
 
-

Workspace Configuration Format

-

The workspace configuration uses Nickel (type-safe, validated). This provides:

-
    -
  • Type Safety: Schema validation catches errors at load time
  • -
  • Lazy Evaluation: Only computes what’s needed
  • -
  • Validation: Record merging, required fields, constraints
  • -
  • Documentation: Self-documenting with records
  • -
-

Example Nickel config (config.ncl):

-
{
-  workspace = {
-    name = "production",
-    version = "1.0.0",
-    created = "2025-12-03T14:30:00Z",
-  },
-
-  paths = {
-    base = "/opt/workspaces/production",
-    infra = "/opt/workspaces/production/infra",
-    cache = "/opt/workspaces/production/.cache",
-  },
-
-  providers = {
-    active = ["upcloud"],
-    default = "upcloud",
-  },
-}
+

Generated configuration structure:

+
~/.config/provisioning/
+├── user_config.yaml      # User preferences and workspace registry
+├── credentials/          # Provider credentials (encrypted)
+├── age/                  # Age encryption keys
+└── cache/                # CLI cache
 
-

Verify Workspace

-
# Show workspace info
-provisioning workspace info
+

Configure Encryption

+
# Generate Age key pair for secrets
+age-keygen -o ~/.config/provisioning/age/provisioning.key
 
-# List all workspaces
-provisioning workspace list
+# Store public key
+age-keygen -y ~/.config/provisioning/age/provisioning.key > ~/.config/provisioning/age/provisioning.pub
 
-# Show active workspace
-provisioning workspace active
-# Expected: production
+# Configure SOPS to use Age
+cat > ~/.config/sops/config.yaml <<EOF
+creation_rules:
+  - path_regex: \.secret\.(yam| l tom| l json)$
+    age: $(cat ~/.config/provisioning/age/provisioning.pub)
+EOF
 
-

View and Validate Workspace Configuration

-

Now you can inspect and validate your Nickel workspace configuration:

-
# View complete workspace configuration
-provisioning workspace config show
+

Provider Credentials

+

Configure credentials for your chosen cloud provider.

+

UpCloud Configuration

+
# Edit user config
+nano ~/.config/provisioning/user_config.yaml
 
-# Show specific workspace
-provisioning workspace config show production
+# Add provider credentials
+cat >> ~/.config/provisioning/user_config.yaml <<EOF
+providers:
+  upcloud:
+    username: "your-upcloud-username"
+    password_env: "UPCLOUD_PASSWORD"  # Read from environment variable
+    default_zone: "de-fra1"
+EOF
 
-# View configuration in different formats
-provisioning workspace config show --format=json
-provisioning workspace config show --format=yaml
-provisioning workspace config show --format=nickel  # Raw Nickel file
-
-# Validate workspace configuration
-provisioning workspace config validate
-# Output: ✅ Validation complete - all configs are valid
-
-# Show configuration hierarchy (priority order)
-provisioning workspace config hierarchy
+# Set environment variable (add to ~/.bashrc or ~/.zshrc)
+export UPCLOUD_PASSWORD="your-upcloud-password"
 
-

Configuration Validation: The Nickel schema automatically validates:

-
    -
  • ✅ Semantic versioning format (for example, “1.0.0”)
  • -
  • ✅ Required sections present (workspace, paths, provisioning, etc.)
  • -
  • ✅ Valid file paths and types
  • -
  • ✅ Provider configuration exists for active providers
  • -
  • ✅ KMS and SOPS settings properly configured
  • -
-
-

Step 6: Configure Environment

-

Set Provider Credentials

-

UpCloud Provider:

-
# Create provider config
-vim workspace/config/providers/upcloud.toml
+

AWS Configuration

+
# Add AWS credentials to user config
+cat >> ~/.config/provisioning/user_config.yaml <<EOF
+providers:
+  aws:
+    access_key_id_env: "AWS_ACCESS_KEY_ID"
+    secret_access_key_env: "AWS_SECRET_ACCESS_KEY"
+    default_region: "eu-west-1"
+EOF
+
+# Set environment variables
+export AWS_ACCESS_KEY_ID="your-access-key-id"
+export AWS_SECRET_ACCESS_KEY="your-secret-access-key"
 
-
[upcloud]
-username = "your-upcloud-username"
-password = "your-upcloud-password"  # Will be encrypted
+

Local Provider (Development)

+
# Configure local provider for testing
+cat >> ~/.config/provisioning/user_config.yaml <<EOF
+providers:
+  local:
+    backend: "docker"  # or "podman", "libvirt"
+    storage_path: "$HOME/.local/share/provisioning/local"
+EOF
 
-# Default settings
-default_zone = "de-fra1"
-default_plan = "2xCPU-4 GB"
+# Ensure Docker is running
+docker info
 
-

AWS Provider:

-
# Create AWS config
-vim workspace/config/providers/aws.toml
-
-
[aws]
-region = "us-east-1"
-access_key_id = "AKIAXXXXX"
-secret_access_key = "xxxxx"  # Will be encrypted
-
-# Default settings
-default_instance_type = "t3.medium"
-default_region = "us-east-1"
-
-

Encrypt Sensitive Data

-
# Generate Age key if not done already
-age-keygen -o ~/.age/key.txt
-
-# Encrypt provider configs
-kms encrypt (open workspace/config/providers/upcloud.toml) --backend age \
-    | save workspace/config/providers/upcloud.toml.enc
-
-# Or use SOPS
-sops --encrypt --age $(cat ~/.age/key.txt | grep "public key:" | cut -d: -f2) \
-    workspace/config/providers/upcloud.toml > workspace/config/providers/upcloud.toml.enc
-
-# Remove plaintext
-rm workspace/config/providers/upcloud.toml
-
-

Configure Local Overrides

-
# Edit user-specific settings
-vim workspace/config/local-overrides.toml
-
-
[user]
-name = "admin"
-email = "admin@example.com"
-
-[preferences]
-editor = "vim"
-output_format = "yaml"
-confirm_delete = true
-confirm_deploy = true
-
-[http]
-use_curl = true  # Use curl instead of ureq
-
-[paths]
-ssh_key = "~/.ssh/id_ed25519"
-
-
-

Step 7: Discover and Load Modules

-

Discover Available Modules

-
# Discover task services
-provisioning module discover taskserv
-# Shows: kubernetes, containerd, etcd, cilium, helm, etc.
-
-# Discover providers
-provisioning module discover provider
-# Shows: upcloud, aws, local
-
-# Discover clusters
-provisioning module discover cluster
-# Shows: buildkit, registry, monitoring, etc.
-
-

Load Modules into Workspace

-
# Load Kubernetes taskserv
-provisioning module load taskserv production kubernetes
-
-# Load multiple modules
-provisioning module load taskserv production kubernetes containerd cilium
-
-# Load cluster configuration
-provisioning module load cluster production buildkit
-
-# Verify loaded modules
-provisioning module list taskserv production
-provisioning module list cluster production
-
-
-

Step 8: Validate Configuration

-

Before deploying, validate all configuration:

-
# Validate workspace configuration
-provisioning workspace validate
-
-# Validate infrastructure configuration
+

Validate Configuration

+
# Validate user configuration
 provisioning validate config
 
-# Validate specific infrastructure
-provisioning infra validate --infra production
+# Test provider connectivity
+provisioning providers
 
-# Check environment variables
-provisioning env
-
-# Show all configuration and environment
-provisioning allenv
+# Expected output:
+# PROVIDER    STATUS     REGION/ZONE
+# upcloud     connected  de-fra1
+# local       ready      localhost
 
-

Expected output:

-
✓ Configuration valid
-✓ Provider credentials configured
-✓ Workspace initialized
-✓ Modules loaded: 3 taskservs, 1 cluster
-✓ SSH key configured
-✓ Age encryption key available
+

Phase 3: Create First Workspace

+

Initialize Workspace

+
# Create workspace for first project
+provisioning workspace init my-first-project
+
+# Navigate to workspace
+cd workspace_my_first_project
+
+# Verify structure
+ls -la
 
-

Fix any errors before proceeding to deployment.

-
-

Step 9: Deploy Servers

-

Preview Server Creation (Dry Run)

-
# Check what would be created (no actual changes)
-provisioning server create --infra production --check
-
-# With debug output for details
-provisioning server create --infra production --check --debug
+

Workspace structure created:

+
workspace_my_first_project/
+├── infra/                   # Infrastructure definitions (Nickel)
+├── config/                  # Workspace configuration
+│   ├── provisioning.yaml    # Workspace metadata
+│   ├── dev-defaults.toml    # Development defaults
+│   ├── test-defaults.toml   # Testing defaults
+│   └── prod-defaults.toml   # Production defaults
+├── extensions/              # Workspace-specific extensions
+│   ├── providers/
+│   ├── taskservs/
+│   └── workflows/
+└── runtime/                 # State and logs (gitignored)
+    ├── state/
+    ├── checkpoints/
+    └── logs/
 
-

Review the output:

-
    -
  • Server names and configurations
  • -
  • Zones and regions
  • -
  • CPU, memory, disk specifications
  • -
  • Estimated costs
  • -
  • Network settings
  • -
-

Create Servers

-
# Create servers (with confirmation prompt)
-provisioning server create --infra production
-
-# Or auto-confirm (skip prompt)
-provisioning server create --infra production --yes
-
-# Wait for completion
-provisioning server create --infra production --wait
+

Configure Workspace

+
# Edit workspace metadata
+nano config/provisioning.yaml
 
-

Expected output:

-
Creating servers for infrastructure: production
+

Example workspace configuration:

+
workspace:
+  name: my-first-project
+  description: Learning Provisioning platform
+  environment: development
+  created: 2026-01-16T10:00:00Z
 
-  ● Creating server: k8s-master-01 (de-fra1, 4xCPU-8 GB)
-  ● Creating server: k8s-worker-01 (de-fra1, 4xCPU-8 GB)
-  ● Creating server: k8s-worker-02 (de-fra1, 4xCPU-8 GB)
+defaults:
+  provider: local
+  region: localhost
+  confirmation_required: false
 
-✓ Created 3 servers in 120 seconds
-
-Servers:
-  • k8s-master-01: 192.168.1.10 (Running)
-  • k8s-worker-01: 192.168.1.11 (Running)
-  • k8s-worker-02: 192.168.1.12 (Running)
+versioning:
+  nushell: "0.109.1"
+  nickel: "1.15.1"
+  kubernetes: "1.29.0"
 
-

Verify Server Creation

-
# List all servers
-provisioning server list --infra production
+

Phase 4: Define Infrastructure

+

Simple Server Configuration

+

Create your first infrastructure definition using Nickel:

+
# Create server definition
+cat > infra/simple-server.ncl <<'EOF'
+{
+  metadata = {
+    name = "simple-server"
+    provider = "local"
+    environment = 'development
+  }
 
-# Show detailed server info
-provisioning server list --infra production --out yaml
+  infrastructure = {
+    servers = [
+      {
+        name = "dev-web-01"
+        plan = "small"
+        zone = "localhost"
+        disk_size_gb = 25
+        backup_enabled = false
+        role = 'standalone
+      }
+    ]
+  }
 
-# SSH to server (test connectivity)
-provisioning server ssh k8s-master-01
-# Type 'exit' to return
+  services = {
+    taskservs = ["containerd"]
+  }
+}
+EOF
 
-
-

Step 10: Install Task Services

-

Task services are infrastructure components like Kubernetes, databases, monitoring, etc.

-

Install Kubernetes (Check Mode First)

-
# Preview Kubernetes installation
-provisioning taskserv create kubernetes --infra production --check
+

Validate Infrastructure Schema

+
# Type-check Nickel schema
+nickel typecheck infra/simple-server.ncl
 
-# Shows:
-# - Dependencies required (containerd, etcd)
-# - Configuration to be applied
-# - Resources needed
-# - Estimated installation time
+# Validate against platform contracts
+provisioning validate config --infra simple-server
+
+# Preview deployment
+provisioning server create --check --infra simple-server
 
-

Install Kubernetes

-
# Install Kubernetes (with dependencies)
-provisioning taskserv create kubernetes --infra production
+

Expected output:

+
Infrastructure Plan: simple-server
+Provider: local
+Environment: development
 
-# Or install dependencies first
-provisioning taskserv create containerd --infra production
-provisioning taskserv create etcd --infra production
-provisioning taskserv create kubernetes --infra production
+Servers to create:
+  - dev-web-01 (small, standalone)
+    Disk: 25 GB
+    Backup: disabled
 
-# Monitor progress
-provisioning workflow monitor <task_id>
+Task services:
+  - containerd
+
+Estimated resources:
+  CPU: 1 core
+  RAM: 1 GB
+  Disk: 25 GB
+
+Validation: PASSED
 
-

Expected output:

-
Installing taskserv: kubernetes
-
-  ● Installing containerd on k8s-master-01
-  ● Installing containerd on k8s-worker-01
-  ● Installing containerd on k8s-worker-02
-  ✓ Containerd installed (30s)
-
-  ● Installing etcd on k8s-master-01
-  ✓ etcd installed (20s)
-
-  ● Installing Kubernetes control plane on k8s-master-01
-  ✓ Kubernetes control plane ready (45s)
-
-  ● Joining worker nodes
-  ✓ k8s-worker-01 joined (15s)
-  ✓ k8s-worker-02 joined (15s)
-
-✓ Kubernetes installation complete (125 seconds)
-
-Cluster Info:
-  • Version: 1.28.0
-  • Nodes: 3 (1 control-plane, 2 workers)
-  • API Server: https://192.168.1.10:6443
-
-

Install Additional Services

-
# Install Cilium (CNI)
-provisioning taskserv create cilium --infra production
-
-# Install Helm
-provisioning taskserv create helm --infra production
-
-# Verify all taskservs
-provisioning taskserv list --infra production
-
-
-

Step 11: Create Clusters

-

Clusters are complete application stacks (for example, BuildKit, OCI Registry, Monitoring).

-

Create BuildKit Cluster (Check Mode)

-
# Preview cluster creation
-provisioning cluster create buildkit --infra production --check
-
-# Shows:
-# - Components to be deployed
-# - Dependencies required
-# - Configuration values
-# - Resource requirements
-
-

Create BuildKit Cluster

-
# Create BuildKit cluster
-provisioning cluster create buildkit --infra production
+

Deploy Infrastructure

+
# Create server
+provisioning server create --infra simple-server --yes
 
 # Monitor deployment
-provisioning workflow monitor <task_id>
-
-# Or use plugin for faster monitoring
-orch tasks --status running
+provisioning server status dev-web-01
 
-

Expected output:

-
Creating cluster: buildkit
+

Deployment progress:

+
Creating server: dev-web-01...
+  [████████████████████████] 100% - Container created
+  [████████████████████████] 100% - Network configured
+  [████████████████████████] 100% - SSH ready
 
-  ● Deploying BuildKit daemon
-  ● Deploying BuildKit worker
-  ● Configuring BuildKit cache
-  ● Setting up BuildKit registry integration
-
-✓ BuildKit cluster ready (60 seconds)
-
-Cluster Info:
-  • BuildKit version: 0.12.0
-  • Workers: 2
-  • Cache: 50 GB
-  • Registry: registry.production.local
+Server dev-web-01 created successfully
+IP Address: 172.17.0.2
+Status: running
+Provider: local (docker)
 
-

Verify Cluster

-
# List all clusters
-provisioning cluster list --infra production
+

Install Task Service

+
# Install containerd
+provisioning taskserv create containerd --infra simple-server
 
-# Show cluster details
-provisioning cluster list --infra production --out yaml
-
-# Check cluster health
-kubectl get pods -n buildkit
+# Verify installation
+provisioning taskserv status containerd
 
-
-

Step 12: Verify Deployment

-

Comprehensive Health Check

-
# Check orchestrator status
-orch status
-# or
-provisioning orchestrator status
+

Installation output:

+
Installing containerd on dev-web-01...
+  [████████████████████████] 100% - Dependencies resolved
+  [████████████████████████] 100% - Containerd installed
+  [████████████████████████] 100% - Service started
+  [████████████████████████] 100% - Health check passed
 
-# Check all servers
-provisioning server list --infra production
-
-# Check all taskservs
-provisioning taskserv list --infra production
-
-# Check all clusters
-provisioning cluster list --infra production
-
-# Verify Kubernetes cluster
-kubectl get nodes
-kubectl get pods --all-namespaces
+Containerd installed successfully
+Version: 1.7.0
+Runtime: runc
 
-

Run Validation Tests

-
# Validate infrastructure
-provisioning infra validate --infra production
+

Verify Deployment

+
# SSH into server
+provisioning server ssh dev-web-01
 
-# Test connectivity
-provisioning server ssh k8s-master-01 "kubectl get nodes"
+# Inside server - verify containerd
+sudo systemctl status containerd
+sudo ctr version
 
-# Test BuildKit
-kubectl exec -it -n buildkit buildkit-0 -- buildctl --version
+# Exit server
+exit
+
+# List all resources
+provisioning server list
+provisioning taskserv list
 
-

Expected Results

-

All checks should show:

-
    -
  • ✅ Servers: Running
  • -
  • ✅ Taskservs: Installed and healthy
  • -
  • ✅ Clusters: Deployed and operational
  • -
  • ✅ Kubernetes: 3/3 nodes ready
  • -
  • ✅ BuildKit: 2/2 workers ready
  • -
-
-

Step 13: Post-Deployment

-

Configure kubectl Access

-
# Get kubeconfig from master node
-provisioning server ssh k8s-master-01 "cat ~/.kube/config" > ~/.kube/config-production
+

Phase 5: Kubernetes Cluster Deployment

+

Define Kubernetes Infrastructure

+
# Create Kubernetes cluster definition
+cat > infra/k8s-cluster.ncl <<'EOF'
+{
+  metadata = {
+    name = "k8s-dev-cluster"
+    provider = "local"
+    environment = 'development
+  }
+
+  infrastructure = {
+    servers = [
+      {
+        name = "k8s-control-01"
+        plan = "medium"
+        role = 'control
+        zone = "localhost"
+        disk_size_gb = 50
+      }
+      {
+        name = "k8s-worker-01"
+        plan = "medium"
+        role = 'worker
+        zone = "localhost"
+        disk_size_gb = 50
+      }
+      {
+        name = "k8s-worker-02"
+        plan = "medium"
+        role = 'worker
+        zone = "localhost"
+        disk_size_gb = 50
+      }
+    ]
+  }
+
+  services = {
+    taskservs = ["containerd", "etcd", "kubernetes", "cilium"]
+  }
+
+  kubernetes = {
+    version = "1.29.0"
+    pod_cidr = "10.244.0.0/16"
+    service_cidr = "10.96.0.0/12"
+    container_runtime = "containerd"
+    cri_socket = "/run/containerd/containerd.sock"
+  }
+}
+EOF
+
+

Validate Kubernetes Configuration

+
# Type-check schema
+nickel typecheck infra/k8s-cluster.ncl
+
+# Validate configuration
+provisioning validate config --infra k8s-cluster
+
+# Preview deployment
+provisioning cluster create --check --infra k8s-cluster
+
+

Deploy Kubernetes Cluster

+
# Create cluster infrastructure
+provisioning cluster create --infra k8s-cluster --yes
+
+# Monitor cluster deployment
+provisioning cluster status k8s-dev-cluster
+
+

Cluster deployment phases:

+
Phase 1: Creating servers...
+  [████████████████████████] 100% - 3/3 servers created
+
+Phase 2: Installing containerd...
+  [████████████████████████] 100% - 3/3 nodes ready
+
+Phase 3: Installing etcd...
+  [████████████████████████] 100% - Control plane ready
+
+Phase 4: Installing Kubernetes...
+  [████████████████████████] 100% - API server available
+  [████████████████████████] 100% - Workers joined
+
+Phase 5: Installing Cilium CNI...
+  [████████████████████████] 100% - Network ready
+
+Kubernetes cluster deployed successfully
+Cluster: k8s-dev-cluster
+Control plane: k8s-control-01
+Workers: k8s-worker-01, k8s-worker-02
+
+

Access Kubernetes Cluster

+
# Get kubeconfig
+provisioning cluster kubeconfig k8s-dev-cluster > ~/.kube/config-dev
 
 # Set KUBECONFIG
-export KUBECONFIG=~/.kube/config-production
+export KUBECONFIG=~/.kube/config-dev
 
-# Verify access
+# Verify cluster
 kubectl get nodes
-kubectl get pods --all-namespaces
-
-

Set Up Monitoring (Optional)

-
# Deploy monitoring stack
-provisioning cluster create monitoring --infra production
 
-# Access Grafana
-kubectl port-forward -n monitoring svc/grafana 3000:80
-# Open: http://localhost:3000
-
-

Configure CI/CD Integration (Optional)

-
# Generate CI/CD credentials
-provisioning secrets generate aws --ttl 12h
+# Expected output:
+# NAME              STATUS   ROLES           AGE   VERSION
+# k8s-control-01    Ready    control-plane   5m    v1.29.0
+# k8s-worker-01     Ready    <none>          4m    v1.29.0
+# k8s-worker-02     Ready    <none>          4m    v1.29.0
 
-# Create CI/CD kubeconfig
-kubectl create serviceaccount ci-cd -n default
-kubectl create clusterrolebinding ci-cd --clusterrole=admin --serviceaccount=default:ci-cd
+# Use K9s for interactive management
+k9s
+
+

Phase 6: Security Configuration

+

Enable Audit Logging

+
# Configure audit logging
+cat > config/audit-config.toml <<EOF
+[audit]
+enabled = true
+log_path = "runtime/logs/audit"
+retention_days = 90
+level = "info"
+
+[audit.filters]
+include_commands = ["server create", "server delete", "cluster deploy"]
+exclude_users = []
+EOF
+
+

Configure SOPS for Secrets

+
# Create secrets file
+cat > config/secrets.secret.yaml <<EOF
+database:
+  password: "changeme-db-password"
+  admin_user: "admin"
+
+kubernetes:
+  service_account_key: "changeme-sa-key"
+EOF
+
+# Encrypt secrets with SOPS
+sops -e -i config/secrets.secret.yaml
+
+# Verify encryption
+cat config/secrets.secret.yaml  # Should show encrypted content
+
+# Decrypt when needed
+sops -d config/secrets.secret.yaml
+
+

Enable MFA (Optional)

+
# Enable multi-factor authentication
+provisioning security mfa enable
+
+# Scan QR code with authenticator app
+# Enter verification code
+
+

Configure RBAC

+
# Create role definition
+cat > config/rbac-roles.yaml <<EOF
+roles:
+  - name: developer
+    permissions:
+      - server:read
+      - server:create
+      - taskserv:read
+      - taskserv:install
+    deny:
+      - cluster:delete
+      - config:modify
+
+  - name: operator
+    permissions:
+      - "*:read"
+      - server:*
+      - taskserv:*
+      - cluster:read
+      - cluster:deploy
+
+  - name: admin
+    permissions:
+      - "*:*"
+EOF
+
+

Phase 7: Multi-Cloud Deployment

+

Define Multi-Cloud Infrastructure

+
# Create multi-cloud definition
+cat > infra/multi-cloud.ncl <<'EOF'
+{
+  batch_workflow = {
+    operations = [
+      {
+        id = "upcloud-frontend"
+        provider = "upcloud"
+        region = "de-fra1"
+        servers = [
+          {name = "upcloud-web-01", plan = "medium", role = 'web}
+        ]
+        taskservs = ["containerd", "nginx"]
+      }
+      {
+        id = "aws-backend"
+        provider = "aws"
+        region = "eu-west-1"
+        servers = [
+          {name = "aws-api-01", plan = "t3.medium", role = 'api}
+        ]
+        taskservs = ["containerd", "docker"]
+        dependencies = ["upcloud-frontend"]
+      }
+      {
+        id = "local-database"
+        provider = "local"
+        region = "localhost"
+        servers = [
+          {name = "local-db-01", plan = "large", role = 'database}
+        ]
+        taskservs = ["postgresql"]
+      }
+    ]
+    parallel_limit = 2
+  }
+}
+EOF
+
+

Deploy Multi-Cloud Infrastructure

+
# Submit batch workflow
+provisioning batch submit infra/multi-cloud.ncl
+
+# Monitor workflow progress
+provisioning batch status
+
+# View detailed operation status
+provisioning batch operations
+
+

Phase 8: Monitoring and Maintenance

+

Platform Health Monitoring

+
# Check platform health
+provisioning health
+
+# View service status
+provisioning service status orchestrator
+provisioning service status control-center
+
+# View logs
+provisioning logs --service orchestrator --tail 100
+
+

Infrastructure Monitoring

+
# List all servers
+provisioning server list --all-workspaces
+
+# Show server details
+provisioning server info k8s-control-01
+
+# Check task service status
+provisioning taskserv list
+provisioning taskserv health containerd
 

Backup Configuration

-
# Backup workspace configuration
-tar -czf workspace-production-backup.tar.gz workspace/
+
# Create backup
+provisioning backup create --type full --output ~/backups/provisioning-$(date +%Y%m%d).tar.gz
 
-# Encrypt backup
-kms encrypt (open workspace-production-backup.tar.gz | encode base64) --backend age \
-    | save workspace-production-backup.tar.gz.enc
-
-# Store securely (S3, Vault, etc.)
+# Schedule automatic backups
+provisioning backup schedule daily --time "02:00" --retention 7
+
+

Phase 9: Advanced Workflows

+

Custom Workflow Creation

+
# Create custom workflow
+cat > extensions/workflows/deploy-app.ncl <<'EOF'
+{
+  workflow = {
+    name = "deploy-application"
+    description = "Deploy application to Kubernetes"
+
+    steps = [
+      {
+        name = "build-image"
+        action = "docker-build"
+        params = {dockerfile = "Dockerfile", tag = "myapp:latest"}
+      }
+      {
+        name = "push-image"
+        action = "docker-push"
+        params = {image = "myapp:latest", registry = "registry.example.com"}
+        depends_on = ["build-image"]
+      }
+      {
+        name = "deploy-k8s"
+        action = "kubectl-apply"
+        params = {manifest = "k8s/deployment.yaml"}
+        depends_on = ["push-image"]
+      }
+      {
+        name = "verify-deployment"
+        action = "kubectl-rollout-status"
+        params = {deployment = "myapp"}
+        depends_on = ["deploy-k8s"]
+      }
+    ]
+  }
+}
+EOF
+
+

Execute Custom Workflow

+
# Run workflow
+provisioning workflow run deploy-application
+
+# Monitor workflow
+provisioning workflow status deploy-application
+
+# View workflow history
+provisioning workflow history
 
-

Troubleshooting

-

Server Creation Fails

-

Problem: Server creation times out or fails

-
# Check provider credentials
+

Common Issues

+

Server Creation Fails

+
# Enable debug logging
+provisioning --debug server create --infra simple-server
+
+# Check provider connectivity
+provisioning providers
+
+# Validate credentials
 provisioning validate config
-
-# Check provider API status
-curl -u username:password https://api.upcloud.com/1.3/account
-
-# Try with debug mode
-provisioning server create --infra production --check --debug
 
-

Taskserv Installation Fails

-

Problem: Kubernetes installation fails

+

Task Service Installation Fails

# Check server connectivity
-provisioning server ssh k8s-master-01
+provisioning server ssh dev-web-01
 
-# Check logs
-provisioning orchestrator logs | grep kubernetes
-
-# Check dependencies
-provisioning taskserv list --infra production | where status == "failed"
+# Verify dependencies
+provisioning taskserv check-deps containerd
 
 # Retry installation
-provisioning taskserv delete kubernetes --infra production
-provisioning taskserv create kubernetes --infra production
+provisioning taskserv create containerd --force
 
-

Plugin Commands Don’t Work

-

Problem: auth, kms, or orch commands not found

-
# Check plugin registration
-plugin list | where name =~ "auth|kms|orch"
+

Cluster Deployment Fails

+
# Check cluster status
+provisioning cluster status k8s-dev-cluster
 
-# Re-register if missing
-cd provisioning/core/plugins/nushell-plugins
-plugin add target/release/nu_plugin_auth
-plugin add target/release/nu_plugin_kms
-plugin add target/release/nu_plugin_orchestrator
+# View cluster logs
+provisioning cluster logs k8s-dev-cluster
 
-# Restart Nushell
-exit
-nu
+# Reset and retry
+provisioning cluster reset k8s-dev-cluster
+provisioning cluster create --infra k8s-cluster
 
-

KMS Encryption Fails

-

Problem: kms encrypt returns error

-
# Check backend status
-kms status
-
-# Check RustyVault running
-curl http://localhost:8200/v1/sys/health
-
-# Use Age backend instead (local)
-kms encrypt "data" --backend age --key age1xxxxxxxxx
-
-# Check Age key
-cat ~/.age/key.txt
-
-

Orchestrator Not Running

-

Problem: orch status returns error

-
# Check orchestrator status
-ps aux | grep orchestrator
-
-# Start orchestrator
-cd provisioning/platform/orchestrator
-./scripts/start-orchestrator.nu --background
-
-# Check logs
-tail -f provisioning/platform/orchestrator/data/orchestrator.log
-
-

Configuration Validation Errors

-

Problem: provisioning validate config shows errors

-
# Show detailed errors
-provisioning validate config --debug
-
-# Check configuration files
-provisioning allenv
-
-# Fix missing settings
-vim workspace/config/local-overrides.toml
-
-

Next Steps

-

Explore Advanced Features

-
    -
  1. -

    Multi-Environment Deployment

    -
    # Create dev and staging workspaces
    -provisioning workspace create dev
    -provisioning workspace create staging
    -provisioning workspace switch dev
    -
    -
  2. -
  3. -

    Batch Operations

    -
    # Deploy to multiple clouds
    -provisioning batch submit workflows/multi-cloud-deploy.ncl
    -
    -
  4. -
  5. -

    Security Features

    -
    # Enable MFA
    -auth mfa enroll totp
    -
    -# Set up break-glass
    -provisioning break-glass request "Emergency access"
    -
    -
  6. -
  7. -

    Compliance and Audit

    -
    # Generate compliance report
    -provisioning compliance report --standard soc2
    -
    -
  8. -
-

Learn More

+

Production Deployment

    -
  • Quick Reference: provisioning sc or docs/guides/quickstart-cheatsheet.md
  • -
  • Update Guide: docs/guides/update-infrastructure.md
  • -
  • Customize Guide: docs/guides/customize-infrastructure.md
  • -
  • Plugin Guide: docs/user/PLUGIN_INTEGRATION_GUIDE.md
  • -
  • Security System: docs/architecture/adr-009-security-system-complete.md
  • +
  • Review Security Best Practices
  • +
  • Configure Backup & Recovery
  • +
  • Set up Monitoring
  • +
  • Implement Disaster Recovery
  • +
+

Advanced Features

+ +

Learning Resources

+ -

Get Help

-
# Show help for any command
-provisioning help
-provisioning help server
-provisioning help taskserv
-
-# Check version
-provisioning version
-
-# Start Nushell session with provisioning library
-provisioning nu
-
-

Summary

-

You’ve successfully:

-

✅ Installed Nushell and essential tools -✅ Built and registered native plugins (10-50x faster operations) -✅ Cloned and configured the project -✅ Initialized a production workspace -✅ Configured provider credentials -✅ Deployed servers -✅ Installed Kubernetes and task services -✅ Created application clusters -✅ Verified complete deployment

-

Your infrastructure is now ready for production use!

-
-

Estimated Total Time: 30-60 minutes -Next Guide: Update Infrastructure -Questions?: Open an issue or contact platform-team@example.com

-

Last Updated: 2025-10-09 -Version: 3.5.0

+

You’ve completed the from-scratch guide and learned:

+
    +
  • Platform installation and configuration
  • +
  • Provider credential setup
  • +
  • Workspace creation and management
  • +
  • Infrastructure definition with Nickel
  • +
  • Server and task service deployment
  • +
  • Kubernetes cluster deployment
  • +
  • Security configuration
  • +
  • Multi-cloud deployment
  • +
  • Monitoring and maintenance
  • +
  • Custom workflow creation
  • +
+

Your Provisioning platform is now ready for production use.

+ + + + + + + + diff --git a/docs/book/guides/update-infrastructure.html b/docs/book/guides/update-infrastructure.html deleted file mode 100644 index f078d12..0000000 --- a/docs/book/guides/update-infrastructure.html +++ /dev/null @@ -1,862 +0,0 @@ - - - - - - Update Infrastructure - Provisioning Platform Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-

Keyboard shortcuts

-
-

Press or to navigate between chapters

-

Press S or / to search in the book

-

Press ? to show this help

-

Press Esc to hide this help

-
-
-
-
- - - - - - - - - - - - - -
- -
- - - - - - - - -
-
-

Update Existing Infrastructure

-

Goal: Safely update running infrastructure with minimal downtime -Time: 15-30 minutes -Difficulty: Intermediate

-

Overview

-

This guide covers:

-
    -
  1. Checking for updates
  2. -
  3. Planning update strategies
  4. -
  5. Updating task services
  6. -
  7. Rolling updates
  8. -
  9. Rollback procedures
  10. -
  11. Verification
  12. -
-

Update Strategies

-

Strategy 1: In-Place Updates (Fastest)

-

Best for: Non-critical environments, development, staging

-
# Direct update without downtime consideration
-provisioning t create <taskserv> --infra <project>
-
- -

Best for: Production environments, high availability

-
# Update servers one by one
-provisioning s update --infra <project> --rolling
-
-

Strategy 3: Blue-Green Deployment (Safest)

-

Best for: Critical production, zero-downtime requirements

-
# Create new infrastructure, switch traffic, remove old
-provisioning ws init <project>-green
-# ... configure and deploy
-# ... switch traffic
-provisioning ws delete <project>-blue
-
-

Step 1: Check for Updates

-

1.1 Check All Task Services

-
# Check all taskservs for updates
-provisioning t check-updates
-
-

Expected Output:

-
📦 Task Service Update Check:
-
-NAME         CURRENT   LATEST    STATUS
-kubernetes   1.29.0    1.30.0    ⬆️  update available
-containerd   1.7.13    1.7.13    ✅ up-to-date
-cilium       1.14.5    1.15.0    ⬆️  update available
-postgres     15.5      16.1      ⬆️  update available
-redis        7.2.3     7.2.3     ✅ up-to-date
-
-Updates available: 3
-
-

1.2 Check Specific Task Service

-
# Check specific taskserv
-provisioning t check-updates kubernetes
-
-

Expected Output:

-
📦 Kubernetes Update Check:
-
-Current:  1.29.0
-Latest:   1.30.0
-Status:   ⬆️  Update available
-
-Changelog:
-  • Enhanced security features
-  • Performance improvements
-  • Bug fixes in kube-apiserver
-  • New workload resource types
-
-Breaking Changes:
-  • None
-
-Recommended: ✅ Safe to update
-
-

1.3 Check Version Status

-
# Show detailed version information
-provisioning version show
-
-

Expected Output:

-
📋 Component Versions:
-
-COMPONENT    CURRENT   LATEST    DAYS OLD  STATUS
-kubernetes   1.29.0    1.30.0    45        ⬆️  update
-containerd   1.7.13    1.7.13    0         ✅ current
-cilium       1.14.5    1.15.0    30        ⬆️  update
-postgres     15.5      16.1      60        ⬆️  update (major)
-redis        7.2.3     7.2.3     0         ✅ current
-
-

1.4 Check for Security Updates

-
# Check for security-related updates
-provisioning version updates --security-only
-
-

Step 2: Plan Your Update

-

2.1 Review Current Configuration

-
# Show current infrastructure
-provisioning show settings --infra my-production
-
-

2.2 Backup Configuration

-
# Create configuration backup
-cp -r workspace/infra/my-production workspace/infra/my-production.backup-$(date +%Y%m%d)
-
-# Or use built-in backup
-provisioning ws backup my-production
-
-

Expected Output:

-
✅ Backup created: workspace/backups/my-production-20250930.tar.gz
-
-

2.3 Create Update Plan

-
# Generate update plan
-provisioning plan update --infra my-production
-
-

Expected Output:

-
📝 Update Plan for my-production:
-
-Phase 1: Minor Updates (Low Risk)
-  • containerd: No update needed
-  • redis: No update needed
-
-Phase 2: Patch Updates (Medium Risk)
-  • cilium: 1.14.5 → 1.15.0 (estimated 5 minutes)
-
-Phase 3: Major Updates (High Risk - Requires Testing)
-  • kubernetes: 1.29.0 → 1.30.0 (estimated 15 minutes)
-  • postgres: 15.5 → 16.1 (estimated 10 minutes, may require data migration)
-
-Recommended Order:
-  1. Update cilium (low risk)
-  2. Update kubernetes (test in staging first)
-  3. Update postgres (requires maintenance window)
-
-Total Estimated Time: 30 minutes
-Recommended: Test in staging environment first
-
-

Step 3: Update Task Services

-

3.1 Update Non-Critical Service (Cilium Example)

-

Dry-Run Update

-
# Test update without applying
-provisioning t create cilium --infra my-production --check
-
-

Expected Output:

-
🔍 CHECK MODE: Simulating Cilium update
-
-Current: 1.14.5
-Target:  1.15.0
-
-Would perform:
-  1. Download Cilium 1.15.0
-  2. Update configuration
-  3. Rolling restart of Cilium pods
-  4. Verify connectivity
-
-Estimated downtime: <1 minute per node
-No errors detected. Ready to update.
-
-

Generate Updated Configuration

-
# Generate new configuration
-provisioning t generate cilium --infra my-production
-
-

Expected Output:

-
✅ Generated Cilium configuration (version 1.15.0)
-   Saved to: workspace/infra/my-production/taskservs/cilium.ncl
-
-

Apply Update

-
# Apply update
-provisioning t create cilium --infra my-production
-
-

Expected Output:

-
🚀 Updating Cilium on my-production...
-
-Downloading Cilium 1.15.0... ⏳
-✅ Downloaded
-
-Updating configuration... ⏳
-✅ Configuration updated
-
-Rolling restart: web-01... ⏳
-✅ web-01 updated (Cilium 1.15.0)
-
-Rolling restart: web-02... ⏳
-✅ web-02 updated (Cilium 1.15.0)
-
-Verifying connectivity... ⏳
-✅ All nodes connected
-
-🎉 Cilium update complete!
-   Version: 1.14.5 → 1.15.0
-   Downtime: 0 minutes
-
-

Verify Update

-
# Verify updated version
-provisioning version taskserv cilium
-
-

Expected Output:

-
📦 Cilium Version Info:
-
-Installed: 1.15.0
-Latest:    1.15.0
-Status:    ✅ Up-to-date
-
-Nodes:
-  ✅ web-01: 1.15.0 (running)
-  ✅ web-02: 1.15.0 (running)
-
-

3.2 Update Critical Service (Kubernetes Example)

-

Test in Staging First

-
# If you have staging environment
-provisioning t create kubernetes --infra my-staging --check
-provisioning t create kubernetes --infra my-staging
-
-# Run integration tests
-provisioning test kubernetes --infra my-staging
-
-

Backup Current State

-
# Backup Kubernetes state
-kubectl get all -A -o yaml > k8s-backup-$(date +%Y%m%d).yaml
-
-# Backup etcd (if using external etcd)
-provisioning t backup kubernetes --infra my-production
-
-

Schedule Maintenance Window

-
# Set maintenance mode (optional, if supported)
-provisioning maintenance enable --infra my-production --duration 30m
-
-

Update Kubernetes

-
# Update control plane first
-provisioning t create kubernetes --infra my-production --control-plane-only
-
-

Expected Output:

-
🚀 Updating Kubernetes control plane on my-production...
-
-Draining control plane: web-01... ⏳
-✅ web-01 drained
-
-Updating control plane: web-01... ⏳
-✅ web-01 updated (Kubernetes 1.30.0)
-
-Uncordoning: web-01... ⏳
-✅ web-01 ready
-
-Verifying control plane... ⏳
-✅ Control plane healthy
-
-🎉 Control plane update complete!
-
-
# Update worker nodes one by one
-provisioning t create kubernetes --infra my-production --workers-only --rolling
-
-

Expected Output:

-
🚀 Updating Kubernetes workers on my-production...
-
-Rolling update: web-02...
-  Draining... ⏳
-  ✅ Drained (pods rescheduled)
-
-  Updating... ⏳
-  ✅ Updated (Kubernetes 1.30.0)
-
-  Uncordoning... ⏳
-  ✅ Ready
-
-  Waiting for pods to stabilize... ⏳
-  ✅ All pods running
-
-🎉 Worker update complete!
-   Updated: web-02
-   Version: 1.30.0
-
-

Verify Update

-
# Verify Kubernetes cluster
-kubectl get nodes
-provisioning version taskserv kubernetes
-
-

Expected Output:

-
NAME     STATUS   ROLES           AGE   VERSION
-web-01   Ready    control-plane   30d   v1.30.0
-web-02   Ready    <none>          30d   v1.30.0
-
-
# Run smoke tests
-provisioning test kubernetes --infra my-production
-
-

3.3 Update Database (PostgreSQL Example)

-

⚠️ WARNING: Database updates may require data migration. Always backup first!

-

Backup Database

-
# Backup PostgreSQL database
-provisioning t backup postgres --infra my-production
-
-

Expected Output:

-
🗄️  Backing up PostgreSQL...
-
-Creating dump: my-production-postgres-20250930.sql... ⏳
-✅ Dump created (2.3 GB)
-
-Compressing... ⏳
-✅ Compressed (450 MB)
-
-Saved to: workspace/backups/postgres/my-production-20250930.sql.gz
-
-

Check Compatibility

-
# Check if data migration is needed
-provisioning t check-migration postgres --from 15.5 --to 16.1
-
-

Expected Output:

-
🔍 PostgreSQL Migration Check:
-
-From: 15.5
-To:   16.1
-
-Migration Required: ✅ Yes (major version change)
-
-Steps Required:
-  1. Dump database with pg_dump
-  2. Stop PostgreSQL 15.5
-  3. Install PostgreSQL 16.1
-  4. Initialize new data directory
-  5. Restore from dump
-
-Estimated Time: 15-30 minutes (depending on data size)
-Estimated Downtime: 15-30 minutes
-
-Recommended: Use streaming replication for zero-downtime upgrade
-
-

Perform Update

-
# Update PostgreSQL (with automatic migration)
-provisioning t create postgres --infra my-production --migrate
-
-

Expected Output:

-
🚀 Updating PostgreSQL on my-production...
-
-⚠️  Major version upgrade detected (15.5 → 16.1)
-   Automatic migration will be performed
-
-Dumping database... ⏳
-✅ Database dumped (2.3 GB)
-
-Stopping PostgreSQL 15.5... ⏳
-✅ Stopped
-
-Installing PostgreSQL 16.1... ⏳
-✅ Installed
-
-Initializing new data directory... ⏳
-✅ Initialized
-
-Restoring database... ⏳
-✅ Restored (2.3 GB)
-
-Starting PostgreSQL 16.1... ⏳
-✅ Started
-
-Verifying data integrity... ⏳
-✅ All tables verified
-
-🎉 PostgreSQL update complete!
-   Version: 15.5 → 16.1
-   Downtime: 18 minutes
-
-

Verify Update

-
# Verify PostgreSQL
-provisioning version taskserv postgres
-ssh db-01 "psql --version"
-
-

Step 4: Update Multiple Services

-

4.1 Batch Update (Sequentially)

-
# Update multiple taskservs one by one
-provisioning t update --infra my-production --taskservs cilium,containerd,redis
-
-

Expected Output:

-
🚀 Updating 3 taskservs on my-production...
-
-[1/3] Updating cilium... ⏳
-✅ cilium updated (1.15.0)
-
-[2/3] Updating containerd... ⏳
-✅ containerd updated (1.7.14)
-
-[3/3] Updating redis... ⏳
-✅ redis updated (7.2.4)
-
-🎉 All updates complete!
-   Updated: 3 taskservs
-   Total time: 8 minutes
-
-

4.2 Parallel Update (Non-Dependent Services)

-
# Update taskservs in parallel (if they don't depend on each other)
-provisioning t update --infra my-production --taskservs redis,postgres --parallel
-
-

Expected Output:

-
🚀 Updating 2 taskservs in parallel on my-production...
-
-redis: Updating... ⏳
-postgres: Updating... ⏳
-
-redis: ✅ Updated (7.2.4)
-postgres: ✅ Updated (16.1)
-
-🎉 All updates complete!
-   Updated: 2 taskservs
-   Total time: 3 minutes (parallel)
-
-

Step 5: Update Server Configuration

-

5.1 Update Server Resources

-
# Edit server configuration
-provisioning sops workspace/infra/my-production/servers.ncl
-
-

Example: Upgrade server plan

-
# Before
-{
-    name = "web-01"
-    plan = "1xCPU-2 GB"  # Old plan
-}
-
-# After
-{
-    name = "web-01"
-    plan = "2xCPU-4 GB"  # New plan
-}
-
-
# Apply server update
-provisioning s update --infra my-production --check
-provisioning s update --infra my-production
-
-

5.2 Update Server OS

-
# Update operating system packages
-provisioning s update --infra my-production --os-update
-
-

Expected Output:

-
🚀 Updating OS packages on my-production servers...
-
-web-01: Updating packages... ⏳
-✅ web-01: 24 packages updated
-
-web-02: Updating packages... ⏳
-✅ web-02: 24 packages updated
-
-db-01: Updating packages... ⏳
-✅ db-01: 24 packages updated
-
-🎉 OS updates complete!
-
-

Step 6: Rollback Procedures

-

6.1 Rollback Task Service

-

If update fails or causes issues:

-
# Rollback to previous version
-provisioning t rollback cilium --infra my-production
-
-

Expected Output:

-
🔄 Rolling back Cilium on my-production...
-
-Current: 1.15.0
-Target:  1.14.5 (previous version)
-
-Rolling back: web-01... ⏳
-✅ web-01 rolled back
-
-Rolling back: web-02... ⏳
-✅ web-02 rolled back
-
-Verifying connectivity... ⏳
-✅ All nodes connected
-
-🎉 Rollback complete!
-   Version: 1.15.0 → 1.14.5
-
-

6.2 Rollback from Backup

-
# Restore configuration from backup
-provisioning ws restore my-production --from workspace/backups/my-production-20250930.tar.gz
-
-

6.3 Emergency Rollback

-
# Complete infrastructure rollback
-provisioning rollback --infra my-production --to-snapshot <snapshot-id>
-
-

Step 7: Post-Update Verification

-

7.1 Verify All Components

-
# Check overall health
-provisioning health --infra my-production
-
-

Expected Output:

-
🏥 Health Check: my-production
-
-Servers:
-  ✅ web-01: Healthy
-  ✅ web-02: Healthy
-  ✅ db-01: Healthy
-
-Task Services:
-  ✅ kubernetes: 1.30.0 (healthy)
-  ✅ containerd: 1.7.13 (healthy)
-  ✅ cilium: 1.15.0 (healthy)
-  ✅ postgres: 16.1 (healthy)
-
-Clusters:
-  ✅ buildkit: 2/2 replicas (healthy)
-
-Overall Status: ✅ All systems healthy
-
-

7.2 Verify Version Updates

-
# Verify all versions are updated
-provisioning version show
-
-

7.3 Run Integration Tests

-
# Run comprehensive tests
-provisioning test all --infra my-production
-
-

Expected Output:

-
🧪 Running Integration Tests...
-
-[1/5] Server connectivity... ⏳
-✅ All servers reachable
-
-[2/5] Kubernetes health... ⏳
-✅ All nodes ready, all pods running
-
-[3/5] Network connectivity... ⏳
-✅ All services reachable
-
-[4/5] Database connectivity... ⏳
-✅ PostgreSQL responsive
-
-[5/5] Application health... ⏳
-✅ All applications healthy
-
-🎉 All tests passed!
-
-

7.4 Monitor for Issues

-
# Monitor logs for errors
-provisioning logs --infra my-production --follow --level error
-
-

Update Checklist

-

Use this checklist for production updates:

-
    -
  • -Check for available updates
  • -
  • -Review changelog and breaking changes
  • -
  • -Create configuration backup
  • -
  • -Test update in staging environment
  • -
  • -Schedule maintenance window
  • -
  • -Notify team/users of maintenance
  • -
  • -Update non-critical services first
  • -
  • -Verify each update before proceeding
  • -
  • -Update critical services with rolling updates
  • -
  • -Backup database before major updates
  • -
  • -Verify all components after update
  • -
  • -Run integration tests
  • -
  • -Monitor for issues (30 minutes minimum)
  • -
  • -Document any issues encountered
  • -
  • -Close maintenance window
  • -
-

Common Update Scenarios

-

Scenario 1: Minor Security Patch

-
# Quick security update
-provisioning t check-updates --security-only
-provisioning t update --infra my-production --security-patches --yes
-
-

Scenario 2: Major Version Upgrade

-
# Careful major version update
-provisioning ws backup my-production
-provisioning t check-migration <service> --from X.Y --to X+1.Y
-provisioning t create <service> --infra my-production --migrate
-provisioning test all --infra my-production
-
-

Scenario 3: Emergency Hotfix

-
# Apply critical hotfix immediately
-provisioning t create <service> --infra my-production --hotfix --yes
-
-

Troubleshooting Updates

-

Issue: Update fails mid-process

-

Solution:

-
# Check update status
-provisioning t status <taskserv> --infra my-production
-
-# Resume failed update
-provisioning t update <taskserv> --infra my-production --resume
-
-# Or rollback
-provisioning t rollback <taskserv> --infra my-production
-
-

Issue: Service not starting after update

-

Solution:

-
# Check logs
-provisioning logs <taskserv> --infra my-production
-
-# Verify configuration
-provisioning t validate <taskserv> --infra my-production
-
-# Rollback if necessary
-provisioning t rollback <taskserv> --infra my-production
-
-

Issue: Data migration fails

-

Solution:

-
# Check migration logs
-provisioning t migration-logs <taskserv> --infra my-production
-
-# Restore from backup
-provisioning t restore <taskserv> --infra my-production --from <backup-file>
-
-

Best Practices

-
    -
  1. Always Test First: Test updates in staging before production
  2. -
  3. Backup Everything: Create backups before any update
  4. -
  5. Update Gradually: Update one service at a time
  6. -
  7. Monitor Closely: Watch for errors after each update
  8. -
  9. Have Rollback Plan: Always have a rollback strategy
  10. -
  11. Document Changes: Keep update logs for reference
  12. -
  13. Schedule Wisely: Update during low-traffic periods
  14. -
  15. Verify Thoroughly: Run tests after each update
  16. -
-

Next Steps

- -

Quick Reference

-
# Update workflow
-provisioning t check-updates
-provisioning ws backup my-production
-provisioning t create <taskserv> --infra my-production --check
-provisioning t create <taskserv> --infra my-production
-provisioning version taskserv <taskserv>
-provisioning health --infra my-production
-provisioning test all --infra my-production
-
-
-

This guide is part of the provisioning project documentation. Last updated: 2025-09-30

- -
- - -
-
- - - -
- - - - - - - - - - - - - - - - - - -
- - diff --git a/docs/book/highlight.js b/docs/book/highlight.js index 27e7be7..18d2434 100644 --- a/docs/book/highlight.js +++ b/docs/book/highlight.js @@ -51,4 +51,4 @@ hljs.registerLanguage("nim",function(){"use strict";return function(e){return{na hljs.registerLanguage("nix",function(){"use strict";return function(e){var n={keyword:"rec with let in inherit assert if else then",literal:"true false or and null",built_in:"import abort baseNameOf dirOf isNull builtins map removeAttrs throw toString derivation"},i={className:"subst",begin:/\$\{/,end:/}/,keywords:n},t={className:"string",contains:[i],variants:[{begin:"''",end:"''"},{begin:'"',end:'"'}]},s=[e.NUMBER_MODE,e.HASH_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,t,{begin:/[a-zA-Z0-9-_]+(\s*=)/,returnBegin:!0,relevance:0,contains:[{className:"attr",begin:/\S+/}]}];return i.contains=s,{name:"Nix",aliases:["nixos"],keywords:n,contains:s}}}()); hljs.registerLanguage("r",function(){"use strict";return function(e){var n="([a-zA-Z]|\\.[a-zA-Z.])[a-zA-Z0-9._]*";return{name:"R",contains:[e.HASH_COMMENT_MODE,{begin:n,keywords:{$pattern:n,keyword:"function if in break next repeat else for return switch while try tryCatch stop warning require library attach detach source setMethod setGeneric setGroupGeneric setClass ...",literal:"NULL NA TRUE FALSE T F Inf NaN NA_integer_|10 NA_real_|10 NA_character_|10 NA_complex_|10"},relevance:0},{className:"number",begin:"0[xX][0-9a-fA-F]+[Li]?\\b",relevance:0},{className:"number",begin:"\\d+(?:[eE][+\\-]?\\d*)?L\\b",relevance:0},{className:"number",begin:"\\d+\\.(?!\\d)(?:i\\b)?",relevance:0},{className:"number",begin:"\\d+(?:\\.\\d*)?(?:[eE][+\\-]?\\d*)?i?\\b",relevance:0},{className:"number",begin:"\\.\\d+(?:[eE][+\\-]?\\d*)?i?\\b",relevance:0},{begin:"`",end:"`",relevance:0},{className:"string",contains:[e.BACKSLASH_ESCAPE],variants:[{begin:'"',end:'"'},{begin:"'",end:"'"}]}]}}}()); hljs.registerLanguage("scala",function(){"use strict";return function(e){var n={className:"subst",variants:[{begin:"\\$[A-Za-z0-9_]+"},{begin:"\\${",end:"}"}]},a={className:"string",variants:[{begin:'"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{begin:'"""',end:'"""',relevance:10},{begin:'[a-z]+"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE,n]},{className:"string",begin:'[a-z]+"""',end:'"""',contains:[n],relevance:10}]},s={className:"type",begin:"\\b[A-Z][A-Za-z0-9_]*",relevance:0},t={className:"title",begin:/[^0-9\n\t "'(),.`{}\[\]:;][^\n\t "'(),.`{}\[\]:;]+|[^0-9\n\t "'(),.`{}\[\]:;=]/,relevance:0},i={className:"class",beginKeywords:"class object trait type",end:/[:={\[\n;]/,excludeEnd:!0,contains:[{beginKeywords:"extends with",relevance:10},{begin:/\[/,end:/\]/,excludeBegin:!0,excludeEnd:!0,relevance:0,contains:[s]},{className:"params",begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,relevance:0,contains:[s]},t]},l={className:"function",beginKeywords:"def",end:/[:={\[(\n;]/,excludeEnd:!0,contains:[t]};return{name:"Scala",keywords:{literal:"true false null",keyword:"type yield lazy override def with val var sealed abstract private trait object if forSome for while throw finally protected extends import final return else break new catch super class case package default try this match continue throws implicit"},contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,a,{className:"symbol",begin:"'\\w[\\w\\d_]*(?!')"},s,l,i,e.C_NUMBER_MODE,{className:"meta",begin:"@[A-Za-z]+"}]}}}()); -hljs.registerLanguage("x86asm",function(){"use strict";return function(s){return{name:"Intel x86 Assembly",case_insensitive:!0,keywords:{$pattern:"[.%]?"+s.IDENT_RE,keyword:"lock rep repe repz repne repnz xaquire xrelease bnd nobnd aaa aad aam aas adc add and arpl bb0_reset bb1_reset bound bsf bsr bswap bt btc btr bts call cbw cdq cdqe clc cld cli clts cmc cmp cmpsb cmpsd cmpsq cmpsw cmpxchg cmpxchg486 cmpxchg8b cmpxchg16b cpuid cpu_read cpu_write cqo cwd cwde daa das dec div dmint emms enter equ f2xm1 fabs fadd faddp fbld fbstp fchs fclex fcmovb fcmovbe fcmove fcmovnb fcmovnbe fcmovne fcmovnu fcmovu fcom fcomi fcomip fcomp fcompp fcos fdecstp fdisi fdiv fdivp fdivr fdivrp femms feni ffree ffreep fiadd ficom ficomp fidiv fidivr fild fimul fincstp finit fist fistp fisttp fisub fisubr fld fld1 fldcw fldenv fldl2e fldl2t fldlg2 fldln2 fldpi fldz fmul fmulp fnclex fndisi fneni fninit fnop fnsave fnstcw fnstenv fnstsw fpatan fprem fprem1 fptan frndint frstor fsave fscale fsetpm fsin fsincos fsqrt fst fstcw fstenv fstp fstsw fsub fsubp fsubr fsubrp ftst fucom fucomi fucomip fucomp fucompp fxam fxch fxtract fyl2x fyl2xp1 hlt ibts icebp idiv imul in inc incbin insb insd insw int int01 int1 int03 int3 into invd invpcid invlpg invlpga iret iretd iretq iretw jcxz jecxz jrcxz jmp jmpe lahf lar lds lea leave les lfence lfs lgdt lgs lidt lldt lmsw loadall loadall286 lodsb lodsd lodsq lodsw loop loope loopne loopnz loopz lsl lss ltr mfence monitor mov movd movq movsb movsd movsq movsw movsx movsxd movzx mul mwait neg nop not or out outsb outsd outsw packssdw packsswb packuswb paddb paddd paddsb paddsiw paddsw paddusb paddusw paddw pand pandn pause paveb pavgusb pcmpeqb pcmpeqd pcmpeqw pcmpgtb pcmpgtd pcmpgtw pdistib pf2id pfacc pfadd pfcmpeq pfcmpge pfcmpgt pfmax pfmin pfmul pfrcp pfrcpit1 pfrcpit2 pfrsqit1 pfrsqrt pfsub pfsubr pi2fd pmachriw pmaddwd pmagw pmulhriw pmulhrwa pmulhrwc pmulhw pmullw pmvgezb pmvlzb pmvnzb pmvzb pop popa popad popaw popf popfd popfq popfw por prefetch prefetchw pslld psllq psllw psrad psraw psrld psrlq psrlw psubb psubd psubsb psubsiw psubsw psubusb psubusw psubw punpckhbw punpckhdq punpckhwd punpcklbw punpckldq punpcklwd push pusha pushad pushaw pushf pushfd pushfq pushfw pxor rcl rcr rdshr rdmsr rdpmc rdtsc rdtscp ret retf retn rol ror rdm rsdc rsldt rsm rsts sahf sal salc sar sbb scasb scasd scasq scasw sfence sgdt shl shld shr shrd sidt sldt skinit smi smint smintold smsw stc std sti stosb stosd stosq stosw str sub svdc svldt svts swapgs syscall sysenter sysexit sysret test ud0 ud1 ud2b ud2 ud2a umov verr verw fwait wbinvd wrshr wrmsr xadd xbts xchg xlatb xlat xor cmove cmovz cmovne cmovnz cmova cmovnbe cmovae cmovnb cmovb cmovnae cmovbe cmovna cmovg cmovnle cmovge cmovnl cmovl cmovnge cmovle cmovng cmovc cmovnc cmovo cmovno cmovs cmovns cmovp cmovpe cmovnp cmovpo je jz jne jnz ja jnbe jae jnb jb jnae jbe jna jg jnle jge jnl jl jnge jle jng jc jnc jo jno js jns jpo jnp jpe jp sete setz setne setnz seta setnbe setae setnb setnc setb setnae setcset setbe setna setg setnle setge setnl setl setnge setle setng sets setns seto setno setpe setp setpo setnp addps addss andnps andps cmpeqps cmpeqss cmpleps cmpless cmpltps cmpltss cmpneqps cmpneqss cmpnleps cmpnless cmpnltps cmpnltss cmpordps cmpordss cmpunordps cmpunordss cmpps cmpss comiss cvtpi2ps cvtps2pi cvtsi2ss cvtss2si cvttps2pi cvttss2si divps divss ldmxcsr maxps maxss minps minss movaps movhps movlhps movlps movhlps movmskps movntps movss movups mulps mulss orps rcpps rcpss rsqrtps rsqrtss shufps sqrtps sqrtss stmxcsr subps subss ucomiss unpckhps unpcklps xorps fxrstor fxrstor64 fxsave fxsave64 xgetbv xsetbv xsave xsave64 xsaveopt xsaveopt64 xrstor xrstor64 prefetchnta prefetcht0 prefetcht1 prefetcht2 maskmovq movntq pavgb pavgw pextrw pinsrw pmaxsw pmaxub pminsw pminub pmovmskb pmulhuw psadbw pshufw pf2iw pfnacc pfpnacc pi2fw pswapd maskmovdqu clflush movntdq movnti movntpd movdqa movdqu movdq2q movq2dq paddq pmuludq pshufd pshufhw pshuflw pslldq psrldq psubq punpckhqdq punpcklqdq addpd addsd andnpd andpd cmpeqpd cmpeqsd cmplepd cmplesd cmpltpd cmpltsd cmpneqpd cmpneqsd cmpnlepd cmpnlesd cmpnltpd cmpnltsd cmpordpd cmpordsd cmpunordpd cmpunordsd cmppd comisd cvtdq2pd cvtdq2ps cvtpd2dq cvtpd2pi cvtpd2ps cvtpi2pd cvtps2dq cvtps2pd cvtsd2si cvtsd2ss cvtsi2sd cvtss2sd cvttpd2pi cvttpd2dq cvttps2dq cvttsd2si divpd divsd maxpd maxsd minpd minsd movapd movhpd movlpd movmskpd movupd mulpd mulsd orpd shufpd sqrtpd sqrtsd subpd subsd ucomisd unpckhpd unpcklpd xorpd addsubpd addsubps haddpd haddps hsubpd hsubps lddqu movddup movshdup movsldup clgi stgi vmcall vmclear vmfunc vmlaunch vmload vmmcall vmptrld vmptrst vmread vmresume vmrun vmsave vmwrite vmxoff vmxon invept invvpid pabsb pabsw pabsd palignr phaddw phaddd phaddsw phsubw phsubd phsubsw pmaddubsw pmulhrsw pshufb psignb psignw psignd extrq insertq movntsd movntss lzcnt blendpd blendps blendvpd blendvps dppd dpps extractps insertps movntdqa mpsadbw packusdw pblendvb pblendw pcmpeqq pextrb pextrd pextrq phminposuw pinsrb pinsrd pinsrq pmaxsb pmaxsd pmaxud pmaxuw pminsb pminsd pminud pminuw pmovsxbw pmovsxbd pmovsxbq pmovsxwd pmovsxwq pmovsxdq pmovzxbw pmovzxbd pmovzxbq pmovzxwd pmovzxwq pmovzxdq pmuldq pmulld ptest roundpd roundps roundsd roundss crc32 pcmpestri pcmpestrm pcmpistri pcmpistrm pcmpgtq popcnt getsec pfrcpv pfrsqrtv movbe aesenc aesenclast aesdec aesdeclast aesimc aeskeygenassist vaesenc vaesenclast vaesdec vaesdeclast vaesimc vaeskeygenassist vaddpd vaddps vaddsd vaddss vaddsubpd vaddsubps vandpd vandps vandnpd vandnps vblendpd vblendps vblendvpd vblendvps vbroadcastss vbroadcastsd vbroadcastf128 vcmpeq_ospd vcmpeqpd vcmplt_ospd vcmpltpd vcmple_ospd vcmplepd vcmpunord_qpd vcmpunordpd vcmpneq_uqpd vcmpneqpd vcmpnlt_uspd vcmpnltpd vcmpnle_uspd vcmpnlepd vcmpord_qpd vcmpordpd vcmpeq_uqpd vcmpnge_uspd vcmpngepd vcmpngt_uspd vcmpngtpd vcmpfalse_oqpd vcmpfalsepd vcmpneq_oqpd vcmpge_ospd vcmpgepd vcmpgt_ospd vcmpgtpd vcmptrue_uqpd vcmptruepd vcmplt_oqpd vcmple_oqpd vcmpunord_spd vcmpneq_uspd vcmpnlt_uqpd vcmpnle_uqpd vcmpord_spd vcmpeq_uspd vcmpnge_uqpd vcmpngt_uqpd vcmpfalse_ospd vcmpneq_ospd vcmpge_oqpd vcmpgt_oqpd vcmptrue_uspd vcmppd vcmpeq_osps vcmpeqps vcmplt_osps vcmpltps vcmple_osps vcmpleps vcmpunord_qps vcmpunordps vcmpneq_uqps vcmpneqps vcmpnlt_usps vcmpnltps vcmpnle_usps vcmpnleps vcmpord_qps vcmpordps vcmpeq_uqps vcmpnge_usps vcmpngeps vcmpngt_usps vcmpngtps vcmpfalse_oqps vcmpfalseps vcmpneq_oqps vcmpge_osps vcmpgeps vcmpgt_osps vcmpgtps vcmptrue_uqps vcmptrueps vcmplt_oqps vcmple_oqps vcmpunord_sps vcmpneq_usps vcmpnlt_uqps vcmpnle_uqps vcmpord_sps vcmpeq_usps vcmpnge_uqps vcmpngt_uqps vcmpfalse_osps vcmpneq_osps vcmpge_oqps vcmpgt_oqps vcmptrue_usps vcmpps vcmpeq_ossd vcmpeqsd vcmplt_ossd vcmpltsd vcmple_ossd vcmplesd vcmpunord_qsd vcmpunordsd vcmpneq_uqsd vcmpneqsd vcmpnlt_ussd vcmpnltsd vcmpnle_ussd vcmpnlesd vcmpord_qsd vcmpordsd vcmpeq_uqsd vcmpnge_ussd vcmpngesd vcmpngt_ussd vcmpngtsd vcmpfalse_oqsd vcmpfalsesd vcmpneq_oqsd vcmpge_ossd vcmpgesd vcmpgt_ossd vcmpgtsd vcmptrue_uqsd vcmptruesd vcmplt_oqsd vcmple_oqsd vcmpunord_ssd vcmpneq_ussd vcmpnlt_uqsd vcmpnle_uqsd vcmpord_ssd vcmpeq_ussd vcmpnge_uqsd vcmpngt_uqsd vcmpfalse_ossd vcmpneq_ossd vcmpge_oqsd vcmpgt_oqsd vcmptrue_ussd vcmpsd vcmpeq_osss vcmpeqss vcmplt_osss vcmpltss vcmple_osss vcmpless vcmpunord_qss vcmpunordss vcmpneq_uqss vcmpneqss vcmpnlt_usss vcmpnltss vcmpnle_usss vcmpnless vcmpord_qss vcmpordss vcmpeq_uqss vcmpnge_usss vcmpngess vcmpngt_usss vcmpngtss vcmpfalse_oqss vcmpfalsess vcmpneq_oqss vcmpge_osss vcmpgess vcmpgt_osss vcmpgtss vcmptrue_uqss vcmptruess vcmplt_oqss vcmple_oqss vcmpunord_sss vcmpneq_usss vcmpnlt_uqss vcmpnle_uqss vcmpord_sss vcmpeq_usss vcmpnge_uqss vcmpngt_uqss vcmpfalse_osss vcmpneq_osss vcmpge_oqss vcmpgt_oqss vcmptrue_usss vcmpss vcomisd vcomiss vcvtdq2pd vcvtdq2ps vcvtpd2dq vcvtpd2ps vcvtps2dq vcvtps2pd vcvtsd2si vcvtsd2ss vcvtsi2sd vcvtsi2ss vcvtss2sd vcvtss2si vcvttpd2dq vcvttps2dq vcvttsd2si vcvttss2si vdivpd vdivps vdivsd vdivss vdppd vdpps vextractf128 vextractps vhaddpd vhaddps vhsubpd vhsubps vinsertf128 vinsertps vlddqu vldqqu vldmxcsr vmaskmovdqu vmaskmovps vmaskmovpd vmaxpd vmaxps vmaxsd vmaxss vminpd vminps vminsd vminss vmovapd vmovaps vmovd vmovq vmovddup vmovdqa vmovqqa vmovdqu vmovqqu vmovhlps vmovhpd vmovhps vmovlhps vmovlpd vmovlps vmovmskpd vmovmskps vmovntdq vmovntqq vmovntdqa vmovntpd vmovntps vmovsd vmovshdup vmovsldup vmovss vmovupd vmovups vmpsadbw vmulpd vmulps vmulsd vmulss vorpd vorps vpabsb vpabsw vpabsd vpacksswb vpackssdw vpackuswb vpackusdw vpaddb vpaddw vpaddd vpaddq vpaddsb vpaddsw vpaddusb vpaddusw vpalignr vpand vpandn vpavgb vpavgw vpblendvb vpblendw vpcmpestri vpcmpestrm vpcmpistri vpcmpistrm vpcmpeqb vpcmpeqw vpcmpeqd vpcmpeqq vpcmpgtb vpcmpgtw vpcmpgtd vpcmpgtq vpermilpd vpermilps vperm2f128 vpextrb vpextrw vpextrd vpextrq vphaddw vphaddd vphaddsw vphminposuw vphsubw vphsubd vphsubsw vpinsrb vpinsrw vpinsrd vpinsrq vpmaddwd vpmaddubsw vpmaxsb vpmaxsw vpmaxsd vpmaxub vpmaxuw vpmaxud vpminsb vpminsw vpminsd vpminub vpminuw vpminud vpmovmskb vpmovsxbw vpmovsxbd vpmovsxbq vpmovsxwd vpmovsxwq vpmovsxdq vpmovzxbw vpmovzxbd vpmovzxbq vpmovzxwd vpmovzxwq vpmovzxdq vpmulhuw vpmulhrsw vpmulhw vpmullw vpmulld vpmuludq vpmuldq vpor vpsadbw vpshufb vpshufd vpshufhw vpshuflw vpsignb vpsignw vpsignd vpslldq vpsrldq vpsllw vpslld vpsllq vpsraw vpsrad vpsrlw vpsrld vpsrlq vptest vpsubb vpsubw vpsubd vpsubq vpsubsb vpsubsw vpsubusb vpsubusw vpunpckhbw vpunpckhwd vpunpckhdq vpunpckhqdq vpunpcklbw vpunpcklwd vpunpckldq vpunpcklqdq vpxor vrcpps vrcpss vrsqrtps vrsqrtss vroundpd vroundps vroundsd vroundss vshufpd vshufps vsqrtpd vsqrtps vsqrtsd vsqrtss vstmxcsr vsubpd vsubps vsubsd vsubss vtestps vtestpd vucomisd vucomiss vunpckhpd vunpckhps vunpcklpd vunpcklps vxorpd vxorps vzeroall vzeroupper pclmullqlqdq pclmulhqlqdq pclmullqhqdq pclmulhqhqdq pclmulqdq vpclmullqlqdq vpclmulhqlqdq vpclmullqhqdq vpclmulhqhqdq vpclmulqdq vfmadd132ps vfmadd132pd vfmadd312ps vfmadd312pd vfmadd213ps vfmadd213pd vfmadd123ps vfmadd123pd vfmadd231ps vfmadd231pd vfmadd321ps vfmadd321pd vfmaddsub132ps vfmaddsub132pd vfmaddsub312ps vfmaddsub312pd vfmaddsub213ps vfmaddsub213pd vfmaddsub123ps vfmaddsub123pd vfmaddsub231ps vfmaddsub231pd vfmaddsub321ps vfmaddsub321pd vfmsub132ps vfmsub132pd vfmsub312ps vfmsub312pd vfmsub213ps vfmsub213pd vfmsub123ps vfmsub123pd vfmsub231ps vfmsub231pd vfmsub321ps vfmsub321pd vfmsubadd132ps vfmsubadd132pd vfmsubadd312ps vfmsubadd312pd vfmsubadd213ps vfmsubadd213pd vfmsubadd123ps vfmsubadd123pd vfmsubadd231ps vfmsubadd231pd vfmsubadd321ps vfmsubadd321pd vfnmadd132ps vfnmadd132pd vfnmadd312ps vfnmadd312pd vfnmadd213ps vfnmadd213pd vfnmadd123ps vfnmadd123pd vfnmadd231ps vfnmadd231pd vfnmadd321ps vfnmadd321pd vfnmsub132ps vfnmsub132pd vfnmsub312ps vfnmsub312pd vfnmsub213ps vfnmsub213pd vfnmsub123ps vfnmsub123pd vfnmsub231ps vfnmsub231pd vfnmsub321ps vfnmsub321pd vfmadd132ss vfmadd132sd vfmadd312ss vfmadd312sd vfmadd213ss vfmadd213sd vfmadd123ss vfmadd123sd vfmadd231ss vfmadd231sd vfmadd321ss vfmadd321sd vfmsub132ss vfmsub132sd vfmsub312ss vfmsub312sd vfmsub213ss vfmsub213sd vfmsub123ss vfmsub123sd vfmsub231ss vfmsub231sd vfmsub321ss vfmsub321sd vfnmadd132ss vfnmadd132sd vfnmadd312ss vfnmadd312sd vfnmadd213ss vfnmadd213sd vfnmadd123ss vfnmadd123sd vfnmadd231ss vfnmadd231sd vfnmadd321ss vfnmadd321sd vfnmsub132ss vfnmsub132sd vfnmsub312ss vfnmsub312sd vfnmsub213ss vfnmsub213sd vfnmsub123ss vfnmsub123sd vfnmsub231ss vfnmsub231sd vfnmsub321ss vfnmsub321sd rdfsbase rdgsbase rdrand wrfsbase wrgsbase vcvtph2ps vcvtps2ph adcx adox rdseed clac stac xstore xcryptecb xcryptcbc xcryptctr xcryptcfb xcryptofb montmul xsha1 xsha256 llwpcb slwpcb lwpval lwpins vfmaddpd vfmaddps vfmaddsd vfmaddss vfmaddsubpd vfmaddsubps vfmsubaddpd vfmsubaddps vfmsubpd vfmsubps vfmsubsd vfmsubss vfnmaddpd vfnmaddps vfnmaddsd vfnmaddss vfnmsubpd vfnmsubps vfnmsubsd vfnmsubss vfrczpd vfrczps vfrczsd vfrczss vpcmov vpcomb vpcomd vpcomq vpcomub vpcomud vpcomuq vpcomuw vpcomw vphaddbd vphaddbq vphaddbw vphadddq vphaddubd vphaddubq vphaddubw vphaddudq vphadduwd vphadduwq vphaddwd vphaddwq vphsubbw vphsubdq vphsubwd vpmacsdd vpmacsdqh vpmacsdql vpmacssdd vpmacssdqh vpmacssdql vpmacsswd vpmacssww vpmacswd vpmacsww vpmadcsswd vpmadcswd vpperm vprotb vprotd vprotq vprotw vpshab vpshad vpshaq vpshaw vpshlb vpshld vpshlq vpshlw vbroadcasti128 vpblendd vpbroadcastb vpbroadcastw vpbroadcastd vpbroadcastq vpermd vpermpd vpermps vpermq vperm2i128 vextracti128 vinserti128 vpmaskmovd vpmaskmovq vpsllvd vpsllvq vpsravd vpsrlvd vpsrlvq vgatherdpd vgatherqpd vgatherdps vgatherqps vpgatherdd vpgatherqd vpgatherdq vpgatherqq xabort xbegin xend xtest andn bextr blci blcic blsi blsic blcfill blsfill blcmsk blsmsk blsr blcs bzhi mulx pdep pext rorx sarx shlx shrx tzcnt tzmsk t1mskc valignd valignq vblendmpd vblendmps vbroadcastf32x4 vbroadcastf64x4 vbroadcasti32x4 vbroadcasti64x4 vcompresspd vcompressps vcvtpd2udq vcvtps2udq vcvtsd2usi vcvtss2usi vcvttpd2udq vcvttps2udq vcvttsd2usi vcvttss2usi vcvtudq2pd vcvtudq2ps vcvtusi2sd vcvtusi2ss vexpandpd vexpandps vextractf32x4 vextractf64x4 vextracti32x4 vextracti64x4 vfixupimmpd vfixupimmps vfixupimmsd vfixupimmss vgetexppd vgetexpps vgetexpsd vgetexpss vgetmantpd vgetmantps vgetmantsd vgetmantss vinsertf32x4 vinsertf64x4 vinserti32x4 vinserti64x4 vmovdqa32 vmovdqa64 vmovdqu32 vmovdqu64 vpabsq vpandd vpandnd vpandnq vpandq vpblendmd vpblendmq vpcmpltd vpcmpled vpcmpneqd vpcmpnltd vpcmpnled vpcmpd vpcmpltq vpcmpleq vpcmpneqq vpcmpnltq vpcmpnleq vpcmpq vpcmpequd vpcmpltud vpcmpleud vpcmpnequd vpcmpnltud vpcmpnleud vpcmpud vpcmpequq vpcmpltuq vpcmpleuq vpcmpnequq vpcmpnltuq vpcmpnleuq vpcmpuq vpcompressd vpcompressq vpermi2d vpermi2pd vpermi2ps vpermi2q vpermt2d vpermt2pd vpermt2ps vpermt2q vpexpandd vpexpandq vpmaxsq vpmaxuq vpminsq vpminuq vpmovdb vpmovdw vpmovqb vpmovqd vpmovqw vpmovsdb vpmovsdw vpmovsqb vpmovsqd vpmovsqw vpmovusdb vpmovusdw vpmovusqb vpmovusqd vpmovusqw vpord vporq vprold vprolq vprolvd vprolvq vprord vprorq vprorvd vprorvq vpscatterdd vpscatterdq vpscatterqd vpscatterqq vpsraq vpsravq vpternlogd vpternlogq vptestmd vptestmq vptestnmd vptestnmq vpxord vpxorq vrcp14pd vrcp14ps vrcp14sd vrcp14ss vrndscalepd vrndscaleps vrndscalesd vrndscaless vrsqrt14pd vrsqrt14ps vrsqrt14sd vrsqrt14ss vscalefpd vscalefps vscalefsd vscalefss vscatterdpd vscatterdps vscatterqpd vscatterqps vshuff32x4 vshuff64x2 vshufi32x4 vshufi64x2 kandnw kandw kmovw knotw kortestw korw kshiftlw kshiftrw kunpckbw kxnorw kxorw vpbroadcastmb2q vpbroadcastmw2d vpconflictd vpconflictq vplzcntd vplzcntq vexp2pd vexp2ps vrcp28pd vrcp28ps vrcp28sd vrcp28ss vrsqrt28pd vrsqrt28ps vrsqrt28sd vrsqrt28ss vgatherpf0dpd vgatherpf0dps vgatherpf0qpd vgatherpf0qps vgatherpf1dpd vgatherpf1dps vgatherpf1qpd vgatherpf1qps vscatterpf0dpd vscatterpf0dps vscatterpf0qpd vscatterpf0qps vscatterpf1dpd vscatterpf1dps vscatterpf1qpd vscatterpf1qps prefetchwt1 bndmk bndcl bndcu bndcn bndmov bndldx bndstx sha1rnds4 sha1nexte sha1msg1 sha1msg2 sha256rnds2 sha256msg1 sha256msg2 hint_nop0 hint_nop1 hint_nop2 hint_nop3 hint_nop4 hint_nop5 hint_nop6 hint_nop7 hint_nop8 hint_nop9 hint_nop10 hint_nop11 hint_nop12 hint_nop13 hint_nop14 hint_nop15 hint_nop16 hint_nop17 hint_nop18 hint_nop19 hint_nop20 hint_nop21 hint_nop22 hint_nop23 hint_nop24 hint_nop25 hint_nop26 hint_nop27 hint_nop28 hint_nop29 hint_nop30 hint_nop31 hint_nop32 hint_nop33 hint_nop34 hint_nop35 hint_nop36 hint_nop37 hint_nop38 hint_nop39 hint_nop40 hint_nop41 hint_nop42 hint_nop43 hint_nop44 hint_nop45 hint_nop46 hint_nop47 hint_nop48 hint_nop49 hint_nop50 hint_nop51 hint_nop52 hint_nop53 hint_nop54 hint_nop55 hint_nop56 hint_nop57 hint_nop58 hint_nop59 hint_nop60 hint_nop61 hint_nop62 hint_nop63",built_in:"ip eip rip al ah bl bh cl ch dl dh sil dil bpl spl r8b r9b r10b r11b r12b r13b r14b r15b ax bx cx dx si di bp sp r8w r9w r10w r11w r12w r13w r14w r15w eax ebx ecx edx esi edi ebp esp eip r8d r9d r10d r11d r12d r13d r14d r15d rax rbx rcx rdx rsi rdi rbp rsp r8 r9 r10 r11 r12 r13 r14 r15 cs ds es fs gs ss st st0 st1 st2 st3 st4 st5 st6 st7 mm0 mm1 mm2 mm3 mm4 mm5 mm6 mm7 xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 xmm12 xmm13 xmm14 xmm15 xmm16 xmm17 xmm18 xmm19 xmm20 xmm21 xmm22 xmm23 xmm24 xmm25 xmm26 xmm27 xmm28 xmm29 xmm30 xmm31 ymm0 ymm1 ymm2 ymm3 ymm4 ymm5 ymm6 ymm7 ymm8 ymm9 ymm10 ymm11 ymm12 ymm13 ymm14 ymm15 ymm16 ymm17 ymm18 ymm19 ymm20 ymm21 ymm22 ymm23 ymm24 ymm25 ymm26 ymm27 ymm28 ymm29 ymm30 ymm31 zmm0 zmm1 zmm2 zmm3 zmm4 zmm5 zmm6 zmm7 zmm8 zmm9 zmm10 zmm11 zmm12 zmm13 zmm14 zmm15 zmm16 zmm17 zmm18 zmm19 zmm20 zmm21 zmm22 zmm23 zmm24 zmm25 zmm26 zmm27 zmm28 zmm29 zmm30 zmm31 k0 k1 k2 k3 k4 k5 k6 k7 bnd0 bnd1 bnd2 bnd3 cr0 cr1 cr2 cr3 cr4 cr8 dr0 dr1 dr2 dr3 dr8 tr3 tr4 tr5 tr6 tr7 r0 r1 r2 r3 r4 r5 r6 r7 r0b r1b r2b r3b r4b r5b r6b r7b r0w r1w r2w r3w r4w r5w r6w r7w r0d r1d r2d r3d r4d r5d r6d r7d r0h r1h r2h r3h r0l r1l r2l r3l r4l r5l r6l r7l r8l r9l r10l r11l r12l r13l r14l r15l db dw dd dq dt ddq do dy dz resb resw resd resq rest resdq reso resy resz incbin equ times byte word dword qword nosplit rel abs seg wrt strict near far a32 ptr",meta:"%define %xdefine %+ %undef %defstr %deftok %assign %strcat %strlen %substr %rotate %elif %else %endif %if %ifmacro %ifctx %ifidn %ifidni %ifid %ifnum %ifstr %iftoken %ifempty %ifenv %error %warning %fatal %rep %endrep %include %push %pop %repl %pathsearch %depend %use %arg %stacksize %local %line %comment %endcomment .nolist __FILE__ __LINE__ __SECT__ __BITS__ __OUTPUT_FORMAT__ __DATE__ __TIME__ __DATE_NUM__ __TIME_NUM__ __UTC_DATE__ __UTC_TIME__ __UTC_DATE_NUM__ __UTC_TIME_NUM__ __PASS__ struc endstruc istruc at iend align alignb sectalign daz nodaz up down zero default option assume public bits use16 use32 use64 default section segment absolute extern global common cpu float __utf16__ __utf16le__ __utf16be__ __utf32__ __utf32le__ __utf32be__ __float8__ __float16__ __float32__ __float64__ __float80m__ __float80e__ __float128l__ __float128h__ __Infinity__ __QNaN__ __SNaN__ Inf NaN QNaN SNaN float8 float16 float32 float64 float80m float80e float128l float128h __FLOAT_DAZ__ __FLOAT_ROUND__ __FLOAT__"},contains:[s.COMMENT(";","$",{relevance:0}),{className:"number",variants:[{begin:"\\b(?:([0-9][0-9_]*)?\\.[0-9_]*(?:[eE][+-]?[0-9_]+)?|(0[Xx])?[0-9][0-9_]*\\.?[0-9_]*(?:[pP](?:[+-]?[0-9_]+)?)?)\\b",relevance:0},{begin:"\\$[0-9][0-9A-Fa-f]*",relevance:0},{begin:"\\b(?:[0-9A-Fa-f][0-9A-Fa-f_]*[Hh]|[0-9][0-9_]*[DdTt]?|[0-7][0-7_]*[QqOo]|[0-1][0-1_]*[BbYy])\\b"},{begin:"\\b(?:0[Xx][0-9A-Fa-f_]+|0[DdTt][0-9_]+|0[QqOo][0-7_]+|0[BbYy][0-1_]+)\\b"}]},s.QUOTE_STRING_MODE,{className:"string",variants:[{begin:"'",end:"[^\\\\]'"},{begin:"`",end:"[^\\\\]`"}],relevance:0},{className:"symbol",variants:[{begin:"^\\s*[A-Za-z._?][A-Za-z0-9_$#@~.?]*(:|\\s+label)"},{begin:"^\\s*%%[A-Za-z0-9_$#@~.?]*:"}],relevance:0},{className:"subst",begin:"%[0-9]+",relevance:0},{className:"subst",begin:"%!S+",relevance:0},{className:"meta",begin:/^\s*\.[\w_-]+/}]}}}()); +hljs.registerLanguage("x86asm",function(){"use strict";return function(s){return{name:"Intel x86 Assembly",case_insensitive:!0,keywords:{$pattern:"[.%]?"+s.IDENT_RE,keyword:"lock rep repe repz repne repnz xaquire xrelease bnd nobnd aaa aad aam aas adc add and arpl bb0_reset bb1_reset bound bsf bsr bswap bt btc btr bts call cbw cdq cdqe clc cld cli clts cmc cmp cmpsb cmpsd cmpsq cmpsw cmpxchg cmpxchg486 cmpxchg8b cmpxchg16b cpuid cpu_read cpu_write cqo cwd cwde daa das dec div dmint emms enter equ f2xm1 fabs fadd faddp fbld fbstp fchs fclex fcmovb fcmovbe fcmove fcmovnb fcmovnbe fcmovne fcmovnu fcmovu fcom fcomi fcomip fcomp fcompp fcos fdecstp fdisi fdiv fdivp fdivr fdivrp femms feni ffree ffreep fiadd ficom ficomp fidiv fidivr fild fimul fincstp finit fist fistp fisttp fisub fisubr fld fld1 fldcw fldenv fldl2e fldl2t fldlg2 fldln2 fldpi fldz fmul fmulp fnclex fndisi fneni fninit fnop fnsave fnstcw fnstenv fnstsw fpatan fprem fprem1 fptan frndint frstor fsave fscale fsetpm fsin fsincos fsqrt fst fstcw fstenv fstp fstsw fsub fsubp fsubr fsubrp ftst fucom fucomi fucomip fucomp fucompp fxam fxch fxtract fyl2x fyl2xp1 hlt ibts icebp idiv imul in inc incbin insb insd insw int int01 int1 int03 int3 into invd invpcid invlpg invlpga iret iretd iretq iretw jcxz jecxz jrcxz jmp jmpe lahf lar lds lea leave les lfence lfs lgdt lgs lidt lldt lmsw loadall loadall286 lodsb lodsd lodsq lodsw loop loope loopne loopnz loopz lsl lss ltr mfence monitor mov movd movq movsb movsd movsq movsw movsx movsxd movzx mul mwait neg nop not or out outsb outsd outsw packssdw packsswb packuswb paddb paddd paddsb paddsiw paddsw paddusb paddusw paddw pand pandn pause paveb pavgusb pcmpeqb pcmpeqd pcmpeqw pcmpgtb pcmpgtd pcmpgtw pdistib pf2id pfacc pfadd pfcmpeq pfcmpge pfcmpgt pfmax pfmin pfmul pfrcp pfrcpit1 pfrcpit2 pfrsqit1 pfrsqrt pfsub pfsubr pi2fd pmachriw pmaddwd pmagw pmulhriw pmulhrwa pmulhrwc pmulhw pmullw pmvgezb pmvlzb pmvnzb pmvzb pop popa popad popaw popf popfd popfq popfw por prefetch prefetchw pslld psllq psllw psrad psraw psrld psrlq psrlw psubb psubd psubsb psubsiw psubsw psubusb psubusw psubw punpckhbw punpckhdq punpckhwd punpcklbw punpckldq punpcklwd push pusha pushad pushaw pushf pushfd pushfq pushfw pxor rcl rcr rdshr rdmsr rdpmc rdtsc rdtscp ret retf retn rol ror rdm rsdc rsldt rsm rsts sahf sal salc sar sbb scasb scasd scasq scasw sfence sgdt shl shld shr shrd sidt sldt skinit smi smint smintold smsw stc std sti stosb stosd stosq stosw str sub svdc svldt svts swapgs syscall sysenter sysexit sysret test ud0 ud1 ud2b ud2 ud2a umov verr verw fwait wbinvd wrshr wrmsr xadd xbts xchg xlatb xlat xor cmove cmovz cmovne cmovnz cmova cmovnbe cmovae cmovnb cmovb cmovnae cmovbe cmovna cmovg cmovnle cmovge cmovnl cmovl cmovnge cmovle cmovng cmovc cmovnc cmovo cmovno cmovs cmovns cmovp cmovpe cmovnp cmovpo je jz jne jnz ja jnbe jae jnb jb jnae jbe jna jg jnle jge jnl jl jnge jle jng jc jnc jo jno js jns jpo jnp jpe jp sete setz setne setnz seta setnbe setae setnb setnc setb setnae setcset setbe setna setg setnle setge setnl setl setnge setle setng sets setns seto setno setpe setp setpo setnp addps addss andnps andps cmpeqps cmpeqss cmpleps cmpless cmpltps cmpltss cmpneqps cmpneqss cmpnleps cmpnless cmpnltps cmpnltss cmpordps cmpordss cmpunordps cmpunordss cmpps cmpss comiss cvtpi2ps cvtps2pi cvtsi2ss cvtss2si cvttps2pi cvttss2si divps divss ldmxcsr maxps maxss minps minss movaps movhps movlhps movlps movhlps movmskps movntps movss movups mulps mulss orps rcpps rcpss rsqrtps rsqrtss shufps sqrtps sqrtss stmxcsr subps subss ucomiss unpckhps unpcklps xorps fxrstor fxrstor64 fxsave fxsave64 xgetbv xsetbv xsave xsave64 xsaveopt xsaveopt64 xrstor xrstor64 prefetchnta prefetcht0 prefetcht1 prefetcht2 maskmovq movntq pavgb pavgw pextrw pinsrw pmaxsw pmaxub pminsw pminub pmovmskb pmulhuw psadbw pshufw pf2iw pfnacc pfpnacc pi2fw pswapd maskmovdqu clflush movntdq movnti movntpd movdqa movdqu movdq2q movq2dq paddq pmuludq pshufd pshufhw pshuflw pslldq psrldq psubq punpckhqdq punpcklqdq addpd addsd andnpd andpd cmpeqpd cmpeqsd cmplepd cmplesd cmpltpd cmpltsd cmpneqpd cmpneqsd cmpnlepd cmpnlesd cmpnltpd cmpnltsd cmpordpd cmpordsd cmpunordpd cmpunordsd cmppd comisd cvtdq2pd cvtdq2ps cvtpd2dq cvtpd2pi cvtpd2ps cvtpi2pd cvtps2dq cvtps2pd cvtsd2si cvtsd2ss cvtsi2sd cvtss2sd cvttpd2pi cvttpd2dq cvttps2dq cvttsd2si divpd divsd maxpd maxsd minpd minsd movapd movhpd movlpd movmskpd movupd mulpd mulsd orpd shufpd sqrtpd sqrtsd subpd subsd ucomisd unpckhpd unpcklpd xorpd addsubpd addsubps haddpd haddps hsubpd hsubps lddqu movddup movshdup movsldup clgi stgi vmcall vmclear vmfunc vmlaunch vmload vmmcall vmptrld vmptrst vmread vmresume vmrun vmsave vmwrite vmxoff vmxon invept invvpid pabsb pabsw pabsd palignr phaddw phaddd phaddsw phsubw phsubd phsubsw pmaddubsw pmulhrsw pshufb psignb psignw psignd extrq insertq movntsd movntss lzcnt blendpd blendps blendvpd blendvps dppd dpps extractps insertps movntdqa mpsadbw packusdw pblendvb pblendw pcmpeqq pextrb pextrd pextrq phminposuw pinsrb pinsrd pinsrq pmaxsb pmaxsd pmaxud pmaxuw pminsb pminsd pminud pminuw pmovsxbw pmovsxbd pmovsxbq pmovsxwd pmovsxwq pmovsxdq pmovzxbw pmovzxbd pmovzxbq pmovzxwd pmovzxwq pmovzxdq pmuldq pmulld ptest roundpd roundps roundsd roundss crc32 pcmpestri pcmpestrm pcmpistri pcmpistrm pcmpgtq popcnt getsec pfrcpv pfrsqrtv movbe aesenc aesenclast aesdec aesdeclast aesimc aeskeygenassist vaesenc vaesenclast vaesdec vaesdeclast vaesimc vaeskeygenassist vaddpd vaddps vaddsd vaddss vaddsubpd vaddsubps vandpd vandps vandnpd vandnps vblendpd vblendps vblendvpd vblendvps vbroadcastss vbroadcastsd vbroadcastf128 vcmpeq_ospd vcmpeqpd vcmplt_ospd vcmpltpd vcmple_ospd vcmplepd vcmpunord_qpd vcmpunordpd vcmpneq_uqpd vcmpneqpd vcmpnlt_uspd vcmpnltpd vcmpnle_uspd vcmpnlepd vcmpord_qpd vcmpordpd vcmpeq_uqpd vcmpnge_uspd vcmpngepd vcmpngt_uspd vcmpngtpd vcmpfalse_oqpd vcmpfalsepd vcmpneq_oqpd vcmpge_ospd vcmpgepd vcmpgt_ospd vcmpgtpd vcmptrue_uqpd vcmptruepd vcmplt_oqpd vcmple_oqpd vcmpunord_spd vcmpneq_uspd vcmpnlt_uqpd vcmpnle_uqpd vcmpord_spd vcmpeq_uspd vcmpnge_uqpd vcmpngt_uqpd vcmpfalse_ospd vcmpneq_ospd vcmpge_oqpd vcmpgt_oqpd vcmptrue_uspd vcmppd vcmpeq_osps vcmpeqps vcmplt_osps vcmpltps vcmple_osps vcmpleps vcmpunord_qps vcmpunordps vcmpneq_uqps vcmpneqps vcmpnlt_usps vcmpnltps vcmpnle_usps vcmpnleps vcmpord_qps vcmpordps vcmpeq_uqps vcmpnge_usps vcmpngeps vcmpngt_usps vcmpngtps vcmpfalse_oqps vcmpfalseps vcmpneq_oqps vcmpge_osps vcmpgeps vcmpgt_osps vcmpgtps vcmptrue_uqps vcmptrueps vcmplt_oqps vcmple_oqps vcmpunord_sps vcmpneq_usps vcmpnlt_uqps vcmpnle_uqps vcmpord_sps vcmpeq_usps vcmpnge_uqps vcmpngt_uqps vcmpfalse_osps vcmpneq_osps vcmpge_oqps vcmpgt_oqps vcmptrue_usps vcmpps vcmpeq_ossd vcmpeqsd vcmplt_ossd vcmpltsd vcmple_ossd vcmplesd vcmpunord_qsd vcmpunordsd vcmpneq_uqsd vcmpneqsd vcmpnlt_ussd vcmpnltsd vcmpnle_ussd vcmpnlesd vcmpord_qsd vcmpordsd vcmpeq_uqsd vcmpnge_ussd vcmpngesd vcmpngt_ussd vcmpngtsd vcmpfalse_oqsd vcmpfalsesd vcmpneq_oqsd vcmpge_ossd vcmpgesd vcmpgt_ossd vcmpgtsd vcmptrue_uqsd vcmptruesd vcmplt_oqsd vcmple_oqsd vcmpunord_ssd vcmpneq_ussd vcmpnlt_uqsd vcmpnle_uqsd vcmpord_ssd vcmpeq_ussd vcmpnge_uqsd vcmpngt_uqsd vcmpfalse_ossd vcmpneq_ossd vcmpge_oqsd vcmpgt_oqsd vcmptrue_ussd vcmpsd vcmpeq_osss vcmpeqss vcmplt_osss vcmpltss vcmple_osss vcmpless vcmpunord_qss vcmpunordss vcmpneq_uqss vcmpneqss vcmpnlt_usss vcmpnltss vcmpnle_usss vcmpnless vcmpord_qss vcmpordss vcmpeq_uqss vcmpnge_usss vcmpngess vcmpngt_usss vcmpngtss vcmpfalse_oqss vcmpfalsess vcmpneq_oqss vcmpge_osss vcmpgess vcmpgt_osss vcmpgtss vcmptrue_uqss vcmptruess vcmplt_oqss vcmple_oqss vcmpunord_sss vcmpneq_usss vcmpnlt_uqss vcmpnle_uqss vcmpord_sss vcmpeq_usss vcmpnge_uqss vcmpngt_uqss vcmpfalse_osss vcmpneq_osss vcmpge_oqss vcmpgt_oqss vcmptrue_usss vcmpss vcomisd vcomiss vcvtdq2pd vcvtdq2ps vcvtpd2dq vcvtpd2ps vcvtps2dq vcvtps2pd vcvtsd2si vcvtsd2ss vcvtsi2sd vcvtsi2ss vcvtss2sd vcvtss2si vcvttpd2dq vcvttps2dq vcvttsd2si vcvttss2si vdivpd vdivps vdivsd vdivss vdppd vdpps vextractf128 vextractps vhaddpd vhaddps vhsubpd vhsubps vinsertf128 vinsertps vlddqu vldqqu vldmxcsr vmaskmovdqu vmaskmovps vmaskmovpd vmaxpd vmaxps vmaxsd vmaxss vminpd vminps vminsd vminss vmovapd vmovaps vmovd vmovq vmovddup vmovdqa vmovqqa vmovdqu vmovqqu vmovhlps vmovhpd vmovhps vmovlhps vmovlpd vmovlps vmovmskpd vmovmskps vmovntdq vmovntqq vmovntdqa vmovntpd vmovntps vmovsd vmovshdup vmovsldup vmovss vmovupd vmovups vmpsadbw vmulpd vmulps vmulsd vmulss vorpd vorps vpabsb vpabsw vpabsd vpacksswb vpackssdw vpackuswb vpackusdw vpaddb vpaddw vpaddd vpaddq vpaddsb vpaddsw vpaddusb vpaddusw vpalignr vpand vpandn vpavgb vpavgw vpblendvb vpblendw vpcmpestri vpcmpestrm vpcmpistri vpcmpistrm vpcmpeqb vpcmpeqw vpcmpeqd vpcmpeqq vpcmpgtb vpcmpgtw vpcmpgtd vpcmpgtq vpermilpd vpermilps vperm2f128 vpextrb vpextrw vpextrd vpextrq vphaddw vphaddd vphaddsw vphminposuw vphsubw vphsubd vphsubsw vpinsrb vpinsrw vpinsrd vpinsrq vpmaddwd vpmaddubsw vpmaxsb vpmaxsw vpmaxsd vpmaxub vpmaxuw vpmaxud vpminsb vpminsw vpminsd vpminub vpminuw vpminud vpmovmskb vpmovsxbw vpmovsxbd vpmovsxbq vpmovsxwd vpmovsxwq vpmovsxdq vpmovzxbw vpmovzxbd vpmovzxbq vpmovzxwd vpmovzxwq vpmovzxdq vpmulhuw vpmulhrsw vpmulhw vpmullw vpmulld vpmuludq vpmuldq vpor vpsadbw vpshufb vpshufd vpshufhw vpshuflw vpsignb vpsignw vpsignd vpslldq vpsrldq vpsllw vpslld vpsllq vpsraw vpsrad vpsrlw vpsrld vpsrlq vptest vpsubb vpsubw vpsubd vpsubq vpsubsb vpsubsw vpsubusb vpsubusw vpunpckhbw vpunpckhwd vpunpckhdq vpunpckhqdq vpunpcklbw vpunpcklwd vpunpckldq vpunpcklqdq vpxor vrcpps vrcpss vrsqrtps vrsqrtss vroundpd vroundps vroundsd vroundss vshufpd vshufps vsqrtpd vsqrtps vsqrtsd vsqrtss vstmxcsr vsubpd vsubps vsubsd vsubss vtestps vtestpd vucomisd vucomiss vunpckhpd vunpckhps vunpcklpd vunpcklps vxorpd vxorps vzeroall vzeroupper pclmullqlqdq pclmulhqlqdq pclmullqhqdq pclmulhqhqdq pclmulqdq vpclmullqlqdq vpclmulhqlqdq vpclmullqhqdq vpclmulhqhqdq vpclmulqdq vfmadd132ps vfmadd132pd vfmadd312ps vfmadd312pd vfmadd213ps vfmadd213pd vfmadd123ps vfmadd123pd vfmadd231ps vfmadd231pd vfmadd321ps vfmadd321pd vfmaddsub132ps vfmaddsub132pd vfmaddsub312ps vfmaddsub312pd vfmaddsub213ps vfmaddsub213pd vfmaddsub123ps vfmaddsub123pd vfmaddsub231ps vfmaddsub231pd vfmaddsub321ps vfmaddsub321pd vfmsub132ps vfmsub132pd vfmsub312ps vfmsub312pd vfmsub213ps vfmsub213pd vfmsub123ps vfmsub123pd vfmsub231ps vfmsub231pd vfmsub321ps vfmsub321pd vfmsubadd132ps vfmsubadd132pd vfmsubadd312ps vfmsubadd312pd vfmsubadd213ps vfmsubadd213pd vfmsubadd123ps vfmsubadd123pd vfmsubadd231ps vfmsubadd231pd vfmsubadd321ps vfmsubadd321pd vfnmadd132ps vfnmadd132pd vfnmadd312ps vfnmadd312pd vfnmadd213ps vfnmadd213pd vfnmadd123ps vfnmadd123pd vfnmadd231ps vfnmadd231pd vfnmadd321ps vfnmadd321pd vfnmsub132ps vfnmsub132pd vfnmsub312ps vfnmsub312pd vfnmsub213ps vfnmsub213pd vfnmsub123ps vfnmsub123pd vfnmsub231ps vfnmsub231pd vfnmsub321ps vfnmsub321pd vfmadd132ss vfmadd132sd vfmadd312ss vfmadd312sd vfmadd213ss vfmadd213sd vfmadd123ss vfmadd123sd vfmadd231ss vfmadd231sd vfmadd321ss vfmadd321sd vfmsub132ss vfmsub132sd vfmsub312ss vfmsub312sd vfmsub213ss vfmsub213sd vfmsub123ss vfmsub123sd vfmsub231ss vfmsub231sd vfmsub321ss vfmsub321sd vfnmadd132ss vfnmadd132sd vfnmadd312ss vfnmadd312sd vfnmadd213ss vfnmadd213sd vfnmadd123ss vfnmadd123sd vfnmadd231ss vfnmadd231sd vfnmadd321ss vfnmadd321sd vfnmsub132ss vfnmsub132sd vfnmsub312ss vfnmsub312sd vfnmsub213ss vfnmsub213sd vfnmsub123ss vfnmsub123sd vfnmsub231ss vfnmsub231sd vfnmsub321ss vfnmsub321sd rdfsbase rdgsbase rdrand wrfsbase wrgsbase vcvtph2ps vcvtps2ph adcx adox rdseed clac stac xstore xcryptecb xcryptcbc xcryptctr xcryptcfb xcryptofb montmul xsha1 xsha256 llwpcb slwpcb lwpval lwpins vfmaddpd vfmaddps vfmaddsd vfmaddss vfmaddsubpd vfmaddsubps vfmsubaddpd vfmsubaddps vfmsubpd vfmsubps vfmsubsd vfmsubss vfnmaddpd vfnmaddps vfnmaddsd vfnmaddss vfnmsubpd vfnmsubps vfnmsubsd vfnmsubss vfrczpd vfrczps vfrczsd vfrczss vpcmov vpcomb vpcomd vpcomq vpcomub vpcomud vpcomuq vpcomuw vpcomw vphaddbd vphaddbq vphaddbw vphadddq vphaddubd vphaddubq vphaddubw vphaddudq vphadduwd vphadduwq vphaddwd vphaddwq vphsubbw vphsubdq vphsubwd vpmacsdd vpmacsdqh vpmacsdql vpmacssdd vpmacssdqh vpmacssdql vpmacsswd vpmacssww vpmacswd vpmacsww vpmadcsswd vpmadcswd vpperm vprotb vprotd vprotq vprotw vpshab vpshad vpshaq vpshaw vpshlb vpshld vpshlq vpshlw vbroadcasti128 vpblendd vpbroadcastb vpbroadcastw vpbroadcastd vpbroadcastq vpermd vpermpd vpermps vpermq vperm2i128 vextracti128 vinserti128 vpmaskmovd vpmaskmovq vpsllvd vpsllvq vpsravd vpsrlvd vpsrlvq vgatherdpd vgatherqpd vgatherdps vgatherqps vpgatherdd vpgatherqd vpgatherdq vpgatherqq xabort xbegin xend xtest andn bextr blci blcic blsi blsic blcfill blsfill blcmsk blsmsk blsr blcs bzhi mulx pdep pext rorx sarx shlx shrx tzcnt tzmsk t1mskc valignd valignq vblendmpd vblendmps vbroadcastf32x4 vbroadcastf64x4 vbroadcasti32x4 vbroadcasti64x4 vcompresspd vcompressps vcvtpd2udq vcvtps2udq vcvtsd2usi vcvtss2usi vcvttpd2udq vcvttps2udq vcvttsd2usi vcvttss2usi vcvtudq2pd vcvtudq2ps vcvtusi2sd vcvtusi2ss vexpandpd vexpandps vextractf32x4 vextractf64x4 vextracti32x4 vextracti64x4 vfixupimmpd vfixupimmps vfixupimmsd vfixupimmss vgetexppd vgetexpps vgetexpsd vgetexpss vgetmantpd vgetmantps vgetmantsd vgetmantss vinsertf32x4 vinsertf64x4 vinserti32x4 vinserti64x4 vmovdqa32 vmovdqa64 vmovdqu32 vmovdqu64 vpabsq vpandd vpandnd vpandnq vpandq vpblendmd vpblendmq vpcmpltd vpcmpled vpcmpneqd vpcmpnltd vpcmpnled vpcmpd vpcmpltq vpcmpleq vpcmpneqq vpcmpnltq vpcmpnleq vpcmpq vpcmpequd vpcmpltud vpcmpleud vpcmpnequd vpcmpnltud vpcmpnleud vpcmpud vpcmpequq vpcmpltuq vpcmpleuq vpcmpnequq vpcmpnltuq vpcmpnleuq vpcmpuq vpcompressd vpcompressq vpermi2d vpermi2pd vpermi2ps vpermi2q vpermt2d vpermt2pd vpermt2ps vpermt2q vpexpandd vpexpandq vpmaxsq vpmaxuq vpminsq vpminuq vpmovdb vpmovdw vpmovqb vpmovqd vpmovqw vpmovsdb vpmovsdw vpmovsqb vpmovsqd vpmovsqw vpmovusdb vpmovusdw vpmovusqb vpmovusqd vpmovusqw vpord vporq vprold vprolq vprolvd vprolvq vprord vprorq vprorvd vprorvq vpscatterdd vpscatterdq vpscatterqd vpscatterqq vpsraq vpsravq vpternlogd vpternlogq vptestmd vptestmq vptestnmd vptestnmq vpxord vpxorq vrcp14pd vrcp14ps vrcp14sd vrcp14ss vrndscalepd vrndscaleps vrndscalesd vrndscaless vrsqrt14pd vrsqrt14ps vrsqrt14sd vrsqrt14ss vscalefpd vscalefps vscalefsd vscalefss vscatterdpd vscatterdps vscatterqpd vscatterqps vshuff32x4 vshuff64x2 vshufi32x4 vshufi64x2 kandnw kandw kmovw knotw kortestw korw kshiftlw kshiftrw kunpckbw kxnorw kxorw vpbroadcastmb2q vpbroadcastmw2d vpconflictd vpconflictq vplzcntd vplzcntq vexp2pd vexp2ps vrcp28pd vrcp28ps vrcp28sd vrcp28ss vrsqrt28pd vrsqrt28ps vrsqrt28sd vrsqrt28ss vgatherpf0dpd vgatherpf0dps vgatherpf0qpd vgatherpf0qps vgatherpf1dpd vgatherpf1dps vgatherpf1qpd vgatherpf1qps vscatterpf0dpd vscatterpf0dps vscatterpf0qpd vscatterpf0qps vscatterpf1dpd vscatterpf1dps vscatterpf1qpd vscatterpf1qps prefetchwt1 bndmk bndcl bndcu bndcn bndmov bndldx bndstx sha1rnds4 sha1nexte sha1msg1 sha1msg2 sha256rnds2 sha256msg1 sha256msg2 hint_nop0 hint_nop1 hint_nop2 hint_nop3 hint_nop4 hint_nop5 hint_nop6 hint_nop7 hint_nop8 hint_nop9 hint_nop10 hint_nop11 hint_nop12 hint_nop13 hint_nop14 hint_nop15 hint_nop16 hint_nop17 hint_nop18 hint_nop19 hint_nop20 hint_nop21 hint_nop22 hint_nop23 hint_nop24 hint_nop25 hint_nop26 hint_nop27 hint_nop28 hint_nop29 hint_nop30 hint_nop31 hint_nop32 hint_nop33 hint_nop34 hint_nop35 hint_nop36 hint_nop37 hint_nop38 hint_nop39 hint_nop40 hint_nop41 hint_nop42 hint_nop43 hint_nop44 hint_nop45 hint_nop46 hint_nop47 hint_nop48 hint_nop49 hint_nop50 hint_nop51 hint_nop52 hint_nop53 hint_nop54 hint_nop55 hint_nop56 hint_nop57 hint_nop58 hint_nop59 hint_nop60 hint_nop61 hint_nop62 hint_nop63",built_in:"ip eip rip al ah bl bh cl ch dl dh sil dil bpl spl r8b r9b r10b r11b r12b r13b r14b r15b ax bx cx dx si di bp sp r8w r9w r10w r11w r12w r13w r14w r15w eax ebx ecx edx esi edi ebp esp eip r8d r9d r10d r11d r12d r13d r14d r15d rax rbx rcx rdx rsi rdi rbp rsp r8 r9 r10 r11 r12 r13 r14 r15 cs ds es fs gs ss st st0 st1 st2 st3 st4 st5 st6 st7 mm0 mm1 mm2 mm3 mm4 mm5 mm6 mm7 xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 xmm12 xmm13 xmm14 xmm15 xmm16 xmm17 xmm18 xmm19 xmm20 xmm21 xmm22 xmm23 xmm24 xmm25 xmm26 xmm27 xmm28 xmm29 xmm30 xmm31 ymm0 ymm1 ymm2 ymm3 ymm4 ymm5 ymm6 ymm7 ymm8 ymm9 ymm10 ymm11 ymm12 ymm13 ymm14 ymm15 ymm16 ymm17 ymm18 ymm19 ymm20 ymm21 ymm22 ymm23 ymm24 ymm25 ymm26 ymm27 ymm28 ymm29 ymm30 ymm31 zmm0 zmm1 zmm2 zmm3 zmm4 zmm5 zmm6 zmm7 zmm8 zmm9 zmm10 zmm11 zmm12 zmm13 zmm14 zmm15 zmm16 zmm17 zmm18 zmm19 zmm20 zmm21 zmm22 zmm23 zmm24 zmm25 zmm26 zmm27 zmm28 zmm29 zmm30 zmm31 k0 k1 k2 k3 k4 k5 k6 k7 bnd0 bnd1 bnd2 bnd3 cr0 cr1 cr2 cr3 cr4 cr8 dr0 dr1 dr2 dr3 dr8 tr3 tr4 tr5 tr6 tr7 r0 r1 r2 r3 r4 r5 r6 r7 r0b r1b r2b r3b r4b r5b r6b r7b r0w r1w r2w r3w r4w r5w r6w r7w r0d r1d r2d r3d r4d r5d r6d r7d r0h r1h r2h r3h r0l r1l r2l r3l r4l r5l r6l r7l r8l r9l r10l r11l r12l r13l r14l r15l db dw dd dq dt ddq do dy dz resb resw resd resq rest resdq reso resy resz incbin equ times byte word dword qword nosplit rel abs seg wrt strict near far a32 ptr",meta:"%define %xdefine %+ %undef %defstr %deftok %assign %strcat %strlen %substr %rotate %elif %else %endif %if %ifmacro %ifctx %ifidn %ifidni %ifid %ifnum %ifstr %iftoken %ifempty %ifenv %error %warning %fatal %rep %endrep %include %push %pop %repl %pathsearch %depend %use %arg %stacksize %local %line %comment %endcomment .nolist __FILE__ __LINE__ __SECT__ __BITS__ __OUTPUT_FORMAT__ __DATE__ __TIME__ __DATE_NUM__ __TIME_NUM__ __UTC_DATE__ __UTC_TIME__ __UTC_DATE_NUM__ __UTC_TIME_NUM__ __PASS__ struc endstruc istruc at iend align alignb sectalign daz nodaz up down zero default option assume public bits use16 use32 use64 default section segment absolute extern global common cpu float __utf16__ __utf16le__ __utf16be__ __utf32__ __utf32le__ __utf32be__ __float8__ __float16__ __float32__ __float64__ __float80m__ __float80e__ __float128l__ __float128h__ __Infinity__ __QNaN__ __SNaN__ Inf NaN QNaN SNaN float8 float16 float32 float64 float80m float80e float128l float128h __FLOAT_DAZ__ __FLOAT_ROUND__ __FLOAT__"},contains:[s.COMMENT(";","$",{relevance:0}),{className:"number",variants:[{begin:"\\b(?:([0-9][0-9_]*)?\\.[0-9_]*(?:[eE][+-]?[0-9_]+)?|(0[Xx])?[0-9][0-9_]*\\.?[0-9_]*(?:[pP](?:[+-]?[0-9_]+)?)?)\\b",relevance:0},{begin:"\\$[0-9][0-9A-Fa-f]*",relevance:0},{begin:"\\b(?:[0-9A-Fa-f][0-9A-Fa-f_]*[Hh]|[0-9][0-9_]*[DdTt]?|[0-7][0-7_]*[QqOo]|[0-1][0-1_]*[BbYy])\\b"},{begin:"\\b(?:0[Xx][0-9A-Fa-f_]+|0[DdTt][0-9_]+|0[QqOo][0-7_]+|0[BbYy][0-1_]+)\\b"}]},s.QUOTE_STRING_MODE,{className:"string",variants:[{begin:"'",end:"[^\\\\]'"},{begin:"`",end:"[^\\\\]`"}],relevance:0},{className:"symbol",variants:[{begin:"^\\s*[A-Za-z._?][A-Za-z0-9_$#@~.?]*(:|\\s+label)"},{begin:"^\\s*%%[A-Za-z0-9_$#@~.?]*:"}],relevance:0},{className:"subst",begin:"%[0-9]+",relevance:0},{className:"subst",begin:"%!S+",relevance:0},{className:"meta",begin:/^\s*\.[\w_-]+/}]}}}()); \ No newline at end of file diff --git a/docs/book/index.html b/docs/book/index.html index aa3d4aa..277093a 100644 --- a/docs/book/index.html +++ b/docs/book/index.html @@ -1,14 +1,14 @@ - + - Home - Provisioning Platform Documentation + Introduction - Provisioning Platform Documentation - + @@ -34,7 +34,7 @@ @@ -76,7 +76,7 @@ try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { } if (theme === null || theme === undefined) { theme = default_theme; } const html = document.documentElement; - html.classList.remove('ayu') + html.classList.remove('rust') html.classList.add(theme); html.classList.add("js"); @@ -140,10 +140,10 @@ - + - + @@ -173,353 +173,81 @@

- Provisioning Logo + Provisioning Logo

- Provisioning + Provisioning

Provisioning Platform Documentation

-

Last Updated: 2025-01-02 (Phase 3.A Cleanup Complete) -Status: ✅ Primary documentation source (145 files consolidated)

-

Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell, -Nickel, and Rust.

-
-

Note: Architecture Decision Records (ADRs) and design documentation are in docs/ -directory. This location contains user-facing, operational, and product documentation.

-
-
-

Quick Navigation

-

🚀 Getting Started

-
- - - - -
DocumentDescriptionAudience
Installation GuideInstall and configure the systemNew Users
Getting StartedFirst steps and basic conceptsNew Users
Quick ReferenceCommand cheat sheetAll Users
From Scratch GuideComplete deployment walkthroughNew Users
-
-

📚 User Guides

-
- - - - - - - - - - -
DocumentDescription
CLI ReferenceComplete command reference
Workspace ManagementWorkspace creation and management
Workspace SwitchingSwitch between workspaces
Infrastructure ManagementServer, taskserv, cluster operations
Service ManagementPlatform service lifecycle management
OCI RegistryOCI artifact management
Gitea IntegrationGit workflow and collaboration
CoreDNS GuideDNS management
Test EnvironmentsContainerized testing
Extension DevelopmentCreate custom extensions
-
-

🏗️ Architecture

-
- - - - - -
DocumentDescription
System OverviewHigh-level architecture
Multi-Repo ArchitectureRepository structure and OCI distribution
Design PrinciplesArchitectural philosophy
Integration PatternsSystem integration patterns
Orchestrator ModelHybrid orchestration architecture
-
-

📋 Architecture Decision Records (ADRs)

-
- - - - - - -
ADRTitleStatus
ADR-001Project Structure DecisionAccepted
ADR-002Distribution StrategyAccepted
ADR-003Workspace IsolationAccepted
ADR-004Hybrid ArchitectureAccepted
ADR-005Extension FrameworkAccepted
ADR-006CLI RefactoringAccepted
-
-

🔌 API Documentation

-
- - - - - -
DocumentDescription
REST APIHTTP API endpoints
WebSocket APIReal-time event streams
Extensions APIExtension integration APIs
SDKsClient libraries
Integration ExamplesAPI usage examples
-
-

🛠️ Development

-
- - - - - - -
DocumentDescription
Development READMEDeveloper overview
Implementation GuideImplementation details
Provider DevelopmentCreate cloud providers
Taskserv DevelopmentCreate task services
Extension FrameworkExtension system
Command HandlersCLI command development
-
-

🐛 Troubleshooting

-
- -
DocumentDescription
Troubleshooting GuideCommon issues and solutions
-
-

📖 How-To Guides

-
- - - -
DocumentDescription
From ScratchComplete deployment from zero
Update InfrastructureSafe update procedures
Customize InfrastructureLayer and template customization
-
-

🔐 Configuration

-
- -
DocumentDescription
Workspace Config ArchitectureConfiguration architecture
-
-

📦 Quick References

-
- - -
DocumentDescription
Quickstart CheatsheetCommand shortcuts
OCI Quick ReferenceOCI operations
-
-
+

Welcome to the Provisioning Platform documentation. This is an enterprise-grade Infrastructure +as Code (IaC) platform built with Rust, Nushell, and Nickel.

+

What is Provisioning

+

Provisioning is a comprehensive infrastructure automation platform that manages complete +infrastructure lifecycles across multiple cloud providers. The platform emphasizes type-safety, +configuration-driven design, and workspace-first organization.

+

Key Features

+
    +
  • Workspace Management: Default mode for organizing infrastructure, settings, schemas, and extensions
  • +
  • Type-Safe Configuration: Nickel-based configuration system with validation and contracts
  • +
  • Multi-Cloud Support: Unified interface for AWS, UpCloud, and local providers
  • +
  • Modular CLI Architecture: 111+ commands with 84% code reduction through modularity
  • +
  • Batch Workflow Engine: Orchestrate complex multi-cloud operations
  • +
  • Complete Security System: Authentication, authorization, encryption, and compliance
  • +
  • Extensible Architecture: Custom providers, task services, and plugins
  • +
+

Getting Started

+

New users should start with:

+
    +
  1. Prerequisites - System requirements and dependencies
  2. +
  3. Installation - Install the platform
  4. +
  5. Quick Start - 5-minute deployment tutorial
  6. +
  7. First Deployment - Comprehensive walkthrough
  8. +

Documentation Structure

-
provisioning/docs/src/
-├── README.md (this file)          # Documentation hub
-├── getting-started/               # Getting started guides
-│   ├── installation-guide.md
-│   ├── getting-started.md
-│   └── quickstart-cheatsheet.md
-├── architecture/                  # System architecture
-│   ├── adr/                       # Architecture Decision Records
-│   ├── design-principles.md
-│   ├── integration-patterns.md
-│   ├── system-overview.md
-│   └── ... (and 10+ more architecture docs)
-├── infrastructure/                # Infrastructure guides
-│   ├── cli-reference.md
-│   ├── workspace-setup.md
-│   ├── workspace-switching-guide.md
-│   └── infrastructure-management.md
-├── api-reference/                 # API documentation
-│   ├── rest-api.md
-│   ├── websocket.md
-│   ├── integration-examples.md
-│   └── sdks.md
-├── development/                   # Developer guides
-│   ├── README.md
-│   ├── implementation-guide.md
-│   ├── quick-provider-guide.md
-│   ├── taskserv-developer-guide.md
-│   └── ... (15+ more developer docs)
-├── guides/                        # How-to guides
-│   ├── from-scratch.md
-│   ├── update-infrastructure.md
-│   └── customize-infrastructure.md
-├── operations/                    # Operations guides
-│   ├── service-management-guide.md
-│   ├── coredns-guide.md
-│   └── ... (more operations docs)
-├── security/                      # Security docs
-├── integration/                   # Integration guides
-├── testing/                       # Testing docs
-├── configuration/                 # Configuration docs
-├── troubleshooting/               # Troubleshooting guides
-└── quick-reference/               # Quick references
-
-
-

Key Concepts

-

Infrastructure as Code (IaC)

-

The provisioning platform uses declarative configuration to manage infrastructure. Instead of manually creating resources, you define what you -want in Nickel configuration files, and the system makes it happen.

-

Mode-Based Architecture

-

The system supports four operational modes:

    -
  • Solo: Single developer local development
  • -
  • Multi-user: Team collaboration with shared services
  • -
  • CI/CD: Automated pipeline execution
  • -
  • Enterprise: Production deployment with strict compliance
  • +
  • Getting Started: Installation and initial setup
  • +
  • User Guides: Workflow tutorials and best practices
  • +
  • Infrastructure as Code: Nickel configuration and schema reference
  • +
  • Platform Features: Core capabilities and systems
  • +
  • Operations: Deployment, monitoring, and maintenance
  • +
  • Security: Complete security system documentation
  • +
  • Development: Extension and plugin development
  • +
  • API Reference: REST API and CLI command reference
  • +
  • Architecture: System design and ADRs
  • +
  • Examples: Practical use cases and patterns
  • +
  • Troubleshooting: Problem-solving guides
-

Extension System

-

Extensibility through:

+

Core Technologies

    -
  • Providers: Cloud platform integrations (AWS, UpCloud, Local)
  • -
  • Task Services: Infrastructure components (Kubernetes, databases, etc.)
  • -
  • Clusters: Complete deployment configurations
  • +
  • Rust: Platform services and performance-critical components
  • +
  • Nushell: Scripting, CLI, and automation
  • +
  • Nickel: Type-safe infrastructure configuration
  • +
  • SecretumVault: Secrets management integration
-

OCI-Native Distribution

-

Extensions and packages distributed as OCI artifacts, enabling:

+

Workspace-First Approach

+

Provisioning uses workspaces as the default organizational unit. A workspace contains:

    -
  • Industry-standard packaging
  • -
  • Efficient caching and bandwidth
  • -
  • Version pinning and rollback
  • -
  • Air-gapped deployments
  • +
  • Infrastructure definitions (Nickel schemas)
  • +
  • Environment-specific settings
  • +
  • Custom extensions and providers
  • +
  • Deployment state and metadata
-
-

Documentation by Role

-

For New Users

-
    -
  1. Start with Installation Guide
  2. -
  3. Read Getting Started
  4. -
  5. Follow From Scratch Guide
  6. -
  7. Reference Quickstart Cheatsheet
  8. -
-

For Developers

-
    -
  1. Review System Overview
  2. -
  3. Study Design Principles
  4. -
  5. Read relevant ADRs
  6. -
  7. Follow Development Guide
  8. -
  9. Reference Nickel Quick Reference
  10. -
-

For Operators

-
    -
  1. Understand Mode System
  2. -
  3. Learn Service Management
  4. -
  5. Review Infrastructure Management
  6. -
  7. Study OCI Registry
  8. -
-

For Architects

-
    -
  1. Read System Overview
  2. -
  3. Study all ADRs
  4. -
  5. Review Integration Patterns
  6. -
  7. Understand Multi-Repo Architecture
  8. -
-
-

System Capabilities

-

✅ Infrastructure Automation

+

All operations work within workspace context, providing isolation and consistency.

+

Support and Community

    -
  • Multi-cloud support (AWS, UpCloud, Local)
  • -
  • Declarative configuration with Nickel
  • -
  • Automated dependency resolution
  • -
  • Batch operations with rollback
  • +
  • Issues: Report bugs and request features on GitHub
  • +
  • Documentation: This documentation site
  • +
  • Examples: See the Examples section
-

✅ Workflow Orchestration

-
    -
  • Hybrid Rust/Nushell orchestration
  • -
  • Checkpoint-based recovery
  • -
  • Parallel execution with limits
  • -
  • Real-time monitoring
  • -
-

✅ Test Environments

-
    -
  • Containerized testing
  • -
  • Multi-node cluster simulation
  • -
  • Topology templates
  • -
  • Automated cleanup
  • -
-

✅ Mode-Based Operation

-
    -
  • Solo: Local development
  • -
  • Multi-user: Team collaboration
  • -
  • CI/CD: Automated pipelines
  • -
  • Enterprise: Production deployment
  • -
-

✅ Extension Management

-
    -
  • OCI-native distribution
  • -
  • Automatic dependency resolution
  • -
  • Version management
  • -
  • Local and remote sources
  • -
-
-

Key Achievements

-

🚀 Batch Workflow System (v3.1.0)

-
    -
  • Provider-agnostic batch operations
  • -
  • Mixed provider support (UpCloud + AWS + local)
  • -
  • Dependency resolution with soft/hard dependencies
  • -
  • Real-time monitoring and rollback
  • -
-

🏗️ Hybrid Orchestrator (v3.0.0)

-
    -
  • Solves Nushell deep call stack limitations
  • -
  • Preserves all business logic
  • -
  • REST API for external integration
  • -
  • Checkpoint-based state management
  • -
-

⚙️ Configuration System (v2.0.0)

-
    -
  • Migrated from ENV to config-driven
  • -
  • Hierarchical configuration loading
  • -
  • Variable interpolation
  • -
  • True IaC without hardcoded fallbacks
  • -
-

🎯 Modular CLI (v3.2.0)

-
    -
  • 84% reduction in main file size
  • -
  • Domain-driven handlers
  • -
  • 80+ shortcuts
  • -
  • Bi-directional help system
  • -
-

🧪 Test Environment Service (v3.4.0)

-
    -
  • Automated containerized testing
  • -
  • Multi-node cluster topologies
  • -
  • CI/CD integration ready
  • -
  • Template-based configurations
  • -
-

🔄 Workspace Switching (v2.0.5)

-
    -
  • Centralized workspace management
  • -
  • Single-command workspace switching
  • -
  • Active workspace tracking
  • -
  • User preference system
  • -
-
-

Technology Stack

-
- - - - - - -
ComponentTechnologyPurpose
Core CLINushell 0.107.1Shell and scripting
ConfigurationNickel 1.0.0+Type-safe IaC
OrchestratorRustHigh-performance coordination
TemplatesJinja2 (nu_plugin_tera)Code generation
SecretsSOPS 3.10.2 + Age 1.2.1Encryption
DistributionOCI (skopeo/crane/oras)Artifact management
-
-
-

Support

-

Getting Help

-
    -
  • Documentation: You’re reading it!
  • -
  • Quick Reference: Run provisioning sc or provisioning guide quickstart
  • -
  • Help System: Run provisioning help or provisioning <command> help
  • -
  • Interactive Shell: Run provisioning nu for Nushell REPL
  • -
-

Reporting Issues

-
    -
  • Check Troubleshooting Guide
  • -
  • Review FAQ
  • -
  • Enable debug mode: provisioning --debug <command>
  • -
  • Check logs: provisioning platform logs <service>
  • -
-
-

Contributing

-

This project welcomes contributions! See Development Guide for:

-
    -
  • Development setup
  • -
  • Code style guidelines
  • -
  • Testing requirements
  • -
  • Pull request process
  • -
-

License

-

[Add license information]

-
-

Version History

-
- - - - - - - - -
VersionDateMajor Changes
3.5.02025-10-06Mode system, OCI registry, comprehensive documentation
3.4.02025-10-06Test environment service
3.3.02025-09-30Interactive guides system
3.2.02025-09-30Modular CLI refactoring
3.1.02025-09-25Batch workflow system
3.0.02025-09-25Hybrid orchestrator architecture
2.0.52025-10-02Workspace switching system
2.0.02025-09-23Configuration system migration
-
-
-

Maintained By: Provisioning Team -Last Review: 2025-10-06 -Next Review: 2026-01-06

+

See project LICENSE file for details.

+ + + + + + + + diff --git a/docs/book/print.html b/docs/book/print.html index c296411..1b7f9ee 100644 --- a/docs/book/print.html +++ b/docs/book/print.html @@ -1,5 +1,5 @@ - + @@ -9,7 +9,7 @@ - + @@ -35,7 +35,7 @@ @@ -77,7 +77,7 @@ try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { } if (theme === null || theme === undefined) { theme = default_theme; } const html = document.documentElement; - html.classList.remove('ayu') + html.classList.remove('rust') html.classList.add(theme); html.classList.add("js"); @@ -141,7 +141,7 @@ - + @@ -171,81718 +171,21165 @@

- Provisioning Logo + Provisioning Logo

- Provisioning + Provisioning

Provisioning Platform Documentation

-

Last Updated: 2025-01-02 (Phase 3.A Cleanup Complete) -Status: ✅ Primary documentation source (145 files consolidated)

-

Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell, -Nickel, and Rust.

-
-

Note: Architecture Decision Records (ADRs) and design documentation are in docs/ -directory. This location contains user-facing, operational, and product documentation.

-
-
-

Quick Navigation

-

🚀 Getting Started

-
- - - - -
DocumentDescriptionAudience
Installation GuideInstall and configure the systemNew Users
Getting StartedFirst steps and basic conceptsNew Users
Quick ReferenceCommand cheat sheetAll Users
From Scratch GuideComplete deployment walkthroughNew Users
-
-

📚 User Guides

-
- - - - - - - - - - -
DocumentDescription
CLI ReferenceComplete command reference
Workspace ManagementWorkspace creation and management
Workspace SwitchingSwitch between workspaces
Infrastructure ManagementServer, taskserv, cluster operations
Service ManagementPlatform service lifecycle management
OCI RegistryOCI artifact management
Gitea IntegrationGit workflow and collaboration
CoreDNS GuideDNS management
Test EnvironmentsContainerized testing
Extension DevelopmentCreate custom extensions
-
-

🏗️ Architecture

-
- - - - - -
DocumentDescription
System OverviewHigh-level architecture
Multi-Repo ArchitectureRepository structure and OCI distribution
Design PrinciplesArchitectural philosophy
Integration PatternsSystem integration patterns
Orchestrator ModelHybrid orchestration architecture
-
-

📋 Architecture Decision Records (ADRs)

-
- - - - - - -
ADRTitleStatus
ADR-001Project Structure DecisionAccepted
ADR-002Distribution StrategyAccepted
ADR-003Workspace IsolationAccepted
ADR-004Hybrid ArchitectureAccepted
ADR-005Extension FrameworkAccepted
ADR-006CLI RefactoringAccepted
-
-

🔌 API Documentation

-
- - - - - -
DocumentDescription
REST APIHTTP API endpoints
WebSocket APIReal-time event streams
Extensions APIExtension integration APIs
SDKsClient libraries
Integration ExamplesAPI usage examples
-
-

🛠️ Development

-
- - - - - - -
DocumentDescription
Development READMEDeveloper overview
Implementation GuideImplementation details
Provider DevelopmentCreate cloud providers
Taskserv DevelopmentCreate task services
Extension FrameworkExtension system
Command HandlersCLI command development
-
-

🐛 Troubleshooting

-
- -
DocumentDescription
Troubleshooting GuideCommon issues and solutions
-
-

📖 How-To Guides

-
- - - -
DocumentDescription
From ScratchComplete deployment from zero
Update InfrastructureSafe update procedures
Customize InfrastructureLayer and template customization
-
-

🔐 Configuration

-
- -
DocumentDescription
Workspace Config ArchitectureConfiguration architecture
-
-

📦 Quick References

-
- - -
DocumentDescription
Quickstart CheatsheetCommand shortcuts
OCI Quick ReferenceOCI operations
-
-
+

Welcome to the Provisioning Platform documentation. This is an enterprise-grade Infrastructure +as Code (IaC) platform built with Rust, Nushell, and Nickel.

+

What is Provisioning

+

Provisioning is a comprehensive infrastructure automation platform that manages complete +infrastructure lifecycles across multiple cloud providers. The platform emphasizes type-safety, +configuration-driven design, and workspace-first organization.

+

Key Features

+
    +
  • Workspace Management: Default mode for organizing infrastructure, settings, schemas, and extensions
  • +
  • Type-Safe Configuration: Nickel-based configuration system with validation and contracts
  • +
  • Multi-Cloud Support: Unified interface for AWS, UpCloud, and local providers
  • +
  • Modular CLI Architecture: 111+ commands with 84% code reduction through modularity
  • +
  • Batch Workflow Engine: Orchestrate complex multi-cloud operations
  • +
  • Complete Security System: Authentication, authorization, encryption, and compliance
  • +
  • Extensible Architecture: Custom providers, task services, and plugins
  • +
+

Getting Started

+

New users should start with:

+
    +
  1. Prerequisites - System requirements and dependencies
  2. +
  3. Installation - Install the platform
  4. +
  5. Quick Start - 5-minute deployment tutorial
  6. +
  7. First Deployment - Comprehensive walkthrough
  8. +

Documentation Structure

-
provisioning/docs/src/
-├── README.md (this file)          # Documentation hub
-├── getting-started/               # Getting started guides
-│   ├── installation-guide.md
-│   ├── getting-started.md
-│   └── quickstart-cheatsheet.md
-├── architecture/                  # System architecture
-│   ├── adr/                       # Architecture Decision Records
-│   ├── design-principles.md
-│   ├── integration-patterns.md
-│   ├── system-overview.md
-│   └── ... (and 10+ more architecture docs)
-├── infrastructure/                # Infrastructure guides
-│   ├── cli-reference.md
-│   ├── workspace-setup.md
-│   ├── workspace-switching-guide.md
-│   └── infrastructure-management.md
-├── api-reference/                 # API documentation
-│   ├── rest-api.md
-│   ├── websocket.md
-│   ├── integration-examples.md
-│   └── sdks.md
-├── development/                   # Developer guides
-│   ├── README.md
-│   ├── implementation-guide.md
-│   ├── quick-provider-guide.md
-│   ├── taskserv-developer-guide.md
-│   └── ... (15+ more developer docs)
-├── guides/                        # How-to guides
-│   ├── from-scratch.md
-│   ├── update-infrastructure.md
-│   └── customize-infrastructure.md
-├── operations/                    # Operations guides
-│   ├── service-management-guide.md
-│   ├── coredns-guide.md
-│   └── ... (more operations docs)
-├── security/                      # Security docs
-├── integration/                   # Integration guides
-├── testing/                       # Testing docs
-├── configuration/                 # Configuration docs
-├── troubleshooting/               # Troubleshooting guides
-└── quick-reference/               # Quick references
-
-
-

Key Concepts

-

Infrastructure as Code (IaC)

-

The provisioning platform uses declarative configuration to manage infrastructure. Instead of manually creating resources, you define what you -want in Nickel configuration files, and the system makes it happen.

-

Mode-Based Architecture

-

The system supports four operational modes:

    -
  • Solo: Single developer local development
  • -
  • Multi-user: Team collaboration with shared services
  • -
  • CI/CD: Automated pipeline execution
  • -
  • Enterprise: Production deployment with strict compliance
  • +
  • Getting Started: Installation and initial setup
  • +
  • User Guides: Workflow tutorials and best practices
  • +
  • Infrastructure as Code: Nickel configuration and schema reference
  • +
  • Platform Features: Core capabilities and systems
  • +
  • Operations: Deployment, monitoring, and maintenance
  • +
  • Security: Complete security system documentation
  • +
  • Development: Extension and plugin development
  • +
  • API Reference: REST API and CLI command reference
  • +
  • Architecture: System design and ADRs
  • +
  • Examples: Practical use cases and patterns
  • +
  • Troubleshooting: Problem-solving guides
-

Extension System

-

Extensibility through:

+

Core Technologies

    -
  • Providers: Cloud platform integrations (AWS, UpCloud, Local)
  • -
  • Task Services: Infrastructure components (Kubernetes, databases, etc.)
  • -
  • Clusters: Complete deployment configurations
  • +
  • Rust: Platform services and performance-critical components
  • +
  • Nushell: Scripting, CLI, and automation
  • +
  • Nickel: Type-safe infrastructure configuration
  • +
  • SecretumVault: Secrets management integration
-

OCI-Native Distribution

-

Extensions and packages distributed as OCI artifacts, enabling:

+

Workspace-First Approach

+

Provisioning uses workspaces as the default organizational unit. A workspace contains:

    -
  • Industry-standard packaging
  • -
  • Efficient caching and bandwidth
  • -
  • Version pinning and rollback
  • -
  • Air-gapped deployments
  • +
  • Infrastructure definitions (Nickel schemas)
  • +
  • Environment-specific settings
  • +
  • Custom extensions and providers
  • +
  • Deployment state and metadata
-
-

Documentation by Role

-

For New Users

-
    -
  1. Start with Installation Guide
  2. -
  3. Read Getting Started
  4. -
  5. Follow From Scratch Guide
  6. -
  7. Reference Quickstart Cheatsheet
  8. -
-

For Developers

-
    -
  1. Review System Overview
  2. -
  3. Study Design Principles
  4. -
  5. Read relevant ADRs
  6. -
  7. Follow Development Guide
  8. -
  9. Reference Nickel Quick Reference
  10. -
-

For Operators

-
    -
  1. Understand Mode System
  2. -
  3. Learn Service Management
  4. -
  5. Review Infrastructure Management
  6. -
  7. Study OCI Registry
  8. -
-

For Architects

-
    -
  1. Read System Overview
  2. -
  3. Study all ADRs
  4. -
  5. Review Integration Patterns
  6. -
  7. Understand Multi-Repo Architecture
  8. -
-
-

System Capabilities

-

✅ Infrastructure Automation

+

All operations work within workspace context, providing isolation and consistency.

+

Support and Community

    -
  • Multi-cloud support (AWS, UpCloud, Local)
  • -
  • Declarative configuration with Nickel
  • -
  • Automated dependency resolution
  • -
  • Batch operations with rollback
  • +
  • Issues: Report bugs and request features on GitHub
  • +
  • Documentation: This documentation site
  • +
  • Examples: See the Examples section
-

✅ Workflow Orchestration

-
    -
  • Hybrid Rust/Nushell orchestration
  • -
  • Checkpoint-based recovery
  • -
  • Parallel execution with limits
  • -
  • Real-time monitoring
  • -
-

✅ Test Environments

-
    -
  • Containerized testing
  • -
  • Multi-node cluster simulation
  • -
  • Topology templates
  • -
  • Automated cleanup
  • -
-

✅ Mode-Based Operation

-
    -
  • Solo: Local development
  • -
  • Multi-user: Team collaboration
  • -
  • CI/CD: Automated pipelines
  • -
  • Enterprise: Production deployment
  • -
-

✅ Extension Management

-
    -
  • OCI-native distribution
  • -
  • Automatic dependency resolution
  • -
  • Version management
  • -
  • Local and remote sources
  • -
-
-

Key Achievements

-

🚀 Batch Workflow System (v3.1.0)

-
    -
  • Provider-agnostic batch operations
  • -
  • Mixed provider support (UpCloud + AWS + local)
  • -
  • Dependency resolution with soft/hard dependencies
  • -
  • Real-time monitoring and rollback
  • -
-

🏗️ Hybrid Orchestrator (v3.0.0)

-
    -
  • Solves Nushell deep call stack limitations
  • -
  • Preserves all business logic
  • -
  • REST API for external integration
  • -
  • Checkpoint-based state management
  • -
-

⚙️ Configuration System (v2.0.0)

-
    -
  • Migrated from ENV to config-driven
  • -
  • Hierarchical configuration loading
  • -
  • Variable interpolation
  • -
  • True IaC without hardcoded fallbacks
  • -
-

🎯 Modular CLI (v3.2.0)

-
    -
  • 84% reduction in main file size
  • -
  • Domain-driven handlers
  • -
  • 80+ shortcuts
  • -
  • Bi-directional help system
  • -
-

🧪 Test Environment Service (v3.4.0)

-
    -
  • Automated containerized testing
  • -
  • Multi-node cluster topologies
  • -
  • CI/CD integration ready
  • -
  • Template-based configurations
  • -
-

🔄 Workspace Switching (v2.0.5)

-
    -
  • Centralized workspace management
  • -
  • Single-command workspace switching
  • -
  • Active workspace tracking
  • -
  • User preference system
  • -
-
-

Technology Stack

-
- - - - - - -
ComponentTechnologyPurpose
Core CLINushell 0.107.1Shell and scripting
ConfigurationNickel 1.0.0+Type-safe IaC
OrchestratorRustHigh-performance coordination
TemplatesJinja2 (nu_plugin_tera)Code generation
SecretsSOPS 3.10.2 + Age 1.2.1Encryption
DistributionOCI (skopeo/crane/oras)Artifact management
-
-
-

Support

-

Getting Help

-
    -
  • Documentation: You’re reading it!
  • -
  • Quick Reference: Run provisioning sc or provisioning guide quickstart
  • -
  • Help System: Run provisioning help or provisioning <command> help
  • -
  • Interactive Shell: Run provisioning nu for Nushell REPL
  • -
-

Reporting Issues

-
    -
  • Check Troubleshooting Guide
  • -
  • Review FAQ
  • -
  • Enable debug mode: provisioning --debug <command>
  • -
  • Check logs: provisioning platform logs <service>
  • -
-
-

Contributing

-

This project welcomes contributions! See Development Guide for:

-
    -
  • Development setup
  • -
  • Code style guidelines
  • -
  • Testing requirements
  • -
  • Pull request process
  • -
-

License

-

[Add license information]

-
-

Version History

-
- - - - - - - - -
VersionDateMajor Changes
3.5.02025-10-06Mode system, OCI registry, comprehensive documentation
3.4.02025-10-06Test environment service
3.3.02025-09-30Interactive guides system
3.2.02025-09-30Modular CLI refactoring
3.1.02025-09-25Batch workflow system
3.0.02025-09-25Hybrid orchestrator architecture
2.0.52025-10-02Workspace switching system
2.0.02025-09-23Configuration system migration
-
-
-

Maintained By: Provisioning Team -Last Review: 2025-10-06 -Next Review: 2026-01-06

-

Installation Guide

-

This guide will help you install Infrastructure Automation on your machine and get it ready for use.

+

See project LICENSE file for details.

+

Getting Started

+

Your journey to infrastructure automation starts here. This section guides you from zero to your first successful deployment in minutes.

+

Overview

+

Getting started with Provisioning involves:

+
    +
  • Verifying prerequisites - System requirements, tools, cloud accounts
  • +
  • Installing platform - Binary or container installation
  • +
  • Initial configuration - Environment setup, credentials, workspaces
  • +
  • First deployment - Deploy actual infrastructure in 5 minutes
  • +
  • Verification - Validate everything is working correctly
  • +
+

By the end of this section, you’ll have a running Provisioning installation and have deployed your first infrastructure.

+

Quick Start Guides

+

Starting from Scratch

+
    +
  • +

    Prerequisites - System requirements (Nushell 0.109.1+, Docker/Podman optional), cloud account setup, tool installation.

    +
  • +
  • +

    Installation - Step-by-step installation: binary download, container, or source build with platform verification.

    +
  • +
  • +

    Quick Start - 5-minute guide: install → configure → deploy infrastructure (requires 5 minutes and your AWS/UpCloud credentials).

    +
  • +
  • +

    First Deployment - Deploy your first infrastructure: create workspace, configure provider, deploy resources, verify success.

    +
  • +
  • +

    Verification - Validate installation: check system health, test CLI commands, verify cloud integration, confirm resource creation.

    +
  • +

What You’ll Learn

-
    -
  • System requirements and prerequisites
  • -
  • Different installation methods
  • -
  • How to verify your installation
  • -
  • Setting up your environment
  • -
  • Troubleshooting common installation issues
  • -
-

System Requirements

-

Operating System Support

-
    -
  • Linux: Any modern distribution (Ubuntu 20.04+, CentOS 8+, Debian 11+)
  • -
  • macOS: 11.0+ (Big Sur and newer)
  • -
  • Windows: Windows 10/11 with WSL2
  • -
-

Hardware Requirements

-
- - - - -
ComponentMinimumRecommended
CPU2 cores4+ cores
RAM4 GB8+ GB
Storage2 GB free10+ GB free
NetworkInternet connectionBroadband connection
-
-

Architecture Support

-
    -
  • x86_64 (Intel/AMD 64-bit) - Full support
  • -
  • ARM64 (Apple Silicon, ARM servers) - Full support
  • -
-

Prerequisites

-

Before installation, ensure you have:

+

By completing this section, you’ll know how to:

    -
  1. Administrative privileges - Required for system-wide installation
  2. -
  3. Internet connection - For downloading dependencies
  4. -
  5. Terminal/Command line access - Basic command line knowledge helpful
  6. +
  7. ✅ Install and configure Provisioning
  8. +
  9. ✅ Create your first workspace
  10. +
  11. ✅ Configure cloud providers (AWS, UpCloud, Hetzner, etc.)
  12. +
  13. ✅ Write simple Nickel infrastructure definitions
  14. +
  15. ✅ Deploy infrastructure using Provisioning
  16. +
  17. ✅ Verify and manage deployed resources
-

Pre-installation Checklist

-
# Check your system
-uname -a                    # View system information
-df -h                      # Check available disk space
-curl --version             # Verify internet connectivity
-
-

Installation Methods

- -

This is the easiest method for most users.

-

Step 1: Download the Package

-
# Download the latest release package
-wget https://releases.example.com/provisioning-latest.tar.gz
-
-# Or using curl
-curl -LO https://releases.example.com/provisioning-latest.tar.gz
-
-

Step 2: Extract and Install

-
# Extract the package
-tar xzf provisioning-latest.tar.gz
-
-# Navigate to extracted directory
-cd provisioning-*
-
-# Run the installation script
-sudo ./install-provisioning
-
-

The installer will:

+

Prerequisites Checklist

+

Before starting, verify you have:

    -
  • Install to /usr/local/provisioning
  • -
  • Create a global command at /usr/local/bin/provisioning
  • -
  • Install all required dependencies
  • -
  • Set up configuration templates
  • +
  • +Linux, macOS, or Windows with WSL2
  • +
  • +Nushell 0.109.1 or newer (nu --version)
  • +
  • +2GB+ RAM and 100MB disk space
  • +
  • +Internet connectivity
  • +
  • +Cloud account (AWS, UpCloud, Hetzner, or local demo mode)
  • +
  • +Access credentials or API tokens for cloud provider
-

Method 2: Container Installation

-

For containerized environments or testing.

-

Using Docker

-
# Pull the provisioning container
-docker pull provisioning:latest
+

Missing something? See Prerequisites for detailed instructions.

+

5-Minute Quick Start

+

If you’re impatient, here’s the ultra-quick path:

+
# 1. Install (2 minutes)
+curl -fsSL  [https://provisioning.io/install.sh](https://provisioning.io/install.sh) | sh
 
-# Create a container with persistent storage
-docker run -it --name provisioning-setup \
-  -v ~/provisioning-data:/data \
-  provisioning:latest
-
-# Install to host system (optional)
-docker cp provisioning-setup:/usr/local/provisioning ./
-sudo cp -r ./provisioning /usr/local/
-sudo ln -sf /usr/local/provisioning/bin/provisioning /usr/local/bin/provisioning
-
-

Using Podman

-
# Similar to Docker but with Podman
-podman pull provisioning:latest
-podman run -it --name provisioning-setup \
-  -v ~/provisioning-data:/data \
-  provisioning:latest
-
-

Method 3: Source Installation

-

For developers or custom installations.

-

Prerequisites for Source Installation

-
    -
  • Git - For cloning the repository
  • -
  • Build tools - Compiler toolchain for your platform
  • -
-

Installation Steps

-
# Clone the repository
-git clone https://github.com/your-org/provisioning.git
-cd provisioning
-
-# Run installation from source
-./distro/from-repo.sh
-
-# Or if you have development environment
-./distro/pack-install.sh
-
-

Method 4: Manual Installation

-

For advanced users who want complete control.

-
# Create installation directory
-sudo mkdir -p /usr/local/provisioning
-
-# Copy files (assumes you have the source)
-sudo cp -r ./* /usr/local/provisioning/
-
-# Create global command
-sudo ln -sf /usr/local/provisioning/core/nulib/provisioning /usr/local/bin/provisioning
-
-# Install dependencies manually
-./install-dependencies.sh
-
-

Installation Process Details

-

What Gets Installed

-

The installation process sets up:

-

1. Core System Files

-
/usr/local/provisioning/
-├── core/                 # Core provisioning logic
-├── providers/            # Cloud provider integrations
-├── taskservs/           # Infrastructure services
-├── cluster/             # Cluster configurations
-├── schemas/             # Configuration schemas (Nickel)
-├── templates/           # Template files
-└── resources/           # Project resources
-
-

2. Required Tools

-
- - - - - -
ToolVersionPurpose
Nushell0.107.1Primary shell and scripting
Nickel1.15.0+Configuration language
SOPS3.10.2Secret management
Age1.2.1Encryption
K9s0.50.6Kubernetes management
-
-

3. Nushell Plugins

-
    -
  • nu_plugin_tera - Template rendering
  • -
-

4. Configuration Files

-
    -
  • User configuration templates
  • -
  • Environment-specific configs
  • -
  • Default settings and schemas
  • -
-

Post-Installation Verification

-

Basic Verification

-
# Check if provisioning command is available
+# 2. Verify installation (30 seconds)
 provisioning --version
-
-# Verify installation
-provisioning env
-
-# Show comprehensive environment info
-provisioning allenv
-
-

Expected output should show:

-
✅ Provisioning v1.0.0 installed
-✅ All dependencies available
-✅ Configuration loaded successfully
-
-

Tool Verification

-
# Check individual tools
-nu --version              # Should show Nushell 0.109.0+
-nickel version            # Should show Nickel 1.5+
-sops --version           # Should show SOPS 3.10.2
-age --version            # Should show Age 1.2.1
-k9s version              # Should show K9s 0.50.6
-
-

Plugin Verification

-
# Start Nushell and check plugins
-nu -c "version | get installed_plugins"
-
-# Should include:
-# - nu_plugin_tera (template rendering)
-
-

Configuration Verification

-
# Validate configuration
-provisioning validate config
-
-# Should show:
-# ✅ Configuration validation passed!
-
-

Environment Setup

-

Shell Configuration

-

Add to your shell profile (~/.bashrc, ~/.zshrc, or ~/.profile):

-
# Add provisioning to PATH
-export PATH="/usr/local/bin:$PATH"
-
-# Optional: Set default provisioning directory
-export PROVISIONING="/usr/local/provisioning"
-
-

Configuration Initialization

-
# Initialize user configuration
-provisioning init config
-
-# This creates ~/.provisioning/config.user.toml
-
-

First-Time Setup

-
# Set up your first workspace
-mkdir -p ~/provisioning-workspace
-cd ~/provisioning-workspace
-
-# Initialize workspace
-provisioning init config dev
-
-# Verify setup
-provisioning env
-
-

Platform-Specific Instructions

-

Linux (Ubuntu/Debian)

-
# Install system dependencies
-sudo apt update
-sudo apt install -y curl wget tar
-
-# Proceed with standard installation
-wget https://releases.example.com/provisioning-latest.tar.gz
-tar xzf provisioning-latest.tar.gz
-cd provisioning-*
-sudo ./install-provisioning
-
-

Linux (RHEL/CentOS/Fedora)

-
# Install system dependencies
-sudo dnf install -y curl wget tar
-# or for older versions: sudo yum install -y curl wget tar
-
-# Proceed with standard installation
-
-

macOS

-
# Using Homebrew (if available)
-brew install curl wget
-
-# Or download directly
-curl -LO https://releases.example.com/provisioning-latest.tar.gz
-tar xzf provisioning-latest.tar.gz
-cd provisioning-*
-sudo ./install-provisioning
-
-

Windows (WSL2)

-
# In WSL2 terminal
-sudo apt update
-sudo apt install -y curl wget tar
-
-# Proceed with Linux installation steps
-wget https://releases.example.com/provisioning-latest.tar.gz
-# ... continue as Linux
-
-

Configuration Examples

-

Basic Configuration

-

Create ~/.provisioning/config.user.toml:

-
[core]
-name = "my-provisioning"
-
-[paths]
-base = "/usr/local/provisioning"
-infra = "~/provisioning-workspace"
-
-[debug]
-enabled = false
-log_level = "info"
-
-[providers]
-default = "local"
-
-[output]
-format = "yaml"
-
-

Development Configuration

-

For developers, use enhanced debugging:

-
[debug]
-enabled = true
-log_level = "debug"
-check = true
-
-[cache]
-enabled = false  # Disable caching during development
-
-

Upgrade and Migration

-

Upgrading from Previous Version

-
# Backup current installation
-sudo cp -r /usr/local/provisioning /usr/local/provisioning.backup
-
-# Download new version
-wget https://releases.example.com/provisioning-latest.tar.gz
-
-# Extract and install
-tar xzf provisioning-latest.tar.gz
-cd provisioning-*
-sudo ./install-provisioning
-
-# Verify upgrade
-provisioning --version
-
-

Migrating Configuration

-
# Backup your configuration
-cp -r ~/.provisioning ~/.provisioning.backup
-
-# Initialize new configuration
-provisioning init config
-
-# Manually merge important settings from backup
-
-

Troubleshooting Installation Issues

-

Common Installation Problems

-

Permission Denied Errors

-
# Problem: Cannot write to /usr/local
-# Solution: Use sudo
-sudo ./install-provisioning
-
-# Or install to user directory
-./install-provisioning --prefix=$HOME/provisioning
-export PATH="$HOME/provisioning/bin:$PATH"
-
-

Missing Dependencies

-
# Problem: curl/wget not found
-# Ubuntu/Debian solution:
-sudo apt install -y curl wget tar
-
-# RHEL/CentOS solution:
-sudo dnf install -y curl wget tar
-
-

Download Failures

-
# Problem: Cannot download package
-# Solution: Check internet connection and try alternative
-ping google.com
-
-# Try alternative download method
-curl -LO --retry 3 https://releases.example.com/provisioning-latest.tar.gz
-
-# Or use wget with retries
-wget --tries=3 https://releases.example.com/provisioning-latest.tar.gz
-
-

Extraction Failures

-
# Problem: Archive corrupted
-# Solution: Verify and re-download
-sha256sum provisioning-latest.tar.gz  # Check against published hash
-
-# Re-download if hash doesn't match
-rm provisioning-latest.tar.gz
-wget https://releases.example.com/provisioning-latest.tar.gz
-
-

Tool Installation Failures

-
# Problem: Nushell installation fails
-# Solution: Check architecture and OS compatibility
-uname -m    # Should show x86_64 or arm64
-uname -s    # Should show Linux, Darwin, etc.
-
-# Try manual tool installation
-./install-dependencies.sh --verbose
-
-

Verification Failures

-

Command Not Found

-
# Problem: 'provisioning' command not found
-# Check installation path
-ls -la /usr/local/bin/provisioning
-
-# If missing, create symlink
-sudo ln -sf /usr/local/provisioning/core/nulib/provisioning /usr/local/bin/provisioning
-
-# Add to PATH if needed
-export PATH="/usr/local/bin:$PATH"
-echo 'export PATH="/usr/local/bin:$PATH"' >> ~/.bashrc
-
-

Plugin Errors

-
# Problem: Plugin command not found
-# Solution: Ensure plugin is properly registered
-
-# Check available plugins
-nu -c "version | get installed_plugins"
-
-# If plugin missing, reload Nushell:
-exec nu
-
-

Configuration Errors

-
# Problem: Configuration validation fails
-# Solution: Initialize with template
-provisioning init config
-
-# Or validate and show errors
-provisioning validate config --detailed
-
-

Getting Help

-

If you encounter issues not covered here:

-
    -
  1. Check logs: provisioning --debug env
  2. -
  3. Validate configuration: provisioning validate config
  4. -
  5. Check system compatibility: provisioning version --verbose
  6. -
  7. Consult troubleshooting guide: docs/user/troubleshooting-guide.md
  8. -
-

Next Steps

-

After successful installation:

-
    -
  1. Complete the Getting Started Guide: docs/user/getting-started.md
  2. -
  3. Set up your first workspace: docs/user/workspace-setup.md
  4. -
  5. Learn about configuration: docs/user/configuration.md
  6. -
  7. Try example tutorials: docs/user/examples/
  8. -
-

Your provisioning is now ready to manage cloud infrastructure!

-

Installation Validation & Bootstrap Guide

-

Objective: Validate your provisioning installation, run bootstrap to initialize the workspace, and verify all components are working correctly.

-

Expected Duration: 30-45 minutes

-

Prerequisites: Fresh clone of provisioning repository at /Users/Akasha/project-provisioning

-
-

Section 1: Prerequisites Verification

-

Before running the bootstrap script, verify that your system has all required dependencies.

-

Step 1.1: Check System Requirements

-

Run these commands to verify your system meets minimum requirements:

-
# Check OS
-uname -s
-# Expected: Darwin (macOS), Linux, or WSL2
-
-# Check CPU cores
-sysctl -n hw.physicalcpu  # macOS
-# OR
-nproc  # Linux
-# Expected: 2 or more cores
-
-# Check RAM
-sysctl -n hw.memsize | awk '{print int($1 / 1024 / 1024 / 1024) " GB"}' # macOS
-# OR
-grep MemTotal /proc/meminfo | awk '{print int($2 / 1024 / 1024) " GB"}'  # Linux
-# Expected: 2 GB or more (4 GB+ recommended)
-
-# Check free disk space
-df -h | grep -E '^/dev|^Filesystem'
-# Expected: At least 2 GB free (10 GB+ recommended)
-
-

Success Criteria:

-
    -
  • OS is macOS, Linux, or WSL2
  • -
  • CPU: 2+ cores available
  • -
  • RAM: 2 GB minimum, 4+ GB recommended
  • -
  • Disk: 2 GB free minimum
  • -
-

Step 1.2: Verify Nushell Installation

-

Nushell is required for bootstrap and CLI operations:

-
command -v nu
-# Expected output: /path/to/nu
-
-nu --version
-# Expected output: 0.109.0 or higher
-
-

If Nushell is not installed:

-
# macOS (using Homebrew)
-brew install nushell
-
-# Linux (Debian/Ubuntu)
-sudo apt-get update && sudo apt-get install nushell
-
-# Linux (RHEL/CentOS)
-sudo yum install nushell
-
-# Or install from source: https://nushell.sh/book/installation.html
-
-

Step 1.3: Verify Nickel Installation

-

Nickel is required for configuration validation:

-
command -v nickel
-# Expected output: /path/to/nickel
-
-nickel --version
-# Expected output: nickel 1.x.x or higher
-
-

If Nickel is not installed:

-
# Install via Cargo (requires Rust)
-cargo install nickel-lang-cli
-
-# Or: https://nickel-lang.org/
-
-

Step 1.4: Verify Docker Installation

-

Docker is required for running containerized services:

-
command -v docker
-# Expected output: /path/to/docker
-
-docker --version
-# Expected output: Docker version 20.10 or higher
-
-

If Docker is not installed:

-

Visit Docker installation guide and install for your OS.

-

Step 1.5: Check Provisioning Binary

-

Verify the provisioning CLI binary exists:

-
ls -la /Users/Akasha/project-provisioning/provisioning/core/cli/provisioning
-# Expected: -rwxr-xr-x (executable)
-
-file /Users/Akasha/project-provisioning/provisioning/core/cli/provisioning
-# Expected: ELF 64-bit or similar binary format
-
-

If binary is not executable:

-
chmod +x /Users/Akasha/project-provisioning/provisioning/core/cli/provisioning
-
-

Prerequisites Checklist

-
[ ] OS is macOS, Linux, or WSL2
-[ ] CPU: 2+ cores available
-[ ] RAM: 2 GB minimum installed
-[ ] Disk: 2+ GB free space
-[ ] Nushell 0.109.0+ installed
-[ ] Nickel 1.x.x installed
-[ ] Docker 20.10+ installed
-[ ] Provisioning binary exists and is executable
-
-
-

Section 2: Bootstrap Installation

-

The bootstrap script automates 7 stages of installation and initialization. Run it from the project root directory.

-

Step 2.1: Navigate to Project Root

-
cd /Users/Akasha/project-provisioning
-
-

Step 2.2: Run Bootstrap Script

-
./provisioning/bootstrap/install.sh
-
-

Bootstrap Output

-

You should see output similar to this:

-
╔════════════════════════════════════════════════════════════════╗
-║              PROVISIONING BOOTSTRAP (Bash)                     ║
-╚════════════════════════════════════════════════════════════════╝
-
-📊 Stage 1: System Detection
-─────────────────────────────────────────────────────────────────
-  OS: Darwin
-  Architecture: arm64 (or x86_64)
-  CPU Cores: 8
-  Memory: 16 GB
-  ✅ System requirements met
-
-📦 Stage 2: Checking Dependencies
-─────────────────────────────────────────────────────────────────
-  Versions:
-    Docker: Docker version 28.5.2
-    Rust: rustc 1.75.0
-    Nushell: 0.109.1
-  ✅ All dependencies found
-
-📁 Stage 3: Creating Directory Structure
-─────────────────────────────────────────────────────────────────
-  ✅ Directory structure created
-
-⚙️  Stage 4: Validating Configuration
-─────────────────────────────────────────────────────────────────
-  ✅ Configuration syntax valid
-
-📤 Stage 5: Exporting Configuration to TOML
-─────────────────────────────────────────────────────────────────
-  ✅ Configuration exported
-
-🚀 Stage 6: Initializing Orchestrator Service
-─────────────────────────────────────────────────────────────────
-  ✅ Orchestrator started
-
-✅ Stage 7: Verification
-─────────────────────────────────────────────────────────────────
-  ✅ All configuration files generated
-  ✅ All required directories created
-
-╔════════════════════════════════════════════════════════════════╗
-║                   BOOTSTRAP COMPLETE ✅                        ║
-╚════════════════════════════════════════════════════════════════╝
-
-📍 Next Steps:
-
-1. Verify configuration:
-   cat /Users/Akasha/project-provisioning/workspaces/workspace_librecloud/config/config.ncl
-
-2. Check orchestrator is running:
-   curl http://localhost:9090/health
-
-3. Start provisioning:
-   provisioning server create --infra sgoyol --name web-01
-
-

What Bootstrap Does

-

The bootstrap script automatically:

-
    -
  1. Detects your system (OS, CPU, RAM, architecture)
  2. -
  3. Verifies dependencies (Docker, Rust, Nushell)
  4. -
  5. Creates workspace directories (config, state, cache)
  6. -
  7. Validates Nickel configuration (syntax checking)
  8. -
  9. Exports configuration (Nickel → TOML files)
  10. -
  11. Initializes orchestrator (starts service in background)
  12. -
  13. Verifies installation (checks all files created)
  14. -
-
-

Section 3: Installation Validation

-

After bootstrap completes, verify that all components are working correctly.

-

Step 3.1: Verify Workspace Directories

-

Bootstrap should have created workspace directories. Verify they exist:

-
cd /Users/Akasha/project-provisioning
-
-# Check all required directories
-ls -la workspaces/workspace_librecloud/.orchestrator/data/queue/
-ls -la workspaces/workspace_librecloud/.kms/
-ls -la workspaces/workspace_librecloud/.providers/
-ls -la workspaces/workspace_librecloud/.taskservs/
-ls -la workspaces/workspace_librecloud/.clusters/
-
-

Expected Output:

-
total 0
-drwxr-xr-x  2 user  group  64 Jan  7 10:30 .
-
-(directories exist and are accessible)
-
-

Step 3.2: Verify Generated Configuration Files

-

Bootstrap should have exported Nickel configuration to TOML format:

-
# Check generated files exist
-ls -la workspaces/workspace_librecloud/config/generated/
-
-# View workspace configuration
-cat workspaces/workspace_librecloud/config/generated/workspace.toml
-
-# View provider configuration
-cat workspaces/workspace_librecloud/config/generated/providers/upcloud.toml
-
-# View orchestrator configuration
-cat workspaces/workspace_librecloud/config/generated/platform/orchestrator.toml
-
-

Expected Output:

-
config/
-├── generated/
-│   ├── workspace.toml
-│   ├── providers/
-│   │   └── upcloud.toml
-│   └── platform/
-│       └── orchestrator.toml
-
-

Step 3.3: Type-Check Nickel Configuration

-

Verify Nickel configuration files have valid syntax:

-
cd /Users/Akasha/project-provisioning/workspaces/workspace_librecloud
-
-# Type-check main workspace config
-nickel typecheck config/config.ncl
-# Expected: No output (success) or clear error messages
-
-# Type-check infrastructure configs
-nickel typecheck infra/wuji/main.ncl
-nickel typecheck infra/sgoyol/main.ncl
-
-# Use workspace utility for comprehensive validation
-nu workspace.nu validate
-# Expected: ✓ All files validated successfully
-
-# Type-check all Nickel files
-nu workspace.nu typecheck
-
-

Expected Output:

-
✓ All files validated successfully
-✓ infra/wuji/main.ncl
-✓ infra/sgoyol/main.ncl
-
-

Step 3.4: Verify Orchestrator Service

-

The orchestrator service manages workflows and deployments:

-
# Check if orchestrator is running (health check)
-curl http://localhost:9090/health
-# Expected: {"status": "healthy"} or similar response
-
-# If health check fails, check orchestrator logs
-tail -f /Users/Akasha/project-provisioning/provisioning/platform/orchestrator/data/orchestrator.log
-
-# Alternative: Check if orchestrator process is running
-ps aux | grep orchestrator
-# Expected: Running orchestrator process visible
-
-

Expected Output:

-
{
-  "status": "healthy",
-  "uptime": "0:05:23"
-}
-
-

If Orchestrator Failed to Start:

-

Check logs and restart manually:

-
cd /Users/Akasha/project-provisioning/provisioning/platform/orchestrator
-
-# Check log file
-cat data/orchestrator.log
-
-# Or start orchestrator manually
-./scripts/start-orchestrator.nu --background
-
-# Verify it's running
-curl http://localhost:9090/health
-
-

Step 3.5: Install Provisioning CLI (Optional)

-

You can install the provisioning CLI globally for easier access:

-
# Option A: System-wide installation (requires sudo)
-cd /Users/Akasha/project-provisioning
-sudo ./scripts/install-provisioning.sh
-
-# Verify installation
-provisioning --version
-provisioning help
-
-# Option B: Add to PATH temporarily (current session only)
-export PATH="$PATH:/Users/Akasha/project-provisioning/provisioning/core/cli"
-
-# Verify
-provisioning --version
-
-

Expected Output:

-
provisioning version 1.0.0
-
-Usage: provisioning [OPTIONS] COMMAND
-
-Commands:
-  server     - Server management
-  workspace  - Workspace management
-  config     - Configuration management
-  help       - Show help information
-
-

Installation Validation Checklist

-
[ ] Workspace directories created (.orchestrator, .kms, .providers, .taskservs, .clusters)
-[ ] Generated TOML files exist in config/generated/
-[ ] Nickel type-checking passes (no errors)
-[ ] Workspace utility validation passes
-[ ] Orchestrator responding to health check
-[ ] Orchestrator process running
-[ ] Provisioning CLI accessible and working
-
-
-

Section 4: Troubleshooting

-

This section covers common issues and solutions.

-

Issue: “Nushell not found”

-

Symptoms:

-
./provisioning/bootstrap/install.sh: line X: nu: command not found
-
-

Solution:

-
    -
  1. Install Nushell (see Step 1.2)
  2. -
  3. Verify installation: nu --version
  4. -
  5. Retry bootstrap script
  6. -
-

Issue: “Nickel configuration validation failed”

-

Symptoms:

-
⚙️  Stage 4: Validating Configuration
-Error: Nickel configuration validation failed
-
-

Solution:

-
    -
  1. Check Nickel syntax: nickel typecheck config/config.ncl
  2. -
  3. Review error message for specific issue
  4. -
  5. Edit config file: vim config/config.ncl
  6. -
  7. Run bootstrap again
  8. -
-

Issue: “Docker not installed”

-

Symptoms:

-
❌ Docker is required but not installed
-
-

Solution:

-
    -
  1. Install Docker: Docker installation guide
  2. -
  3. Verify: docker --version
  4. -
  5. Retry bootstrap script
  6. -
-

Issue: “Configuration export failed”

-

Symptoms:

-
⚠️ Configuration export encountered issues (may continue)
-
-

Solution:

-
    -
  1. Check Nushell library paths: nu -c "use provisioning/core/nulib/lib_provisioning/config/export.nu *"
  2. -
  3. Verify export library exists: ls provisioning/core/nulib/lib_provisioning/config/export.nu
  4. -
  5. Re-export manually: -
    cd /Users/Akasha/project-provisioning
    -nu -c "
    -  use provisioning/core/nulib/lib_provisioning/config/export.nu *
    -  export-all-configs 'workspaces/workspace_librecloud'
    -"
    -
    -
  6. -
-

Issue: “Orchestrator didn’t start”

-

Symptoms:

-
🚀 Stage 6: Initializing Orchestrator Service
-⚠️ Orchestrator may not have started (check logs)
-
-curl http://localhost:9090/health
-# Connection refused
-
-

Solution:

-
    -
  1. Check for port conflicts: lsof -i :9090
  2. -
  3. If port 9090 is in use, either: -
      -
    • Stop the conflicting service
    • -
    • Change orchestrator port in configuration
    • -
    -
  4. -
  5. Check logs: tail -f provisioning/platform/orchestrator/data/orchestrator.log
  6. -
  7. Start manually: cd provisioning/platform/orchestrator && ./scripts/start-orchestrator.nu --background
  8. -
  9. Verify: curl http://localhost:9090/health
  10. -
-

Issue: “Sudo password prompt during bootstrap”

-

Symptoms:

-
Stage 3: Creating Directory Structure
-[sudo] password for user:
-
-

Solution:

-
    -
  • This is normal if creating directories in system locations
  • -
  • Enter your sudo password when prompted
  • -
  • Or: Run bootstrap from home directory instead
  • -
-

Issue: “Permission denied” on binary

-

Symptoms:

-
bash: ./provisioning/bootstrap/install.sh: Permission denied
-
-

Solution:

-
# Make script executable
-chmod +x /Users/Akasha/project-provisioning/provisioning/bootstrap/install.sh
-
-# Retry
-./provisioning/bootstrap/install.sh
-
-
-

Section 5: Next Steps

-

After successful installation validation, you can:

-

Option 1: Deploy workspace_librecloud

-

To deploy infrastructure to UpCloud:

-
# Read workspace deployment guide
-cat workspaces/workspace_librecloud/docs/deployment-guide.md
-
-# Or: From workspace directory
-cd workspaces/workspace_librecloud
-cat docs/deployment-guide.md
-
-

Option 2: Create a New Workspace

-

To create a new workspace for different infrastructure:

-
provisioning workspace init my_workspace --template minimal
-
-

Option 3: Explore Available Modules

-

Discover what’s available to deploy:

-
# List available task services
-provisioning mod discover taskservs
-
-# List available providers
-provisioning mod discover providers
-
-# List available clusters
-provisioning mod discover clusters
-
-
-

Section 6: Verification Checklist

-

After completing all steps, verify with this final checklist:

-
Prerequisites Verified:
-  [ ] OS is macOS, Linux, or WSL2
-  [ ] CPU: 2+ cores
-  [ ] RAM: 2+ GB available
-  [ ] Disk: 2+ GB free
-  [ ] Nushell 0.109.0+ installed
-  [ ] Nickel 1.x.x installed
-  [ ] Docker 20.10+ installed
-  [ ] Provisioning binary executable
-
-Bootstrap Completed:
-  [ ] All 7 stages completed successfully
-  [ ] No error messages in output
-  [ ] Installation log shows success
-
-Installation Validated:
-  [ ] Workspace directories exist
-  [ ] Generated TOML files exist
-  [ ] Nickel type-checking passes
-  [ ] Workspace validation passes
-  [ ] Orchestrator health check passes
-  [ ] Provisioning CLI works (if installed)
-
-Ready to Deploy:
-  [ ] No errors in validation steps
-  [ ] All services responding correctly
-  [ ] Configuration properly exported
-
-
-

Getting Help

-

If you encounter issues not covered here:

-
    -
  1. Check logs: tail -f provisioning/platform/orchestrator/data/orchestrator.log
  2. -
  3. Enable debug mode: provisioning --debug <command>
  4. -
  5. Review bootstrap output: Scroll up to see detailed error messages
  6. -
  7. Check documentation: provisioning help or provisioning guide <topic>
  8. -
  9. Workspace guide: cat workspaces/workspace_librecloud/docs/deployment-guide.md
  10. -
-
-

Summary

-

This guide covers:

-
    -
  • ✅ Prerequisites verification (Nushell, Nickel, Docker)
  • -
  • ✅ Bootstrap installation (7-stage automated process)
  • -
  • ✅ Installation validation (directories, configs, services)
  • -
  • ✅ Troubleshooting common issues
  • -
  • ✅ Next steps for deployment
  • -
-

You now have a fully installed and validated provisioning system ready for workspace deployment.

-

Getting Started Guide

-

Welcome to Infrastructure Automation. This guide will walk you through your first steps with infrastructure automation, from basic setup to deploying -your first infrastructure.

-

What You’ll Learn

-
    -
  • Essential concepts and terminology
  • -
  • How to configure your first environment
  • -
  • Creating and managing infrastructure
  • -
  • Basic server and service management
  • -
  • Common workflows and best practices
  • -
-

Prerequisites

-

Before starting this guide, ensure you have:

-
    -
  • ✅ Completed the Installation Guide
  • -
  • ✅ Verified your installation with provisioning --version
  • -
  • ✅ Basic familiarity with command-line interfaces
  • -
-

Essential Concepts

-

Infrastructure as Code (IaC)

-

Provisioning uses declarative configuration to manage infrastructure. Instead of manually creating resources, you define what you want in -configuration files, and the system makes it happen.

-
You describe → System creates → Infrastructure exists
-
-

Key Components

-
- - - - -
ComponentPurposeExample
ProvidersCloud platformsAWS, UpCloud, Local
ServersVirtual machinesWeb servers, databases
Task ServicesInfrastructure softwareKubernetes, Docker, databases
ClustersGrouped servicesWeb cluster, database cluster
-
-

Configuration Languages

-
    -
  • Nickel: Primary configuration language for infrastructure definitions (type-safe, validated)
  • -
  • TOML: User preferences and system settings
  • -
  • YAML: Kubernetes manifests and service definitions
  • -
-

First-Time Setup

-

Step 1: Initialize Your Configuration

-

Create your personal configuration:

-
# Initialize user configuration
-provisioning init config
-
-# This creates ~/.provisioning/config.user.toml
-
-

Step 2: Verify Your Environment

-
# Check your environment setup
-provisioning env
-
-# View comprehensive configuration
-provisioning allenv
-
-

You should see output like:

-
✅ Configuration loaded successfully
-✅ All required tools available
-📁 Base path: /usr/local/provisioning
-🏠 User config: ~/.provisioning/config.user.toml
-
-

Step 3: Explore Available Resources

-
# List available providers
-provisioning list providers
-
-# List available task services
-provisioning list taskservs
-
-# List available clusters
-provisioning list clusters
-
-

Your First Infrastructure

-

Let’s create a simple local infrastructure to learn the basics.

-

Step 1: Create a Workspace

-
# Create a new workspace directory
-mkdir ~/my-first-infrastructure
-cd ~/my-first-infrastructure
-
-# Initialize workspace
-provisioning generate infra --new local-demo
-
-

This creates:

-
local-demo/
-├── config/
-│   └── config.ncl     # Master Nickel configuration
-├── infra/
-│   └── default/
-│       ├── main.ncl   # Infrastructure definition
-│       └── servers.ncl # Server configurations
-└── docs/              # Auto-generated guides
-
-

Step 2: Examine the Configuration

-
# View the generated configuration
-provisioning show settings --infra local-demo
-
-

Step 3: Validate the Configuration

-
# Validate syntax and structure
-provisioning validate config --infra local-demo
-
-# Should show: ✅ Configuration validation passed!
-
-

Step 4: Deploy Infrastructure (Check Mode)

-
# Dry run - see what would be created
-provisioning server create --infra local-demo --check
-
-# This shows planned changes without making them
-
-

Step 5: Create Your Infrastructure

-
# Create the actual infrastructure
-provisioning server create --infra local-demo
-
-# Wait for completion
-provisioning server list --infra local-demo
-
-

Working with Services

-

Installing Your First Service

-

Let’s install a containerized service:

-
# Install Docker/containerd
-provisioning taskserv create containerd --infra local-demo
-
-# Verify installation
-provisioning taskserv list --infra local-demo
-
-

Installing Kubernetes

-

For container orchestration:

-
# Install Kubernetes
-provisioning taskserv create kubernetes --infra local-demo
-
-# This may take several minutes...
-
-

Checking Service Status

-
# Show all services on your infrastructure
-provisioning show servers --infra local-demo
-
-# Show specific service details
-provisioning show servers web-01 taskserv kubernetes --infra local-demo
-
-

Understanding Commands

-

Command Structure

-

All commands follow this pattern:

-
provisioning [global-options] <command> [command-options] [arguments]
-
-

Global Options

-
- - - - -
OptionShortDescription
--infra-iSpecify infrastructure
--check-cDry run mode
--debug-xEnable debug output
--yes-yAuto-confirm actions
-
-

Essential Commands

-
- - - - - -
CommandPurposeExample
helpShow helpprovisioning help
envShow environmentprovisioning env
listList resourcesprovisioning list servers
showShow detailsprovisioning show settings
validateValidate configprovisioning validate config
-
-

Working with Multiple Environments

-

Environment Concepts

-

The system supports multiple environments:

-
    -
  • dev - Development and testing
  • -
  • test - Integration testing
  • -
  • prod - Production deployment
  • -
-

Switching Environments

-
# Set environment for this session
-export PROVISIONING_ENV=dev
-provisioning env
-
-# Or specify per command
-provisioning --environment dev server create
-
-

Environment-Specific Configuration

-

Create environment configs:

-
# Development environment
-provisioning init config dev
-
-# Production environment
-provisioning init config prod
-
-

Common Workflows

-

Workflow 1: Development Environment

-
# 1. Create development workspace
-mkdir ~/dev-environment
-cd ~/dev-environment
-
-# 2. Generate infrastructure
-provisioning generate infra --new dev-setup
-
-# 3. Customize for development
-# Edit settings.ncl to add development tools
-
-# 4. Deploy
-provisioning server create --infra dev-setup --check
-provisioning server create --infra dev-setup
-
-# 5. Install development services
-provisioning taskserv create kubernetes --infra dev-setup
-provisioning taskserv create containerd --infra dev-setup
-
-

Workflow 2: Service Updates

-
# Check for service updates
-provisioning taskserv check-updates
-
-# Update specific service
-provisioning taskserv update kubernetes --infra dev-setup
-
-# Verify update
-provisioning taskserv versions kubernetes
-
-

Workflow 3: Infrastructure Scaling

-
# Add servers to existing infrastructure
-# Edit settings.ncl to add more servers
-
-# Apply changes
-provisioning server create --infra dev-setup
-
-# Install services on new servers
-provisioning taskserv create containerd --infra dev-setup
-
-

Interactive Mode

-

Starting Interactive Shell

-
# Start Nushell with provisioning loaded
-provisioning nu
-
-

In the interactive shell, you have access to all provisioning functions:

-
# Inside Nushell session
-use lib_provisioning *
-
-# Check environment
-show_env
-
-# List available functions
-help commands | where name =~ "provision"
-
-

Useful Interactive Commands

-
# Show detailed server information
-find_servers "web-*" | table
-
-# Get cost estimates
-servers_walk_by_costs $settings "" false false "stdout"
-
-# Check task service status
-taskservs_list | where status == "running"
-
-

Configuration Management

-

Understanding Configuration Files

-
    -
  1. System Defaults: config.defaults.toml - System-wide defaults
  2. -
  3. User Config: ~/.provisioning/config.user.toml - Your preferences
  4. -
  5. Environment Config: config.{env}.toml - Environment-specific settings
  6. -
  7. Infrastructure Config: settings.ncl - Infrastructure definitions
  8. -
-

Configuration Hierarchy

-
Infrastructure settings.ncl
-    ↓ (overrides)
-Environment config.{env}.toml
-    ↓ (overrides)
-User config.user.toml
-    ↓ (overrides)
-System config.defaults.toml
-
-

Customizing Your Configuration

-
# Edit user configuration
-provisioning sops ~/.provisioning/config.user.toml
-
-# Or using your preferred editor
-nano ~/.provisioning/config.user.toml
-
-

Example customizations:

-
[debug]
-enabled = true        # Enable debug mode by default
-log_level = "debug"   # Verbose logging
-
-[providers]
-default = "aws"       # Use AWS as default provider
-
-[output]
-format = "json"       # Prefer JSON output
-
-

Monitoring and Observability

-

Checking System Status

-
# Overall system health
-provisioning env
-
-# Infrastructure status
-provisioning show servers --infra dev-setup
-
-# Service status
-provisioning taskserv list --infra dev-setup
-
-

Logging and Debugging

-
# Enable debug mode for troubleshooting
-provisioning --debug server create --infra dev-setup --check
-
-# View logs for specific operations
-provisioning show logs --infra dev-setup
-
-

Cost Monitoring

-
# Show cost estimates
-provisioning show cost --infra dev-setup
-
-# Detailed cost breakdown
-provisioning server price --infra dev-setup
-
-

Best Practices

-

1. Configuration Management

-
    -
  • ✅ Use version control for infrastructure definitions
  • -
  • ✅ Test changes in development before production
  • -
  • ✅ Use --check mode to preview changes
  • -
  • ✅ Keep user configuration separate from infrastructure
  • -
-

2. Security

-
    -
  • ✅ Use SOPS for encrypting sensitive data
  • -
  • ✅ Regular key rotation for cloud providers
  • -
  • ✅ Principle of least privilege for access
  • -
  • ✅ Audit infrastructure changes
  • -
-

3. Operational Excellence

-
    -
  • ✅ Monitor infrastructure costs regularly
  • -
  • ✅ Keep services updated
  • -
  • ✅ Document custom configurations
  • -
  • ✅ Plan for disaster recovery
  • -
-

4. Development Workflow

-
# 1. Always validate before applying
-provisioning validate config --infra my-infra
-
-# 2. Use check mode first
-provisioning server create --infra my-infra --check
-
-# 3. Apply changes incrementally
-provisioning server create --infra my-infra
-
-# 4. Verify results
-provisioning show servers --infra my-infra
-
-

Getting Help

-

Built-in Help System

-
# General help
-provisioning help
-
-# Command-specific help
-provisioning server help
-provisioning taskserv help
-provisioning cluster help
-
-# Show available options
-provisioning generate help
-
-

Command Reference

-

For complete command documentation, see: CLI Reference

-

Troubleshooting

-

If you encounter issues, see: Troubleshooting Guide

-

Real-World Example

-

Let’s walk through a complete example of setting up a web application infrastructure:

-

Step 1: Plan Your Infrastructure

-
# Create project workspace
-mkdir ~/webapp-infrastructure
-cd ~/webapp-infrastructure
-
-# Generate base infrastructure
-provisioning generate infra --new webapp
-
-

Step 2: Customize Configuration

-

Edit webapp/settings.ncl to define:

-
    -
  • 2 web servers for load balancing
  • -
  • 1 database server
  • -
  • Load balancer configuration
  • -
-

Step 3: Deploy Base Infrastructure

-
# Validate configuration
-provisioning validate config --infra webapp
-
-# Preview deployment
-provisioning server create --infra webapp --check
-
-# Deploy servers
-provisioning server create --infra webapp
-
-

Step 4: Install Services

-
# Install container runtime on all servers
-provisioning taskserv create containerd --infra webapp
-
-# Install load balancer on web servers
-provisioning taskserv create haproxy --infra webapp
-
-# Install database on database server
-provisioning taskserv create postgresql --infra webapp
-
-

Step 5: Deploy Application

-
# Create application cluster
-provisioning cluster create webapp --infra webapp
-
-# Verify deployment
-provisioning show servers --infra webapp
-provisioning cluster list --infra webapp
-
-

Next Steps

-

Now that you understand the basics:

-
    -
  1. Set up your workspace: Workspace Setup Guide
  2. -
  3. Learn about infrastructure management: Infrastructure Management Guide
  4. -
  5. Understand configuration: Configuration Guide
  6. -
  7. Explore examples: Examples and Tutorials
  8. -
-

You’re ready to start building and managing cloud infrastructure with confidence!

-

Provisioning Platform Quick Reference

-

Version: 3.5.0 -Last Updated: 2025-10-09

-
-

Quick Navigation

- -
-

Plugin Commands

-

Native Nushell plugins for high-performance operations. 10-50x faster than HTTP API.

-

Authentication Plugin (nu_plugin_auth)

-
# Login (password prompted securely)
-auth login admin
-
-# Login with custom URL
-auth login admin --url https://control-center.example.com
-
-# Verify current session
-auth verify
-# Returns: { active: true, user: "admin", role: "Admin", expires_at: "...", mfa_verified: true }
-
-# List active sessions
-auth sessions
-
-# Logout
-auth logout
-
-# MFA enrollment
-auth mfa enroll totp       # TOTP (Google Authenticator, Authy)
-auth mfa enroll webauthn   # WebAuthn (YubiKey, Touch ID, Windows Hello)
-
-# MFA verification
-auth mfa verify --code 123456
-auth mfa verify --code ABCD-EFGH-IJKL  # Backup code
-
-

Installation:

-
cd provisioning/core/plugins/nushell-plugins
-cargo build --release -p nu_plugin_auth
-plugin add target/release/nu_plugin_auth
-
-

KMS Plugin (nu_plugin_kms)

-

Performance: 10x faster encryption (~5 ms vs ~50 ms HTTP)

-
# Encrypt with auto-detected backend
-kms encrypt "secret data"
-# vault:v1:abc123...
-
-# Encrypt with specific backend
-kms encrypt "data" --backend rustyvault --key provisioning-main
-kms encrypt "data" --backend age --key age1xxxxxxxxx
-kms encrypt "data" --backend aws --key alias/provisioning
-
-# Encrypt with context (AAD for additional security)
-kms encrypt "data" --context "user=admin,env=production"
-
-# Decrypt (auto-detects backend from format)
-kms decrypt "vault:v1:abc123..."
-kms decrypt "-----BEGIN AGE ENCRYPTED FILE-----..."
-
-# Decrypt with context (must match encryption context)
-kms decrypt "vault:v1:abc123..." --context "user=admin,env=production"
-
-# Generate data encryption key
-kms generate-key
-kms generate-key --spec AES256
-
-# Check backend status
-kms status
-
-

Supported Backends:

-
    -
  • rustyvault: High-performance (~5 ms) - Production
  • -
  • age: Local encryption (~3 ms) - Development
  • -
  • cosmian: Cloud KMS (~30 ms)
  • -
  • aws: AWS KMS (~50 ms)
  • -
  • vault: HashiCorp Vault (~40 ms)
  • -
-

Installation:

-
cargo build --release -p nu_plugin_kms
-plugin add target/release/nu_plugin_kms
-
-# Set backend environment
-export RUSTYVAULT_ADDR="http://localhost:8200"
-export RUSTYVAULT_TOKEN="hvs.xxxxx"
-
-

Orchestrator Plugin (nu_plugin_orchestrator)

-

Performance: 30-50x faster queries (~1 ms vs ~30-50 ms HTTP)

-
# Get orchestrator status (direct file access, ~1 ms)
-orch status
-# { active_tasks: 5, completed_tasks: 120, health: "healthy" }
-
-# Validate workflow Nickel file (~10 ms vs ~100 ms HTTP)
-orch validate workflows/deploy.ncl
-orch validate workflows/deploy.ncl --strict
-
-# List tasks (direct file read, ~5 ms)
-orch tasks
-orch tasks --status running
-orch tasks --status failed --limit 10
-
-

Installation:

-
cargo build --release -p nu_plugin_orchestrator
-plugin add target/release/nu_plugin_orchestrator
-
-

Plugin Performance Comparison

-
- - - - - - -
OperationHTTP APIPluginSpeedup
KMS Encrypt~50 ms~5 ms10x
KMS Decrypt~50 ms~5 ms10x
Orch Status~30 ms~1 ms30x
Orch Validate~100 ms~10 ms10x
Orch Tasks~50 ms~5 ms10x
Auth Verify~50 ms~10 ms5x
-
-
-

CLI Shortcuts

-

Infrastructure Shortcuts

-
# Server shortcuts
-provisioning s              # server (same as 'provisioning server')
-provisioning s create       # Create servers
-provisioning s delete       # Delete servers
-provisioning s list         # List servers
-provisioning s ssh web-01   # SSH into server
-
-# Taskserv shortcuts
-provisioning t              # taskserv (same as 'provisioning taskserv')
-provisioning task           # taskserv (alias)
-provisioning t create kubernetes
-provisioning t delete kubernetes
-provisioning t list
-provisioning t generate kubernetes
-provisioning t check-updates
-
-# Cluster shortcuts
-provisioning cl             # cluster (same as 'provisioning cluster')
-provisioning cl create buildkit
-provisioning cl delete buildkit
-provisioning cl list
-
-# Infrastructure shortcuts
-provisioning i              # infra (same as 'provisioning infra')
-provisioning infras         # infra (alias)
-provisioning i list
-provisioning i validate
-
-

Orchestration Shortcuts

-
# Workflow shortcuts
-provisioning wf             # workflow (same as 'provisioning workflow')
-provisioning flow           # workflow (alias)
-provisioning wf list
-provisioning wf status <task_id>
-provisioning wf monitor <task_id>
-provisioning wf stats
-provisioning wf cleanup
-
-# Batch shortcuts
-provisioning bat            # batch (same as 'provisioning batch')
-provisioning batch submit workflows/example.ncl
-provisioning bat list
-provisioning bat status <workflow_id>
-provisioning bat monitor <workflow_id>
-provisioning bat rollback <workflow_id>
-provisioning bat cancel <workflow_id>
-provisioning bat stats
-
-# Orchestrator shortcuts
-provisioning orch           # orchestrator (same as 'provisioning orchestrator')
-provisioning orch start
-provisioning orch stop
-provisioning orch status
-provisioning orch health
-provisioning orch logs
-
-

Development Shortcuts

-
# Module shortcuts
-provisioning mod            # module (same as 'provisioning module')
-provisioning mod discover taskserv
-provisioning mod discover provider
-provisioning mod discover cluster
-provisioning mod load taskserv workspace kubernetes
-provisioning mod list taskserv workspace
-provisioning mod unload taskserv workspace kubernetes
-provisioning mod sync-kcl
-
-# Layer shortcuts
-provisioning lyr            # layer (same as 'provisioning layer')
-provisioning lyr explain
-provisioning lyr show
-provisioning lyr test
-provisioning lyr stats
-
-# Version shortcuts
-provisioning version check
-provisioning version show
-provisioning version updates
-provisioning version apply <name> <version>
-provisioning version taskserv <name>
-
-# Package shortcuts
-provisioning pack core
-provisioning pack provider upcloud
-provisioning pack list
-provisioning pack clean
-
-

Workspace Shortcuts

-
# Workspace shortcuts
-provisioning ws             # workspace (same as 'provisioning workspace')
-provisioning ws init
-provisioning ws create <name>
-provisioning ws validate
-provisioning ws info
-provisioning ws list
-provisioning ws migrate
-provisioning ws switch <name>  # Switch active workspace
-provisioning ws active         # Show active workspace
-
-# Template shortcuts
-provisioning tpl            # template (same as 'provisioning template')
-provisioning tmpl           # template (alias)
-provisioning tpl list
-provisioning tpl types
-provisioning tpl show <name>
-provisioning tpl apply <name>
-provisioning tpl validate <name>
-
-

Configuration Shortcuts

-
# Environment shortcuts
-provisioning e              # env (same as 'provisioning env')
-provisioning val            # validate (same as 'provisioning validate')
-provisioning st             # setup (same as 'provisioning setup')
-provisioning config         # setup (alias)
-
-# Show shortcuts
-provisioning show settings
-provisioning show servers
-provisioning show config
-
-# Initialization
-provisioning init <name>
-
-# All environment
-provisioning allenv         # Show all config and environment
-
-

Utility Shortcuts

-
# List shortcuts
-provisioning l              # list (same as 'provisioning list')
-provisioning ls             # list (alias)
-provisioning list           # list (full)
-
-# SSH operations
-provisioning ssh <server>
-
-# SOPS operations
-provisioning sops <file>    # Edit encrypted file
-
-# Cache management
-provisioning cache clear
-provisioning cache stats
-
-# Provider operations
-provisioning providers list
-provisioning providers info <name>
-
-# Nushell session
-provisioning nu             # Start Nushell with provisioning library loaded
-
-# QR code generation
-provisioning qr <data>
-
-# Nushell information
-provisioning nuinfo
-
-# Plugin management
-provisioning plugin         # plugin (same as 'provisioning plugin')
-provisioning plugins        # plugin (alias)
-provisioning plugin list
-provisioning plugin test nu_plugin_kms
-
-

Generation Shortcuts

-
# Generate shortcuts
-provisioning g              # generate (same as 'provisioning generate')
-provisioning gen            # generate (alias)
-provisioning g server
-provisioning g taskserv <name>
-provisioning g cluster <name>
-provisioning g infra --new <name>
-provisioning g new <type> <name>
-
-

Action Shortcuts

-
# Common actions
-provisioning c              # create (same as 'provisioning create')
-provisioning d              # delete (same as 'provisioning delete')
-provisioning u              # update (same as 'provisioning update')
-
-# Pricing shortcuts
-provisioning price          # Show server pricing
-provisioning cost           # price (alias)
-provisioning costs          # price (alias)
-
-# Create server + taskservs (combo command)
-provisioning cst            # create-server-task
-provisioning csts           # create-server-task (alias)
-
-
-

Infrastructure Commands

-

Server Management

-
# Create servers
-provisioning server create
-provisioning server create --check  # Dry-run mode
-provisioning server create --yes    # Skip confirmation
-
-# Delete servers
-provisioning server delete
-provisioning server delete --check
-provisioning server delete --yes
-
-# List servers
-provisioning server list
-provisioning server list --infra wuji
-provisioning server list --out json
-
-# SSH into server
-provisioning server ssh web-01
-provisioning server ssh db-01
-
-# Show pricing
-provisioning server price
-provisioning server price --provider upcloud
-
-

Taskserv Management

-
# Create taskserv
-provisioning taskserv create kubernetes
-provisioning taskserv create kubernetes --check
-provisioning taskserv create kubernetes --infra wuji
-
-# Delete taskserv
-provisioning taskserv delete kubernetes
-provisioning taskserv delete kubernetes --check
-
-# List taskservs
-provisioning taskserv list
-provisioning taskserv list --infra wuji
-
-# Generate taskserv configuration
-provisioning taskserv generate kubernetes
-provisioning taskserv generate kubernetes --out yaml
-
-# Check for updates
-provisioning taskserv check-updates
-provisioning taskserv check-updates --taskserv kubernetes
-
-

Cluster Management

-
# Create cluster
-provisioning cluster create buildkit
-provisioning cluster create buildkit --check
-provisioning cluster create buildkit --infra wuji
-
-# Delete cluster
-provisioning cluster delete buildkit
-provisioning cluster delete buildkit --check
-
-# List clusters
-provisioning cluster list
-provisioning cluster list --infra wuji
-
-
-

Orchestration Commands

-

Workflow Management

-
# Submit server creation workflow
-nu -c "use core/nulib/workflows/server_create.nu *; server_create_workflow 'wuji' '' [] --check"
-
-# Submit taskserv workflow
-nu -c "use core/nulib/workflows/taskserv.nu *; taskserv create 'kubernetes' 'wuji' --check"
-
-# Submit cluster workflow
-nu -c "use core/nulib/workflows/cluster.nu *; cluster create 'buildkit' 'wuji' --check"
-
-# List all workflows
-provisioning workflow list
-nu -c "use core/nulib/workflows/management.nu *; workflow list"
-
-# Get workflow statistics
-provisioning workflow stats
-nu -c "use core/nulib/workflows/management.nu *; workflow stats"
-
-# Monitor workflow in real-time
-provisioning workflow monitor <task_id>
-nu -c "use core/nulib/workflows/management.nu *; workflow monitor <task_id>"
-
-# Check orchestrator health
-provisioning workflow orchestrator
-nu -c "use core/nulib/workflows/management.nu *; workflow orchestrator"
-
-# Get specific workflow status
-provisioning workflow status <task_id>
-nu -c "use core/nulib/workflows/management.nu *; workflow status <task_id>"
-
-

Batch Operations

-
# Submit batch workflow from Nickel
-provisioning batch submit workflows/example_batch.ncl
-nu -c "use core/nulib/workflows/batch.nu *; batch submit workflows/example_batch.ncl"
-
-# Monitor batch workflow progress
-provisioning batch monitor <workflow_id>
-nu -c "use core/nulib/workflows/batch.nu *; batch monitor <workflow_id>"
-
-# List batch workflows with filtering
-provisioning batch list
-provisioning batch list --status Running
-nu -c "use core/nulib/workflows/batch.nu *; batch list --status Running"
-
-# Get detailed batch status
-provisioning batch status <workflow_id>
-nu -c "use core/nulib/workflows/batch.nu *; batch status <workflow_id>"
-
-# Initiate rollback for failed workflow
-provisioning batch rollback <workflow_id>
-nu -c "use core/nulib/workflows/batch.nu *; batch rollback <workflow_id>"
-
-# Cancel running batch
-provisioning batch cancel <workflow_id>
-
-# Show batch workflow statistics
-provisioning batch stats
-nu -c "use core/nulib/workflows/batch.nu *; batch stats"
-
-

Orchestrator Management

-
# Start orchestrator in background
-cd provisioning/platform/orchestrator
-./scripts/start-orchestrator.nu --background
-
-# Check orchestrator status
-./scripts/start-orchestrator.nu --check
-provisioning orchestrator status
-
-# Stop orchestrator
-./scripts/start-orchestrator.nu --stop
-provisioning orchestrator stop
-
-# View logs
-tail -f provisioning/platform/orchestrator/data/orchestrator.log
-provisioning orchestrator logs
-
-
-

Configuration Commands

-

Environment and Validation

-
# Show environment variables
-provisioning env
-
-# Show all environment and configuration
-provisioning allenv
-
-# Validate configuration
-provisioning validate config
-provisioning validate infra
-
-# Setup wizard
-provisioning setup
-
-

Configuration Files

-
# System defaults
-less provisioning/config/config.defaults.toml
-
-# User configuration
-vim workspace/config/local-overrides.toml
-
-# Environment-specific configs
-vim workspace/config/dev-defaults.toml
-vim workspace/config/test-defaults.toml
-vim workspace/config/prod-defaults.toml
-
-# Infrastructure-specific config
-vim workspace/infra/<name>/config.toml
-
-

HTTP Configuration

-
# Configure HTTP client behavior
-# In workspace/config/local-overrides.toml:
-[http]
-use_curl = true  # Use curl instead of ureq
-
-
-

Workspace Commands

-

Workspace Management

-
# List all workspaces
-provisioning workspace list
-
-# Show active workspace
-provisioning workspace active
-
-# Switch to another workspace
-provisioning workspace switch <name>
-provisioning workspace activate <name>  # alias
-
-# Register new workspace
-provisioning workspace register <name> <path>
-provisioning workspace register <name> <path> --activate
-
-# Remove workspace from registry
-provisioning workspace remove <name>
-provisioning workspace remove <name> --force
-
-# Initialize new workspace
-provisioning workspace init
-provisioning workspace init --name production
-
-# Create new workspace
-provisioning workspace create <name>
-
-# Validate workspace
-provisioning workspace validate
-
-# Show workspace info
-provisioning workspace info
-
-# Migrate workspace
-provisioning workspace migrate
-
-

User Preferences

-
# View user preferences
-provisioning workspace preferences
-
-# Set user preference
-provisioning workspace set-preference editor vim
-provisioning workspace set-preference output_format yaml
-provisioning workspace set-preference confirm_delete true
-
-# Get user preference
-provisioning workspace get-preference editor
-
-

User Config Location:

-
    -
  • macOS: ~/Library/Application Support/provisioning/user_config.yaml
  • -
  • Linux: ~/.config/provisioning/user_config.yaml
  • -
  • Windows: %APPDATA%\provisioning\user_config.yaml
  • -
-
-

Security Commands

-

Authentication (via CLI)

-
# Login
-provisioning login admin
-
-# Logout
-provisioning logout
-
-# Show session status
-provisioning auth status
-
-# List active sessions
-provisioning auth sessions
-
-

Multi-Factor Authentication (MFA)

-
# Enroll in TOTP (Google Authenticator, Authy)
-provisioning mfa totp enroll
-
-# Enroll in WebAuthn (YubiKey, Touch ID, Windows Hello)
-provisioning mfa webauthn enroll
-
-# Verify MFA code
-provisioning mfa totp verify --code 123456
-provisioning mfa webauthn verify
-
-# List registered devices
-provisioning mfa devices
-
-

Secrets Management

-
# Generate AWS STS credentials (15 min-12h TTL)
-provisioning secrets generate aws --ttl 1hr
-
-# Generate SSH key pair (Ed25519)
-provisioning secrets generate ssh --ttl 4hr
-
-# List active secrets
-provisioning secrets list
-
-# Revoke secret
-provisioning secrets revoke <secret_id>
-
-# Cleanup expired secrets
-provisioning secrets cleanup
-
-

SSH Temporal Keys

-
# Connect to server with temporal key
-provisioning ssh connect server01 --ttl 1hr
-
-# Generate SSH key pair only
-provisioning ssh generate --ttl 4hr
-
-# List active SSH keys
-provisioning ssh list
-
-# Revoke SSH key
-provisioning ssh revoke <key_id>
-
-

KMS Operations (via CLI)

-
# Encrypt configuration file
-provisioning kms encrypt secure.yaml
-
-# Decrypt configuration file
-provisioning kms decrypt secure.yaml.enc
-
-# Encrypt entire config directory
-provisioning config encrypt workspace/infra/production/
-
-# Decrypt config directory
-provisioning config decrypt workspace/infra/production/
-
-

Break-Glass Emergency Access

-
# Request emergency access
-provisioning break-glass request "Production database outage"
-
-# Approve emergency request (requires admin)
-provisioning break-glass approve <request_id> --reason "Approved by CTO"
-
-# List break-glass sessions
-provisioning break-glass list
-
-# Revoke break-glass session
-provisioning break-glass revoke <session_id>
-
-

Compliance and Audit

-
# Generate compliance report
-provisioning compliance report
-provisioning compliance report --standard gdpr
-provisioning compliance report --standard soc2
-provisioning compliance report --standard iso27001
-
-# GDPR operations
-provisioning compliance gdpr export <user_id>
-provisioning compliance gdpr delete <user_id>
-provisioning compliance gdpr rectify <user_id>
-
-# Incident management
-provisioning compliance incident create "Security breach detected"
-provisioning compliance incident list
-provisioning compliance incident update <incident_id> --status investigating
-
-# Audit log queries
-provisioning audit query --user alice --action deploy --from 24h
-provisioning audit export --format json --output audit-logs.json
-
-
-

Common Workflows

-

Complete Deployment from Scratch

-
# 1. Initialize workspace
-provisioning workspace init --name production
-
-# 2. Validate configuration
-provisioning validate config
-
-# 3. Create infrastructure definition
-provisioning generate infra --new production
-
-# 4. Create servers (check mode first)
-provisioning server create --infra production --check
-
-# 5. Create servers (actual deployment)
-provisioning server create --infra production --yes
-
-# 6. Install Kubernetes
-provisioning taskserv create kubernetes --infra production --check
-provisioning taskserv create kubernetes --infra production
-
-# 7. Deploy cluster services
-provisioning cluster create production --check
-provisioning cluster create production
-
-# 8. Verify deployment
-provisioning server list --infra production
-provisioning taskserv list --infra production
-
-# 9. SSH to servers
-provisioning server ssh k8s-master-01
-
-

Multi-Environment Deployment

-
# Deploy to dev
-provisioning server create --infra dev --check
-provisioning server create --infra dev
-provisioning taskserv create kubernetes --infra dev
-
-# Deploy to staging
-provisioning server create --infra staging --check
-provisioning server create --infra staging
-provisioning taskserv create kubernetes --infra staging
-
-# Deploy to production (with confirmation)
-provisioning server create --infra production --check
-provisioning server create --infra production
-provisioning taskserv create kubernetes --infra production
-
-

Update Infrastructure

-
# 1. Check for updates
-provisioning taskserv check-updates
-
-# 2. Update specific taskserv (check mode)
-provisioning taskserv update kubernetes --check
-
-# 3. Apply update
-provisioning taskserv update kubernetes
-
-# 4. Verify update
-provisioning taskserv list --infra production | where name == kubernetes
-
-

Encrypted Secrets Deployment

-
# 1. Authenticate
-auth login admin
-auth mfa verify --code 123456
-
-# 2. Encrypt secrets
-kms encrypt (open secrets/production.yaml) --backend rustyvault | save secrets/production.enc
-
-# 3. Deploy with encrypted secrets
-provisioning cluster create production --secrets secrets/production.enc
-
-# 4. Verify deployment
-orch tasks --status completed
-
-
-

Debug and Check Mode

-

Debug Mode

-

Enable verbose logging with --debug or -x flag:

-
# Server creation with debug output
-provisioning server create --debug
-provisioning server create -x
-
-# Taskserv creation with debug
-provisioning taskserv create kubernetes --debug
-
-# Show detailed error traces
-provisioning --debug taskserv create kubernetes
-
-

Check Mode (Dry Run)

-

Preview changes without applying them with --check or -c flag:

-
# Check what servers would be created
-provisioning server create --check
-provisioning server create -c
-
-# Check taskserv installation
-provisioning taskserv create kubernetes --check
-
-# Check cluster creation
-provisioning cluster create buildkit --check
-
-# Combine with debug for detailed preview
-provisioning server create --check --debug
-
-

Auto-Confirm Mode

-

Skip confirmation prompts with --yes or -y flag:

-
# Auto-confirm server creation
-provisioning server create --yes
-provisioning server create -y
-
-# Auto-confirm deletion
-provisioning server delete --yes
-
-

Wait Mode

-

Wait for operations to complete with --wait or -w flag:

-
# Wait for server creation to complete
-provisioning server create --wait
-
-# Wait for taskserv installation
-provisioning taskserv create kubernetes --wait
-
-

Infrastructure Selection

-

Specify target infrastructure with --infra or -i flag:

-
# Create servers in specific infrastructure
-provisioning server create --infra production
-provisioning server create -i production
-
-# List servers in specific infrastructure
-provisioning server list --infra production
-
-
-

Output Formats

-

JSON Output

-
# Output as JSON
-provisioning server list --out json
-provisioning taskserv list --out json
-
-# Pipeline JSON output
-provisioning server list --out json | jq '.[] | select(.status == "running")'
-
-

YAML Output

-
# Output as YAML
-provisioning server list --out yaml
-provisioning taskserv list --out yaml
-
-# Pipeline YAML output
-provisioning server list --out yaml | yq '.[] | select(.status == "running")'
-
-

Table Output (Default)

-
# Output as table (default)
-provisioning server list
-provisioning server list --out table
-
-# Pretty-printed table
-provisioning server list | table
-
-

Text Output

-
# Output as plain text
-provisioning server list --out text
-
-
-

Performance Tips

-

Use Plugins for Frequent Operations

-
# ❌ Slow: HTTP API (50 ms per call)
-for i in 1..100 { http post http://localhost:9998/encrypt { data: "secret" } }
-
-# ✅ Fast: Plugin (5 ms per call, 10x faster)
-for i in 1..100 { kms encrypt "secret" }
-
-

Batch Operations

-
# Use batch workflows for multiple operations
-provisioning batch submit workflows/multi-cloud-deploy.ncl
-
-

Check Mode for Testing

-
# Always test with --check first
-provisioning server create --check
-provisioning server create  # Only after verification
-
-
-

Help System

-

Command-Specific Help

-
# Show help for specific command
-provisioning help server
-provisioning help taskserv
-provisioning help cluster
-provisioning help workflow
-provisioning help batch
-
-# Show help for command category
-provisioning help infra
-provisioning help orch
-provisioning help dev
-provisioning help ws
-provisioning help config
-
-

Bi-Directional Help

-
# All these work identically:
-provisioning help workspace
-provisioning workspace help
-provisioning ws help
-provisioning help ws
-
-

General Help

-
# Show all commands
-provisioning help
-provisioning --help
-
-# Show version
-provisioning version
-provisioning --version
-
-
-

Quick Reference: Common Flags

-
- - - - - - -
FlagShortDescriptionExample
--debug-xEnable debug modeprovisioning server create --debug
--check-cCheck mode (dry run)provisioning server create --check
--yes-yAuto-confirmprovisioning server delete --yes
--wait-wWait for completionprovisioning server create --wait
--infra-iSpecify infrastructureprovisioning server list --infra prod
--out-Output formatprovisioning server list --out json
-
-
-

Plugin Installation Quick Reference

-
# Build all plugins (one-time setup)
-cd provisioning/core/plugins/nushell-plugins
-cargo build --release --all
-
-# Register plugins
-plugin add target/release/nu_plugin_auth
-plugin add target/release/nu_plugin_kms
-plugin add target/release/nu_plugin_orchestrator
-
-# Verify installation
-plugin list | where name =~ "auth|kms|orch"
-auth --help
-kms --help
-orch --help
-
-# Set environment
-export RUSTYVAULT_ADDR="http://localhost:8200"
-export RUSTYVAULT_TOKEN="hvs.xxxxx"
-export CONTROL_CENTER_URL="http://localhost:3000"
-
-
- -
    -
  • Complete Plugin Guide: docs/user/PLUGIN_INTEGRATION_GUIDE.md
  • -
  • Plugin Reference: docs/user/NUSHELL_PLUGINS_GUIDE.md
  • -
  • From Scratch Guide: docs/guides/from-scratch.md
  • -
  • Update Infrastructure: Update Guide
  • -
  • Customize Infrastructure: Customize Guide
  • -
  • CLI Architecture: CLI Reference
  • -
  • Security System: Security Architecture
  • -
-
-

For fastest access to this guide: provisioning sc

-

Last Updated: 2025-10-09 -Maintained By: Platform Team

-

Setup Quick Start - 5 Minutes to Deployment

-

Goal: Get provisioning running in 5 minutes with a working example

-

Step 1: Check Prerequisites (30 seconds)

-
# Check Nushell
-nu --version   # Should be 0.109.0+
-
-# Check deployment tool
-docker --version    # OR
-kubectl version     # OR
-ssh -V              # OR
-systemctl --version
-
-

Step 2: Install Provisioning (1 minute)

-
# Option A: Using installer script
-curl -sSL https://install.provisioning.dev | bash
-
-# Option B: From source
-git clone https://github.com/project-provisioning/provisioning
-cd provisioning
-./scripts/install.sh
-
-

Step 3: Initialize System (2 minutes)

-
# Run interactive setup
-provisioning setup system --interactive
-
-# Follow the prompts:
-# - Press Enter for defaults
-# - Select your deployment tool
-# - Enter provider credentials (if using cloud)
-
-

Step 4: Create Your First Workspace (1 minute)

-
# Create workspace
-provisioning setup workspace myapp
-
-# Verify it was created
-provisioning workspace list
-
-

Step 5: Deploy Your First Server (1 minute)

-
# Activate workspace
-provisioning workspace activate myapp
-
-# Check configuration
-provisioning setup validate
-
-# Deploy server (dry-run first)
-provisioning server create --check
-
-# Deploy for real
-provisioning server create --yes
-
-

Verify Everything Works

-
# Check health
-provisioning platform health
-
-# Check servers
-provisioning server list
-
-# SSH into server (if applicable)
-provisioning server ssh <server-name>
-
-

Common Commands Cheat Sheet

-
# Workspace management
-provisioning workspace list              # List all workspaces
-provisioning workspace activate prod     # Switch workspace
-provisioning workspace create dev        # Create new workspace
-
-# Server management
-provisioning server list                 # List servers
-provisioning server create               # Create server
-provisioning server delete <name>        # Delete server
-provisioning server ssh <name>           # SSH into server
-
-# Configuration
-provisioning setup validate              # Validate configuration
-provisioning setup update platform       # Update platform settings
-
-# System info
-provisioning info                        # System information
-provisioning capability check            # Check capabilities
-provisioning platform health             # Check platform health
-
-

Troubleshooting Quick Fixes

-

Setup wizard won’t start

-
# Check Nushell
-nu --version
-
-# Check permissions
-chmod +x $(which provisioning)
-
-

Configuration error

-
# Validate configuration
-provisioning setup validate --verbose
-
-# Check paths
-provisioning info paths
-
-

Deployment fails

-
# Dry-run to see what would happen
-provisioning server create --check
-
-# Check platform status
-provisioning platform status
-
-

What’s Next

-

After basic setup:

-
    -
  1. Configure Provider: Add cloud provider credentials
  2. -
  3. Create More Workspaces: Dev, staging, production
  4. -
  5. Deploy Services: Web servers, databases, etc.
  6. -
  7. Set Up Monitoring: Health checks, logging
  8. -
  9. Automate Deployments: CI/CD integration
  10. -
-

Need Help

-
# Get help
-provisioning help
-
-# Setup help
-provisioning help setup
-
-# Specific command help
-provisioning <command> --help
-
-# View documentation
-provisioning guide system-setup
-
-

Key Files

-

Your configuration is in:

-

macOS: ~/Library/Application Support/provisioning/ -Linux: ~/.config/provisioning/

-

Important files:

-
    -
  • system.toml - System configuration
  • -
  • user_preferences.toml - User settings
  • -
  • workspaces/*/ - Workspace definitions
  • -
-
-

Ready to dive deeper? Check out the Full Setup Guide

-

Provisioning Setup System Guide

-

Version: 1.0.0 -Last Updated: 2025-12-09 -Status: Production Ready

-

Quick Start

-

Prerequisites

-
    -
  • Nushell 0.109.0+
  • -
  • bash
  • -
  • One deployment tool: Docker, Kubernetes, SSH, or systemd
  • -
  • Optional: KCL, SOPS, Age
  • -
-

30-Second Setup

-
# Install provisioning
-curl -sSL https://install.provisioning.dev | bash
-
-# Run setup wizard
-provisioning setup system --interactive
-
-# Create workspace
-provisioning setup workspace myproject
-
-# Start deploying
-provisioning server create
-
-

Configuration Paths

-

macOS: ~/Library/Application Support/provisioning/ -Linux: ~/.config/provisioning/ -Windows: %APPDATA%/provisioning/

-

Directory Structure

-
provisioning/
-├── system.toml                  # System info (immutable)
-├── user_preferences.toml        # User settings (editable)
-├── platform/                    # Platform services
-├── providers/                   # Provider configs
-└── workspaces/                  # Workspace definitions
-    └── myproject/
-        ├── config/
-        ├── infra/
-        └── auth.token
-
-

Setup Wizard

-

Run the interactive setup wizard:

-
provisioning setup system --interactive
-
-

The wizard guides you through:

-
    -
  1. Welcome & Prerequisites Check
  2. -
  3. Operating System Detection
  4. -
  5. Configuration Path Selection
  6. -
  7. Platform Services Setup
  8. -
  9. Provider Selection
  10. -
  11. Security Configuration
  12. -
  13. Review & Confirmation
  14. -
-

Configuration Management

-

Hierarchy (highest to lowest priority)

-
    -
  1. Runtime Arguments (--flag value)
  2. -
  3. Environment Variables (PROVISIONING_*)
  4. -
  5. Workspace Configuration
  6. -
  7. Workspace Authentication Token
  8. -
  9. User Preferences (user_preferences.toml)
  10. -
  11. Platform Configurations (platform/*.toml)
  12. -
  13. Provider Configurations (providers/*.toml)
  14. -
  15. System Configuration (system.toml)
  16. -
  17. Built-in Defaults
  18. -
-

Configuration Files

-
    -
  • system.toml - System information (OS, architecture, paths)
  • -
  • user_preferences.toml - User preferences (editor, format, etc.)
  • -
  • platform/*.toml - Service endpoints and configuration
  • -
  • providers/*.toml - Cloud provider settings
  • -
-

Multiple Workspaces

-

Create and manage multiple isolated environments:

-
# Create workspace
-provisioning setup workspace dev
-provisioning setup workspace prod
-
-# List workspaces
-provisioning workspace list
-
-# Activate workspace
-provisioning workspace activate prod
-
-

Configuration Updates

-

Update any setting:

-
# Update platform configuration
-provisioning setup platform --config new-config.toml
-
-# Update provider settings
-provisioning setup provider upcloud --config upcloud-config.toml
-
-# Validate changes
-provisioning setup validate
-
-

Backup & Restore

-
# Backup current configuration
-provisioning setup backup --path ./backup.tar.gz
-
-# Restore from backup
-provisioning setup restore --path ./backup.tar.gz
-
-# Migrate from old setup
-provisioning setup migrate --from-existing
-
-

Troubleshooting

-

“Command not found: provisioning”

-
export PATH="/usr/local/bin:$PATH"
-
-

“Nushell not found”

-
curl -sSL https://raw.githubusercontent.com/nushell/nushell/main/install.sh | bash
-
-

“Cannot write to directory”

-
chmod 755 ~/Library/Application\ Support/provisioning/
-
-

Check required tools

-
provisioning setup validate --check-tools
-
-

FAQ

-

Q: Do I need all optional tools? -A: No. You need at least one deployment tool (Docker, Kubernetes, SSH, or systemd).

-

Q: Can I use provisioning without Docker? -A: Yes. Provisioning supports Docker, Kubernetes, SSH, systemd, or combinations.

-

Q: How do I update configuration? -A: provisioning setup update <category>

-

Q: Can I have multiple workspaces? -A: Yes, unlimited workspaces.

-

Q: Is my configuration secure? -A: Yes. Credentials stored securely, never in config files.

-

Q: Can I share workspaces with my team? -A: Yes, via GitOps - configurations in Git, secrets in secure storage.

-

Getting Help

-
# General help
-provisioning help
-
-# Setup help
-provisioning help setup
-
-# Specific command help
-provisioning setup system --help
-
-

Next Steps

-
    -
  1. Installation Guide
  2. -
  3. Workspace Setup
  4. -
  5. Provider Configuration
  6. -
  7. From Scratch Guide
  8. -
-
-

Status: Production Ready ✅ -Version: 1.0.0 -Last Updated: 2025-12-09

-

Quick Start

-

This guide has moved to a multi-chapter format for better readability.

-

📖 Navigate to Quick Start Guide

-

Please see the complete quick start guide here:

-
    -
  • Prerequisites - System requirements and setup
  • -
  • Installation - Install provisioning platform
  • -
  • First Deployment - Deploy your first infrastructure
  • -
  • Verification - Verify your deployment
  • -
-

Quick Commands

-
# Check system status
 provisioning status
 
-# Get next step suggestions
-provisioning next
+# 3. Create workspace (30 seconds)
+provisioning workspace create --name demo
 
-# View interactive guide
-provisioning guide from-scratch
+# 4. Add cloud provider (1 minute)
+provisioning config set --workspace demo \
+  providers.aws.region us-east-1 \
+  providers.aws.credentials_source aws_iam
+
+# 5. Deploy infrastructure (1 minute)
+provisioning deploy --workspace demo \
+  --config examples/simple-instance.ncl
+
+# 6. Verify (30 seconds)
+provisioning resource list --workspace demo
 
-
-

For the complete step-by-step walkthrough, start with Prerequisites.

-

Prerequisites

-

Before installing the Provisioning Platform, ensure your system meets the following requirements.

-

Hardware Requirements

-

Minimum Requirements (Solo Mode)

+

For detailed walkthrough, see Quick Start.

+

Installation Methods

+ +
# Download and extract
+curl -fsSL  [https://provisioning.io/provisioning-latest-linux.tar.gz](https://provisioning.io/provisioning-latest-linux.tar.gz) | tar xz
+sudo mv provisioning /usr/local/bin/
+provisioning --version
+
+

Option 2: Container

+
docker run -it provisioning/provisioning:latest \
+  provisioning --version
+
+

Option 3: Build from Source

+
git clone  [https://github.com/provisioning/provisioning.git](https://github.com/provisioning/provisioning.git)
+cd provisioning
+cargo build --release
+./target/release/provisioning --version
+
+

See Installation for detailed instructions.

+

Next Steps After Installation

+
    +
  1. Read Quick Start - 5-minute walkthrough
  2. +
  3. Complete First Deployment - Deploy real infrastructure
  4. +
  5. Run Verification - Validate system health
  6. +
  7. Move to Guides - Learn advanced features
  8. +
  9. Explore Examples - Real-world scenarios
  10. +
+

Common Questions

+

Q: How long does installation take? +A: 5-10 minutes including cloud credential setup.

+

Q: What if I don’t have a cloud account? +A: Try our demo provider in local mode - no cloud account needed.

+

Q: Can I use Provisioning offline? +A: Yes, with local provider. Cloud operations require internet.

+

Q: What’s the learning curve? +A: 30 minutes for basics, days to master advanced features.

+

Q: Where do I get help? +A: See Getting Help or Troubleshooting.

+

Architecture Overview

+

Provisioning works in these steps:

+
1. Install Platform
+   ↓
+2. Create Workspace
+   ↓
+3. Add Cloud Provider Credentials
+   ↓
+4. Write Nickel Configuration
+   ↓
+5. Deploy Infrastructure
+   ↓
+6. Monitor & Manage
+
+

What’s Next

+

After getting started:

    -
  • CPU: 2 cores
  • -
  • RAM: 4 GB
  • -
  • Disk: 20 GB available space
  • -
  • Network: Internet connection for downloading dependencies
  • +
  • Learn features → See Features
  • +
  • Build infrastructure → See Examples
  • +
  • Write guides → See Guides
  • +
  • Understand architecture → See Architecture
  • +
  • Develop extensions → See Development
- +

Getting Help

+

If you get stuck:

+
    +
  1. Check Troubleshooting
  2. +
  3. Review Guides for similar scenarios
  4. +
  5. Search Examples for your use case
  6. +
  7. Ask in community forums or open a GitHub issue
  8. +
+
    -
  • CPU: 4 cores
  • -
  • RAM: 8 GB
  • -
  • Disk: 50 GB available space
  • -
  • Network: Reliable internet connection
  • +
  • Full Guides → See provisioning/docs/src/guides/
  • +
  • Examples → See provisioning/docs/src/examples/
  • +
  • Architecture → See provisioning/docs/src/architecture/
  • +
  • Features → See provisioning/docs/src/features/
  • +
  • API Reference → See provisioning/docs/src/api-reference/
-

Production Requirements (Enterprise Mode)

+

Prerequisites

+

Before installing the Provisioning platform, ensure your system meets the following requirements.

+

Required Software

+

Nushell 0.109.1+

+

Nushell is the primary shell and scripting environment for the platform.

+

Installation:

+
# macOS (Homebrew)
+brew install nushell
+
+# Linux (Cargo)
+cargo install nu
+
+# From source
+git clone  [https://github.com/nushell/nushell](https://github.com/nushell/nushell)
+cd nushell
+cargo install --path .
+
+

Verify installation:

+
nu --version
+# Should show: 0.109.1 or higher
+
+

Nickel 1.15.1+

+

Nickel is the infrastructure-as-code language providing type-safe configuration with lazy evaluation.

+

Installation:

+
# macOS (Homebrew)
+brew install nickel
+
+# Linux (Cargo)
+cargo install nickel-lang-cli
+
+# From source
+git clone  [https://github.com/tweag/nickel](https://github.com/tweag/nickel)
+cd nickel
+cargo install --path cli
+
+

Verify installation:

+
nickel --version
+# Should show: 1.15.1 or higher
+
+

SOPS 3.10.2+

+

SOPS (Secrets OPerationS) provides encrypted configuration and secrets management.

+

Installation:

+
# macOS (Homebrew)
+brew install sops
+
+# Linux (binary download)
+wget  [https://github.com/getsops/sops/releases/download/v3.10.2/sops-v3.10.2.linux.amd64](https://github.com/getsops/sops/releases/download/v3.10.2/sops-v3.10.2.linux.amd64)
+sudo mv sops-v3.10.2.linux.amd64 /usr/local/bin/sops
+sudo chmod +x /usr/local/bin/sops
+
+

Verify installation:

+
sops --version
+# Should show: 3.10.2 or higher
+
+

Age 1.2.1+

+

Age provides modern encryption for secrets used by SOPS.

+

Installation:

+
# macOS (Homebrew)
+brew install age
+
+# Linux (binary download)
+wget  [https://github.com/FiloSottile/age/releases/download/v1.2.1/age-v1.2.1-linux-amd64.tar.gz](https://github.com/FiloSottile/age/releases/download/v1.2.1/age-v1.2.1-linux-amd64.tar.gz)
+tar xzf age-v1.2.1-linux-amd64.tar.gz
+sudo mv age/age /usr/local/bin/
+sudo chmod +x /usr/local/bin/age
+
+

Verify installation:

+
age --version
+# Should show: 1.2.1 or higher
+
+

K9s 0.50.6+

+

K9s provides a terminal UI for managing Kubernetes clusters.

+

Installation:

+
# macOS (Homebrew)
+brew install derailed/k9s/k9s
+
+# Linux (binary download)
+wget  [https://github.com/derailed/k9s/releases/download/v0.50.6/k9s_Linux_amd64.tar.gz](https://github.com/derailed/k9s/releases/download/v0.50.6/k9s_Linux_amd64.tar.gz)
+tar xzf k9s_Linux_amd64.tar.gz
+sudo mv k9s /usr/local/bin/
+
+

Verify installation:

+
k9s version
+# Should show: 0.50.6 or higher
+
+

Optional Software

+

mdBook

+

For building and serving local documentation.

+
# Install with Cargo
+cargo install mdbook
+
+# Verify
+mdbook --version
+
+

Docker or Podman

+

Container runtime for test environments and local development.

+
# Docker (macOS)
+brew install --cask docker
+
+# Podman (Linux)
+sudo apt-get install podman
+
+# Verify
+docker --version
+# or
+podman --version
+
+

Cargo (Rust)

+

Required for building platform services and native plugins.

+
# Install Rust and Cargo
+curl --proto '=https' --tlsv1.2 -sSf  [https://sh.rustup.rs](https://sh.rustup.rs) | sh
+
+# Verify
+cargo --version
+
+

Git

+

Version control for workspace management and configuration.

+
# Most systems have Git pre-installed
+git --version
+
+# Install if needed (macOS)
+brew install git
+
+# Install if needed (Linux)
+sudo apt-get install git
+
+

System Requirements

+

Minimum Hardware

+

Development Workstation:

    -
  • CPU: 16 cores
  • -
  • RAM: 32 GB
  • -
  • Disk: 500 GB available space (SSD recommended)
  • -
  • Network: High-bandwidth connection with static IP
  • +
  • CPU: 2 cores
  • +
  • RAM: 4 GB
  • +
  • Disk: 20 GB available space
  • +
  • Network: Internet connection for provider APIs
-

Operating System

-

Supported Platforms

+

Production Control Plane:

    -
  • macOS: 12.0 (Monterey) or later
  • -
  • Linux: +
  • CPU: 4 cores
  • +
  • RAM: 8 GB
  • +
  • Disk: 50 GB available space (SSD recommended)
  • +
  • Network: Stable internet connection, public IP optional
  • +
+

Supported Operating Systems

+

Primary Support:

    -
  • Ubuntu 22.04 LTS or later
  • -
  • Fedora 38 or later
  • -
  • Debian 12 (Bookworm) or later
  • -
  • RHEL 9 or later
  • +
  • macOS 12.0+ (Monterey or newer)
  • +
  • Linux distributions with kernel 5.0+ +
      +
    • Ubuntu 20.04 LTS or newer
    • +
    • Debian 11 or newer
    • +
    • Fedora 35 or newer
    • +
    • RHEL 8 or newer
-

Platform-Specific Notes

-

macOS:

+

Limited Support:

    -
  • Xcode Command Line Tools required
  • -
  • Homebrew recommended for package management
  • +
  • Windows 10/11 via WSL2 (Windows Subsystem for Linux)
-

Linux:

+

Network Requirements

+

Outbound Access:

    -
  • systemd-based distribution recommended
  • -
  • sudo access required for some operations
  • +
  • HTTPS (443) to cloud provider APIs
  • +
  • HTTPS (443) to GitHub (for version updates)
  • +
  • SSH (22) for server management
-

Required Software

-

Core Dependencies

-
- - - - - -
SoftwareVersionPurpose
Nushell0.107.1+Shell and scripting language
Nickel1.15.0+Configuration language
Docker20.10+Container runtime (for platform services)
SOPS3.10.2+Secrets management
Age1.2.1+Encryption tool
-
-

Optional Dependencies

-
- - - - - -
SoftwareVersionPurpose
Podman4.0+Alternative container runtime
OrbStackLatestmacOS-optimized container runtime
K9s0.50.6+Kubernetes management interface
glowLatestMarkdown renderer for guides
batLatestSyntax highlighting for file viewing
-
-

Installation Verification

-

Before proceeding, verify your system has the core dependencies installed:

-

Nushell

+

Inbound Access (optional, for platform services):

+
    +
  • Port 8080: HTTP API
  • +
  • Port 8081: MCP server
  • +
  • Port 5000: Orchestrator service
  • +
+

Cloud Provider Access

+

At least one cloud provider account with API credentials:

+

UpCloud:

+
    +
  • API username and password
  • +
  • Account with sufficient quota for servers
  • +
+

AWS:

+
    +
  • AWS Access Key ID and Secret Access Key
  • +
  • IAM permissions for EC2, VPC, EBS operations
  • +
  • Account with sufficient EC2 quota
  • +
+

Local Provider:

+
    +
  • Docker or Podman installed
  • +
  • Sufficient local system resources
  • +
+

Permission Requirements

+

User Permissions

+

Standard User (recommended):

+
    +
  • Read/write access to workspace directory
  • +
  • Ability to create symlinks for CLI installation
  • +
  • SSH key generation capability
  • +
+

Administrative Tasks (optional):

+
    +
  • Installing CLI to /usr/local/bin (requires sudo)
  • +
  • Installing system-wide dependencies
  • +
  • Configuring system services
  • +
+

File System Permissions

+
# Workspace directory
+chmod 755 ~/provisioning-workspace
+
+# Configuration files
+chmod 600 ~/.config/provisioning/user_config.yaml
+chmod 600 ~/.ssh/provisioning_*
+
+# Executable permissions for CLI
+chmod +x /path/to/provisioning/core/cli/provisioning
+
+

Verification Checklist

+

Before proceeding to installation, verify all prerequisites:

+
# Check required tools
+nu --version              # 0.109.1+
+nickel --version          # 1.15.1+
+sops --version            # 3.10.2+
+age --version             # 1.2.1+
+k9s version               # 0.50.6+
+
+# Check optional tools
+mdbook --version          # Latest
+docker --version          # Latest
+cargo --version           # Latest
+git --version             # Latest
+
+# Verify system resources
+nproc                     # CPU cores (2+ minimum)
+free -h                   # RAM (4GB+ minimum)
+df -h ~                   # Disk space (20GB+ minimum)
+
+# Test network connectivity
+curl -I  [https://api.github.com](https://api.github.com)
+curl -I  [https://hub.upcloud.com](https://hub.upcloud.com)  # UpCloud API
+curl -I  [https://ec2.amazonaws.com](https://ec2.amazonaws.com)  # AWS API
+
+

Next Steps

+

Once all prerequisites are met, proceed to:

+ +

Installation

+

This guide covers installing the Provisioning platform on your system.

+

Prerequisites

+

Ensure all prerequisites are met before proceeding.

+

Installation Steps

+

Step 1: Clone the Repository

+
# Clone the provisioning repository
+git clone  [https://github.com/your-org/project-provisioning](https://github.com/your-org/project-provisioning)
+cd project-provisioning
+
+

Step 2: Add CLI to PATH

+

The CLI can be installed globally or run directly from the repository.

+

Option A: Symbolic Link (Recommended):

+
# Create symbolic link to /usr/local/bin
+ln -sf "$(pwd)/provisioning/core/cli/provisioning" /usr/local/bin/provisioning
+
+# Verify installation
+provisioning version
+
+

Option B: PATH Environment Variable:

+
# Add to ~/.bashrc, ~/.zshrc, or ~/.config/nushell/env.nu
+export PATH="$PATH:/path/to/project-provisioning/provisioning/core/cli"
+
+# Reload shell configuration
+source ~/.bashrc  # or ~/.zshrc
+
+

Option C: Direct Execution:

+
# Run directly from repository (no installation needed)
+./provisioning/core/cli/provisioning version
+
+

Step 3: Verify Installation

+
# Check CLI is accessible
+provisioning version
+
+# Show environment configuration
+provisioning env
+
+# Display help
+provisioning help
+
+

Expected output:

+
Provisioning Platform
+CLI Version: (current version)
+Nushell: 0.109.1+
+Nickel: 1.15.1+
+
+

Step 4: Initialize Configuration

+

Generate default configuration files:

+
# Create user configuration directory
+mkdir -p ~/.config/provisioning
+
+# Initialize default user configuration (optional)
+provisioning config init
+
+

This creates ~/.config/provisioning/user_config.yaml with sensible defaults.

+

Step 5: Configure Cloud Provider Credentials

+

Configure credentials for at least one cloud provider.

+

UpCloud:

+
# ~/.config/provisioning/user_config.yaml
+providers:
+  upcloud:
+    username: "your-username"
+    password: "your-password"  # Use SOPS for encryption in production
+    default_zone: "de-fra1"
+
+

AWS:

+
# ~/.config/provisioning/user_config.yaml
+providers:
+  aws:
+    access_key_id: "AKIA..."
+    secret_access_key: "..."  # Use SOPS for encryption in production
+    default_region: "us-east-1"
+
+

Local Provider (no credentials required):

+
# ~/.config/provisioning/user_config.yaml
+providers:
+  local:
+    container_runtime: "docker"  # or "podman"
+
+ +

Use SOPS to encrypt sensitive configuration:

+
# Generate Age encryption key
+age-keygen -o ~/.config/provisioning/age-key.txt
+
+# Extract public key
+export AGE_PUBLIC_KEY=$(grep "public key:" ~/.config/provisioning/age-key.txt | cut -d: -f2 | tr -d ' ')
+
+# Create .sops.yaml configuration
+cat > ~/.config/provisioning/.sops.yaml <<EOF
+creation_rules:
+  - path_regex: .*user_config\.yaml$
+    age: $AGE_PUBLIC_KEY
+EOF
+
+# Encrypt configuration file
+sops -e -i ~/.config/provisioning/user_config.yaml
+
+

Decrypting (automatic with SOPS):

+
# Set Age key path
+export SOPS_AGE_KEY_FILE=~/.config/provisioning/age-key.txt
+
+# SOPS will automatically decrypt when accessed
+provisioning config show
+
+

Step 7: Validate Configuration

+
# Validate all configuration files
+provisioning validate config
+
+# Check provider connectivity
+provisioning providers
+
+# Show complete environment
+provisioning allenv
+
+

Optional: Install Platform Services

+

Platform services provide additional capabilities like orchestration and web UI.

+

Orchestrator Service (Rust)

+
# Build orchestrator
+cd provisioning/platform/orchestrator
+cargo build --release
+
+# Start orchestrator
+./target/release/orchestrator --port 5000
+
+

Control Center (Web UI)

+
# Build control center
+cd provisioning/platform/control-center
+cargo build --release
+
+# Start control center
+./target/release/control-center --port 8080
+
+

Native Plugins (Performance)

+

Install Nushell plugins for 10-50x performance improvements:

+
# Build and register plugins
+cd provisioning/core/plugins
+
+# Auth plugin
+cargo build --release --package nu_plugin_auth
+nu -c "register target/release/nu_plugin_auth"
+
+# KMS plugin
+cargo build --release --package nu_plugin_kms
+nu -c "register target/release/nu_plugin_kms"
+
+# Orchestrator plugin
+cargo build --release --package nu_plugin_orchestrator
+nu -c "register target/release/nu_plugin_orchestrator"
+
+# Verify plugins are registered
+nu -c "plugin list"
+
+

Workspace Initialization

+

Create your first workspace for managing infrastructure:

+
# Initialize new workspace
+provisioning workspace init my-project
+cd my-project
+
+# Verify workspace structure
+ls -la
+
+

Expected workspace structure:

+
my-project/
+├── infra/          # Infrastructure Nickel schemas
+├── config/         # Workspace configuration
+├── extensions/     # Custom extensions
+└── runtime/        # Runtime data and state
+
+

Troubleshooting

+

Common Issues

+

CLI not found after installation:

+
# Verify symlink was created
+ls -l /usr/local/bin/provisioning
+
+# Check PATH includes /usr/local/bin
+echo $PATH
+
+# Try direct path
+/usr/local/bin/provisioning version
+
+

Permission denied when creating symlink:

+
# Use sudo for system-wide installation
+sudo ln -sf "$(pwd)/provisioning/core/cli/provisioning" /usr/local/bin/provisioning
+
+# Or use user-local bin directory
+mkdir -p ~/.local/bin
+ln -sf "$(pwd)/provisioning/core/cli/provisioning" ~/.local/bin/provisioning
+export PATH="$PATH:$HOME/.local/bin"
+
+

Nushell version mismatch:

# Check Nushell version
 nu --version
 
-# Expected output: 0.107.1 or higher
+# Update Nushell
+brew upgrade nushell  # macOS
+cargo install nu --force  # Linux
 
-

Nickel

-
# Check Nickel version
-nickel --version
-
-# Expected output: 1.15.0 or higher
-
-

Docker

-
# Check Docker version
-docker --version
-
-# Check Docker is running
-docker ps
-
-# Expected: Docker version 20.10+ and connection successful
-
-

SOPS

-
# Check SOPS version
-sops --version
-
-# Expected output: 3.10.2 or higher
-
-

Age

-
# Check Age version
-age --version
-
-# Expected output: 1.2.1 or higher
-
-

Installing Missing Dependencies

-

macOS (using Homebrew)

-
# Install Homebrew if not already installed
-/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
-
-# Install Nushell
-brew install nushell
-
-# Install Nickel
-brew install nickel
-
-# Install Docker Desktop
-brew install --cask docker
-
-# Install SOPS
-brew install sops
-
-# Install Age
-brew install age
-
-# Optional: Install extras
-brew install k9s glow bat
-
-

Ubuntu/Debian

-
# Update package list
-sudo apt update
-
-# Install prerequisites
-sudo apt install -y curl git build-essential
-
-# Install Nushell (from GitHub releases)
-curl -LO https://github.com/nushell/nushell/releases/download/0.107.1/nu-0.107.1-x86_64-linux-musl.tar.gz
-tar xzf nu-0.107.1-x86_64-linux-musl.tar.gz
-sudo mv nu /usr/local/bin/
-
-# Install Nickel (using Rust cargo)
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-source $HOME/.cargo/env
-cargo install nickel
-
-# Install Docker
-sudo apt install -y docker.io
-sudo systemctl enable --now docker
-sudo usermod -aG docker $USER
-
-# Install SOPS
-curl -LO https://github.com/getsops/sops/releases/download/v3.10.2/sops-v3.10.2.linux.amd64
-chmod +x sops-v3.10.2.linux.amd64
-sudo mv sops-v3.10.2.linux.amd64 /usr/local/bin/sops
-
-# Install Age
-sudo apt install -y age
-
-

Fedora/RHEL

-
# Install Nushell
-sudo dnf install -y nushell
-
-# Install Nickel (using Rust cargo)
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-source $HOME/.cargo/env
-cargo install nickel
-
-# Install Docker
-sudo dnf install -y docker
-sudo systemctl enable --now docker
-sudo usermod -aG docker $USER
-
-# Install SOPS
-sudo dnf install -y sops
-
-# Install Age
-sudo dnf install -y age
-
-

Network Requirements

-

Firewall Ports

-

If running platform services, ensure these ports are available:

-
- - - - - - -
ServicePortProtocolPurpose
Orchestrator8080HTTPWorkflow API
Control Center9090HTTPPolicy engine
KMS Service8082HTTPKey management
API Server8083HTTPREST API
Extension Registry8084HTTPExtension discovery
OCI Registry5000HTTPArtifact storage
-
-

External Connectivity

-

The platform requires outbound internet access to:

-
    -
  • Download dependencies and updates
  • -
  • Pull container images
  • -
  • Access cloud provider APIs (AWS, UpCloud)
  • -
  • Fetch extension packages
  • -
-

Cloud Provider Credentials (Optional)

-

If you plan to use cloud providers, prepare credentials:

-

AWS

-
    -
  • AWS Access Key ID
  • -
  • AWS Secret Access Key
  • -
  • Configured via ~/.aws/credentials or environment variables
  • -
-

UpCloud

-
    -
  • UpCloud username
  • -
  • UpCloud password
  • -
  • Configured via environment variables or config files
  • -
-

Next Steps

-

Once all prerequisites are met, proceed to: -→ Installation

-

Installation

-

This guide walks you through installing the Provisioning Platform on your system.

-

Overview

-

The installation process involves:

-
    -
  1. Cloning the repository
  2. -
  3. Installing Nushell plugins
  4. -
  5. Setting up configuration
  6. -
  7. Initializing your first workspace
  8. -
-

Estimated time: 15-20 minutes

-

Step 1: Clone the Repository

-
# Clone the repository
-git clone https://github.com/provisioning/provisioning-platform.git
-cd provisioning-platform
-
-# Checkout the latest stable release (optional)
-git checkout tags/v3.5.0
-
-

Step 2: Install Nushell Plugins

-

The platform uses multiple Nushell plugins for enhanced functionality.

-

Install nu_plugin_tera (Template Rendering)

-
# Install from crates.io
-cargo install nu_plugin_tera
-
-# Register with Nushell
-nu -c "plugin add ~/.cargo/bin/nu_plugin_tera; plugin use tera"
-
-

Verify Plugin Installation

-
# Start Nushell
-nu
-
-# List installed plugins
-plugin list
-
-# Expected output should include:
-# - tera
-
-

Step 3: Add CLI to PATH

-

Make the provisioning command available globally:

-
# Option 1: Symlink to /usr/local/bin (recommended)
-sudo ln -s "$(pwd)/provisioning/core/cli/provisioning" /usr/local/bin/provisioning
-
-# Option 2: Add to PATH in your shell profile
-echo 'export PATH="$PATH:'"$(pwd)"'/provisioning/core/cli"' >> ~/.bashrc  # or ~/.zshrc
-source ~/.bashrc  # or ~/.zshrc
-
-# Verify installation
-provisioning --version
-
-

Step 4: Generate Age Encryption Keys

-

Generate keys for encrypting sensitive configuration:

-
# Create Age key directory
-mkdir -p ~/.config/provisioning/age
-
-# Generate private key
-age-keygen -o ~/.config/provisioning/age/private_key.txt
-
-# Extract public key
-age-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisioning/age/public_key.txt
-
-# Secure the keys
-chmod 600 ~/.config/provisioning/age/private_key.txt
-chmod 644 ~/.config/provisioning/age/public_key.txt
-
-

Step 5: Configure Environment

-

Set up basic environment variables:

-
# Create environment file
-cat > ~/.provisioning/env << 'ENVEOF'
-# Provisioning Environment Configuration
-export PROVISIONING_ENV=dev
-export PROVISIONING_PATH=$(pwd)
-export PROVISIONING_KAGE=~/.config/provisioning/age
-ENVEOF
-
-# Source the environment
-source ~/.provisioning/env
-
-# Add to shell profile for persistence
-echo 'source ~/.provisioning/env' >> ~/.bashrc  # or ~/.zshrc
-
-

Step 6: Initialize Workspace

-

Create your first workspace:

-
# Initialize a new workspace
-provisioning workspace init my-first-workspace
-
-# Expected output:
-# ✓ Workspace 'my-first-workspace' created successfully
-# ✓ Configuration template generated
-# ✓ Workspace activated
-
-# Verify workspace
-provisioning workspace list
-
-

Step 7: Validate Installation

-

Run the installation verification:

-
# Check system configuration
-provisioning validate config
-
-# Check all dependencies
-provisioning env
-
-# View detailed environment
-provisioning allenv
-
-

Expected output should show:

-
    -
  • ✅ All core dependencies installed
  • -
  • ✅ Age keys configured
  • -
  • ✅ Workspace initialized
  • -
  • ✅ Configuration valid
  • -
-

Optional: Install Platform Services

-

If you plan to use platform services (orchestrator, control center, etc.):

-
# Build platform services
-cd provisioning/platform
-
-# Build orchestrator
-cd orchestrator
-cargo build --release
-cd ..
-
-# Build control center
-cd control-center
-cargo build --release
-cd ..
-
-# Build KMS service
-cd kms-service
-cargo build --release
-cd ..
-
-# Verify builds
-ls */target/release/
-
-

Optional: Install Platform with Installer

-

Use the interactive installer for a guided setup:

-
# Build the installer
-cd provisioning/platform/installer
-cargo build --release
-
-# Run interactive installer
-./target/release/provisioning-installer
-
-# Or headless installation
-./target/release/provisioning-installer --headless --mode solo --yes
-
-

Troubleshooting

-

Nushell Plugin Not Found

-

If plugins aren’t recognized:

-
# Rebuild plugin registry
-nu -c "plugin list; plugin use tera"
-
-

Permission Denied

-

If you encounter permission errors:

-
# Ensure proper ownership
-sudo chown -R $USER:$USER ~/.config/provisioning
-
-# Check PATH
-echo $PATH | grep provisioning
-
-

Age Keys Not Found

-

If encryption fails:

-
# Verify keys exist
-ls -la ~/.config/provisioning/age/
-
-# Regenerate if needed
-age-keygen -o ~/.config/provisioning/age/private_key.txt
-
-

Next Steps

-

Once installation is complete, proceed to: -→ First Deployment

-

Additional Resources

- -

First Deployment

-

This guide walks you through deploying your first infrastructure using the Provisioning Platform.

-

Overview

-

In this chapter, you’ll:

-
    -
  1. Configure a simple infrastructure
  2. -
  3. Create your first server
  4. -
  5. Install a task service (Kubernetes)
  6. -
  7. Verify the deployment
  8. -
-

Estimated time: 10-15 minutes

-

Step 1: Configure Infrastructure

-

Create a basic infrastructure configuration:

-
# Generate infrastructure template
-provisioning generate infra --new my-infra
-
-# This creates: workspace/infra/my-infra/
-# - config.toml (infrastructure settings)
-# - settings.ncl (Nickel configuration)
-
-

Step 2: Edit Configuration

-

Edit the generated configuration:

-
# Edit with your preferred editor
-$EDITOR workspace/infra/my-infra/settings.ncl
-
-

Example configuration:

-
import provisioning.settings as cfg
-
-# Infrastructure settings
-infra_settings = cfg.InfraSettings {
-    name = "my-infra"
-    provider = "local"  # Start with local provider
-    environment = "development"
-}
-
-# Server configuration
-servers = [
-    {
-        hostname = "dev-server-01"
-        cores = 2
-        memory = 4096  # MB
-        disk = 50  # GB
-    }
-]
-
-

Step 3: Create Server (Check Mode)

-

First, run in check mode to see what would happen:

-
# Check mode - no actual changes
-provisioning server create --infra my-infra --check
-
-# Expected output:
-# ✓ Validation passed
-# ⚠ Check mode: No changes will be made
-#
-# Would create:
-# - Server: dev-server-01 (2 cores, 4 GB RAM, 50 GB disk)
-
-

Step 4: Create Server (Real)

-

If check mode looks good, create the server:

-
# Create server
-provisioning server create --infra my-infra
-
-# Expected output:
-# ✓ Creating server: dev-server-01
-# ✓ Server created successfully
-# ✓ IP Address: 192.168.1.100
-# ✓ SSH access: ssh user@192.168.1.100
-
-

Step 5: Verify Server

-

Check server status:

-
# List all servers
-provisioning server list
-
-# Get detailed server info
-provisioning server info dev-server-01
-
-# SSH to server (optional)
-provisioning server ssh dev-server-01
-
-

Step 6: Install Kubernetes (Check Mode)

-

Install a task service on the server:

-
# Check mode first
-provisioning taskserv create kubernetes --infra my-infra --check
-
-# Expected output:
-# ✓ Validation passed
-# ⚠ Check mode: No changes will be made
-#
-# Would install:
-# - Kubernetes v1.28.0
-# - Required dependencies: containerd, etcd
-# - On servers: dev-server-01
-
-

Step 7: Install Kubernetes (Real)

-

Proceed with installation:

-
# Install Kubernetes
-provisioning taskserv create kubernetes --infra my-infra --wait
-
-# This will:
-# 1. Check dependencies
-# 2. Install containerd
-# 3. Install etcd
-# 4. Install Kubernetes
-# 5. Configure and start services
-
-# Monitor progress
-provisioning workflow monitor <task-id>
-
-

Step 8: Verify Installation

-

Check that Kubernetes is running:

-
# List installed task services
-provisioning taskserv list --infra my-infra
-
-# Check Kubernetes status
-provisioning server ssh dev-server-01
-kubectl get nodes  # On the server
-exit
-
-# Or remotely
-provisioning server exec dev-server-01 -- kubectl get nodes
-
-

Common Deployment Patterns

-

Pattern 1: Multiple Servers

-

Create multiple servers at once:

-
servers = [
-    {hostname = "web-01", cores = 2, memory = 4096},
-    {hostname = "web-02", cores = 2, memory = 4096},
-    {hostname = "db-01", cores = 4, memory = 8192}
-]
-
-
provisioning server create --infra my-infra --servers web-01,web-02,db-01
-
-

Pattern 2: Server with Multiple Task Services

-

Install multiple services on one server:

-
provisioning taskserv create kubernetes,cilium,postgres --infra my-infra --servers web-01
-
-

Pattern 3: Complete Cluster

-

Deploy a complete cluster configuration:

-
provisioning cluster create buildkit --infra my-infra
-
-

Deployment Workflow

-

The typical deployment workflow:

-
# 1. Initialize workspace
-provisioning workspace init production
-
-# 2. Generate infrastructure
-provisioning generate infra --new prod-infra
-
-# 3. Configure (edit settings.ncl)
-$EDITOR workspace/infra/prod-infra/settings.ncl
-
-# 4. Validate configuration
-provisioning validate config --infra prod-infra
-
-# 5. Create servers (check mode)
-provisioning server create --infra prod-infra --check
-
-# 6. Create servers (real)
-provisioning server create --infra prod-infra
-
-# 7. Install task services
-provisioning taskserv create kubernetes --infra prod-infra --wait
-
-# 8. Deploy cluster (if needed)
-provisioning cluster create my-cluster --infra prod-infra
-
-# 9. Verify
-provisioning server list
-provisioning taskserv list
-
-

Troubleshooting

-

Server Creation Fails

-
# Check logs
-provisioning server logs dev-server-01
-
-# Try with debug mode
-provisioning --debug server create --infra my-infra
-
-

Task Service Installation Fails

-
# Check task service logs
-provisioning taskserv logs kubernetes
-
-# Retry installation
-provisioning taskserv create kubernetes --infra my-infra --force
-
-

SSH Connection Issues

-
# Verify SSH key
-ls -la ~/.ssh/
-
-# Test SSH manually
-ssh -v user@<server-ip>
-
-# Use provisioning SSH helper
-provisioning server ssh dev-server-01 --debug
-
-

Next Steps

-

Now that you’ve completed your first deployment: -→ Verification - Verify your deployment is working correctly

-

Additional Resources

- -

Verification

-

This guide helps you verify that your Provisioning Platform deployment is working correctly.

-

Overview

-

After completing your first deployment, verify:

-
    -
  1. System configuration
  2. -
  3. Server accessibility
  4. -
  5. Task service health
  6. -
  7. Platform services (if installed)
  8. -
-

Step 1: Verify Configuration

-

Check that all configuration is valid:

-
# Validate all configuration
-provisioning validate config
-
-# Expected output:
-# ✓ Configuration valid
-# ✓ No errors found
-# ✓ All required fields present
-
-
# Check environment variables
-provisioning env
-
-# View complete configuration
-provisioning allenv
-
-

Step 2: Verify Servers

-

Check that servers are accessible and healthy:

-
# List all servers
-provisioning server list
-
-# Expected output:
-# ┌───────────────┬──────────┬───────┬────────┬──────────────┬──────────┐
-# │ Hostname      │ Provider │ Cores │ Memory │ IP Address   │ Status   │
-# ├───────────────┼──────────┼───────┼────────┼──────────────┼──────────┤
-# │ dev-server-01 │ local    │ 2     │ 4096   │ 192.168.1.100│ running  │
-# └───────────────┴──────────┴───────┴────────┴──────────────┴──────────┘
-
-
# Check server details
-provisioning server info dev-server-01
-
-# Test SSH connectivity
-provisioning server ssh dev-server-01 -- echo "SSH working"
-
-

Step 3: Verify Task Services

-

Check installed task services:

-
# List task services
-provisioning taskserv list
-
-# Expected output:
-# ┌────────────┬─────────┬────────────────┬──────────┐
-# │ Name       │ Version │ Server         │ Status   │
-# ├────────────┼─────────┼────────────────┼──────────┤
-# │ containerd │ 1.7.0   │ dev-server-01  │ running  │
-# │ etcd       │ 3.5.0   │ dev-server-01  │ running  │
-# │ kubernetes │ 1.28.0  │ dev-server-01  │ running  │
-# └────────────┴─────────┴────────────────┴──────────┘
-
-
# Check specific task service
-provisioning taskserv status kubernetes
-
-# View task service logs
-provisioning taskserv logs kubernetes --tail 50
-
-

Step 4: Verify Kubernetes (If Installed)

-

If you installed Kubernetes, verify it’s working:

-
# Check Kubernetes nodes
-provisioning server ssh dev-server-01 -- kubectl get nodes
-
-# Expected output:
-# NAME            STATUS   ROLES           AGE   VERSION
-# dev-server-01   Ready    control-plane   10m   v1.28.0
-
-
# Check Kubernetes pods
-provisioning server ssh dev-server-01 -- kubectl get pods -A
-
-# All pods should be Running or Completed
-
-

Step 5: Verify Platform Services (Optional)

-

If you installed platform services:

-

Orchestrator

-
# Check orchestrator health
-curl http://localhost:8080/health
-
-# Expected:
-# {"status":"healthy","version":"0.1.0"}
-
-
# List tasks
-curl http://localhost:8080/tasks
-
-

Control Center

-
# Check control center health
-curl http://localhost:9090/health
-
-# Test policy evaluation
-curl -X POST http://localhost:9090/policies/evaluate \
-  -H "Content-Type: application/json" \
-  -d '{"principal":{"id":"test"},"action":{"id":"read"},"resource":{"id":"test"}}'
-
-

KMS Service

-
# Check KMS health
-curl http://localhost:8082/api/v1/kms/health
-
-# Test encryption
-echo "test" | provisioning kms encrypt
-
-

Step 6: Run Health Checks

-

Run comprehensive health checks:

-
# Check all components
-provisioning health check
-
-# Expected output:
-# ✓ Configuration: OK
-# ✓ Servers: 1/1 healthy
-# ✓ Task Services: 3/3 running
-# ✓ Platform Services: 3/3 healthy
-# ✓ Network Connectivity: OK
-# ✓ Encryption Keys: OK
-
-

Step 7: Verify Workflows

-

If you used workflows:

-
# List all workflows
-provisioning workflow list
-
-# Check specific workflow
-provisioning workflow status <workflow-id>
-
-# View workflow stats
-provisioning workflow stats
-
-

Common Verification Checks

-

DNS Resolution (If CoreDNS Installed)

-
# Test DNS resolution
-dig @localhost test.provisioning.local
-
-# Check CoreDNS status
-provisioning server ssh dev-server-01 -- systemctl status coredns
-
-

Network Connectivity

-
# Test server-to-server connectivity
-provisioning server ssh dev-server-01 -- ping -c 3 dev-server-02
-
-# Check firewall rules
-provisioning server ssh dev-server-01 -- sudo iptables -L
-
-

Storage and Resources

-
# Check disk usage
-provisioning server ssh dev-server-01 -- df -h
-
-# Check memory usage
-provisioning server ssh dev-server-01 -- free -h
-
-# Check CPU usage
-provisioning server ssh dev-server-01 -- top -bn1 | head -20
-
-

Troubleshooting Failed Verifications

-

Configuration Validation Failed

-
# View detailed error
-provisioning validate config --verbose
-
-# Check specific infrastructure
-provisioning validate config --infra my-infra
-
-

Server Unreachable

-
# Check server logs
-provisioning server logs dev-server-01
-
-# Try debug mode
-provisioning --debug server ssh dev-server-01
-
-

Task Service Not Running

-
# Check service logs
-provisioning taskserv logs kubernetes
-
-# Restart service
-provisioning taskserv restart kubernetes --infra my-infra
-
-

Platform Service Down

-
# Check service status
-provisioning platform status orchestrator
-
-# View service logs
-provisioning platform logs orchestrator --tail 100
-
-# Restart service
-provisioning platform restart orchestrator
-
-

Performance Verification

-

Response Time Tests

-
# Measure server response time
-time provisioning server info dev-server-01
-
-# Measure task service response time
-time provisioning taskserv list
-
-# Measure workflow submission time
-time provisioning workflow submit test-workflow.ncl
-
-

Resource Usage

-
# Check platform resource usage
-docker stats  # If using Docker
-
-# Check system resources
-provisioning system resources
-
-

Security Verification

-

Encryption

-
# Verify encryption keys
-ls -la ~/.config/provisioning/age/
-
-# Test encryption/decryption
-echo "test" | provisioning kms encrypt | provisioning kms decrypt
-
-

Authentication (If Enabled)

-
# Test login
-provisioning login --username admin
-
-# Verify token
-provisioning whoami
-
-# Test MFA (if enabled)
-provisioning mfa verify <code>
-
-

Verification Checklist

-

Use this checklist to ensure everything is working:

-
    -
  • -Configuration validation passes
  • -
  • -All servers are accessible via SSH
  • -
  • -All servers show “running” status
  • -
  • -All task services show “running” status
  • -
  • -Kubernetes nodes are “Ready” (if installed)
  • -
  • -Kubernetes pods are “Running” (if installed)
  • -
  • -Platform services respond to health checks
  • -
  • -Encryption/decryption works
  • -
  • -Workflows can be submitted and complete
  • -
  • -No errors in logs
  • -
  • -Resource usage is within expected limits
  • -
-

Next Steps

-

Once verification is complete:

- -

Additional Resources

- -
-

Congratulations! You’ve successfully deployed and verified your first Provisioning Platform infrastructure!

-

Platform Service Configuration

-

After verifying your installation, the next step is to configure the platform services. This guide walks you through setting up your provisioning -platform for deployment.

-

What You’ll Learn

-
    -
  • Understanding platform services and configuration modes
  • -
  • Setting up platform configurations with setup-platform-config.sh
  • -
  • Choosing the right deployment mode for your use case
  • -
  • Configuring services interactively or with quick mode
  • -
  • Running platform services with your configuration
  • -
-

Prerequisites

-

Before configuring platform services, ensure you have:

-
    -
  • ✅ Completed Installation Steps
  • -
  • ✅ Verified installation with Verification
  • -
  • Nickel 0.10+ (for configuration language)
  • -
  • Nushell 0.109+ (for scripts)
  • -
  • TypeDialog (optional, for interactive configuration)
  • -
-

Platform Services Overview

-

The provisioning platform consists of 8 core services:

-
- - - - - - - - -
ServicePurposeDefault Mode
orchestratorMain orchestration engineRequired
control-centerWeb UI and management consoleRequired
mcp-serverModel Context Protocol integrationOptional
vault-serviceSecrets management and encryptionRequired
extension-registryExtension distribution systemRequired
ragRetrieval-Augmented GenerationOptional
ai-serviceAI model integrationOptional
provisioning-daemonBackground operationsRequired
-
-

Deployment Modes

-

Choose a deployment mode based on your needs:

-
- - - - -
ModeResourcesUse Case
solo2 CPU, 4 GB RAMDevelopment, testing, local machines
multiuser4 CPU, 8 GB RAMTeam staging, team development
cicd8 CPU, 16 GB RAMCI/CD pipelines, automated testing
enterprise16+ CPU, 32+ GBProduction, high-availability
-
-

Step 1: Initialize Configuration Script

-

The configuration system is managed by a standalone script that doesn’t require the main installer:

-
# Navigate to the provisioning directory
-cd /path/to/project-provisioning
-
-# Verify the setup script exists
-ls -la provisioning/scripts/setup-platform-config.sh
-
-# Make script executable
-chmod +x provisioning/scripts/setup-platform-config.sh
-
-

Step 2: Choose Configuration Method

- -

TypeDialog provides an interactive form-based configuration interface available in multiple backends (web, TUI, CLI).

-

Quick Interactive Setup (All Services at Once)

-
# Run interactive setup - prompts for choices
-./provisioning/scripts/setup-platform-config.sh
-
-# Follow the prompts to:
-# 1. Choose action (TypeDialog, Quick Mode, Clean, List)
-# 2. Select service (or all services)
-# 3. Choose deployment mode
-# 4. Select backend (web, tui, cli)
-
-

Configure Specific Service with TypeDialog

-
# Configure orchestrator in solo mode with web UI
-./provisioning/scripts/setup-platform-config.sh \
-  --service orchestrator \
-  --mode solo \
-  --backend web
-
-# TypeDialog opens browser → User fills form → Config generated
-
-

When to use TypeDialog:

-
    -
  • First-time setup with visual form guidance
  • -
  • Updating configuration with validation
  • -
  • Multiple services needing coordinated changes
  • -
  • Team environments where UI is preferred
  • -
-

Method B: Quick Mode Configuration (Fastest)

-

Quick mode automatically creates all service configurations from defaults overlaid with mode-specific tuning.

-
# Quick setup for solo development mode
-./provisioning/scripts/setup-platform-config.sh --quick-mode --mode solo
-
-# Quick setup for enterprise production
-./provisioning/scripts/setup-platform-config.sh --quick-mode --mode enterprise
-
-# Result: All 8 services configured immediately with appropriate resource limits
-
-

When to use Quick Mode:

-
    -
  • Initial setup with standard defaults
  • -
  • Switching deployment modes
  • -
  • CI/CD automated setup
  • -
  • Scripted/programmatic configuration
  • -
-

Method C: Manual Nickel Configuration

-

For advanced users who prefer editing configuration files directly:

-
# View schema definition
-cat provisioning/schemas/platform/schemas/orchestrator.ncl
-
-# View default values
-cat provisioning/schemas/platform/defaults/orchestrator-defaults.ncl
-
-# View mode overlay
-cat provisioning/schemas/platform/defaults/deployment/solo-defaults.ncl
-
-# Edit configuration directly
-vim provisioning/config/runtime/orchestrator.solo.ncl
-
-# Validate Nickel syntax
-nickel typecheck provisioning/config/runtime/orchestrator.solo.ncl
-
-# Regenerate TOML from edited config (CRITICAL STEP)
-./provisioning/scripts/setup-platform-config.sh --generate-toml
-
-

When to use Manual Edit:

-
    -
  • Advanced customization beyond form options
  • -
  • Programmatic configuration generation
  • -
  • Integration with CI/CD systems
  • -
  • Custom workspace-specific overrides
  • -
-

Step 3: Understand Configuration Layers

-

The configuration system uses layered composition:

-
1. Schema (Type contract)
-   ↓ Defines valid fields and constraints
-
-2. Service Defaults (Base values)
-   ↓ Default configuration for each service
-
-3. Mode Overlay (Mode-specific tuning)
-   ↓ solo, multiuser, cicd, or enterprise settings
-
-4. User Customization (Overrides)
-   ↓ User-specific or workspace-specific changes
-
-5. Runtime Config (Final result)
-   ↓ provisioning/config/runtime/orchestrator.solo.ncl
-
-6. TOML Export (Service consumption)
-   ↓ provisioning/config/runtime/generated/orchestrator.solo.toml
-
-

All layers are automatically composed and validated.

-

Step 4: Verify Generated Configuration

-

After running the setup script, verify the configuration was created:

-
# List generated runtime configurations
-ls -la provisioning/config/runtime/
-
-# Check generated TOML files
-ls -la provisioning/config/runtime/generated/
-
-# Verify TOML is valid
-cat provisioning/config/runtime/generated/orchestrator.solo.toml | head -20
-
-

You should see files for all 8 services in both the runtime directory (Nickel format) and the generated directory (TOML format).

-

Step 5: Run Platform Services

-

After successful configuration, services can be started:

-

Running a Single Service

-
# Set deployment mode
-export ORCHESTRATOR_MODE=solo
-
-# Run the orchestrator service
-cd provisioning/platform
-cargo run -p orchestrator
-
-

Running Multiple Services

-
# Terminal 1: Vault Service (secrets management)
-export VAULT_MODE=solo
-cargo run -p vault-service
-
-# Terminal 2: Orchestrator (main service)
-export ORCHESTRATOR_MODE=solo
-cargo run -p orchestrator
-
-# Terminal 3: Control Center (web UI)
-export CONTROL_CENTER_MODE=solo
-cargo run -p control-center
-
-# Access web UI at http://localhost:8080 (default)
-
-

Docker-Based Deployment

-
# Start all services in Docker (requires docker-compose.yml)
-cd provisioning/platform/infrastructure/docker
-docker-compose -f docker-compose.solo.yml up
-
-# Or for enterprise mode
-docker-compose -f docker-compose.enterprise.yml up
-
-

Step 6: Verify Services Are Running

-
# Check orchestrator status
-curl http://localhost:9000/health
-
-# Check control center web UI
-open http://localhost:8080
-
-# View service logs
-export ORCHESTRATOR_MODE=solo
-cargo run -p orchestrator -- --log-level debug
-
-

Customizing Configuration

-

Scenario: Change Deployment Mode

-

If you need to switch from solo to multiuser mode:

-
# Option 1: Re-run setup with new mode
-./provisioning/scripts/setup-platform-config.sh --quick-mode --mode multiuser
-
-# Option 2: Interactive update via TypeDialog
-./provisioning/scripts/setup-platform-config.sh --service orchestrator --mode multiuser --backend web
-
-# Result: All configurations updated for multiuser mode
-#         Services read from provisioning/config/runtime/generated/orchestrator.multiuser.toml
-
-

Scenario: Manual Configuration Edit

-

If you need fine-grained control:

-
# 1. Edit the Nickel configuration directly
-vim provisioning/config/runtime/orchestrator.solo.ncl
-
-# 2. Make your changes (for example, change port, add environment variables)
-
-# 3. Validate syntax
-nickel typecheck provisioning/config/runtime/orchestrator.solo.ncl
-
-# 4. CRITICAL: Regenerate TOML (services won't see changes without this)
-./provisioning/scripts/setup-platform-config.sh --generate-toml
-
-# 5. Verify TOML was updated
-stat provisioning/config/runtime/generated/orchestrator.solo.toml
-
-# 6. Restart service with new configuration
-pkill orchestrator
-export ORCHESTRATOR_MODE=solo
-cargo run -p orchestrator
-
-

Scenario: Workspace-Specific Overrides

-

For workspace-specific customization:

-
# Create workspace override file
-mkdir -p workspace_myworkspace/config
-cat > workspace_myworkspace/config/platform-overrides.ncl <<'EOF'
-# Workspace-specific settings
-{
-  orchestrator = {
-    server.port = 9999,  # Custom port
-    workspace.name = "myworkspace"
-  },
-
-  control_center = {
-    workspace.name = "myworkspace"
-  }
-}
-EOF
-
-# Generate config with workspace overrides
-./provisioning/scripts/setup-platform-config.sh --workspace workspace_myworkspace
-
-# Configuration system merges: defaults + mode overlay + workspace overrides
-
-

Available Configuration Commands

-
# List all available modes
-./provisioning/scripts/setup-platform-config.sh --list-modes
-# Output: solo, multiuser, cicd, enterprise
-
-# List all configurable services
-./provisioning/scripts/setup-platform-config.sh --list-services
-# Output: orchestrator, control-center, mcp-server, vault-service, extension-registry, rag, ai-service, provisioning-daemon
-
-# List current configurations
-./provisioning/scripts/setup-platform-config.sh --list-configs
-# Output: Shows current runtime configurations and their status
-
-# Clean all runtime configurations (use with caution)
-./provisioning/scripts/setup-platform-config.sh --clean
-# Removes: provisioning/config/runtime/*.ncl
-#          provisioning/config/runtime/generated/*.toml
-
-

Configuration File Locations

-

Public Definitions (Part of repository)

-
provisioning/schemas/platform/
-├── schemas/              # Type contracts (Nickel)
-├── defaults/             # Base configuration values
-│   └── deployment/       # Mode-specific: solo, multiuser, cicd, enterprise
-├── validators/           # Business logic validation
-├── templates/            # Configuration generation templates
-└── constraints/          # Validation limits
-
-

Private Runtime Configs (Gitignored)

-
provisioning/config/runtime/              # User-specific deployments
-├── orchestrator.solo.ncl                 # Editable config
-├── orchestrator.multiuser.ncl
-└── generated/                            # Auto-generated, don't edit
-    ├── orchestrator.solo.toml            # For Rust services
-    └── orchestrator.multiuser.toml
-
-

Examples (Reference)

-
provisioning/config/examples/
-├── orchestrator.solo.example.ncl         # Solo mode reference
-└── orchestrator.enterprise.example.ncl   # Enterprise mode reference
-
-

Troubleshooting Configuration

-

Issue: Script Fails with “Nickel not found”

+

Nickel not found:

# Install Nickel
-# macOS
-brew install nickel
-
-# Linux
-cargo install nickel --version 0.10
-
-# Verify installation
-nickel --version
-# Expected: 0.10.0 or higher
-
-

Issue: Configuration Won’t Generate TOML

-
# Check Nickel syntax
-nickel typecheck provisioning/config/runtime/orchestrator.solo.ncl
-
-# If errors found, view detailed message
-nickel typecheck -i provisioning/config/runtime/orchestrator.solo.ncl
-
-# Try manual export
-nickel export --format toml provisioning/config/runtime/orchestrator.solo.ncl
-
-

Issue: Service Can’t Read Configuration

-
# Verify TOML file exists
-ls -la provisioning/config/runtime/generated/orchestrator.solo.toml
-
-# Verify file is valid TOML
-head -20 provisioning/config/runtime/generated/orchestrator.solo.toml
-
-# Check service is looking in right location
-echo $ORCHESTRATOR_MODE  # Should be set to 'solo', 'multiuser', etc.
-
-# Verify environment variable is correct
-export ORCHESTRATOR_MODE=solo
-cargo run -p orchestrator --verbose
-
-

Issue: Services Won’t Start After Config Change

-
# If you edited .ncl file manually, TOML must be regenerated
-./provisioning/scripts/setup-platform-config.sh --generate-toml
-
-# Verify new TOML was created
-stat provisioning/config/runtime/generated/orchestrator.solo.toml
-
-# Check modification time (should be recent)
-ls -lah provisioning/config/runtime/generated/orchestrator.solo.toml
-
-

Important Notes

-

🔒 Runtime Configurations Are Private

-

Files in provisioning/config/runtime/ are gitignored because:

-
    -
  • May contain encrypted secrets or credentials
  • -
  • Deployment-specific (different per environment)
  • -
  • User-customized (each developer/machine has different needs)
  • -
-

📘 Schemas Are Public

-

Files in provisioning/schemas/platform/ are version-controlled because:

-
    -
  • Define product structure and constraints
  • -
  • Part of official releases
  • -
  • Source of truth for configuration format
  • -
  • Shared across the team
  • -
-

🔄 Configuration Is Idempotent

-

The setup script is safe to run multiple times:

-
# Safe: Updates only what's needed
-./provisioning/scripts/setup-platform-config.sh --quick-mode --mode enterprise
-
-# Safe: Doesn't overwrite without --clean
-./provisioning/scripts/setup-platform-config.sh --generate-toml
-
-# Only deletes on explicit request
-./provisioning/scripts/setup-platform-config.sh --clean
-
-

⚠️ Installer Status

-

The full provisioning installer (provisioning/scripts/install.sh) is not yet implemented. Currently:

-
    -
  • ✅ Configuration setup script is standalone and ready to use
  • -
  • ⏳ Full installer integration is planned for future release
  • -
  • ✅ Manual workflow works perfectly without installer
  • -
  • ✅ CI/CD integration available now
  • -
-

Next Steps

-

After completing platform configuration:

-
    -
  1. Run Services: Start your platform services with configured settings
  2. -
  3. Access Web UI: Open Control Center at http://localhost:8080 (default)
  4. -
  5. Create First Infrastructure: Deploy your first servers and clusters
  6. -
  7. Set Up Extensions: Configure providers and task services for your needs
  8. -
  9. Backup Configuration: Back up runtime configs to private repository
  10. -
-

Additional Resources

- -
-

Version: 1.0.0 -Last Updated: 2026-01-05 -Difficulty: Beginner to Intermediate

-

AI Integration - Intelligent Infrastructure Provisioning

-

The provisioning platform integrates AI capabilities to provide intelligent assistance for infrastructure configuration, deployment, and -troubleshooting. -This section documents the AI system architecture, features, and usage patterns.

-

Overview

-

The AI integration consists of multiple components working together to provide intelligent infrastructure provisioning:

-
    -
  • typdialog-ai: AI-assisted form filling and configuration
  • -
  • typdialog-ag: Autonomous AI agents for complex workflows
  • -
  • typdialog-prov-gen: Natural language to Nickel configuration generation
  • -
  • ai-service: Core AI service backend with multi-provider support
  • -
  • mcp-server: Model Context Protocol server for LLM integration
  • -
  • rag: Retrieval-Augmented Generation for contextual knowledge
  • -
-

Key Features

-

Natural Language Configuration

-

Generate infrastructure configurations from plain English descriptions:

-
provisioning ai generate "Create a production PostgreSQL cluster with encryption and daily backups"
-
-

AI-Assisted Forms

-

Real-time suggestions and explanations as you fill out configuration forms via typdialog web UI.

-

Intelligent Troubleshooting

-

AI analyzes deployment failures and suggests fixes:

-
provisioning ai troubleshoot deployment-12345
-
-

-

Configuration Optimization -AI reviews configurations and suggests performance and security improvements:

-
provisioning ai optimize workspaces/prod/config.ncl
-
-

Autonomous Agents

-

AI agents execute multi-step workflows with minimal human intervention:

-
provisioning ai agent --goal "Set up complete dev environment for Python app"
-
-

Documentation Structure

- -

Quick Start

-

Enable AI Features

-
# Edit provisioning config
-vim provisioning/config/ai.toml
-
-# Set provider and enable features
-[ai]
-enabled = true
-provider = "anthropic"  # or "openai" or "local"
-model = "claude-sonnet-4"
-
-[ai.features]
-form_assistance = true
-config_generation = true
-troubleshooting = true
-
-

Generate Configuration from Natural Language

-
# Simple generation
-provisioning ai generate "PostgreSQL database with encryption"
-
-# With specific schema
-provisioning ai generate \
-  --schema database \
-  --output workspaces/dev/db.ncl \
-  "Production PostgreSQL with 100GB storage and daily backups"
-
-

Use AI-Assisted Forms

-
# Open typdialog web UI with AI assistance
-provisioning workspace init --interactive --ai-assist
-
-# AI provides real-time suggestions as you type
-# AI explains validation errors in plain English
-# AI fills multiple fields from natural language description
-
-

Troubleshoot with AI

-
# Analyze failed deployment
-provisioning ai troubleshoot deployment-12345
-
-# AI analyzes logs and suggests fixes
-# AI generates corrected configuration
-# AI explains root cause in plain language
-
-

Security and Privacy

-

The AI system implements strict security controls:

-
    -
  • Cedar Policies: AI access controlled by Cedar authorization
  • -
  • Secret Isolation: AI cannot access secrets directly
  • -
  • Human Approval: Critical operations require human approval
  • -
  • Audit Trail: All AI operations logged
  • -
  • Data Sanitization: Secrets/PII sanitized before sending to LLM
  • -
  • Local Models: Support for air-gapped deployments
  • -
-

See Security Policies for complete details.

-

Supported LLM Providers

-

| | Provider | Models | Best For | | -| | ––––– | –––– | ––––– | | -| | Anthropic | Claude Sonnet 4, Claude Opus 4 | Complex configs, long context | | -| | OpenAI | GPT-4 Turbo, GPT-4 | Fast suggestions, tool calling | | -| | Local | Llama 3, Mistral | Air-gapped, privacy-critical | |

-

Cost Considerations

-

AI features incur LLM API costs. The system implements cost controls:

-
    -
  • Caching: Reduces API calls by 50-80%
  • -
  • Rate Limiting: Prevents runaway costs
  • -
  • Budget Limits: Daily/monthly cost caps
  • -
  • Local Models: Zero marginal cost for air-gapped deployments
  • -
-

See Cost Management for optimization strategies.

-

Architecture Decision Record

-

The AI integration is documented in:

- -

Next Steps

-
    -
  1. Read Architecture to understand AI system design
  2. -
  3. Configure AI features in Configuration
  4. -
  5. Try Natural Language Config for your first AI-generated config
  6. -
  7. Explore AI Agents for automation workflows
  8. -
  9. Review Security Policies to understand access controls
  10. -
-
-

Version: 1.0 -Last Updated: 2025-01-08 -Status: Active

-

AI Integration Architecture

-

Overview

-

The provisioning platform’s AI system provides intelligent capabilities for configuration generation, troubleshooting, and automation. The -architecture consists of multiple layers designed for reliability, security, and performance.

-

Core Components - Production-Ready

-

1. AI Service (provisioning/platform/ai-service)

-

Status: ✅ Production-Ready (2,500+ lines Rust code)

-

The core AI service provides:

-
    -
  • Multi-provider LLM support (Anthropic Claude, OpenAI GPT-4, local models)
  • -
  • Streaming response support for real-time feedback
  • -
  • Request caching with LRU and semantic similarity
  • -
  • Rate limiting and cost control
  • -
  • Comprehensive error handling
  • -
  • HTTP REST API on port 8083
  • -
-

Supported Models:

-
    -
  • Claude Sonnet 4, Claude Opus 4 (Anthropic)
  • -
  • GPT-4 Turbo, GPT-4 (OpenAI)
  • -
  • Llama 3, Mistral (local/on-premise)
  • -
-

2. RAG System (Retrieval-Augmented Generation)

-

Status: ✅ Production-Ready (22/22 tests passing)

-

The RAG system enables AI to access and reason over platform documentation:

-
    -
  • Vector embeddings via SurrealDB vector store
  • -
  • Hybrid search: vector similarity + BM25 keyword search
  • -
  • Document chunking (code and markdown aware)
  • -
  • Relevance ranking and context selection
  • -
  • Semantic caching for repeated queries
  • -
-

Capabilities:

-
provisioning ai query "How do I set up Kubernetes?"
-provisioning ai template "Describe my infrastructure"
-
-

3. MCP Server (Model Context Protocol)

-

Status: ✅ Production-Ready

-

Provides Model Context Protocol integration:

-
    -
  • Standardized tool interface for LLMs
  • -
  • Complex workflow composition
  • -
  • Integration with external AI systems (Claude, other LLMs)
  • -
  • Tool calling for provisioning operations
  • -
-

4. CLI Integration

-

Status: ✅ Production-Ready

-

Interactive commands:

-
provisioning ai template --prompt "Describe infrastructure"
-provisioning ai query --prompt "Configuration question"
-provisioning ai chat    # Interactive mode
-
-

Configuration:

-
[ai]
-enabled = true
-provider = "anthropic"  # or "openai" or "local"
-model = "claude-sonnet-4"
-
-[ai.cache]
-enabled = true
-semantic_similarity = true
-ttl_seconds = 3600
-
-[ai.limits]
-max_tokens = 4096
-temperature = 0.7
-
-

Planned Components - Q2 2025

-

Autonomous Agents (typdialog-ag)

-

Status: 🔴 Planned

-

Self-directed agents for complex tasks:

-
    -
  • Multi-step workflow execution
  • -
  • Decision making and adaptation
  • -
  • Monitoring and self-healing recommendations
  • -
-

AI-Assisted Forms (typdialog-ai)

-

Status: 🔴 Planned

-

Real-time AI suggestions in configuration forms:

-
    -
  • Context-aware field recommendations
  • -
  • Validation error explanations
  • -
  • Auto-completion for infrastructure patterns
  • -
-

Advanced Features

-
    -
  • Fine-tuning capabilities for custom models
  • -
  • Autonomous workflow execution with human approval
  • -
  • Cedar authorization policies for AI actions
  • -
  • Custom knowledge bases per workspace
  • -
-

Architecture Diagram

-
┌─────────────────────────────────────────────────┐
-│  User Interface                                 │
-│  ├── CLI (provisioning ai ...)                  │
-│  ├── Web UI (typdialog)                         │
-│  └── MCP Client (Claude, etc.)                  │
-└──────────────┬──────────────────────────────────┘
-               ↓
-┌──────────────────────────────────────────────────┐
-│  AI Service (Port 8083)                          │
-│  ├── Request Router                             │
-│  ├── Cache Layer (LRU + Semantic)              │
-│  ├── Prompt Engineering                         │
-│  └── Response Streaming                         │
-└──────┬─────────────────┬─────────────────────────┘
-       ↓                 ↓
-┌─────────────┐  ┌──────────────────┐
-│ RAG System  │  │ LLM Provider     │
-│ SurrealDB   │  │ ├── Anthropic    │
-│ Vector DB   │  │ ├── OpenAI       │
-│ + BM25      │  │ └── Local Model  │
-└─────────────┘  └──────────────────┘
-       ↓                 ↓
-┌──────────────────────────────────────┐
-│  Cached Responses + Real Responses   │
-│  Streamed to User                    │
-└──────────────────────────────────────┘
-
-

Performance Characteristics

-

| | Metric | Value | | -| | –––– | —–– | | -| | Cold response (cache miss) | 2-5 seconds | | -| | Cached response | <500ms | | -| | Streaming start time | <1 second | | -| | AI service memory usage | ~200MB at rest | | -| | Cache size (configurable) | Up to 500MB | | -| | Vector DB (SurrealDB) | Included, auto-managed | |

-

Security Model

-

Cedar Authorization

-

All AI operations controlled by Cedar policies:

-
    -
  • User role-based access control
  • -
  • Operation-specific permissions
  • -
  • Complete audit logging
  • -
-

Secret Protection

-
    -
  • Secrets never sent to external LLMs
  • -
  • PII/sensitive data sanitized before API calls
  • -
  • Encryption at rest in local cache
  • -
  • HSM support for key storage
  • -
-

Local Model Support

-

Air-gapped deployments:

-
    -
  • On-premise LLM models (Llama 3, Mistral)
  • -
  • Zero external API calls
  • -
  • Full data privacy compliance
  • -
  • Ideal for classified environments
  • -
-

Configuration

-

See Configuration Guide for:

-
    -
  • LLM provider setup
  • -
  • Cache configuration
  • -
  • Cost limits and budgets
  • -
  • Security policies
  • -
- - -
-

Last Updated: 2025-01-13 -Status: ✅ Production-Ready (core system) -Test Coverage: 22/22 tests passing

-

Retrieval-Augmented Generation (RAG) System

-

Status: ✅ Production-Ready (SurrealDB 1.5.0+, 22/22 tests passing)

-

The RAG system enables the AI service to access, retrieve, and reason over infrastructure documentation, schemas, and past configurations. This allows -the AI to generate contextually accurate infrastructure configurations and provide intelligent troubleshooting advice grounded in actual platform -knowledge.

-

Architecture Overview

-

The RAG system consists of:

-
    -
  1. Document Store: SurrealDB vector store with semantic indexing
  2. -
  3. Hybrid Search: Vector similarity + BM25 keyword search
  4. -
  5. Chunk Management: Intelligent document chunking for code and markdown
  6. -
  7. Context Ranking: Relevance scoring for retrieved documents
  8. -
  9. Semantic Cache: Deduplication of repeated queries
  10. -
-

Core Components

-

1. Vector Embeddings

-

The system uses embedding models to convert documents into vector representations:

-
┌─────────────────────┐
-│ Document Source     │
-│ (Markdown, Code)    │
-└──────────┬──────────┘
-           │
-           ▼
-┌──────────────────────────────────┐
-│ Chunking & Tokenization          │
-│ - Code-aware splits              │
-│ - Markdown aware                 │
-│ - Preserves context              │
-└──────────┬───────────────────────┘
-           │
-           ▼
-┌──────────────────────────────────┐
-│ Embedding Model                  │
-│ (OpenAI Ada, Anthropic, Local)   │
-└──────────┬───────────────────────┘
-           │
-           ▼
-┌──────────────────────────────────┐
-│ Vector Storage (SurrealDB)       │
-│ - Vector index                   │
-│ - Metadata indexed               │
-│ - BM25 index for keywords        │
-└──────────────────────────────────┘
-
-

2. SurrealDB Integration

-

SurrealDB serves as the vector database and knowledge store:

-
# Configuration in provisioning/schemas/ai.ncl
-let {
-  rag = {
-    enabled = true,
-    db_url = "surreal://localhost:8000",
-    namespace = "provisioning",
-    database = "ai_rag",
-
-    # Collections for different document types
-    collections = {
-      documentation = {
-        chunking_strategy = "markdown",
-        chunk_size = 1024,
-        overlap = 256,
-      },
-      schemas = {
-        chunking_strategy = "code",
-        chunk_size = 512,
-        overlap = 128,
-      },
-      deployments = {
-        chunking_strategy = "json",
-        chunk_size = 2048,
-        overlap = 512,
-      },
-    },
-
-    # Embedding configuration
-    embedding = {
-      provider = "openai",  # or "anthropic", "local"
-      model = "text-embedding-3-small",
-      cache_vectors = true,
-    },
-
-    # Search configuration
-    search = {
-      hybrid_enabled = true,
-      vector_weight = 0.7,
-      keyword_weight = 0.3,
-      top_k = 5,  # Number of results to return
-      semantic_cache = true,
-    },
-  }
-}
-
-

3. Document Chunking

-

Intelligent chunking preserves context while managing token limits:

-

Markdown Chunking Strategy

-
Input Document: provisioning/docs/src/guides/from-scratch.md
-
-Chunks:
-  [1] Header + first section (up to 1024 tokens)
-  [2] Next logical section + overlap with [1]
-  [3] Code examples preserve as atomic units
-  [4] Continue with overlap...
-
-Each chunk includes:
-  - Original section heading (for context)
-  - Content
-  - Source file and line numbers
-  - Metadata (doctype, category, version)
-
-

Code Chunking Strategy

-
Input Document: provisioning/schemas/main.ncl
-
-Chunks:
-  [1] Top-level let binding + comments
-  [2] Function definition (atomic, preserves signature)
-  [3] Type definition (atomic, preserves interface)
-  [4] Implementation blocks with context overlap
-
-Each chunk preserves:
-  - Type signatures
-  - Function signatures
-  - Import statements needed for context
-  - Comments and docstrings
-
- -

The system implements dual search strategy for optimal results:

- -
// Find semantically similar documents
-async fn vector_search(query: &str, top_k: usize) -> Vec<Document> {
-    let embedding = embed(query).await?;
-
-    // L2 distance in SurrealDB
-    db.query("
-        SELECT *, vector::similarity::cosine(embedding, $embedding) AS score
-        FROM documents
-        WHERE embedding <~> $embedding
-        ORDER BY score DESC
-        LIMIT $top_k
-    ")
-    .bind(("embedding", embedding))
-    .bind(("top_k", top_k))
-    .await
-}
-
-

Use case: Semantic understanding of intent

-
    -
  • Query: “How to configure PostgreSQL”
  • -
  • Finds: Documents about database configuration, examples, schemas
  • -
- -
// Find documents with matching keywords
-async fn keyword_search(query: &str, top_k: usize) -> Vec<Document> {
-    // BM25 full-text search in SurrealDB
-    db.query("
-        SELECT *, search::bm25(.) AS score
-        FROM documents
-        WHERE text @@ $query
-        ORDER BY score DESC
-        LIMIT $top_k
-    ")
-    .bind(("query", query))
-    .bind(("top_k", top_k))
-    .await
-}
-
-

Use case: Exact term matching

-
    -
  • Query: “SurrealDB configuration”
  • -
  • Finds: Documents mentioning SurrealDB specifically
  • -
-

Hybrid Results

-
async fn hybrid_search(
-    query: &str,
-    vector_weight: f32,
-    keyword_weight: f32,
-    top_k: usize,
-) -> Vec<Document> {
-    let vector_results = vector_search(query, top_k * 2).await?;
-    let keyword_results = keyword_search(query, top_k * 2).await?;
-
-    let mut scored = HashMap::new();
-
-    // Score from vector search
-    for (i, doc) in vector_results.iter().enumerate() {
-        *scored.entry(doc.id).or_insert(0.0) +=
-            vector_weight * (1.0 - (i as f32 / top_k as f32));
-    }
-
-    // Score from keyword search
-    for (i, doc) in keyword_results.iter().enumerate() {
-        *scored.entry(doc.id).or_insert(0.0) +=
-            keyword_weight * (1.0 - (i as f32 / top_k as f32));
-    }
-
-    // Return top-k by combined score
-    let mut results: Vec<_> = scored.into_iter().collect();
-| results.sort_by( | a, b | b.1.partial_cmp(&a.1).unwrap()); |
-| Ok(results.into_iter().take(top_k).map( | (id, _) | ...).collect()) |
-}
-
-

Semantic Caching

-

Reduces API calls by caching embeddings of repeated queries:

-
struct SemanticCache {
-    queries: Arc<DashMap<Vec<f32>, CachedResult>>,
-    similarity_threshold: f32,
-}
-
-impl SemanticCache {
-    async fn get(&self, query: &str) -> Option<CachedResult> {
-        let embedding = embed(query).await?;
-
-        // Find cached query with similar embedding
-        // (cosine distance < threshold)
-        for entry in self.queries.iter() {
-            let distance = cosine_distance(&embedding, entry.key());
-            if distance < self.similarity_threshold {
-                return Some(entry.value().clone());
-            }
-        }
-        None
-    }
-
-    async fn insert(&self, query: &str, result: CachedResult) {
-        let embedding = embed(query).await?;
-        self.queries.insert(embedding, result);
-    }
-}
-
-

Benefits:

-
    -
  • 50-80% reduction in embedding API calls
  • -
  • Identical queries return in <10ms
  • -
  • Similar queries reuse cached context
  • -
-

Ingestion Workflow

-

Document Indexing

-
# Index all documentation
-provisioning ai index-docs provisioning/docs/src
-
-# Index schemas
-provisioning ai index-schemas provisioning/schemas
-
-# Index past deployments
-provisioning ai index-deployments workspaces/*/deployments
-
-# Watch directory for changes (development mode)
-provisioning ai watch docs provisioning/docs/src
-
-

Programmatic Indexing

-
// In ai-service on startup
-async fn initialize_rag() -> Result<()> {
-    let rag = RAGSystem::new(&config.rag).await?;
-
-    // Index documentation
-    let docs = load_markdown_docs("provisioning/docs/src")?;
-    for doc in docs {
-        rag.ingest_document(&doc).await?;
-    }
-
-    // Index schemas
-    let schemas = load_nickel_schemas("provisioning/schemas")?;
-    for schema in schemas {
-        rag.ingest_schema(&schema).await?;
-    }
-
-    Ok(())
-}
-
-

Usage Examples

-

Query the RAG System

-
# Search for context-aware information
-provisioning ai query "How do I configure PostgreSQL with encryption?"
-
-# Get configuration template
-provisioning ai template "Describe production Kubernetes on AWS"
-
-# Interactive mode
-provisioning ai chat
-> What are the best practices for database backup?
-
-

AI Service Integration

-
// AI service uses RAG to enhance generation
-async fn generate_config(user_request: &str) -> Result<String> {
-    // Retrieve relevant context
-    let context = rag.search(user_request, top_k=5).await?;
-
-    // Build prompt with context
-    let prompt = build_prompt_with_context(user_request, &context);
-
-    // Generate configuration
-    let config = llm.generate(&prompt).await?;
-
-    // Validate against schemas
-    validate_nickel_config(&config)?;
-
-    Ok(config)
-}
-
-

Form Assistance Integration

-
// In typdialog-ai (JavaScript/TypeScript)
-async function suggestFieldValue(fieldName, currentInput) {
-    // Query RAG for similar configurations
-    const context = await rag.search(
-        `Field: ${fieldName}, Input: ${currentInput}`,
-        { topK: 3, semantic: true }
-    );
-
-    // Generate suggestion using context
-    const suggestion = await ai.suggest({
-        field: fieldName,
-        input: currentInput,
-        context: context,
-    });
-
-    return suggestion;
-}
-
-

Performance Characteristics

-

| | Operation | Time | Cache Hit | | -| | ———– | —— | ———– | | -| | Vector embedding | 200-500ms | N/A | | -| | Vector search (cold) | 300-800ms | N/A | | -| | Keyword search | 50-200ms | N/A | | -| | Hybrid search | 500-1200ms | <100ms cached | | -| | Semantic cache hit | 10-50ms | Always | |

-

Typical query flow:

-
    -
  1. Embedding: 300ms
  2. -
  3. Vector search: 400ms
  4. -
  5. Keyword search: 100ms
  6. -
  7. Ranking: 50ms
  8. -
  9. Total: ~850ms (first call), <100ms (cached)
  10. -
-

Configuration

-

See Configuration Guide for detailed RAG setup:

-
    -
  • LLM provider for embeddings
  • -
  • SurrealDB connection
  • -
  • Chunking strategies
  • -
  • Search weights and limits
  • -
  • Cache settings and TTLs
  • -
-

Limitations and Considerations

-

Document Freshness

-
    -
  • RAG indexes static snapshots
  • -
  • Changes to documentation require re-indexing
  • -
  • Use watch mode during development
  • -
-

Token Limits

-
    -
  • Large documents chunked to fit LLM context
  • -
  • Some context may be lost in chunking
  • -
  • Adjustable chunk size vs. context trade-off
  • -
-

Embedding Quality

-
    -
  • Quality depends on embedding model
  • -
  • Domain-specific models perform better
  • -
  • Fine-tuning possible for specialized vocabularies
  • -
-

Monitoring and Debugging

-

Query Metrics

-
# View RAG search metrics
-provisioning ai metrics show rag
-
-# Analysis of search quality
-provisioning ai eval-rag --sample-queries 100
-
-

Debug Mode

-
# In provisioning/config/ai.toml
-[ai.rag.debug]
-enabled = true
-log_embeddings = true      # Log embedding vectors
-log_search_scores = true   # Log relevance scores
-log_context_used = true    # Log context retrieved
-
- - -
-

Last Updated: 2025-01-13 -Status: ✅ Production-Ready -Test Coverage: 22/22 tests passing -Database: SurrealDB 1.5.0+

-

Model Context Protocol (MCP) Integration

-

Status: ✅ Production-Ready (MCP 0.6.0+, integrated with Claude, compatible with all LLMs)

-

The MCP server provides standardized Model Context Protocol integration, allowing external LLMs (Claude, GPT-4, local models) to access provisioning -platform capabilities as tools. This enables complex multi-step workflows, tool composition, and integration with existing LLM applications.

-

Architecture Overview

-

The MCP integration follows the Model Context Protocol specification:

-
┌──────────────────────────────────────────────────────────────┐
-│ External LLM (Claude, GPT-4, etc.)                           │
-└────────────────────┬─────────────────────────────────────────┘
-                     │
-                     │ Tool Calls (JSON-RPC)
-                     ▼
-┌──────────────────────────────────────────────────────────────┐
-│ MCP Server (provisioning/platform/crates/mcp-server)         │
-│                                                              │
-│ ┌───────────────────────────────────────────────────────┐    │
-│ │ Tool Registry                                         │    │
-│ │ - generate_config(description, schema)                │    │
-│ │ - validate_config(config)                             │    │
-│ │ - search_docs(query)                                  │    │
-│ │ - troubleshoot_deployment(logs)                       │    │
-│ │ - get_schema(name)                                    │    │
-│ │ - check_compliance(config, policy)                    │    │
-│ └───────────────────────────────────────────────────────┘    │
-│                         │                                    │
-│                         ▼                                    │
-│ ┌───────────────────────────────────────────────────────┐    │
-│ │ Implementation Layer                                  │    │
-│ │ - AI Service client (ai-service port 8083)            │    │
-│ │ - Validator client                                    │    │
-│ │ - RAG client (SurrealDB)                              │    │
-│ │ - Schema loader                                       │    │
-│ └───────────────────────────────────────────────────────┘    │
-└──────────────────────────────────────────────────────────────┘
-
-

MCP Server Launch

-

The MCP server is started as a stdio-based service:

-
# Start MCP server (stdio transport)
-provisioning-mcp-server --config /etc/provisioning/ai.toml
-
-# With debug logging
-RUST_LOG=debug provisioning-mcp-server --config /etc/provisioning/ai.toml
-
-# In Claude Desktop configuration
-~/.claude/claude_desktop_config.json:
-{
-  "mcpServers": {
-    "provisioning": {
-      "command": "provisioning-mcp-server",
-      "args": ["--config", "/etc/provisioning/ai.toml"],
-      "env": {
-        "PROVISIONING_TOKEN": "your-auth-token"
-      }
-    }
-  }
-}
-
-

Available Tools

-

1. Config Generation

-

Tool: generate_config

-

Generate infrastructure configuration from natural language description.

-
{
-  "name": "generate_config",
-  "description": "Generate a Nickel infrastructure configuration from a natural language description",
-  "inputSchema": {
-    "type": "object",
-    "properties": {
-      "description": {
-        "type": "string",
-        "description": "Natural language description of desired infrastructure"
-      },
-      "schema": {
-        "type": "string",
-        "description": "Target schema name (e.g., 'database', 'kubernetes', 'network'). Optional."
-      },
-      "format": {
-        "type": "string",
-        "enum": ["nickel", "toml"],
-        "description": "Output format (default: nickel)"
-      }
-    },
-    "required": ["description"]
-  }
-}
-
-

Example Usage:

-
# Via MCP client
-mcp-client provisioning generate_config \
-  --description "Production PostgreSQL cluster with encryption and daily backups" \
-  --schema database
-
-# Claude desktop prompt:
-# @provisioning: Generate a production PostgreSQL setup with automated backups
-
-

Response:

-
{
-  database = {
-    engine = "postgresql",
-    version = "15.0",
-
-    instance = {
-      instance_class = "db.r6g.xlarge",
-      allocated_storage_gb = 100,
-      iops = 3000,
-    },
-
-    security = {
-      encryption_enabled = true,
-      encryption_key_id = "kms://prod-db-key",
-      tls_enabled = true,
-      tls_version = "1.3",
-    },
-
-    backup = {
-      enabled = true,
-      retention_days = 30,
-      preferred_window = "03:00-04:00",
-      copy_to_region = "us-west-2",
-    },
-
-    monitoring = {
-      enhanced_monitoring_enabled = true,
-      monitoring_interval_seconds = 60,
-      log_exports = ["postgresql"],
-    },
-  }
-}
-
-

2. Config Validation

-

Tool: validate_config

-

Validate a Nickel configuration against schemas and policies.

-
{
-  "name": "validate_config",
-  "description": "Validate a Nickel configuration file",
-  "inputSchema": {
-    "type": "object",
-    "properties": {
-      "config": {
-        "type": "string",
-        "description": "Nickel configuration content or file path"
-      },
-      "schema": {
-        "type": "string",
-        "description": "Schema name to validate against (optional)"
-      },
-      "strict": {
-        "type": "boolean",
-        "description": "Enable strict validation (default: true)"
-      }
-    },
-    "required": ["config"]
-  }
-}
-
-

Example Usage:

-
# Validate configuration
-mcp-client provisioning validate_config \
-  --config "$(cat workspaces/prod/database.ncl)"
-
-# With specific schema
-mcp-client provisioning validate_config \
-  --config "workspaces/prod/kubernetes.ncl" \
-  --schema kubernetes
-
-

Response:

-
{
-  "valid": true,
-  "errors": [],
-  "warnings": [
-    "Consider enabling automated backups for production use"
-  ],
-  "metadata": {
-    "schema": "kubernetes",
-    "version": "1.28",
-    "validated_at": "2025-01-13T10:45:30Z"
-  }
-}
-
- -

Tool: search_docs

-

Search infrastructure documentation using RAG system.

-
{
-  "name": "search_docs",
-  "description": "Search provisioning documentation for information",
-  "inputSchema": {
-    "type": "object",
-    "properties": {
-      "query": {
-        "type": "string",
-        "description": "Search query (natural language)"
-      },
-      "top_k": {
-        "type": "integer",
-        "description": "Number of results (default: 5)"
-      },
-      "doc_type": {
-        "type": "string",
-        "enum": ["guide", "schema", "example", "troubleshooting"],
-        "description": "Filter by document type (optional)"
-      }
-    },
-    "required": ["query"]
-  }
-}
-
-

Example Usage:

-
# Search documentation
-mcp-client provisioning search_docs \
-  --query "How do I configure PostgreSQL with replication?"
-
-# Get examples
-mcp-client provisioning search_docs \
-  --query "Kubernetes networking" \
-  --doc_type example \
-  --top_k 3
-
-

Response:

-
{
-  "results": [
-    {
-      "source": "provisioning/docs/src/guides/database-replication.md",
-      "excerpt": "PostgreSQL logical replication enables streaming of changes...",
-      "relevance": 0.94,
-      "section": "Setup Logical Replication"
-    },
-    {
-      "source": "provisioning/schemas/database.ncl",
-      "excerpt": "replication = { enabled = true, mode = \"logical\", ... }",
-      "relevance": 0.87,
-      "section": "Replication Configuration"
-    }
-  ]
-}
-
-

4. Deployment Troubleshooting

-

Tool: troubleshoot_deployment

-

Analyze deployment failures and suggest fixes.

-
{
-  "name": "troubleshoot_deployment",
-  "description": "Analyze deployment logs and suggest fixes",
-  "inputSchema": {
-    "type": "object",
-    "properties": {
-      "deployment_id": {
-        "type": "string",
-        "description": "Deployment ID (e.g., 'deploy-2025-01-13-001')"
-      },
-      "logs": {
-        "type": "string",
-        "description": "Deployment logs (optional, if deployment_id not provided)"
-      },
-      "error_analysis_depth": {
-        "type": "string",
-        "enum": ["shallow", "deep"],
-        "description": "Analysis depth (default: deep)"
-      }
-    }
-  }
-}
-
-

Example Usage:

-
# Troubleshoot recent deployment
-mcp-client provisioning troubleshoot_deployment \
-  --deployment_id "deploy-2025-01-13-001"
-
-# With custom logs
-mcp-client provisioning troubleshoot_deployment \
-| --logs "$(journalctl -u provisioning --no-pager | tail -100)" |
-
-

Response:

-
{
-  "status": "failure",
-  "root_cause": "Database connection timeout during migration phase",
-  "analysis": {
-    "phase": "database_migration",
-    "error_type": "connectivity",
-    "confidence": 0.95
-  },
-  "suggestions": [
-    "Verify database security group allows inbound on port 5432",
-    "Check database instance status (may be rebooting)",
-    "Increase connection timeout in configuration"
-  ],
-  "corrected_config": "...generated Nickel config with fixes...",
-  "similar_issues": [
-    "[https://docs/troubleshooting/database-connectivity.md"](https://docs/troubleshooting/database-connectivity.md")
-  ]
-}
-
-

5. Get Schema

-

Tool: get_schema

-

Retrieve schema definition with examples.

-
{
-  "name": "get_schema",
-  "description": "Get a provisioning schema definition",
-  "inputSchema": {
-    "type": "object",
-    "properties": {
-      "schema_name": {
-        "type": "string",
-        "description": "Schema name (e.g., 'database', 'kubernetes')"
-      },
-      "format": {
-        "type": "string",
-        "enum": ["schema", "example", "documentation"],
-        "description": "Response format (default: schema)"
-      }
-    },
-    "required": ["schema_name"]
-  }
-}
-
-

Example Usage:

-
# Get schema definition
-mcp-client provisioning get_schema --schema_name database
-
-# Get example configuration
-mcp-client provisioning get_schema \
-  --schema_name kubernetes \
-  --format example
-
-

6. Compliance Check

-

Tool: check_compliance

-

Verify configuration against compliance policies (Cedar).

-
{
-  "name": "check_compliance",
-  "description": "Check configuration against compliance policies",
-  "inputSchema": {
-    "type": "object",
-    "properties": {
-      "config": {
-        "type": "string",
-        "description": "Configuration to check"
-      },
-      "policy_set": {
-        "type": "string",
-        "description": "Policy set to check against (e.g., 'pci-dss', 'hipaa', 'sox')"
-      }
-    },
-    "required": ["config", "policy_set"]
-  }
-}
-
-

Example Usage:

-
# Check against PCI-DSS
-mcp-client provisioning check_compliance \
-  --config "$(cat workspaces/prod/database.ncl)" \
-  --policy_set pci-dss
-
-

Integration Examples

-

Claude Desktop (Most Common)

-
~/.claude/claude_desktop_config.json:
-{
-  "mcpServers": {
-    "provisioning": {
-      "command": "provisioning-mcp-server",
-      "args": ["--config", "/etc/provisioning/ai.toml"],
-      "env": {
-        "PROVISIONING_API_KEY": "sk-...",
-        "PROVISIONING_BASE_URL": "[http://localhost:8083"](http://localhost:8083")
-      }
-    }
-  }
-}
-
-

Usage in Claude:

-
User: I need a production Kubernetes cluster in AWS with automatic scaling
-
-Claude can now use provisioning tools:
-I'll help you create a production Kubernetes cluster. Let me:
-1. Search the documentation for best practices
-2. Generate a configuration template
-3. Validate it against your policies
-4. Provide the final configuration
-
-

OpenAI Function Calling

-
import openai
-
-tools = [
-    {
-        "type": "function",
-        "function": {
-            "name": "generate_config",
-            "description": "Generate infrastructure configuration",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "description": {
-                        "type": "string",
-                        "description": "Infrastructure description"
-                    }
-                },
-                "required": ["description"]
-            }
-        }
-    }
-]
-
-response = openai.ChatCompletion.create(
-    model="gpt-4",
-    messages=[{"role": "user", "content": "Create a PostgreSQL database"}],
-    tools=tools
-)
-
-

Local LLM Integration (Ollama)

-
# Start Ollama with provisioning MCP
-OLLAMA_MCP_SERVERS=provisioning://localhost:3000 \
-  ollama serve
-
-# Use with llama2 or mistral
-curl [http://localhost:11434/api/generate](http://localhost:11434/api/generate) \
-  -d '{
-    "model": "mistral",
-    "prompt": "Create a Kubernetes cluster",
-    "tools": [{"type": "mcp", "server": "provisioning"}]
-  }'
-
-

Error Handling

-

Tools return consistent error responses:

-
{
-  "error": {
-    "code": "VALIDATION_ERROR",
-    "message": "Configuration has 3 validation errors",
-    "details": [
-      {
-        "field": "database.version",
-        "message": "PostgreSQL version 9.6 is deprecated",
-        "severity": "error"
-      },
-      {
-        "field": "backup.retention_days",
-        "message": "Recommended minimum is 30 days for production",
-        "severity": "warning"
-      }
-    ]
-  }
-}
-
-

Performance

-

| | Operation | Latency | Notes | | -| | ———– | ——— | —–– | | -| | generate_config | 2-5s | Depends on LLM and config complexity | | -| | validate_config | 500-1000ms | Parallel schema validation | | -| | search_docs | 300-800ms | RAG hybrid search | | -| | troubleshoot | 3-8s | Depends on log size and analysis depth | | -| | get_schema | 100-300ms | Cached schema retrieval | | -| | check_compliance | 500-2000ms | Policy evaluation | |

-

Configuration

-

See Configuration Guide for MCP-specific settings:

-
    -
  • MCP server port and binding
  • -
  • Tool registry customization
  • -
  • Rate limiting for tool calls
  • -
  • Access control (Cedar policies)
  • -
-

Security

-

Authentication

-
    -
  • Tools require valid provisioning API token
  • -
  • Token scoped to user’s workspace
  • -
  • All tool calls authenticated and logged
  • -
-

Authorization

-
    -
  • Cedar policies control which tools user can call
  • -
  • Example: allow(principal, action, resource) when role == "admin"
  • -
  • Detailed audit trail of all tool invocations
  • -
-

Data Protection

-
    -
  • Secrets never passed through MCP
  • -
  • Configuration sanitized before analysis
  • -
  • PII removed from logs sent to external LLMs
  • -
-

Monitoring and Debugging

-
# Monitor MCP server
-provisioning admin mcp status
-
-# View MCP tool calls
-provisioning admin logs --filter "mcp_tools" --tail 100
-
-# Debug tool response
-RUST_LOG=provisioning::mcp=debug provisioning-mcp-server
-
- - -
-

Last Updated: 2025-01-13 -Status: ✅ Production-Ready -MCP Version: 0.6.0+ -Supported LLMs: Claude, GPT-4, Llama, Mistral, all MCP-compatible models

-

AI System Configuration Guide

-

Status: ✅ Production-Ready (Configuration system)

-

Complete setup guide for AI features in the provisioning platform. This guide covers LLM provider configuration, feature enablement, cache setup, cost -controls, and security settings.

-

Quick Start

-

Minimal Configuration

-
# provisioning/config/ai.toml
-[ai]
-enabled = true
-provider = "anthropic"  # or "openai" or "local"
-model = "claude-sonnet-4"
-api_key = "sk-ant-..."  # Set via PROVISIONING_AI_API_KEY env var
-
-[ai.cache]
-enabled = true
-
-[ai.limits]
-max_tokens = 4096
-temperature = 0.7
-
-

Initialize Configuration

-
# Generate default configuration
-provisioning config init ai
-
-# Edit configuration
-provisioning config edit ai
-
-# Validate configuration
-provisioning config validate ai
-
-# Show current configuration
-provisioning config show ai
-
-

Provider Configuration

-

Anthropic Claude

-
[ai]
-enabled = true
-provider = "anthropic"
-model = "claude-sonnet-4"  # or "claude-opus-4", "claude-haiku-4"
-api_key = "${PROVISIONING_AI_API_KEY}"
-api_base = "[https://api.anthropic.com"](https://api.anthropic.com")
-
-# Request parameters
-[ai.request]
-max_tokens = 4096
-temperature = 0.7
-top_p = 0.95
-top_k = 40
-
-# Supported models
-# - claude-opus-4: Most capable, for complex reasoning ($15/MTok input, $45/MTok output)
-# - claude-sonnet-4: Balanced (recommended), ($3/MTok input, $15/MTok output)
-# - claude-haiku-4: Fast, for simple tasks ($0.80/MTok input, $4/MTok output)
-
-

OpenAI GPT-4

-
[ai]
-enabled = true
-provider = "openai"
-model = "gpt-4-turbo"  # or "gpt-4", "gpt-4o"
-api_key = "${OPENAI_API_KEY}"
-api_base = "[https://api.openai.com/v1"](https://api.openai.com/v1")
-
-[ai.request]
-max_tokens = 4096
-temperature = 0.7
-top_p = 0.95
-
-# Supported models
-# - gpt-4: Most capable ($0.03/1K input, $0.06/1K output)
-# - gpt-4-turbo: Better at code ($0.01/1K input, $0.03/1K output)
-# - gpt-4o: Latest, multi-modal ($5/MTok input, $15/MTok output)
-
-

Local Models

-
[ai]
-enabled = true
-provider = "local"
-model = "llama2-70b"  # or "mistral", "neural-chat"
-api_base = "[http://localhost:8000"](http://localhost:8000")  # Local Ollama or LM Studio
-
-# Local model support
-# - Ollama: docker run -d -v ollama:/root/.ollama -p 11434:11434 ollama/ollama
-# - LM Studio: GUI app with API
-# - vLLM: High-throughput serving
-# - llama.cpp: CPU inference
-
-[ai.local]
-gpu_enabled = true
-gpu_memory_gb = 24
-max_batch_size = 4
-
-

Feature Configuration

-

Enable Specific Features

-
[ai.features]
-# Core features (production-ready)
-rag_search = true           # Retrieve-Augmented Generation
-config_generation = true    # Generate Nickel from natural language
-mcp_server = true           # Model Context Protocol server
-troubleshooting = true      # AI-assisted debugging
-
-# Form assistance (planned Q2 2025)
-form_assistance = false     # AI suggestions in forms
-form_explanations = false   # AI explains validation errors
-
-# Agents (planned Q2 2025)
-autonomous_agents = false   # AI agents for workflows
-agent_learning = false      # Agents learn from deployments
-
-# Advanced features
-fine_tuning = false        # Fine-tune models for domain
-knowledge_base = false     # Custom knowledge base per workspace
-
-

Cache Configuration

-

Cache Strategy

-
[ai.cache]
-enabled = true
-cache_type = "memory"  # or "redis", "disk"
-ttl_seconds = 3600     # Cache entry lifetime
-
-# Memory cache (recommended for single server)
-[ai.cache.memory]
-max_size_mb = 500
-eviction_policy = "lru"  # Least Recently Used
-
-# Redis cache (recommended for distributed)
-[ai.cache.redis]
-url = "redis://localhost:6379"
-db = 0
-password = "${REDIS_PASSWORD}"
-ttl_seconds = 3600
-
-# Disk cache (recommended for persistent caching)
-[ai.cache.disk]
-path = "/var/cache/provisioning/ai"
-max_size_mb = 5000
-
-# Semantic caching (for RAG)
-[ai.cache.semantic]
-enabled = true
-similarity_threshold = 0.95  # Cache hit if query similarity > 0.95
-cache_embeddings = true       # Cache embedding vectors
-
-

Cache Metrics

-
# Monitor cache performance
-provisioning admin cache stats ai
-
-# Clear cache
-provisioning admin cache clear ai
-
-# Analyze cache efficiency
-provisioning admin cache analyze ai --hours 24
-
-

Rate Limiting and Cost Control

-

Rate Limits

-
[ai.limits]
-# Tokens per request
-max_tokens = 4096
-max_input_tokens = 8192
-max_output_tokens = 4096
-
-# Requests per minute/hour
-rpm_limit = 60              # Requests per minute
-rpm_burst = 100             # Allow bursts up to 100 RPM
-
-# Daily cost limit
-daily_cost_limit_usd = 100
-warn_at_percent = 80        # Warn when at 80% of daily limit
-stop_at_percent = 95        # Stop accepting requests at 95%
-
-# Token usage tracking
-track_token_usage = true
-track_cost_per_request = true
-
-

Cost Budgeting

-
[ai.budget]
-enabled = true
-monthly_limit_usd = 1000
-
-# Budget alerts
-alert_at_percent = [50, 75, 90]
-alert_email = "ops@company.com"
-alert_slack = "[https://hooks.slack.com/services/..."](https://hooks.slack.com/services/...")
-
-# Cost by provider
-[ai.budget.providers]
-anthropic_limit = 500
-openai_limit = 300
-local_limit = 0  # Free (run locally)
-
-

Track Costs

-
# View cost metrics
-provisioning admin costs show ai --period month
-
-# Forecast cost
-provisioning admin costs forecast ai --days 30
-
-# Analyze cost by feature
-provisioning admin costs analyze ai --by feature
-
-# Export cost report
-provisioning admin costs export ai --format csv --output costs.csv
-
-

Security Configuration

-

Authentication

-
[ai.auth]
-# API key from environment variable
-api_key = "${PROVISIONING_AI_API_KEY}"
-
-# Or from secure store
-api_key_vault = "secrets/ai-api-key"
-
-# Token rotation
-rotate_key_days = 90
-rotation_alert_days = 7
-
-# Request signing (for cloud providers)
-sign_requests = true
-signing_method = "hmac-sha256"
-
-

Authorization (Cedar)

-
[ai.authorization]
-enabled = true
-policy_file = "provisioning/policies/ai-policies.cedar"
-
-# Example policies:
-# allow(principal, action, resource) when principal.role == "admin"
-# allow(principal == ?principal, action == "ai_generate_config", resource)
-#   when principal.workspace == resource.workspace
-
-

Data Protection

-
[ai.security]
-# Sanitize data before sending to external LLM
-sanitize_pii = true
-sanitize_secrets = true
-redact_patterns = [
-  "(?i)password\\s*[:=]\\s*[^\\s]+",  # Passwords
-  "(?i)api[_-]?key\\s*[:=]\\s*[^\\s]+", # API keys
-  "(?i)secret\\s*[:=]\\s*[^\\s]+",     # Secrets
-]
-
-# Encryption
-encryption_enabled = true
-encryption_algorithm = "aes-256-gcm"
-key_derivation = "argon2id"
-
-# Local-only mode (never send to external LLM)
-local_only = false  # Set true for air-gapped deployments
-
-

RAG Configuration

-

Vector Store Setup

-
[ai.rag]
-enabled = true
-
-# SurrealDB backend
-[ai.rag.database]
-url = "surreal://localhost:8000"
-username = "root"
-password = "${SURREALDB_PASSWORD}"
-namespace = "provisioning"
-database = "ai_rag"
-
-# Embedding model
-[ai.rag.embedding]
-provider = "openai"  # or "anthropic", "local"
-model = "text-embedding-3-small"
-batch_size = 100
-cache_embeddings = true
-
-# Search configuration
-[ai.rag.search]
-hybrid_enabled = true
-vector_weight = 0.7      # Weight for vector search
-keyword_weight = 0.3     # Weight for BM25 search
-top_k = 5                # Number of results to return
-rerank_enabled = false   # Use cross-encoder to rerank results
-
-# Chunking strategy
-[ai.rag.chunking]
-markdown_chunk_size = 1024
-markdown_overlap = 256
-code_chunk_size = 512
-code_overlap = 128
-
-

Index Management

-
# Create indexes
-provisioning ai index create rag
-
-# Rebuild indexes
-provisioning ai index rebuild rag
-
-# Show index status
-provisioning ai index status rag
-
-# Remove old indexes
-provisioning ai index cleanup rag --older-than 30days
-
-

MCP Server Configuration

-

MCP Server Setup

-
[ai.mcp]
-enabled = true
-port = 3000
-host = "127.0.0.1"  # Change to 0.0.0.0 for network access
-
-# Tool registry
-[ai.mcp.tools]
-generate_config = true
-validate_config = true
-search_docs = true
-troubleshoot_deployment = true
-get_schema = true
-check_compliance = true
-
-# Rate limiting for tool calls
-rpm_limit = 30
-burst_limit = 50
-
-# Tool request timeout
-timeout_seconds = 30
-
-

MCP Client Configuration

-
~/.claude/claude_desktop_config.json:
-{
-  "mcpServers": {
-    "provisioning": {
-      "command": "provisioning-mcp-server",
-      "args": ["--config", "/etc/provisioning/ai.toml"],
-      "env": {
-        "PROVISIONING_API_KEY": "sk-ant-...",
-        "RUST_LOG": "info"
-      }
-    }
-  }
-}
-
-

Logging and Observability

-

Logging Configuration

-
[ai.logging]
-level = "info"  # or "debug", "warn", "error"
-format = "json"  # or "text"
-output = "stdout"  # or "file"
-
-# Log file
-[ai.logging.file]
-path = "/var/log/provisioning/ai.log"
-max_size_mb = 100
-max_backups = 10
-retention_days = 30
-
-# Log filters
-[ai.logging.filters]
-log_requests = true
-log_responses = false  # Don't log full responses (verbose)
-log_token_usage = true
-log_costs = true
-
-

Metrics and Monitoring

-
# View AI service metrics
-provisioning admin metrics show ai
-
-# Prometheus metrics endpoint
-curl [http://localhost:8083/metrics](http://localhost:8083/metrics)
-
-# Key metrics:
-# - ai_requests_total: Total requests by provider/model
-# - ai_request_duration_seconds: Request latency
-# - ai_token_usage_total: Token consumption by provider
-# - ai_cost_total: Cumulative cost by provider
-# - ai_cache_hits: Cache hit rate
-# - ai_errors_total: Errors by type
-
-

Health Checks

-

Configuration Validation

-
# Validate configuration syntax
-provisioning config validate ai
-
-# Test provider connectivity
-provisioning ai test provider anthropic
-
-# Test RAG system
-provisioning ai test rag
-
-# Test MCP server
-provisioning ai test mcp
-
-# Full health check
-provisioning ai health-check
-
-

Environment Variables

-

Common Settings

-
# Provider configuration
-export PROVISIONING_AI_PROVIDER="anthropic"
-export PROVISIONING_AI_MODEL="claude-sonnet-4"
-export PROVISIONING_AI_API_KEY="sk-ant-..."
-
-# Feature flags
-export PROVISIONING_AI_ENABLED="true"
-export PROVISIONING_AI_CACHE_ENABLED="true"
-export PROVISIONING_AI_RAG_ENABLED="true"
-
-# Cost control
-export PROVISIONING_AI_DAILY_LIMIT_USD="100"
-export PROVISIONING_AI_RPM_LIMIT="60"
-
-# Security
-export PROVISIONING_AI_SANITIZE_PII="true"
-export PROVISIONING_AI_LOCAL_ONLY="false"
-
-# Logging
-export RUST_LOG="provisioning::ai=info"
-
-

Troubleshooting Configuration

-

Common Issues

-

Issue: API key not recognized

-
# Check environment variable is set
-echo $PROVISIONING_AI_API_KEY
-
-# Test connectivity
-provisioning ai test provider anthropic
-
-# Verify key format (should start with sk-ant- or sk-)
-| provisioning config show ai | grep api_key |
-
-

Issue: Cache not working

-
# Check cache status
-provisioning admin cache stats ai
-
-# Clear cache and restart
-provisioning admin cache clear ai
-provisioning service restart ai-service
-
-# Enable cache debugging
-RUST_LOG=provisioning::cache=debug provisioning-ai-service
-
-

Issue: RAG search not finding results

-
# Rebuild RAG indexes
-provisioning ai index rebuild rag
-
-# Test search
-provisioning ai query "test query"
-
-# Check index status
-provisioning ai index status rag
-
-

Upgrading Configuration

-

Backward Compatibility

-

New AI versions automatically migrate old configurations:

-
# Check configuration version
-provisioning config version ai
-
-# Migrate configuration to latest version
-provisioning config migrate ai --auto
-
-# Backup before migration
-provisioning config backup ai
-
-

Production Deployment

- -
[ai]
-enabled = true
-provider = "anthropic"
-model = "claude-sonnet-4"
-api_key = "${PROVISIONING_AI_API_KEY}"
-
-[ai.features]
-rag_search = true
-config_generation = true
-mcp_server = true
-troubleshooting = true
-
-[ai.cache]
-enabled = true
-cache_type = "redis"
-ttl_seconds = 3600
-
-[ai.limits]
-rpm_limit = 60
-daily_cost_limit_usd = 1000
-max_tokens = 4096
-
-[ai.security]
-sanitize_pii = true
-sanitize_secrets = true
-encryption_enabled = true
-
-[ai.logging]
-level = "warn"  # Less verbose in production
-format = "json"
-output = "file"
-
-[ai.rag.database]
-url = "surreal://surrealdb-cluster:8000"
-
- - -
-

Last Updated: 2025-01-13 -Status: ✅ Production-Ready -Versions Supported: v1.0+

-

AI Security Policies and Cedar Authorization

-

Status: ✅ Production-Ready (Cedar integration, policy enforcement)

-

Comprehensive documentation of security controls, authorization policies, and data protection mechanisms for the AI system. All AI operations are -controlled through Cedar policies and include strict secret isolation.

-

Security Model Overview

-

Defense in Depth

-
┌─────────────────────────────────────────┐
-│ User Request to AI                      │
-└──────────────┬──────────────────────────┘
-               ↓
-┌─────────────────────────────────────────┐
-│ Layer 1: Authentication                 │
-│ - Verify user identity                  │
-│ - Validate API token/credentials        │
-└──────────────┬──────────────────────────┘
-               ↓
-┌─────────────────────────────────────────┐
-│ Layer 2: Authorization (Cedar)          │
-│ - Check if user can access AI features  │
-│ - Verify workspace permissions          │
-│ - Check role-based access               │
-└──────────────┬──────────────────────────┘
-               ↓
-┌─────────────────────────────────────────┐
-│ Layer 3: Data Sanitization              │
-│ - Remove secrets from data              │
-│ - Redact PII                            │
-│ - Filter sensitive information          │
-└──────────────┬──────────────────────────┘
-               ↓
-┌─────────────────────────────────────────┐
-│ Layer 4: Request Validation             │
-│ - Check request parameters              │
-│ - Verify resource constraints           │
-│ - Apply rate limits                     │
-└──────────────┬──────────────────────────┘
-               ↓
-┌─────────────────────────────────────────┐
-│ Layer 5: External API Call              │
-│ - Only if all previous checks pass      │
-│ - Encrypted TLS connection              │
-│ - No secrets in request                 │
-└──────────────┬──────────────────────────┘
-               ↓
-┌─────────────────────────────────────────┐
-│ Layer 6: Audit Logging                  │
-│ - Log all AI operations                 │
-│ - Capture user, time, action            │
-│ - Store in tamper-proof log             │
-└─────────────────────────────────────────┘
-
-

Cedar Policies

-

Policy Engine Setup

-
// File: provisioning/policies/ai-policies.cedar
-
-// Core principle: Least privilege
-// All actions denied by default unless explicitly allowed
-
-// Admin users can access all AI features
-permit(
-  principal == ?principal,
-  action == Action::"ai_generate_config",
-  resource == ?resource
-)
-when {
-  principal.role == "admin"
-};
-
-// Developers can use AI within their workspace
-permit(
-  principal == ?principal,
-  action in [
-    Action::"ai_query",
-    Action::"ai_generate_config",
-    Action::"ai_troubleshoot"
-  ],
-  resource == ?resource
-)
-when {
-  principal.role in ["developer", "senior_engineer"]
-  && principal.workspace == resource.workspace
-};
-
-// Operators can access troubleshooting and queries
-permit(
-  principal == ?principal,
-  action in [
-    Action::"ai_query",
-    Action::"ai_troubleshoot"
-  ],
-  resource == ?resource
-)
-when {
-  principal.role in ["operator", "devops"]
-};
-
-// Form assistance enabled for all authenticated users
-permit(
-  principal == ?principal,
-  action == Action::"ai_form_assistance",
-  resource == ?resource
-)
-when {
-  principal.authenticated == true
-};
-
-// Agents (when available) require explicit approval
-permit(
-  principal == ?principal,
-  action == Action::"ai_agent_execute",
-  resource == ?resource
-)
-when {
-  principal.role == "automation_admin"
-  && resource.requires_approval == true
-};
-
-// MCP tool access - restrictive by default
-permit(
-  principal == ?principal,
-  action == Action::"mcp_tool_call",
-  resource == ?resource
-)
-when {
-  principal.role == "admin"
-|  |  | (principal.role == "developer" && resource.tool in ["generate_config", "validate_config"]) |
-};
-
-// Cost control policies
-permit(
-  principal == ?principal,
-  action == Action::"ai_generate_config",
-  resource == ?resource
-)
-when {
-  // User must have remaining budget
-  principal.ai_budget_remaining_usd > resource.estimated_cost_usd
-  // Workspace must be under budget
-  && resource.workspace.ai_budget_remaining_usd > resource.estimated_cost_usd
-};
-
-

Policy Best Practices

-
    -
  1. Explicit Allow: Only allow specific actions, deny by default
  2. -
  3. Workspace Isolation: Users can’t access AI in other workspaces
  4. -
  5. Role-Based: Use consistent role definitions
  6. -
  7. Cost-Aware: Check budgets before operations
  8. -
  9. Audit Trail: Log all policy decisions
  10. -
-

Data Sanitization

-

Automatic PII Removal

-

Before sending data to external LLMs, the system removes:

-
Patterns Removed:
-├─ Passwords: password="...", pwd=..., etc.
-├─ API Keys: api_key=..., api-key=..., etc.
-├─ Tokens: token=..., bearer=..., etc.
-├─ Email addresses: user@example.com (unless necessary for context)
-├─ Phone numbers: +1-555-0123 patterns
-├─ Credit cards: 4111-1111-1111-1111 patterns
-├─ SSH keys: -----BEGIN RSA PRIVATE KEY-----...
-└─ AWS/GCP/Azure: AKIA2..., AIza..., etc.
-
-

Configuration

-
[ai.security]
-sanitize_pii = true
-sanitize_secrets = true
-
-# Custom redaction patterns
-redact_patterns = [
-  # Database passwords
-  "(?i)db[_-]?password\\s*[:=]\\s*'?[^'\\n]+'?",
-  # Generic secrets
-  "(?i)secret\\s*[:=]\\s*'?[^'\\n]+'?",
-  # API endpoints that shouldn't be logged
-  "https?://api[.-]secret\\..+",
-]
-
-# Exceptions (patterns NOT to redact)
-preserve_patterns = [
-  # Preserve example.com domain for docs
-  "example\\.com",
-  # Preserve placeholder emails
-  "user@example\\.com",
-]
-
-

Example Sanitization

-

Before:

-
Error configuring database:
-connection_string: postgresql://dbadmin:MySecurePassword123@prod-db.us-east-1.rds.amazonaws.com:5432/app
-api_key: sk-ant-abc123def456
-vault_token: hvs.CAESIyg7...
-
-

After Sanitization:

-
Error configuring database:
-connection_string: postgresql://dbadmin:[REDACTED]@prod-db.us-east-1.rds.amazonaws.com:5432/app
-api_key: [REDACTED]
-vault_token: [REDACTED]
-
-

Secret Isolation

-

Never Access Secrets Directly

-

AI cannot directly access secrets. Instead:

-
User wants: "Configure PostgreSQL with encrypted backups"
-  ↓
-AI generates: Configuration schema with placeholders
-  ↓
-User inserts: Actual secret values (connection strings, passwords)
-  ↓
-System encrypts: Secrets remain encrypted at rest
-  ↓
-Deployment: Uses secrets from secure store (Vault, AWS Secrets Manager)
-
-

Secret Protection Rules

-
    -
  1. No Direct Access: AI never reads from Vault/Secrets Manager
  2. -
  3. Never in Logs: Secrets never logged or stored in cache
  4. -
  5. Sanitization: All secrets redacted before sending to LLM
  6. -
  7. Encryption: Secrets encrypted at rest and in transit
  8. -
  9. Audit Trail: All access to secrets logged
  10. -
  11. TTL: Temporary secrets auto-expire
  12. -
-

Local Models Support

-

Air-Gapped Deployments

-

For environments requiring zero external API calls:

-
# Deploy local Ollama with provisioning support
-docker run -d \
-  --name provisioning-ai \
-  -p 11434:11434 \
-  -v ollama:/root/.ollama \
-  -e OLLAMA_HOST=0.0.0.0:11434 \
-  ollama/ollama
-
-# Pull model
-ollama pull mistral
-ollama pull llama2-70b
-
-# Configure provisioning to use local model
-provisioning config edit ai
-
-[ai]
-provider = "local"
-model = "mistral"
-api_base = "[http://localhost:11434"](http://localhost:11434")
-
-

Benefits

-
    -
  • ✅ Zero external API calls
  • -
  • ✅ Full data privacy (no LLM vendor access)
  • -
  • ✅ Compliance with classified/regulated data
  • -
  • ✅ No API key exposure
  • -
  • ✅ Deterministic (same results each run)
  • -
-

Performance Trade-offs

-

| | Factor | Local | Cloud | | -| | –––– | —–– | —–– | | -| | Privacy | Excellent | Requires trust | | -| | Cost | Free (hardware) | Per token | | -| | Speed | 5-30s/response | 2-5s/response | | -| | Quality | Good (70B models) | Excellent (Opus) | | -| | Hardware | Requires GPU | None | |

-

HSM Integration

-

Hardware Security Module Support

-

For highly sensitive environments:

-
[ai.security.hsm]
-enabled = true
-provider = "aws-cloudhsm"  # or "thales", "yubihsm"
-
-[ai.security.hsm.aws]
-cluster_id = "cluster-123"
-customer_ca_cert = "/etc/provisioning/certs/customerCA.crt"
-server_cert = "/etc/provisioning/certs/server.crt"
-server_key = "/etc/provisioning/certs/server.key"
-
-

Encryption

-

Data at Rest

-
[ai.security.encryption]
-enabled = true
-algorithm = "aes-256-gcm"
-key_derivation = "argon2id"
-
-# Key rotation
-key_rotation_enabled = true
-key_rotation_days = 90
-rotation_alert_days = 7
-
-# Encrypted storage
-cache_encryption = true
-log_encryption = true
-
-

Data in Transit

-
All external LLM API calls:
-├─ TLS 1.3 (minimum)
-├─ Certificate pinning (optional)
-├─ Mutual TLS (with cloud providers)
-└─ No plaintext transmission
-
-

Audit Logging

-

What Gets Logged

-
{
-  "timestamp": "2025-01-13T10:30:45Z",
-  "event_type": "ai_action",
-  "action": "generate_config",
-  "principal": {
-    "user_id": "user-123",
-    "role": "developer",
-    "workspace": "prod"
-  },
-  "resource": {
-    "type": "database",
-    "name": "prod-postgres"
-  },
-  "authorization": {
-    "decision": "permit",
-    "policy": "ai-policies.cedar",
-    "reason": "developer role in workspace"
-  },
-  "cost": {
-    "tokens_used": 1250,
-    "estimated_cost_usd": 0.037
-  },
-  "sanitization": {
-    "items_redacted": 3,
-    "patterns_matched": ["db_password", "api_key", "token"]
-  },
-  "status": "success"
-}
-
-

Audit Trail Access

-
# View recent AI actions
-provisioning audit log ai --tail 100
-
-# Filter by user
-provisioning audit log ai --user alice@company.com
-
-# Filter by action
-provisioning audit log ai --action generate_config
-
-# Filter by time range
-provisioning audit log ai --from "2025-01-01" --to "2025-01-13"
-
-# Export for analysis
-provisioning audit export ai --format csv --output audit.csv
-
-# Full-text search
-provisioning audit search ai "error in database configuration"
-
-

Compliance Frameworks

-

Built-in Compliance Checks

-
[ai.compliance]
-frameworks = ["pci-dss", "hipaa", "sox", "gdpr"]
-
-[ai.compliance.pci-dss]
-enabled = true
-# Requires encryption, audit logs, access controls
-
-[ai.compliance.hipaa]
-enabled = true
-# Requires local models, encrypted storage, audit logs
-
-[ai.compliance.gdpr]
-enabled = true
-# Requires data deletion, consent tracking, privacy by design
-
-

Compliance Reports

-
# Generate compliance report
-provisioning audit compliance-report \
-  --framework pci-dss \
-  --period month \
-  --output report.pdf
-
-# Verify compliance
-provisioning audit verify-compliance \
-  --framework hipaa \
-  --verbose
-
-

Security Best Practices

-

For Administrators

-
    -
  1. Rotate API Keys: Every 90 days minimum
  2. -
  3. Monitor Budget: Set up alerts at 80% and 90%
  4. -
  5. Review Policies: Quarterly policy audit
  6. -
  7. Audit Logs: Weekly review of AI operations
  8. -
  9. Update Models: Use latest stable models
  10. -
  11. Test Recovery: Monthly rollback drills
  12. -
-

For Developers

-
    -
  1. Use Workspace Isolation: Never share workspace access
  2. -
  3. Don’t Log Secrets: Use sanitization, never bypass it
  4. -
  5. Validate Outputs: Always review AI-generated configs
  6. -
  7. Report Issues: Security issues to security-ai@company.com
  8. -
  9. Stay Updated: Follow security bulletins
  10. -
-

For Operators

-
    -
  1. Monitor Costs: Alert if exceeding 110% of budget
  2. -
  3. Watch Errors: Unusual error patterns may indicate attacks
  4. -
  5. Check Audit Logs: Unauthorized access attempts
  6. -
  7. Test Policies: Periodically verify Cedar policies work
  8. -
  9. Backup Configs: Secure backup of policy files
  10. -
-

Incident Response

-

Compromised API Key

-
# 1. Immediately revoke key
-provisioning admin revoke-key ai-api-key-123
-
-# 2. Rotate key
-provisioning admin rotate-key ai \
-  --notify ops-team@company.com
-
-# 3. Audit usage since compromise
-provisioning audit log ai \
-  --since "2025-01-13T09:00:00Z" \
-  --api-key-id ai-api-key-123
-
-# 4. Review any generated configs from this period
-# Configs generated while key was compromised may need review
-
-

Unauthorized Access

-
# Review Cedar policy logs
-provisioning audit log ai \
-  --decision deny \
-  --last-hour
-
-# Check for pattern
-provisioning audit search ai "authorization.*deny" \
-  --trend-analysis
-
-# Update policies if needed
-provisioning policy update ai-policies.cedar
-
-

Security Checklist

-

Pre-Production

-
    -
  • ✅ Cedar policies reviewed and tested
  • -
  • ✅ API keys rotated and secured
  • -
  • ✅ Data sanitization tested with real secrets
  • -
  • ✅ Encryption enabled for cache
  • -
  • ✅ Audit logging configured
  • -
  • ✅ Cost limits set appropriately
  • -
  • ✅ Local-only mode tested (if needed)
  • -
  • ✅ HSM configured (if required)
  • -
-

Ongoing

-
    -
  • ✅ Monthly policy review
  • -
  • ✅ Weekly audit log review
  • -
  • ✅ Quarterly key rotation
  • -
  • ✅ Annual compliance assessment
  • -
  • ✅ Continuous budget monitoring
  • -
  • ✅ Error pattern analysis
  • -
- - -
-

Last Updated: 2025-01-13 -Status: ✅ Production-Ready -Compliance: PCI-DSS, HIPAA, SOX, GDPR -Cedar Version: 3.0+

-

AI-Assisted Troubleshooting and Debugging

-

Status: ✅ Production-Ready (AI troubleshooting analysis, log parsing)

-

The AI troubleshooting system provides intelligent debugging assistance for infrastructure failures. The system analyzes deployment logs, identifies -root causes, suggests fixes, and generates corrected configurations based on failure patterns.

-

Feature Overview

-

What It Does

-

Transform deployment failures into actionable insights:

-
Deployment Fails with Error
-        ↓
-AI analyzes logs:
-  - Identifies failure phase (networking, database, k8s, etc.)
-  - Detects root cause (resource limits, configuration, timeout)
-  - Correlates with similar past failures
-  - Reviews deployment configuration
-        ↓
-AI generates report:
-  - Root cause explanation in plain English
-  - Configuration issues identified
-  - Suggested fixes with rationale
-  - Alternative solutions
-  - Links to relevant documentation
-        ↓
-Developer reviews and accepts:
-  - Understands what went wrong
-  - Knows how to fix it
-  - Can implement fix with confidence
-
-

Troubleshooting Workflow

-

Automatic Detection and Analysis

-
┌──────────────────────────────────────────┐
-│ Deployment Monitoring                    │
-│ - Watches deployment for failures        │
-│ - Captures logs in real-time             │
-│ - Detects failure events                 │
-└──────────────┬───────────────────────────┘
-               ↓
-┌──────────────────────────────────────────┐
-│ Log Collection                           │
-│ - Gather all relevant logs               │
-│ - Include stack traces                   │
-│ - Capture metrics at failure time        │
-│ - Get resource usage data                │
-└──────────────┬───────────────────────────┘
-               ↓
-┌──────────────────────────────────────────┐
-│ Context Retrieval (RAG)                  │
-│ - Find similar past failures             │
-│ - Retrieve troubleshooting guides        │
-│ - Get schema constraints                 │
-│ - Find best practices                    │
-└──────────────┬───────────────────────────┘
-               ↓
-┌──────────────────────────────────────────┐
-│ AI Analysis                              │
-│ - Identify failure pattern               │
-│ - Determine root cause                   │
-│ - Generate hypotheses                    │
-│ - Score likely causes                    │
-└──────────────┬───────────────────────────┘
-               ↓
-┌──────────────────────────────────────────┐
-│ Solution Generation                      │
-│ - Create fixed configuration             │
-│ - Generate step-by-step fix guide        │
-│ - Suggest preventative measures          │
-│ - Provide alternative approaches         │
-└──────────────┬───────────────────────────┘
-               ↓
-┌──────────────────────────────────────────┐
-│ Report and Recommendations               │
-│ - Explain what went wrong                │
-│ - Show how to fix it                     │
-│ - Provide corrected configuration        │
-│ - Link to prevention strategies          │
-└──────────────────────────────────────────┘
-
-

Usage Examples

-

Example 1: Database Connection Timeout

-

Failure:

-
Deployment: deploy-2025-01-13-001
-Status: FAILED at phase database_migration
-Error: connection timeout after 30s connecting to postgres://...
-
-

Run Troubleshooting:

-
$ provisioning ai troubleshoot deploy-2025-01-13-001
-
-Analyzing deployment failure...
-
-╔════════════════════════════════════════════════════════════════╗
-║ Root Cause Analysis: Database Connection Timeout              ║
-╠════════════════════════════════════════════════════════════════╣
-║                                                                ║
-║ Phase: database_migration (occurred during migration job)     ║
-║ Error: Timeout after 30 seconds connecting to database        ║
-║                                                                ║
-║ Most Likely Causes (confidence):                              ║
-║   1. Database security group blocks migration job (85%)       ║
-║   2. Database instance not fully initialized yet (60%)        ║
-║   3. Network connectivity issue (40%)                         ║
-║                                                                ║
-║ Analysis:                                                     ║
-║   - Database was created only 2 seconds before connection    ║
-║   - Migration job started immediately (no wait time)         ║
-║   - Security group: allows 5432 only from default SG         ║
-║   - Migration pod uses different security group              ║
-║                                                                ║
-╠════════════════════════════════════════════════════════════════╣
-║ Recommended Fix                                                ║
-╠════════════════════════════════════════════════════════════════╣
-║                                                                ║
-║ Issue: Migration security group not in database's inbound    ║
-║                                                                ║
-║ Solution: Add migration pod security group to DB inbound     ║
-║                                                                ║
-║   database.security_group.ingress = [                         ║
-║     {                                                          ║
-║       from_port = 5432,                                       ║
-║       to_port = 5432,                                         ║
-║       source_security_group = "migration-pods-sg"             ║
-║     }                                                          ║
-║   ]                                                            ║
-║                                                                ║
-║ Alternative: Add 30-second wait after database creation      ║
-║                                                                ║
-║   deployment.phases.database.post_actions = [                 ║
-║     {action = "wait_for_database", timeout_seconds = 30}     ║
-║   ]                                                            ║
-║                                                                ║
-╠════════════════════════════════════════════════════════════════╣
-║ Prevention                                                     ║
-╠════════════════════════════════════════════════════════════════╣
-║                                                                ║
-║ To prevent this in future deployments:                        ║
-║                                                                ║
-║ 1. Always verify security group rules before migration       ║
-║ 2. Add health check: `SELECT 1` before starting migration    ║
-║ 3. Increase initial timeout: database can be slow to start   ║
-║ 4. Use RDS wait condition instead of time-based wait         ║
-║                                                                ║
-║ See: docs/troubleshooting/database-connectivity.md            ║
-║      docs/guides/database-migrations.md                       ║
-║                                                                ║
-╚════════════════════════════════════════════════════════════════╝
-
-Generate corrected configuration? [yes/no]: yes
-
-Configuration generated and saved to:
-  workspaces/prod/database.ncl.fixed
-
-Changes made:
-  ✓ Added migration security group to database inbound
-  ✓ Added health check before migration
-  ✓ Increased connection timeout to 60s
-
-Ready to redeploy with corrected configuration? [yes/no]: yes
-
-

Example 2: Kubernetes Deployment Error

-

Failure:

-
Deployment: deploy-2025-01-13-002
-Status: FAILED at phase kubernetes_workload
-Error: failed to create deployment app: Pod exceeded capacity
-
-

Troubleshooting:

-
$ provisioning ai troubleshoot deploy-2025-01-13-002 --detailed
-
-╔════════════════════════════════════════════════════════════════╗
-║ Root Cause: Pod Exceeded Node Capacity                        ║
-╠════════════════════════════════════════════════════════════════╣
-║                                                                ║
-║ Failure Analysis:                                             ║
-║                                                                ║
-║ Error: Pod requests 4CPU/8GB, but largest node has 2CPU/4GB  ║
-║ Cluster: 3 nodes, each t3.medium (2CPU/4GB)                  ║
-║ Pod requirements:                                             ║
-║   - CPU: 4 (requested) + 2 (reserved system) = 6 needed      ║
-║   - Memory: 8Gi (requested) + 1Gi (system) = 9Gi needed      ║
-║                                                                ║
-║ Why this happened:                                            ║
-║   Pod spec updated to 4CPU/8GB but node group wasn't        ║
-║   Node group still has t3.medium (too small)                 ║
-║   No autoscaling configured (won't scale up automatically)   ║
-║                                                                ║
-║ Solution Options:                                             ║
-║   1. Reduce pod resource requests to 2CPU/4GB (simpler)      ║
-║   2. Scale up node group to t3.large (2x cost, safer)        ║
-║   3. Use both: t3.large nodes + reduce pod requests          ║
-║                                                                ║
-╠════════════════════════════════════════════════════════════════╣
-║ Recommended: Option 2 (Scale up nodes)                        ║
-╠════════════════════════════════════════════════════════════════╣
-║                                                                ║
-║ Reason: Pod requests are reasonable for production app       ║
-║         Better to scale infrastructure than reduce resources  ║
-║                                                                ║
-║ Changes needed:                                               ║
-║                                                                ║
-║   kubernetes.node_group = {                                   ║
-║     instance_type = "t3.large"  # was t3.medium              ║
-║     min_size = 3                                              ║
-║     max_size = 10                                             ║
-║                                                                ║
-║     auto_scaling = {                                          ║
-║       enabled = true                                          ║
-║       target_cpu_percent = 70                                 ║
-║     }                                                          ║
-║   }                                                            ║
-║                                                                ║
-║ Cost Impact:                                                  ║
-║   Current: 3 × t3.medium = ~$90/month                        ║
-║   Proposed: 3 × t3.large = ~$180/month                       ║
-║   With autoscaling, average: ~$150/month (some scale-down)   ║
-║                                                                ║
-╚════════════════════════════════════════════════════════════════╝
-
-

CLI Commands

-

Basic Troubleshooting

-
# Troubleshoot recent deployment
-provisioning ai troubleshoot deploy-2025-01-13-001
-
-# Get detailed analysis
-provisioning ai troubleshoot deploy-2025-01-13-001 --detailed
-
-# Analyze with specific focus
-provisioning ai troubleshoot deploy-2025-01-13-001 --focus networking
-
-# Get alternative solutions
-provisioning ai troubleshoot deploy-2025-01-13-001 --alternatives
-
-

Working with Logs

-
# Troubleshoot from custom logs
-provisioning ai troubleshoot \
-| --logs "$(journalctl -u provisioning --no-pager | tail -100)" |
-
-# Troubleshoot from file
-provisioning ai troubleshoot --log-file /var/log/deployment.log
-
-# Troubleshoot from cloud provider
-provisioning ai troubleshoot \
-  --cloud-logs aws-deployment-123 \
-  --region us-east-1
-
-

Generate Reports

-
# Generate detailed troubleshooting report
-provisioning ai troubleshoot deploy-123 \
-  --report \
-  --output troubleshooting-report.md
-
-# Generate with suggestions
-provisioning ai troubleshoot deploy-123 \
-  --report \
-  --include-suggestions \
-  --output report-with-fixes.md
-
-# Generate compliance report (PCI-DSS, HIPAA)
-provisioning ai troubleshoot deploy-123 \
-  --report \
-  --compliance pci-dss \
-  --output compliance-report.pdf
-
-

Analysis Depth

-

Shallow Analysis (Fast)

-
provisioning ai troubleshoot deploy-123 --depth shallow
-
-Analyzes:
-- First error message
-- Last few log lines
-- Basic pattern matching
-- Returns in 30-60 seconds
-
-

Deep Analysis (Thorough)

-
provisioning ai troubleshoot deploy-123 --depth deep
-
-Analyzes:
-- Full log context
-- Correlates multiple errors
-- Checks resource metrics
-- Compares to past failures
-- Generates alternative hypotheses
-- Returns in 5-10 seconds
-
-

Integration with Monitoring

-

Automatic Troubleshooting

-
# Enable auto-troubleshoot on failures
-provisioning config set ai.troubleshooting.auto_analyze true
-
-# Deployments that fail automatically get analyzed
-# Reports available in provisioning dashboard
-# Alerts sent to on-call engineer with analysis
-
-

WebUI Integration

-
Deployment Dashboard
-  ├─ deployment-123 [FAILED]
-  │   └─ AI Analysis
-  │       ├─ Root Cause: Database timeout
-  │       ├─ Suggested Fix: ✓ View
-  │       ├─ Corrected Config: ✓ Download
-  │       └─ Alternative Solutions: 3 options
-
-

Learning from Failures

-

Pattern Recognition

-

The system learns common failure patterns:

-
Collected Patterns:
-├─ Database Timeouts (25% of failures)
-│  └─ Usually: Security group, connection pool, slow startup
-├─ Kubernetes Pod Failures (20%)
-│  └─ Usually: Insufficient resources, bad config
-├─ Network Connectivity (15%)
-│  └─ Usually: Security groups, routing, DNS
-└─ Other (40%)
-   └─ Various causes, each analyzed individually
-
-

Improvement Tracking

-
# See patterns in your deployments
-provisioning ai analytics failures --period month
-
-Month Summary:
-  Total deployments: 50
-  Failed: 5 (10% failure rate)
-
-  Common causes:
-  1. Security group rules (3 failures, 60%)
-  2. Resource limits (1 failure, 20%)
-  3. Configuration error (1 failure, 20%)
-
-  Improvement opportunities:
-  - Pre-check security groups before deployment
-  - Add health checks for resource sizing
-  - Add configuration validation
-
-

Configuration

-

Troubleshooting Settings

-
[ai.troubleshooting]
-enabled = true
-
-# Analysis depth
-default_depth = "deep"  # or "shallow" for speed
-max_analysis_time_seconds = 30
-
-# Features
-auto_analyze_failed_deployments = true
-generate_corrected_config = true
-suggest_prevention = true
-
-# Learning
-track_failure_patterns = true
-learn_from_similar_failures = true
-improve_suggestions_over_time = true
-
-# Reporting
-auto_send_report = false  # Email report to user
-report_format = "markdown"  # or "json", "pdf"
-include_alternatives = true
-
-# Cost impact analysis
-estimate_fix_cost = true
-estimate_alternative_costs = true
-
-

Failure Detection

-
[ai.troubleshooting.detection]
-# Monitor logs for these patterns
-watch_patterns = [
-  "error",
-  "timeout",
-  "failed",
-  "unable to",
-  "refused",
-  "denied",
-  "exceeded",
-  "quota",
-]
-
-# Minimum log lines before analyzing
-min_log_lines = 10
-
-# Time window for log collection
-log_window_seconds = 300
-
-

Best Practices

-

For Effective Troubleshooting

-
    -
  1. Keep Detailed Logs: Enable verbose logging in deployments
  2. -
  3. Include Context: Share full logs, not just error snippet
  4. -
  5. Check Suggestions: Review AI suggestions even if obvious
  6. -
  7. Learn Patterns: Track recurring failures and address root cause
  8. -
  9. Update Configs: Use corrected configs from AI, validate them
  10. -
-

For Prevention

-
    -
  1. Use Health Checks: Add database/service health checks
  2. -
  3. Test Before Deploy: Use dry-run to catch issues early
  4. -
  5. Monitor Metrics: Watch CPU/memory before failures occur
  6. -
  7. Review Policies: Ensure security groups are correct
  8. -
  9. Document Changes: When updating configs, note the change
  10. -
-

Limitations

-

What AI Can Troubleshoot

-

✅ Configuration errors -✅ Resource limit problems -✅ Networking/security group issues -✅ Database connectivity problems -✅ Deployment ordering issues -✅ Common application errors -✅ Performance problems

-

What Requires Human Review

-

⚠️ Data corruption scenarios -⚠️ Multi-failure cascades -⚠️ Unclear error messages -⚠️ Custom application code failures -⚠️ Third-party service issues -⚠️ Physical infrastructure failures

-

Examples and Guides

- - - - -
-

Last Updated: 2025-01-13 -Status: ✅ Production-Ready -Success Rate: 85-95% accuracy in root cause identification -Supported: All deployment types (infrastructure, Kubernetes, database)

-

AI Cost Management and Optimization

-

Status: ✅ Production-Ready (cost tracking, budgets, caching benefits)

-

Comprehensive guide to managing LLM API costs, optimizing usage through caching and rate limiting, and tracking spending. The provisioning platform -includes built-in cost controls to prevent runaway spending while maximizing value.

-

Cost Overview

-

API Provider Pricing

-

| | Provider | Model | Input | Output | Per MTok | | -| | ––––– | —–– | —–– | –––– | ––––– | | -| | Anthropic | Claude Sonnet 4 | $3 | $15 | $0.003 input / $0.015 output | | -| | | Claude Opus 4 | $15 | $45 | Higher accuracy, longer context | | -| | | Claude Haiku 4 | $0.80 | $4 | Fast, for simple queries | | -| | OpenAI | GPT-4 Turbo | $0.01 | $0.03 | Per 1K tokens | | -| | | GPT-4 | $0.03 | $0.06 | Legacy, avoid | | -| | | GPT-4o | $5 | $15 | Per MTok | | -| | Local | Llama 2, Mistral | Free | Free | Hardware cost only | |

-

Cost Examples

-
Scenario 1: Generate simple database configuration
-  - Input: 500 tokens (description + schema)
-  - Output: 200 tokens (generated config)
-  - Cost: (500 × $3 + 200 × $15) / 1,000,000 = $0.0045
-  - With caching (hit rate 50%): $0.0023
-
-Scenario 2: Deep troubleshooting analysis
-  - Input: 5000 tokens (logs + context)
-  - Output: 2000 tokens (analysis + recommendations)
-  - Cost: (5000 × $3 + 2000 × $15) / 1,000,000 = $0.045
-  - With caching (hit rate 70%): $0.0135
-
-Scenario 3: Monthly usage (typical organization)
-  - ~1000 config generations @ $0.005 = $5
-  - ~500 troubleshooting calls @ $0.045 = $22.50
-  - ~2000 form assists @ $0.002 = $4
-  - ~200 agent executions @ $0.10 = $20
-  - **Total: ~$50-100/month for small org**
-  - **Total: ~$500-1000/month for large org**
-
-

Cost Control Mechanisms

-

Request Caching

-

Caching is the primary cost reduction strategy, cutting costs by 50-80%:

-
Without Caching:
-  User 1: "Generate PostgreSQL config" → API call → $0.005
-  User 2: "Generate PostgreSQL config" → API call → $0.005
-  Total: $0.010 (2 identical requests)
-
-With LRU Cache:
-  User 1: "Generate PostgreSQL config" → API call → $0.005
-  User 2: "Generate PostgreSQL config" → Cache hit → $0.00001
-  Total: $0.00501 (500x cost reduction for identical)
-
-With Semantic Cache:
-  User 1: "Generate PostgreSQL database config" → API call → $0.005
-  User 2: "Create a PostgreSQL database" → Semantic hit → $0.00001
-  (Slightly different wording, but same intent)
-  Total: $0.00501 (near 500x reduction for similar)
-
-

Cache Configuration

-
[ai.cache]
-enabled = true
-cache_type = "redis"  # Distributed cache across instances
-ttl_seconds = 3600    # 1-hour cache lifetime
-
-# Cache size limits
-max_size_mb = 500
-eviction_policy = "lru"  # Least Recently Used
-
-# Semantic caching - cache similar queries
-[ai.cache.semantic]
-enabled = true
-similarity_threshold = 0.95  # Cache if 95%+ similar to previous query
-cache_embeddings = true      # Cache embedding vectors themselves
-
-# Cache metrics
-[ai.cache.metrics]
-track_hit_rate = true
-track_space_usage = true
-alert_on_low_hit_rate = true
-
-

Rate Limiting

-

Prevent usage spikes from unexpected costs:

-
[ai.limits]
-# Per-request limits
-max_tokens = 4096
-max_input_tokens = 8192
-max_output_tokens = 4096
-
-# Throughput limits
-rpm_limit = 60                    # 60 requests per minute
-rpm_burst = 100                   # Allow burst to 100
-daily_request_limit = 5000        # Max 5000 requests/day
-
-# Cost limits
-daily_cost_limit_usd = 100        # Stop at $100/day
-monthly_cost_limit_usd = 2000     # Stop at $2000/month
-
-# Budget alerts
-warn_at_percent = 80              # Warn when at 80% of daily budget
-stop_at_percent = 95              # Stop when at 95% of budget
-
-

Workspace-Level Budgets

-
[ai.workspace_budgets]
-# Per-workspace cost limits
-dev.daily_limit_usd = 10
-staging.daily_limit_usd = 50
-prod.daily_limit_usd = 100
-
-# Can override globally for specific workspaces
-teams.team-a.monthly_limit = 500
-teams.team-b.monthly_limit = 300
-
-

Cost Tracking

-

Track Spending

-
# View current month spending
-provisioning admin costs show ai
-
-# Forecast monthly spend
-provisioning admin costs forecast ai --days-remaining 15
-
-# Analyze by feature
-provisioning admin costs analyze ai --by feature
-
-# Analyze by user
-provisioning admin costs analyze ai --by user
-
-# Export for billing
-provisioning admin costs export ai --format csv --output costs.csv
-
-

Cost Breakdown

-
Month: January 2025
-
-Total Spending: $285.42
-
-By Feature:
-  Config Generation:    $150.00 (52%) [300 requests × avg $0.50]
-  Troubleshooting:      $95.00  (33%) [80 requests × avg $1.19]
-  Form Assistance:      $30.00  (11%) [5000 requests × avg $0.006]
-  Agents:               $10.42  (4%)  [20 runs × avg $0.52]
-
-By Provider:
-  Anthropic (Claude):   $200.00 (70%)
-  OpenAI (GPT-4):       $85.42  (30%)
-  Local:                $0      (0%)
-
-By User:
-  alice@company.com:    $50.00  (18%)
-  bob@company.com:      $45.00  (16%)
-  ...
-  other (20 users):     $190.42 (67%)
-
-By Workspace:
-  production:           $150.00 (53%)
-  staging:              $85.00  (30%)
-  development:          $50.42  (18%)
-
-Cache Performance:
-  Requests: 50,000
-  Cache hits: 35,000 (70%)
-  Cache misses: 15,000 (30%)
-  Cost savings from cache: ~$175 (38% reduction)
-
-

Optimization Strategies

-

Strategy 1: Increase Cache Hit Rate

-
# Longer TTL = more cache hits
-[ai.cache]
-ttl_seconds = 7200  # 2 hours instead of 1 hour
-
-# Semantic caching helps with slight variations
-[ai.cache.semantic]
-enabled = true
-similarity_threshold = 0.90  # Lower threshold = more hits
-
-# Result: Increase hit rate from 65% → 80%
-# Cost reduction: 15% → 23%
-
-

Strategy 2: Use Local Models

-
[ai]
-provider = "local"
-model = "mistral-7b"  # Free, runs on GPU
-
-# Cost: Hardware ($5-20/month) instead of API calls
-# Savings: 50-100 config generations/month × $0.005 = $0.25-0.50
-# Hardware amortized cost: <$0.50/month on existing GPU
-
-# Tradeoff: Slightly lower quality, 2x slower
-
-

Strategy 3: Use Haiku for Simple Tasks

-
Task Complexity vs Model:
-
-Simple (form assist): Claude Haiku 4 ($0.80/$4)
-Medium (config gen): Claude Sonnet 4 ($3/$15)
-Complex (agents): Claude Opus 4 ($15/$45)
-
-Example optimization:
-  Before: All tasks use Sonnet 4
-  - 5000 form assists/month: 5000 × $0.006 = $30
-
-  After: Route by complexity
-  - 5000 form assists → Haiku: 5000 × $0.001 = $5 (83% savings)
-  - 200 config gen → Sonnet: 200 × $0.005 = $1
-  - 10 agent runs → Opus: 10 × $0.10 = $1
-
-

Strategy 4: Batch Operations

-
# Instead of individual requests, batch similar operations:
-
-# Before: 100 configs, 100 separate API calls
-provisioning ai generate "PostgreSQL config" --output db1.ncl
-provisioning ai generate "PostgreSQL config" --output db2.ncl
-# ... 100 calls = $0.50
-
-# After: Batch similar requests
-provisioning ai batch --input configs-list.yaml
-# Groups similar requests, reuses cache
-# ... 3-5 API calls = $0.02 (90% savings)
-
-

Strategy 5: Smart Feature Enablement

-
[ai.features]
-# Enable high-ROI features
-config_generation = true    # High value, moderate cost
-troubleshooting = true      # High value, higher cost
-rag_search = true           # Low cost, high value
-
-# Disable low-ROI features if cost-constrained
-form_assistance = false     # Low value, non-zero cost (if budget tight)
-agents = false              # Complex, requires multiple calls
-
-

Budget Management Workflow

-

1. Set Budget

-
# Set monthly budget
-provisioning config set ai.budget.monthly_limit_usd 500
-
-# Set daily limit
-provisioning config set ai.limits.daily_cost_limit_usd 50
-
-# Set workspace limits
-provisioning config set ai.workspace_budgets.prod.monthly_limit 300
-provisioning config set ai.workspace_budgets.dev.monthly_limit 100
-
-

2. Monitor Spending

-
# Daily check
-provisioning admin costs show ai
-
-# Weekly analysis
-provisioning admin costs analyze ai --period week
-
-# Monthly review
-provisioning admin costs analyze ai --period month
-
-

3. Adjust If Needed

-
# If overspending:
-# - Increase cache TTL
-# - Enable local models for simple tasks
-# - Reduce form assistance (high volume, low cost but adds up)
-# - Route complex tasks to Haiku instead of Opus
-
-# If underspending:
-# - Enable new features (agents, form assistance)
-# - Increase rate limits
-# - Lower cache hit requirements (broader semantic matching)
-
-

4. Forecast and Plan

-
# Current monthly run rate
-provisioning admin costs forecast ai
-
-# If trending over budget, recommend actions:
-# - Reduce daily limit
-# - Switch to local model for 50% of tasks
-# - Increase batch processing
-
-# If trending under budget:
-# - Enable agents for automation workflows
-# - Enable form assistance across all workspaces
-
-

Cost Allocation

-

Chargeback Models

-

Per-Workspace Model:

-
Development workspace: $50/month
-Staging workspace:     $100/month
-Production workspace:  $300/month
-------
-Total:                 $450/month
-
-

Per-User Model:

-
Each user charged based on their usage
-Encourages efficiency
-Difficult to track/allocate
-
-

Shared Pool Model:

-
All teams share $1000/month budget
-Budget splits by consumption rate
-Encourages optimization
-Most flexible
-
-

Cost Reporting

-

Generate Reports

-
# Monthly cost report
-provisioning admin costs report ai \
-  --format pdf \
-  --period month \
-  --output cost-report-2025-01.pdf
-
-# Detailed analysis for finance
-provisioning admin costs report ai \
-  --format xlsx \
-  --include-forecasts \
-  --include-optimization-suggestions
-
-# Executive summary
-provisioning admin costs report ai \
-  --format markdown \
-  --summary-only
-
-

Cost-Benefit Analysis

-

ROI Examples

-
Scenario 1: Developer Time Savings
-  Problem: Manual config creation takes 2 hours
-  Solution: AI config generation, 10 minutes (12x faster)
-  Time saved: 1.83 hours/config
-  Hourly rate: $100
-  Value: $183/config
-
-  AI cost: $0.005/config
-  ROI: 36,600x (far exceeds cost)
-
-Scenario 2: Troubleshooting Efficiency
-  Problem: Manual debugging takes 4 hours
-  Solution: AI troubleshooting analysis, 2 minutes
-  Time saved: 3.97 hours
-  Value: $397/incident
-
-  AI cost: $0.045/incident
-  ROI: 8,822x
-
-Scenario 3: Reduction in Failed Deployments
-  Before: 5% of 1000 deployments fail (50 failures)
-  Failure cost: $500 each (lost time, data cleanup)
-  Total: $25,000/month
-
-  After: With AI analysis, 2% fail (20 failures)
-  Total: $10,000/month
-  Savings: $15,000/month
-
-  AI cost: $200/month
-  Net savings: $14,800/month
-  ROI: 74:1
-
-

Advanced Cost Optimization

- -
✓ Local models for:
-  - Form assistance (high volume, low complexity)
-  - Simple validation checks
-  - Document retrieval (RAG)
-  Cost: Hardware only (~$500 setup)
-
-✓ Cloud API for:
-  - Complex generation (requires latest model capability)
-  - Troubleshooting (needs high accuracy)
-  - Agents (complex reasoning)
-  Cost: $50-200/month per organization
-
-Result:
-  - 70% of requests → Local (free after hardware amortization)
-  - 30% of requests → Cloud ($50/month)
-  - 80% overall cost reduction vs cloud-only
-
-

Monitoring and Alerts

-

Cost Anomaly Detection

-
# Enable anomaly detection
-provisioning config set ai.monitoring.anomaly_detection true
-
-# Set thresholds
-provisioning config set ai.monitoring.cost_spike_percent 150
-# Alert if daily cost is 150% of average
-
-# System alerts:
-# - Daily cost exceeded by 10x normal
-# - New expensive operation (agent run)
-# - Cache hit rate dropped below 40%
-# - Rate limit nearly exhausted
-
-

Alert Configuration

-
[ai.monitoring.alerts]
-enabled = true
-spike_threshold_percent = 150
-check_interval_minutes = 5
-
-[ai.monitoring.alerts.channels]
-email = "ops@company.com"
-slack = "[https://hooks.slack.com/..."](https://hooks.slack.com/...")
-pagerduty = "integration-key"
-
-# Alert thresholds
-[ai.monitoring.alerts.thresholds]
-daily_budget_warning_percent = 80
-daily_budget_critical_percent = 95
-monthly_budget_warning_percent = 70
-
- - -
-

Last Updated: 2025-01-13 -Status: ✅ Production-Ready -Average Savings: 50-80% through caching -Typical Cost: $50-500/month per organization -ROI: 100:1 to 10,000:1 depending on use case

-

Natural Language Configuration Generation

-

Status: 🔴 Planned (Q2 2025 target)

-

Natural Language Configuration (NLC) is a planned feature that enables users to describe infrastructure requirements in plain English and have the -system automatically generate validated Nickel configurations. This feature combines natural language understanding with schema-aware generation and -validation.

-

Feature Overview

-

What It Does

-

Transform infrastructure descriptions into production-ready Nickel configurations:

-
User Input:
-  "Create a production PostgreSQL cluster with 100GB storage,
-   daily backups, encryption enabled, and cross-region replication
-   to us-west-2"
-
-System Output:
-  provisioning/schemas/database.ncl (validated, production-ready)
-
-

Primary Use Cases

-
    -
  1. Rapid Prototyping: From description to working config in seconds
  2. -
  3. Infrastructure Documentation: Describe infrastructure as code
  4. -
  5. Configuration Templates: Generate reusable patterns
  6. -
  7. Non-Expert Operations: Enable junior developers to provision infrastructure
  8. -
  9. Configuration Migration: Describe existing infrastructure to generate Nickel
  10. -
-

Architecture

-

Generation Pipeline

-
Input Description (Natural Language)
-        ↓
-┌─────────────────────────────────────┐
-│ Understanding & Analysis             │
-│ - Intent extraction                  │
-│ - Entity recognition                 │
-│ - Constraint identification          │
-│ - Best practice inference            │
-└─────────────────────┬───────────────┘
-                      ↓
-┌─────────────────────────────────────┐
-│ RAG Context Retrieval                │
-│ - Find similar configs               │
-│ - Retrieve best practices            │
-│ - Get schema examples                │
-│ - Identify constraints               │
-└─────────────────────┬───────────────┘
-                      ↓
-┌─────────────────────────────────────┐
-│ Schema-Aware Generation              │
-│ - Map entities to schema fields      │
-│ - Apply type constraints             │
-│ - Include required fields            │
-│ - Generate valid Nickel              │
-└─────────────────────┬───────────────┘
-                      ↓
-┌─────────────────────────────────────┐
-│ Validation & Refinement              │
-│ - Type checking                      │
-│ - Schema validation                  │
-│ - Policy compliance                  │
-│ - Security checks                    │
-└─────────────────────┬───────────────┘
-                      ↓
-┌─────────────────────────────────────┐
-│ Output & Explanation                 │
-│ - Generated Nickel config            │
-│ - Decision rationale                 │
-│ - Alternative suggestions            │
-│ - Warnings if any                    │
-└─────────────────────────────────────┘
-
-

Planned Implementation Details

-

1. Intent Extraction

-

Extract structured intent from natural language:

-
Input: "Create a production PostgreSQL cluster with encryption and backups"
-
-Extracted Intent:
-{
-  resource_type: "database",
-  engine: "postgresql",
-  environment: "production",
-  requirements: [
-    {constraint: "encryption", type: "boolean", value: true},
-    {constraint: "backups", type: "enabled", frequency: "daily"},
-  ],
-  modifiers: ["production"],
-}
-
-

2. Entity Mapping

-

Map natural language entities to schema fields:

-
Description Terms → Schema Fields:
-  "100GB storage" → database.instance.allocated_storage_gb = 100
-  "daily backups" → backup.enabled = true, backup.frequency = "daily"
-  "encryption" → security.encryption_enabled = true
-  "cross-region" → backup.copy_to_region = "us-west-2"
-  "PostgreSQL 15" → database.engine_version = "15.0"
-
-

3. Prompt Engineering

-

Sophisticated prompting for schema-aware generation:

-
System Prompt:
-You are generating Nickel infrastructure configurations.
-Generate ONLY valid Nickel syntax.
-Follow these rules:
-- Use record syntax: `field = value`
-- Type annotations must be valid
-- All required fields must be present
-- Apply best practices for [ENVIRONMENT]
-
-Schema Context:
-[Database schema from provisioning/schemas/database.ncl]
-
-Examples:
-[3 relevant examples from RAG]
-
-User Request:
-[User natural language description]
-
-Generate the complete Nickel configuration.
-Start with: let { database = {
-
-

4. Iterative Refinement

-

Handle generation errors through iteration:

-
Attempt 1: Generate initial config
-  ↓ Validate
-  ✗ Error: field `version` type mismatch (string vs number)
-  ↓ Re-prompt with error
-Attempt 2: Fix with context from error
-  ↓ Validate
-  ✓ Success: Config is valid
-
-

Command Interface

-

CLI Usage

-
# Simple generation
-provisioning ai generate "PostgreSQL database for production"
-
-# With schema specification
-provisioning ai generate \
-  --schema database \
-  "Create PostgreSQL 15 with encryption and daily backups"
-
-# Interactive generation (refine output)
-provisioning ai generate --interactive \
-  "Kubernetes cluster on AWS"
-
-# Generate and validate
-provisioning ai generate \
-  --validate \
-  "Production Redis cluster with sentinel"
-
-# Generate and save directly
-provisioning ai generate \
-  --schema database \
-  --output workspaces/prod/database.ncl \
-  "PostgreSQL production setup"
-
-# Batch generation from file
-provisioning ai generate --batch descriptions.yaml
-
-

Interactive Refinement

-
$ provisioning ai generate --interactive
-> Describe infrastructure: Create production PostgreSQL cluster
-
-Generated configuration shown.
-
-> Refine: Add cross-region backup to us-west-2
-Configuration updated.
-
-> Refine: Use larger instance class for performance
-Configuration updated.
-
-> Accept? [y/n]: y
-Configuration saved to: workspaces/prod/database.ncl
-
-

Example Generations

-

Example 1: Simple Database

-

Input:

-
"PostgreSQL database with 50GB storage and encryption"
-
-

Output:

-
let {
-  database = {
-    engine = "postgresql",
-    version = "15.0",
-
-    instance = {
-      instance_class = "db.t3.medium",
-      allocated_storage_gb = 50,
-      iops = 1000,
-    },
-
-    security = {
-      encryption_enabled = true,
-      tls_enabled = true,
-      tls_version = "1.3",
-    },
-
-    backup = {
-      enabled = true,
-      retention_days = 7,
-    },
-  }
-}
-
-

Rationale:

-
    -
  • PostgreSQL 15 is current stable version
  • -
  • db.t3.medium suitable for 50GB with general workload
  • -
  • Encryption enabled per requirement
  • -
  • Automatic backups with 7-day retention (default)
  • -
-

Example 2: Complex Kubernetes Setup

-

Input:

-
"Production Kubernetes cluster in AWS with 3 availability zones,
-auto-scaling from 3 to 10 nodes, managed PostgreSQL, and monitoring"
-
-

Output:

-
let {
-  kubernetes = {
-    version = "1.28.0",
-
-    cluster = {
-      name = "prod-cluster",
-      region = "us-east-1",
-      availability_zones = ["us-east-1a", "us-east-1b", "us-east-1c"],
-    },
-
-    node_group = {
-      min_size = 3,
-      max_size = 10,
-      desired_size = 3,
-      instance_type = "t3.large",
-
-      auto_scaling = {
-        enabled = true,
-        target_cpu = 70,
-        scale_down_delay = 300,
-      },
-    },
-
-    managed_services = {
-      postgres = {
-        enabled = true,
-        engine = "postgresql",
-        version = "15.0",
-        storage_gb = 100,
-      },
-    },
-
-    monitoring = {
-      prometheus = {enabled = true},
-      grafana = {enabled = true},
-      cloudwatch_integration = true,
-    },
-
-    networking = {
-      vpc_cidr = "10.0.0.0/16",
-      enable_nat_gateway = true,
-      enable_dns_hostnames = true,
-    },
-  }
-}
-
-

Rationale:

-
    -
  • 3 AZs for high availability
  • -
  • t3.large balances cost and performance for general workload
  • -
  • Auto-scaling target 70% CPU (best practice)
  • -
  • Managed PostgreSQL reduces operational overhead
  • -
  • Full observability with Prometheus + Grafana
  • -
-

Configuration and Constraints

-

Configurable Generation Parameters

-
# In provisioning/config/ai.toml
-[ai.generation]
-# Which schema to use by default
-default_schema = "database"
-
-# Whether to require explicit environment specification
-require_environment = false
-
-# Optimization targets
-optimization_target = "balanced"  # or "cost", "performance"
-
-# Best practices to always apply
-best_practices = [
-  "encryption",
-  "high_availability",
-  "monitoring",
-  "backup",
-]
-
-# Constraints that limit generation
-[ai.generation.constraints]
-min_storage_gb = 10
-max_instances = 100
-allowed_engines = ["postgresql", "mysql", "mongodb"]
-
-# Validation before accepting generated config
-[ai.generation.validation]
-strict_mode = true
-require_security_review = false
-require_compliance_check = true
-
-

Safety Guardrails

-
    -
  1. Required Fields: All schema required fields must be present
  2. -
  3. Type Validation: Generated values must match schema types
  4. -
  5. Security Checks: Encryption/backups enabled for production
  6. -
  7. Cost Estimation: Warn if projected cost exceeds threshold
  8. -
  9. Resource Limits: Enforce organizational constraints
  10. -
  11. Policy Compliance: Check against Cedar policies
  12. -
-

User Workflow

-

Typical Usage Session

-
# 1. Describe infrastructure need
-$ provisioning ai generate "I need a database for my web app"
-
-# System generates basic config, suggests refinements
-# Generated config shown with explanations
-
-# 2. Refine if needed
-$ provisioning ai generate --interactive
-
-# 3. Review and validate
-$ provisioning ai validate workspaces/dev/database.ncl
-
-# 4. Deploy
-$ provisioning workspace apply workspaces/dev
-
-# 5. Monitor
-$ provisioning workspace logs database
-
-

Integration with Other Systems

-

RAG Integration

-

NLC uses RAG to find similar configurations:

-
User: "Create Kubernetes cluster"
-  ↓
-RAG searches for:
-  - Existing Kubernetes configs in workspaces
-  - Kubernetes documentation and examples
-  - Best practices from provisioning/docs/guides/kubernetes.md
-  ↓
-Context fed to LLM for generation
-
-

Form Assistance

-

NLC and form assistance share components:

-
    -
  • Intent extraction for pre-filling forms
  • -
  • Constraint validation for form field values
  • -
  • Explanation generation for validation errors
  • -
-

CLI Integration

-
# Generate then preview
-| provisioning ai generate "PostgreSQL prod" | \ |
-  provisioning config preview
-
-# Generate and apply
-provisioning ai generate \
-  --apply \
-  --environment prod \
-  "PostgreSQL cluster"
-
-

Testing and Validation

-

Test Cases (Planned)

-
    -
  1. -

    Simple Descriptions: Single resource, few requirements

    -
      -
    • “PostgreSQL database”
    • -
    • “Redis cache”
    • -
    -
  2. -
  3. -

    Complex Descriptions: Multiple resources, constraints

    -
      -
    • “Kubernetes with managed database and monitoring”
    • -
    • “Multi-region deployment with failover”
    • -
    -
  4. -
  5. -

    Edge Cases:

    -
      -
    • Conflicting requirements
    • -
    • Ambiguous specifications
    • -
    • Deprecated technologies
    • -
    -
  6. -
  7. -

    Refinement Cycles:

    -
      -
    • Interactive generation with multiple refines
    • -
    • Error recovery and re-prompting
    • -
    • User feedback incorporation
    • -
    -
  8. -
-

Success Criteria (Q2 2025)

-
    -
  • ✅ Generates valid Nickel for 90% of user descriptions
  • -
  • ✅ Generated configs pass all schema validation
  • -
  • ✅ Supports top 10 infrastructure patterns
  • -
  • ✅ Interactive refinement works smoothly
  • -
  • ✅ Error messages explain issues clearly
  • -
  • ✅ User testing with non-experts succeeds
  • -
  • ✅ Documentation complete with examples
  • -
  • ✅ Integration with form assistance operational
  • -
- - -
-

Status: 🔴 Planned -Target Release: Q2 2025 -Last Updated: 2025-01-13 -Architecture: Complete -Implementation: In Design Phase

-

Configuration Generation (typdialog-prov-gen)

-

Status: 🔴 Planned for Q2 2025

-

Overview

-

The Configuration Generator (typdialog-prov-gen) will provide template-based Nickel configuration generation with AI-powered customization.

-

Planned Features

-

Template Selection

-
    -
  • Library of production-ready infrastructure templates
  • -
  • AI recommends templates based on requirements
  • -
  • Preview before generation
  • -
-

Customization via Natural Language

-
provisioning ai config-gen \
-  --template "kubernetes-cluster" \
-  --customize "Add Prometheus monitoring, increase replicas to 5, use us-east-1"
-
-

Multi-Provider Support

-
    -
  • AWS, Hetzner, UpCloud, local infrastructure
  • -
  • Automatic provider-specific optimizations
  • -
  • Cost estimation across providers
  • -
-

Validation and Testing

-
    -
  • Type-checking via Nickel before deployment
  • -
  • Dry-run execution for safety
  • -
  • Test data fixtures for verification
  • -
-

Architecture

-
Template Library
-      ↓
-Template Selection (AI + User)
-      ↓
-Customization Layer (NL → Nickel)
-      ↓
-Validation (Type + Runtime)
-      ↓
-Generated Configuration
-
-

Integration Points

-
    -
  • typdialog web UI for template browsing
  • -
  • CLI for batch generation
  • -
  • AI service for customization suggestions
  • -
  • Nickel for type-safe validation
  • -
- - -
-

Status: 🔴 Planned -Expected Release: Q2 2025 -Priority: High (enables non-technical users to generate configs)

-

AI-Assisted Forms (typdialog-ai)

-

Status: 🔴 Planned (Q2 2025 target)

-

AI-Assisted Forms is a planned feature that integrates intelligent suggestions, context-aware assistance, and natural language understanding into the -typdialog web UI. This enables users to configure infrastructure through interactive forms with real-time AI guidance.

-

Feature Overview

-

What It Does

-

Enhance configuration forms with AI-powered assistance:

-
User typing in form field: "storage"
-  ↓
-AI analyzes context:
-  - Current form (database configuration)
-  - Field type (storage capacity)
-  - Similar past configurations
-  - Best practices for this workload
-  ↓
-Suggestions appear:
-  ✓ "100 GB (standard production size)"
-  ✓ "50 GB (development environment)"
-  ✓ "500 GB (large-scale analytics)"
-
-

Primary Use Cases

-
    -
  1. Guided Configuration: Step-by-step assistance filling complex forms
  2. -
  3. Error Explanation: AI explains validation failures in plain English
  4. -
  5. Smart Autocomplete: Suggestions based on context, not just keywords
  6. -
  7. Learning: New users learn patterns from AI explanations
  8. -
  9. Efficiency: Experienced users get quick suggestions
  10. -
-

Architecture

-

User Interface Integration

-
┌────────────────────────────────────────┐
-│ Typdialog Web UI (React/TypeScript)    │
-│                                        │
-│ ┌──────────────────────────────────┐  │
-│ │ Form Fields                      │  │
-│ │                                  │  │
-│ │ Database Engine: [postgresql  ▼] │  │
-│ │ Storage (GB):    [100 GB    ↓ ?] │  │
-│ │                   AI suggestions  │  │
-│ │ Encryption:      [✓ enabled  ]   │  │
-│ │                   "Required for  │  │
-│ │                    production"   │  │
-│ │                                  │  │
-│ │ [← Back] [Next →]                │  │
-│ └──────────────────────────────────┘  │
-│                  ↓                     │
-│         AI Assistance Panel            │
-│      (suggestions & explanations)      │
-└────────────────────────────────────────┘
-        ↓                    ↑
-   User Input           AI Service
-                      (port 8083)
-
-

Suggestion Pipeline

-
User Event (typing, focusing field, validation error)
-        ↓
-┌─────────────────────────────────────┐
-│ Context Extraction                   │
-│ - Current field and value            │
-│ - Form schema and constraints        │
-│ - Other filled fields                │
-│ - User role and workspace            │
-└─────────────────────┬───────────────┘
-                      ↓
-┌─────────────────────────────────────┐
-│ RAG Retrieval                        │
-│ - Find similar configs               │
-│ - Get examples for field type        │
-│ - Retrieve relevant documentation    │
-│ - Find validation rules              │
-└─────────────────────┬───────────────┘
-                      ↓
-┌─────────────────────────────────────┐
-│ Suggestion Generation                │
-│ - AI generates suggestions           │
-│ - Rank by relevance                  │
-│ - Format for display                 │
-│ - Generate explanation               │
-└─────────────────────┬───────────────┘
-                      ↓
-┌─────────────────────────────────────┐
-│ Response Formatting                  │
-│ - Debounce (don't update too fast)   │
-│ - Cache identical results            │
-│ - Stream if long response            │
-│ - Display to user                    │
-└─────────────────────────────────────┘
-
-

Planned Features

-

1. Smart Field Suggestions

-

Intelligent suggestions based on context:

-
Scenario: User filling database configuration form
-
-1. Engine selection
-   User types: "post"
-   Suggestion: "postgresql" (99% match)
-   Explanation: "PostgreSQL is the most popular open-source relational database"
-
-2. Storage size
-   User has selected: "postgresql", "production", "web-application"
-   Suggestions appear:
-   • "100 GB" (standard production web app database)
-   • "500 GB" (if expected growth > 1000 connections)
-   • "1 TB" (high-traffic SaaS platform)
-   Explanation: "For typical web applications with 1000s of concurrent users, 100 GB is recommended"
-
-3. Backup frequency
-   User has selected: "production", "critical-data"
-   Suggestions appear:
-   • "Daily" (standard for critical databases)
-   • "Hourly" (for data warehouses with frequent updates)
-   Explanation: "Critical production data requires daily or more frequent backups"
-
-

2. Validation Error Explanation

-

Human-readable error messages with fixes:

-
User enters: "storage = -100"
-
-Current behavior:
-  ✗ Error: Expected positive integer
-
-Planned AI behavior:
-  ✗ Storage must be positive (1-65535 GB)
-
-  Why: Negative storage doesn't make sense.
-       Storage capacity must be at least 1 GB.
-
-  Fix suggestions:
-  • Use 100 GB (typical production size)
-  • Use 50 GB (development environment)
-  • Use your required size in GB
-
-

3. Field-to-Field Context Awareness

-

Suggestions change based on other fields:

-
Scenario: Multi-step configuration form
-
-Step 1: Select environment
-User: "production"
-  → Form shows constraints: (min storage 50GB, encryption required, backup required)
-
-Step 2: Select database engine
-User: "postgresql"
-  → Suggestions adapted:
-    - PostgreSQL 15 recommended for production
-    - Point-in-time recovery available
-    - Replication options highlighted
-
-Step 3: Storage size
-  → Suggestions show:
-    - Minimum 50 GB for production
-    - Examples from similar production configs
-    - Cost estimate updates in real-time
-
-Step 4: Encryption
-  → Suggestion appears: "Recommended: AES-256"
-  → Explanation: "Required for production environments"
-
-

4. Inline Documentation

-

Quick access to relevant docs:

-
Field: "Backup Retention Days"
-
-Suggestion popup:
-  ┌─────────────────────────────────┐
-  │ Suggested value: 30              │
-  │                                 │
-  │ Why: 30 days is industry-standard│
-  │ standard for compliance (PCI-DSS)│
-  │                                 │
-  │ Learn more:                      │
-  │ → Backup best practices guide    │
-  │ → Your compliance requirements   │
-  │ → Cost vs retention trade-offs   │
-  └─────────────────────────────────┘
-
-

5. Multi-Field Suggestions

-

Suggest multiple related fields together:

-
User selects: environment = "production"
-
-AI suggests completing:
-  ┌─────────────────────────────────┐
-  │ Complete Production Setup        │
-  │                                 │
-  │ Based on production environment │
-  │ we recommend:                    │
-  │                                 │
-  │ Encryption: enabled              │ ← Auto-fill
-  │ Backups: daily                   │ ← Auto-fill
-  │ Monitoring: enabled              │ ← Auto-fill
-  │ High availability: enabled       │ ← Auto-fill
-  │ Retention: 30 days              │ ← Auto-fill
-  │                                 │
-  │ [Accept All] [Review] [Skip]    │
-  └─────────────────────────────────┘
-
-

Implementation Components

-

Frontend (typdialog-ai JavaScript/TypeScript)

-
// React component for field with AI assistance
-interface AIFieldProps {
-  fieldName: string;
-  fieldType: string;
-  currentValue: string;
-  formContext: Record<string, any>;
-  schema: FieldSchema;
-}
-
-function AIAssistedField({fieldName, formContext, schema}: AIFieldProps) {
-  const [suggestions, setSuggestions] = useState<Suggestion[]>([]);
-  const [explanation, setExplanation] = useState<string>("");
-
-  // Debounced suggestion generation
-  useEffect(() => {
-    const timer = setTimeout(async () => {
-      const suggestions = await ai.suggestFieldValue({
-        field: fieldName,
-        context: formContext,
-        schema: schema,
-      });
-      setSuggestions(suggestions);
-| setExplanation(suggestions[0]?.explanation |  | ""); |
-    }, 300);  // Debounce 300ms
-
-    return () => clearTimeout(timer);
-  }, [formContext[fieldName]]);
-
-  return (
-    <div className="ai-field">
-      <input
-        value={formContext[fieldName]}
-        onChange={(e) => handleChange(e.target.value)}
-      />
-
-      {suggestions.length > 0 && (
-        <div className="ai-suggestions">
-          {suggestions.map((s) => (
-            <button key={s.value} onClick={() => accept(s.value)}>
-              {s.label}
-            </button>
-          ))}
-          {explanation && (
-            <p className="ai-explanation">{explanation}</p>
-          )}
-        </div>
-      )}
-    </div>
-  );
-}
-
-

Backend Service Integration

-
// In AI Service: field suggestion endpoint
-async fn suggest_field_value(
-    req: SuggestFieldRequest,
-) -> Result<Vec<Suggestion>> {
-    // Build context for the suggestion
-    let context = build_field_context(&req.form_context, &req.field_name)?;
-
-    // Retrieve relevant examples from RAG
-    let examples = rag.search_by_field(&req.field_name, &context)?;
-
-    // Generate suggestions via LLM
-    let suggestions = llm.generate_suggestions(
-        &req.field_name,
-        &req.field_type,
-        &context,
-        &examples,
-    ).await?;
-
-    // Rank and format suggestions
-    let ranked = rank_suggestions(suggestions, &context);
-
-    Ok(ranked)
-}
-
-

Configuration

-

Form Assistant Settings

-
# In provisioning/config/ai.toml
-[ai.forms]
-enabled = true
-
-# Suggestion delivery
-suggestions_enabled = true
-suggestions_debounce_ms = 300
-max_suggestions_per_field = 3
-
-# Error explanations
-error_explanations_enabled = true
-explain_validation_errors = true
-suggest_fixes = true
-
-# Field context awareness
-field_context_enabled = true
-cross_field_suggestions = true
-
-# Inline documentation
-inline_docs_enabled = true
-docs_link_type = "modal"  # or "sidebar", "tooltip"
-
-# Performance
-cache_suggestions = true
-cache_ttl_seconds = 3600
-
-# Learning
-track_accepted_suggestions = true
-track_rejected_suggestions = true
-
-

User Experience Flow

-

Scenario: New User Configuring PostgreSQL

-
1. User opens typdialog form
-   - Form title: "Create Database"
-   - First field: "Database Engine"
-   - AI shows: "PostgreSQL recommended for relational data"
-
-2. User types "post"
-   - Autocomplete shows: "postgresql"
-   - AI explains: "PostgreSQL is the most stable open-source database"
-
-3. User selects "postgresql"
-   - Form progresses
-   - Next field: "Version"
-   - AI suggests: "PostgreSQL 15 (latest stable)"
-   - Explanation: "Version 15 is current stable, recommended for new deployments"
-
-4. User selects version 15
-   - Next field: "Environment"
-   - User selects "production"
-   - AI note appears: "Production environment requires encryption and backups"
-
-5. Next field: "Storage (GB)"
-   - Form shows: Minimum 50 GB (production requirement)
-   - AI suggestions:
-     • 100 GB (standard production)
-     • 250 GB (high-traffic site)
-   - User accepts: 100 GB
-
-6. Validation error on next field
-   - Old behavior: "Invalid backup_days value"
-   - New behavior:
-     "Backup retention must be 1-35 days. Recommended: 30 days.
-     30-day retention meets compliance requirements for production systems."
-
-7. User completes form
-   - Summary shows all AI-assisted decisions
-   - Generate button creates configuration
-
-

Integration with Natural Language Generation

-

NLC and form assistance share the same backend:

-
Natural Language Generation    AI-Assisted Forms
-        ↓                              ↓
-    "Create a PostgreSQL db"    Select field values
-        ↓                              ↓
-    Intent Extraction         Context Extraction
-        ↓                              ↓
-    RAG Search              RAG Search (same results)
-        ↓                              ↓
-    LLM Generation          LLM Suggestions
-        ↓                              ↓
-    Config Output           Form Field Population
-
-

Success Criteria (Q2 2025)

-
    -
  • ✅ Suggestions appear within 300ms of user action
  • -
  • ✅ 80% suggestion acceptance rate in user testing
  • -
  • ✅ Error explanations clearly explain issues and fixes
  • -
  • ✅ Cross-field context awareness works for 5+ database scenarios
  • -
  • ✅ Form completion time reduced by 40% with AI
  • -
  • ✅ User satisfaction > 8/10 in testing
  • -
  • ✅ No false suggestions (all suggestions are valid)
  • -
  • ✅ Offline mode works with cached suggestions
  • -
- - -
-

Status: 🔴 Planned -Target Release: Q2 2025 -Last Updated: 2025-01-13 -Component: typdialog-ai -Architecture: Complete -Implementation: In Design Phase

-

Autonomous AI Agents (typdialog-ag)

-

Status: 🔴 Planned (Q2 2025 target)

-

Autonomous AI Agents is a planned feature that enables AI agents to execute multi-step -infrastructure provisioning workflows with minimal human intervention. Agents make -decisions, adapt to changing conditions, and execute complex tasks while maintaining -security and requiring human approval for critical operations.

-

Feature Overview

-

What It Does

-

Enable AI agents to manage complex provisioning workflows:

-
User Goal:
-  "Set up a complete development environment with:
-   - PostgreSQL database
-   - Redis cache
-   - Kubernetes cluster
-   - Monitoring stack
-   - Logging infrastructure"
-
-AI Agent executes:
-1. Analyzes requirements and constraints
-2. Plans multi-step deployment sequence
-3. Creates configurations for all components
-4. Validates configurations against policies
-5. Requests human approval for critical decisions
-6. Executes deployment in correct order
-7. Monitors for failures and adapts
-8. Reports completion and recommendations
-
-

Agent Capabilities

-

Multi-Step Workflow Execution

-

Agents coordinate complex, multi-component deployments:

-
Goal: "Deploy production Kubernetes cluster with managed databases"
-
-Agent Plan:
-  Phase 1: Infrastructure
-    ├─ Create VPC and networking
-    ├─ Set up security groups
-    └─ Configure IAM roles
-
-  Phase 2: Kubernetes
-    ├─ Create EKS cluster
-    ├─ Configure network plugins
-    ├─ Set up autoscaling
-    └─ Install cluster add-ons
-
-  Phase 3: Managed Services
-    ├─ Provision RDS PostgreSQL
-    ├─ Configure backups
-    └─ Set up replicas
-
-  Phase 4: Observability
-    ├─ Deploy Prometheus
-    ├─ Deploy Grafana
-    ├─ Configure log collection
-    └─ Set up alerting
-
-  Phase 5: Validation
-    ├─ Run smoke tests
-    ├─ Verify connectivity
-    └─ Check compliance
-
-

Adaptive Decision Making

-

Agents adapt to conditions and make intelligent decisions:

-
Scenario: Database provisioning fails due to resource quota
-
-Standard approach (human):
-1. Detect failure
-2. Investigate issue
-3. Decide on fix (reduce size, change region, etc.)
-4. Update config
-5. Retry
-
-Agent approach:
-1. Detect failure
-2. Analyze error: "Quota exceeded for db.r6g.xlarge"
-3. Check available options:
-   - Try smaller instance: db.r6g.large (may be insufficient)
-   - Try different region: different cost, latency
-   - Request quota increase (requires human approval)
-4. Ask human: "Quota exceeded. Suggest: use db.r6g.large instead
-   (slightly reduced performance). Approve? [yes/no/try-other]"
-5. Execute based on approval
-6. Continue workflow
-
-

Dependency Management

-

Agents understand resource dependencies:

-
Knowledge graph of dependencies:
-
-  VPC ──→ Subnets ──→ EC2 Instances
-   ├─────────→ Security Groups
-   └────→ NAT Gateway ──→ Route Tables
-
-  RDS ──→ DB Subnet Group ──→ VPC
-   ├─────────→ Security Group
-   └────→ Parameter Group
-
-Agent ensures:
-- VPC exists before creating subnets
-- Subnets exist before creating EC2
-- Security groups reference correct VPC
-- Deployment order respects all dependencies
-- Rollback order is reverse of creation
-
-

Architecture

-

Agent Design Pattern

-
┌────────────────────────────────────────────────────────┐
-│ Agent Supervisor (Orchestrator)                        │
-│ - Accepts user goal                                    │
-│ - Plans workflow                                       │
-│ - Coordinates specialist agents                        │
-│ - Requests human approvals                             │
-│ - Monitors overall progress                            │
-└────────────────────────────────────────────────────────┘
-        ↑                    ↑                    ↑
-        │                    │                    │
-        ↓                    ↓                    ↓
-┌──────────────┐  ┌──────────────┐  ┌──────────────┐
-│ Database     │  │ Kubernetes   │  │ Monitoring   │
-│ Specialist   │  │ Specialist   │  │ Specialist   │
-│              │  │              │  │              │
-│ Tasks:       │  │ Tasks:       │  │ Tasks:       │
-│ - Create DB  │  │ - Create K8s │  │ - Deploy     │
-│ - Configure  │  │ - Configure  │  │   Prometheus │
-│ - Validate   │  │ - Validate   │  │ - Deploy     │
-│ - Report     │  │ - Report     │  │   Grafana    │
-└──────────────┘  └──────────────┘  └──────────────┘
-
-

Agent Workflow

-
Start: User Goal
-  ↓
-┌─────────────────────────────────────────┐
-│ Goal Analysis & Planning                │
-│ - Parse user intent                     │
-│ - Identify resources needed             │
-│ - Plan dependency graph                 │
-│ - Generate task list                    │
-└──────────────┬──────────────────────────┘
-               ↓
-┌─────────────────────────────────────────┐
-│ Resource Generation                     │
-│ - Generate configs for each resource    │
-│ - Validate against schemas              │
-│ - Check compliance policies             │
-│ - Identify potential issues             │
-└──────────────┬──────────────────────────┘
-               ↓
-         Human Review Point?
-         ├─ No issues: Continue
-         └─ Issues found: Request approval/modification
-               ↓
-┌─────────────────────────────────────────┐
-│ Execution Plan Verification             │
-│ - Check all configs are valid           │
-│ - Verify dependencies are resolvable    │
-│ - Estimate costs and timeline           │
-│ - Identify risks                        │
-└──────────────┬──────────────────────────┘
-               ↓
-         Execute Workflow?
-         ├─ User approves: Start execution
-         └─ User modifies: Return to planning
-               ↓
-┌─────────────────────────────────────────┐
-│ Phase-by-Phase Execution                │
-│ - Execute one logical phase             │
-│ - Monitor for errors                    │
-│ - Report progress                       │
-│ - Ask for decisions if needed           │
-└──────────────┬──────────────────────────┘
-               ↓
-         All Phases Complete?
-         ├─ No: Continue to next phase
-         └─ Yes: Final validation
-               ↓
-┌─────────────────────────────────────────┐
-│ Final Validation & Reporting            │
-│ - Smoke tests                           │
-│ - Connectivity tests                    │
-│ - Compliance verification               │
-│ - Performance checks                    │
-│ - Generate final report                 │
-└──────────────┬──────────────────────────┘
-               ↓
-Success: Deployment Complete
-
-

Planned Agent Types

-

1. Database Specialist Agent

-
Responsibilities:
-- Create and configure databases
-- Set up replication and backups
-- Configure encryption and security
-- Monitor database health
-- Handle database-specific issues
-
-Examples:
-- Provision PostgreSQL cluster with replication
-- Set up MySQL with read replicas
-- Configure MongoDB sharding
-- Create backup pipelines
-
-

2. Kubernetes Specialist Agent

-
Responsibilities:
-- Create and configure Kubernetes clusters
-- Configure networking and ingress
-- Set up autoscaling policies
-- Deploy cluster add-ons
-- Manage workload placement
-
-Examples:
-- Create EKS/GKE/AKS cluster
-- Configure Istio service mesh
-- Deploy Prometheus + Grafana
-- Configure auto-scaling policies
-
-

3. Infrastructure Agent

-
Responsibilities:
-- Create networking infrastructure
-- Configure security and firewalls
-- Set up load balancers
-- Configure DNS and CDN
-- Manage identity and access
-
-Examples:
-- Create VPC with subnets
-- Configure security groups
-- Set up application load balancer
-- Configure Route53 DNS
-
-

4. Monitoring Agent

-
Responsibilities:
-- Deploy monitoring stack
-- Configure alerting
-- Set up logging infrastructure
-- Create dashboards
-- Configure notification channels
-
-Examples:
-- Deploy Prometheus + Grafana
-- Set up CloudWatch dashboards
-- Configure log aggregation
-- Set up PagerDuty integration
-
-

5. Compliance Agent

-
Responsibilities:
-- Check security policies
-- Verify compliance requirements
-- Audit configurations
-- Generate compliance reports
-- Recommend security improvements
-
-Examples:
-- Check PCI-DSS compliance
-- Verify encryption settings
-- Audit access controls
-- Generate compliance report
-
-

Usage Examples

-

Example 1: Development Environment Setup

-
$ provisioning ai agent --goal "Set up dev environment for Python web app"
-
-Agent Plan Generated:
-┌─────────────────────────────────────────┐
-│ Environment: Development                │
-│ Components: PostgreSQL + Redis + Monitoring
-│                                         │
-│ Phase 1: Database (1-2 min)            │
-│   - PostgreSQL 15                       │
-│   - 10 GB storage                       │
-│   - Dev security settings               │
-│                                         │
-│ Phase 2: Cache (1 min)                 │
-│   - Redis Cluster Mode disabled         │
-│   - Single node                         │
-│   - 2 GB memory                         │
-│                                         │
-│ Phase 3: Monitoring (1-2 min)          │
-│   - Prometheus (metrics)                │
-│   - Grafana (dashboards)                │
-│   - Log aggregation                     │
-│                                         │
-│ Estimated time: 5-10 minutes            │
-│ Estimated cost: $15/month               │
-│                                         │
-│ [Approve] [Modify] [Cancel]             │
-└─────────────────────────────────────────┘
-
-Agent: Approve to proceed with setup.
-
-User: Approve
-
-[Agent execution starts]
-Creating PostgreSQL...     [████████░░] 80%
-Creating Redis...          [░░░░░░░░░░] 0%
-[Waiting for PostgreSQL creation...]
-
-PostgreSQL created successfully!
-Connection string: postgresql://dev:pwd@db.internal:5432/app
-
-Creating Redis...          [████████░░] 80%
-[Waiting for Redis creation...]
-
-Redis created successfully!
-Connection string: redis://cache.internal:6379
-
-Deploying monitoring...    [████████░░] 80%
-[Waiting for Grafana startup...]
-
-All services deployed successfully!
-Grafana dashboards: [http://grafana.internal:3000](http://grafana.internal:3000)
-
-

Example 2: Production Kubernetes Deployment

-
$ provisioning ai agent --interactive \
-    --goal "Deploy production Kubernetes cluster with managed databases"
-
-Agent Analysis:
-- Cluster size: 3-10 nodes (auto-scaling)
-- Databases: RDS PostgreSQL + ElastiCache Redis
-- Monitoring: Full observability stack
-- Security: TLS, encryption, VPC isolation
-
-Agent suggests modifications:
-  1. Enable cross-AZ deployment for HA
-  2. Add backup retention: 30 days
-  3. Add network policies for security
-  4. Enable cluster autoscaling
-  Approve all? [yes/review]
-
-User: Review
-
-Agent points out:
-  - Network policies may affect performance
-  - Cross-AZ increases costs by ~20%
-  - Backup retention meets compliance
-
-User: Approve with modifications
-  - Network policies: use audit mode first
-  - Keep cross-AZ
-  - Keep backups
-
-[Agent creates configs with modifications]
-
-Configs generated:
-  ✓ infrastructure/vpc.ncl
-  ✓ infrastructure/kubernetes.ncl
-  ✓ databases/postgres.ncl
-  ✓ databases/redis.ncl
-  ✓ monitoring/prometheus.ncl
-  ✓ monitoring/grafana.ncl
-
-Estimated deployment time: 15-20 minutes
-Estimated cost: $2,500/month
-
-[Start deployment?] [Review configs]
-
-User: Review configs
-
-[User reviews and approves]
-
-[Agent executes deployment in phases]
-
-

Safety and Control

-

Human-in-the-Loop Checkpoints

-

Agents stop and ask humans for approval at critical points:

-
Automatic Approval (Agent decides):
-- Create configuration
-- Validate configuration
-- Check dependencies
-- Generate execution plan
-
-Human Approval Required:
-- First-time resource creation
-- Cost changes > 10%
-- Security policy changes
-- Cross-region deployment
-- Data deletion operations
-- Major version upgrades
-
-

Decision Logging

-

All decisions logged for audit trail:

-
Agent Decision Log:
-| 2025-01-13 10:00:00 | Generate database config |
-| 2025-01-13 10:00:05 | Config validation: PASS |
-| 2025-01-13 10:00:07 | Requesting human approval: "Create new PostgreSQL instance" |
-| 2025-01-13 10:00:45 | Human approval: APPROVED |
-| 2025-01-13 10:00:47 | Cost estimate: $100/month - within budget |
-| 2025-01-13 10:01:00 | Creating infrastructure... |
-| 2025-01-13 10:02:15 | Database created successfully |
-| 2025-01-13 10:02:16 | Running health checks... |
-| 2025-01-13 10:02:45 | Health check: PASSED |
-
-

Rollback Capability

-

Agents can rollback on failure:

-
Scenario: Database creation succeeds, but Kubernetes creation fails
-
-Agent behavior:
-1. Detect failure in Kubernetes phase
-2. Try recovery (retry, different configuration)
-3. Recovery fails
-4. Ask human: "Kubernetes creation failed. Rollback database creation? [yes/no]"
-5. If yes: Delete database, clean up, report failure
-6. If no: Keep database, manual cleanup needed
-
-Full rollback capability if entire workflow fails before human approval.
-
-

Configuration

-

Agent Settings

-
# In provisioning/config/ai.toml
-[ai.agents]
-enabled = true
-
-# Agent decision-making
-auto_approve_threshold = 0.95  # Approve if confidence > 95%
-require_approval_for = [
-  "first_resource_creation",
-  "cost_change_above_percent",
-  "security_policy_change",
-  "data_deletion",
-]
-
-cost_change_threshold_percent = 10
-
-# Execution control
-max_parallel_phases = 2
-phase_timeout_minutes = 30
-execution_log_retention_days = 90
-
-# Safety
-dry_run_mode = false  # Always perform dry run first
-require_final_approval = true
-rollback_on_failure = true
-
-# Learning
-track_agent_decisions = true
-track_success_rate = true
-improve_from_feedback = true
-
-

Success Criteria (Q2 2025)

-
    -
  • ✅ Agents complete 5 standard workflows without human intervention
  • -
  • ✅ Cost estimation accuracy within 5%
  • -
  • ✅ Execution time matches or beats manual setup by 30%
  • -
  • ✅ Success rate > 95% for tested scenarios
  • -
  • ✅ Zero unapproved critical decisions
  • -
  • ✅ Full decision audit trail for all operations
  • -
  • ✅ Rollback capability tested and verified
  • -
  • ✅ User satisfaction > 8/10 in testing
  • -
  • ✅ Documentation complete with examples
  • -
  • ✅ Integration with form assistance and NLC working
  • -
- - -
-

Status: 🔴 Planned -Target Release: Q2 2025 -Last Updated: 2025-01-13 -Component: typdialog-ag -Architecture: Complete -Implementation: In Design Phase

-

System Overview

-

Executive Summary

-

Provisioning is an Infrastructure Automation Platform built with a hybrid Rust/Nushell architecture. It enables Infrastructure as Code (IaC) with -multi-provider support (AWS, UpCloud, local), sophisticated workflow orchestration, and configuration-driven operations.

-

The system solves fundamental technical challenges through architectural innovation and hybrid language design.

-

High-Level Architecture

-

System Diagram

-
┌─────────────────────────────────────────────────────────────────┐
-│                        User Interface Layer                     │
-├─────────────────┬─────────────────┬─────────────────────────────┤
-│   CLI Tools     │   REST API      │   Control Center UI         │
-│   (Nushell)     │   (Rust)        │   (Web Interface)           │
-└─────────────────┴─────────────────┴─────────────────────────────┘
-                           │
-┌─────────────────────────────────────────────────────────────────┐
-│                    Orchestration Layer                          │
-├─────────────────────────────────────────────────────────────────┤
-│   Rust Orchestrator: Workflow Coordination & State Management   │
-│   • Task Queue & Scheduling    • Batch Processing               │
-│   • State Persistence         • Error Recovery & Rollback       │
-│   • REST API Server          • Real-time Monitoring             │
-└─────────────────────────────────────────────────────────────────┘
-                           │
-┌─────────────────────────────────────────────────────────────────┐
-│                    Business Logic Layer                         │
-├─────────────────┬─────────────────┬─────────────────────────────┤
-│   Providers     │   Task Services │   Workflows                 │
-│   (Nushell)     │   (Nushell)     │   (Nushell)                 │
-│   • AWS         │   • Kubernetes  │   • Server Creation         │
-│   • UpCloud     │   • Storage     │   • Cluster Deployment      │
-│   • Local       │   • Networking  │   • Batch Operations        │
-└─────────────────┴─────────────────┴─────────────────────────────┘
-                           │
-┌─────────────────────────────────────────────────────────────────┐
-│                    Configuration Layer                          │
-├─────────────────┬─────────────────┬─────────────────────────────┤
-│   Nickel Schemas│   TOML Config   │   Templates                 │
-│   • Type Safety │   • Hierarchy   │   • Infrastructure          │
-│   • Validation  │   • Environment │   • Service Configs         │
-│   • Extensible  │   • User Prefs  │   • Code Generation         │
-└─────────────────┴─────────────────┴─────────────────────────────┘
-                           │
-┌─────────────────────────────────────────────────────────────────┐
-│                      Infrastructure Layer                       │
-├─────────────────┬─────────────────┬─────────────────────────────┤
-│   Cloud APIs    │   Kubernetes    │   Local Systems             │
-│   • AWS EC2     │   • Clusters    │   • Docker                  │
-│   • UpCloud     │   • Services    │   • Containers              │
-│   • Others      │   • Storage     │   • Host Services           │
-└─────────────────┴─────────────────┴─────────────────────────────┘
-
-

Core Components

-

1. Hybrid Architecture Foundation

-

Coordination Layer (Rust)

-

Purpose: High-performance workflow orchestration and system coordination

-

Components:

-
    -
  • Orchestrator Engine: Task scheduling and execution coordination
  • -
  • REST API Server: HTTP endpoints for external integration
  • -
  • State Management: Persistent state tracking with checkpoint recovery
  • -
  • Batch Processor: Parallel execution of complex multi-provider workflows
  • -
  • File-based Queue: Lightweight, reliable task persistence
  • -
  • Error Recovery: Sophisticated rollback and cleanup capabilities
  • -
-

Key Features:

-
    -
  • Solves Nushell deep call stack limitations
  • -
  • Handles 1000+ concurrent operations
  • -
  • Checkpoint-based recovery from any failure point
  • -
  • Real-time workflow monitoring and status tracking
  • -
-

Business Logic Layer (Nushell)

-

Purpose: Domain-specific operations and configuration management

-

Components:

-
    -
  • Provider Implementations: Cloud-specific operations (AWS, UpCloud, local)
  • -
  • Task Service Management: Infrastructure component lifecycle
  • -
  • Configuration Processing: Nickel-based configuration validation and templating
  • -
  • CLI Interface: User-facing command-line tools
  • -
  • Workflow Definitions: Business process implementations
  • -
-

Key Features:

-
    -
  • 65+ domain-specific modules preserved and enhanced
  • -
  • Configuration-driven operations with zero hardcoded values
  • -
  • Type-safe Nickel integration for Infrastructure as Code
  • -
  • Extensible provider and service architecture
  • -
-

2. Configuration System (v2.0.0)

-

Hierarchical Configuration Management

-

Migration Achievement: 65+ files migrated, 200+ ENV variables → 476 config accessors

-

Configuration Hierarchy (precedence order):

-
    -
  1. Runtime Parameters (command line, environment variables)
  2. -
  3. Environment Configuration (dev/test/prod specific)
  4. -
  5. Infrastructure Configuration (project-specific settings)
  6. -
  7. User Configuration (personal preferences)
  8. -
  9. System Defaults (system-wide defaults)
  10. -
-

Configuration Files:

-
    -
  • config.defaults.toml - System-wide defaults
  • -
  • config.user.toml - User-specific preferences
  • -
  • config.{dev,test,prod}.toml - Environment-specific configurations
  • -
  • Infrastructure-specific configuration files
  • -
-

Features:

-
    -
  • Variable Interpolation: {{paths.base}}, {{env.HOME}}, {{now.date}}, {{git.branch}}
  • -
  • Environment Switching: PROVISIONING_ENV=prod for environment-specific configs
  • -
  • Validation Framework: Comprehensive configuration validation and error reporting
  • -
  • Migration Tools: Automated migration from ENV-based to config-driven architecture
  • -
-

3. Workflow System (v3.1.0)

-

Batch Workflow Engine

-

Batch Capabilities:

-
    -
  • Provider-Agnostic Workflows: Mix UpCloud, AWS, and local providers in single workflow
  • -
  • Dependency Resolution: Topological sorting with soft/hard dependency support
  • -
  • Parallel Execution: Configurable parallelism limits with resource management
  • -
  • State Recovery: Checkpoint-based recovery with rollback capabilities
  • -
  • Real-time Monitoring: Live progress tracking and health monitoring
  • -
-

Workflow Types:

-
    -
  • Server Workflows: Multi-provider server provisioning and management
  • -
  • Task Service Workflows: Infrastructure component installation and configuration
  • -
  • Cluster Workflows: Complete Kubernetes cluster deployment and management
  • -
  • Batch Workflows: Complex multi-step operations with dependency management
  • -
-

Nickel Workflow Definitions:

-
{
-  batch_workflow = {
-    name = "multi_cloud_deployment",
-    version = "1.0.0",
-    parallel_limit = 5,
-    rollback_enabled = true,
-
-    operations = [
-      {
-        id = "servers",
-        type = "server_batch",
-        provider = "upcloud",
-        dependencies = [],
-      },
-      {
-        id = "services",
-        type = "taskserv_batch",
-        provider = "aws",
-        dependencies = ["servers"],
-      }
-    ]
-  }
-}
-
-

4. Provider Ecosystem

-

Multi-Provider Architecture

-

Supported Providers:

-
    -
  • AWS: Amazon Web Services integration
  • -
  • UpCloud: UpCloud provider with full feature support
  • -
  • Local: Local development and testing provider
  • -
-

Provider Features:

-
    -
  • Standardized Interfaces: Consistent API across all providers
  • -
  • Configuration Templates: Provider-specific configuration generation
  • -
  • Resource Management: Complete lifecycle management for cloud resources
  • -
  • Cost Optimization: Pricing information and cost optimization recommendations
  • -
  • Regional Support: Multi-region deployment capabilities
  • -
-

Task Services Ecosystem

-

Infrastructure Components (40+ services):

-
    -
  • Container Orchestration: Kubernetes, container runtimes (containerd, cri-o, crun, runc, youki)
  • -
  • Networking: Cilium, CoreDNS, HAProxy, service mesh integration
  • -
  • Storage: Rook-Ceph, external-NFS, Mayastor, persistent volumes
  • -
  • Security: Policy engines, secrets management, RBAC
  • -
  • Observability: Monitoring, logging, tracing, metrics collection
  • -
  • Development Tools: Gitea, databases, build systems
  • -
-

Service Features:

-
    -
  • Version Management: Real-time version checking against GitHub releases
  • -
  • Configuration Generation: Automated service configuration from templates
  • -
  • Dependency Management: Automatic dependency resolution and installation order
  • -
  • Health Monitoring: Service health checks and status reporting
  • -
-

Key Architectural Decisions

-

1. Hybrid Language Architecture (ADR-004)

-

Decision: Use Rust for coordination, Nushell for business logic -Rationale: Solves Nushell’s deep call stack limitations while preserving domain expertise -Impact: Eliminates technical limitations while maintaining productivity and configuration advantages

-

2. Configuration-Driven Architecture (ADR-002)

-

Decision: Complete migration from ENV variables to hierarchical configuration -Rationale: True Infrastructure as Code requires configuration flexibility without hardcoded fallbacks -Impact: 476 configuration accessors provide complete customization without code changes

-

3. Domain-Driven Structure (ADR-001)

-

Decision: Organize by functional domains (core, platform, provisioning) -Rationale: Clear boundaries enable scalable development and maintenance -Impact: Enables specialized development while maintaining system coherence

-

4. Workspace Isolation (ADR-003)

-

Decision: Isolated user workspaces with hierarchical configuration -Rationale: Multi-user support and customization without system impact -Impact: Complete user independence with easy backup and migration

-

5. Registry-Based Extensions (ADR-005)

-

Decision: Manifest-driven extension framework with structured discovery -Rationale: Enable community contributions while maintaining system stability -Impact: Extensible system supporting custom providers, services, and workflows

-

Data Flow Architecture

-

Configuration Resolution Flow

-
1. Workspace Discovery → 2. Configuration Loading → 3. Hierarchy Merge →
-4. Variable Interpolation → 5. Schema Validation → 6. Runtime Application
-
-

Workflow Execution Flow

-
1. Workflow Submission → 2. Dependency Analysis → 3. Task Scheduling →
-4. Parallel Execution → 5. State Tracking → 6. Result Aggregation →
-7. Error Handling → 8. Cleanup/Rollback
-
-

Provider Integration Flow

-
1. Provider Discovery → 2. Configuration Validation → 3. Authentication →
-4. Resource Planning → 5. Operation Execution → 6. State Persistence →
-7. Result Reporting
-
-

Technology Stack

-

Core Technologies

-
    -
  • Nushell 0.107.1: Primary shell and scripting language
  • -
  • Rust: High-performance coordination and orchestration
  • -
  • Nickel 1.15.0+: Configuration language for Infrastructure as Code
  • -
  • TOML: Configuration file format with human readability
  • -
  • JSON: Data exchange format between components
  • -
-

Infrastructure Technologies

-
    -
  • Kubernetes: Container orchestration platform
  • -
  • Docker/Containerd: Container runtime environments
  • -
  • SOPS 3.10.2: Secrets management and encryption
  • -
  • Age 1.2.1: Encryption tool for secrets
  • -
  • HTTP/REST: API communication protocols
  • -
-

Development Technologies

-
    -
  • nu_plugin_tera: Native Nushell template rendering
  • -
  • K9s 0.50.6: Kubernetes management interface
  • -
  • Git: Version control and configuration management
  • -
-

Scalability and Performance

-

Performance Characteristics

-
    -
  • Batch Processing: 1000+ concurrent operations with configurable parallelism
  • -
  • Provider Operations: Sub-second response for most cloud API operations
  • -
  • Configuration Loading: Millisecond-level configuration resolution
  • -
  • State Persistence: File-based persistence with minimal overhead
  • -
  • Memory Usage: Efficient memory management with streaming operations
  • -
-

Scalability Features

-
    -
  • Horizontal Scaling: Multiple orchestrator instances for high availability
  • -
  • Resource Management: Configurable resource limits and quotas
  • -
  • Caching Strategy: Multi-level caching for performance optimization
  • -
  • Streaming Operations: Large dataset processing without memory limits
  • -
  • Async Processing: Non-blocking operations for improved throughput
  • -
-

Security Architecture

-

Security Layers

-
    -
  • Workspace Isolation: User data isolated from system installation
  • -
  • Configuration Security: Encrypted secrets with SOPS/Age integration
  • -
  • Extension Sandboxing: Extensions run in controlled environments
  • -
  • API Authentication: Secure REST API endpoints with authentication
  • -
  • Audit Logging: Comprehensive audit trails for all operations
  • -
-

Security Features

-
    -
  • Secrets Management: Encrypted configuration files with rotation support
  • -
  • Permission Model: Role-based access control for operations
  • -
  • Code Signing: Digital signature verification for extensions
  • -
  • Network Security: Secure communication with cloud providers
  • -
  • Input Validation: Comprehensive input validation and sanitization
  • -
-

Quality Attributes

-

Reliability

-
    -
  • Error Recovery: Sophisticated error handling and rollback capabilities
  • -
  • State Consistency: Transactional operations with rollback support
  • -
  • Health Monitoring: Comprehensive system health checks and monitoring
  • -
  • Fault Tolerance: Graceful degradation and recovery from failures
  • -
-

Maintainability

-
    -
  • Clear Architecture: Well-defined boundaries and responsibilities
  • -
  • Documentation: Comprehensive architecture and development documentation
  • -
  • Testing Strategy: Multi-layer testing with integration validation
  • -
  • Code Quality: Consistent patterns and quality standards
  • -
-

Extensibility

-
    -
  • Plugin Framework: Registry-based extension system
  • -
  • Provider API: Standardized interfaces for new providers
  • -
  • Configuration Schema: Extensible configuration with validation
  • -
  • Workflow Engine: Custom workflow definitions and execution
  • -
-

This system architecture represents a mature, production-ready platform for Infrastructure as Code with unique architectural innovations and proven -scalability.

-

Provisioning Platform - Architecture Overview

-

Version: 3.5.0 -Date: 2025-10-06 -Status: Production -Maintainers: Architecture Team

-
-

Table of Contents

-
    -
  1. Executive Summary
  2. -
  3. System Architecture
  4. -
  5. Component Architecture
  6. -
  7. Mode Architecture
  8. -
  9. Network Architecture
  10. -
  11. Data Architecture
  12. -
  13. Security Architecture
  14. -
  15. Deployment Architecture
  16. -
  17. Integration Architecture
  18. -
  19. Performance and Scalability
  20. -
  21. Evolution and Roadmap
  22. -
-
-

Executive Summary

-

What is the Provisioning Platform

-

The Provisioning Platform is a modern, cloud-native infrastructure automation system that combines:

-
    -
  • the simplicity of declarative configuration (Nickel)
  • -
  • the power of shell scripting (Nushell)
  • -
  • high-performance coordination (Rust).
  • -
-

Key Characteristics

-
    -
  • Hybrid Architecture: Rust for coordination, Nushell for business logic, Nickel for configuration
  • -
  • Mode-Based: Adapts from solo development to enterprise production
  • -
  • OCI-Native: Extends leveraging industry-standard OCI distribution
  • -
  • Provider-Agnostic: Supports multiple cloud providers (AWS, UpCloud) and local infrastructure
  • -
  • Extension-Driven: Core functionality enhanced through modular extensions
  • -
-

Architecture at a Glance

-
┌─────────────────────────────────────────────────────────────────────┐
-│                        Provisioning Platform                        │
-├─────────────────────────────────────────────────────────────────────┤
-│                                                                     │
-│   ┌──────────────┐   ┌─────────────┐    ┌──────────────┐            │
-│   │ User Layer   │   │  Extension  │    │   Service    │            │
-│   │  (CLI/UI)    │   │  Registry   │    │   Registry   │            │
-│   └──────┬───────┘   └──────┬──────┘    └──────┬───────┘            │
-│          │                  │                  │                    │
-│   ┌──────┴──────────────────┴──────────────────┴──--────┐           │
-│   │            Core Provisioning Engine                 │           │
-│   │  (Config | Dependency Resolution | Workflows)       │           │
-│   └──────┬──────────────────────────────────────┬───────┘           │
-│          │                                      │                   │
-│   ┌──────┴─────────┐                   ┌──────-─┴─────────┐         │
-│   │  Orchestrator  │                   │   Business Logic │         │
-│   │    (Rust)      │ ←─ Coordination → │    (Nushell)     │         │
-│   └──────┬─────────┘                   └───────┬──────────┘         │
-│          │                                     │                    │
-│   ┌──────┴─────────────────────────────────────┴---──────┐          │
-│   │                  Extension System                    │          │
-│   │      (Providers | Task Services | Clusters)          │          │
-│   └──────┬───────────────────────────────────────────────┘          │
-│          │                                                          │
-│   ┌──────┴──────────────────────────────────────────────────-─┐     │
-│   │        Infrastructure (Cloud | Local | Kubernetes)        │     │
-│   └───────────────────────────────────────────────────────────┘     │
-│                                                                     │
-└─────────────────────────────────────────────────────────────────────┘
-
-

Key Metrics

-
- - - - - - - -
MetricValueDescription
Codebase Size~50,000 LOCNushell (60%), Rust (30%), Nickel (10%)
Extensions100+Providers, taskservs, clusters
Supported Providers3AWS, UpCloud, Local
Task Services50+Kubernetes, databases, monitoring, etc.
Deployment Modes5Binary, Docker, Docker Compose, K8s, Remote
Operational Modes4Solo, Multi-user, CI/CD, Enterprise
API Endpoints80+REST, WebSocket, GraphQL (planned)
-
-
-

System Architecture

-

High-Level Architecture

-
┌────────────────────────────────────────────────────────────────────────────┐
-│                         PRESENTATION LAYER                                 │
-├────────────────────────────────────────────────────────────────────────────┤
-│                                                                            │
-│    ┌─────────────┐  ┌──────────────┐  ┌──────────────┐  ┌────────────┐     │
-│    │  CLI (Nu)   │  │ Control      │  │  REST API    │  │  MCP       │     │
-│    │             │  │ Center (Yew) │  │  Gateway     │  │  Server    │     │
-│    └─────────────┘  └──────────────┘  └──────────────┘  └────────────┘     │
-│                                                                            │
-└──────────────────────────────────┬─────────────────────────────────────────┘
-                                   │
-┌──────────────────────────────────┴─────────────────────────────────────────┐
-│                         CORE LAYER                                         │
-├────────────────────────────────────────────────────────────────────────────┤
-│                                                                            │
-│   ┌─────────────────────────────────────────────────────────────────┐      │
-│   │               Configuration Management                          │      │
-│   │   (Nickel Schemas | TOML Config | Hierarchical Loading)         │      │
-│   └─────────────────────────────────────────────────────────────────┘      │
-│                                                                            │
-│   ┌──────────────────┐  ┌──────────────────┐  ┌──────────────────┐         │
-│   │   Dependency     │  │   Module/Layer   │  │   Workspace      │         │
-│   │   Resolution     │  │     System       │  │   Management     │         │
-│   └──────────────────┘  └──────────────────┘  └──────────────────┘         │
-│                                                                            │
-│  ┌──────────────────────────────────────────────────────────────────┐      │
-│  │                  Workflow Engine                                 │      │
-│  │   (Batch Operations | Checkpoints | Rollback)                    │      │
-│  └──────────────────────────────────────────────────────────────────┘      │
-│                                                                            │
-└──────────────────────────────────┬─────────────────────────────────────────┘
-                                   │
-┌──────────────────────────────────┴─────────────────────────────────────────┐
-│                      ORCHESTRATION LAYER                                   │
-├────────────────────────────────────────────────────────────────────────────┤
-│                                                                            │
-│  ┌──────────────────────────────────────────────────────────────────┐      │
-│  │                Orchestrator (Rust)                               │      │
-│  │   • Task Queue (File-based persistence)                          │      │
-│  │   • State Management (Checkpoints)                               │      │
-│  │   • Health Monitoring                                            │      │
-│  │   • REST API (HTTP/WS)                                           │      │
-│  └──────────────────────────────────────────────────────────────────┘      │
-│                                                                            │
-│  ┌──────────────────────────────────────────────────────────────────┐      │
-│  │           Business Logic (Nushell)                               │      │
-│  │   • Provider operations (AWS, UpCloud, Local)                    │      │
-│  │   • Server lifecycle (create, delete, configure)                 │      │
-│  │   • Taskserv installation (50+ services)                         │      │
-│  │   • Cluster deployment                                           │      │
-│  └──────────────────────────────────────────────────────────────────┘      │
-│                                                                            │
-└──────────────────────────────────┬─────────────────────────────────────────┘
-                                   │
-┌──────────────────────────────────┴─────────────────────────────────────────┐
-│                      EXTENSION LAYER                                       │
-├────────────────────────────────────────────────────────────────────────────┤
-│                                                                            │
-│   ┌────────────────┐  ┌──────────────────┐  ┌───────────────────┐          │
-│   │   Providers    │  │   Task Services  │  │    Clusters       │          │
-│   │   (3 types)    │  │   (50+ types)    │  │   (10+ types)     │          │
-│   │                │  │                  │  │                   │          │
-│   │  • AWS         │  │  • Kubernetes    │  │  • Buildkit       │          │
-│   │  • UpCloud     │  │  • Containerd    │  │  • Web cluster    │          │
-│   │  • Local       │  │  • Databases     │  │  • CI/CD          │          │
-│   │                │  │  • Monitoring    │  │                   │          │
-│   └────────────────┘  └──────────────────┘  └───────────────────┘          │
-│                                                                            │
-│  ┌──────────────────────────────────────────────────────────────────┐      │
-│  │            Extension Distribution (OCI Registry)                 │      │
-│  │   • Zot (local development)                                      │      │
-│  │   • Harbor (multi-user/enterprise)                               │      │
-│  └──────────────────────────────────────────────────────────────────┘      │
-│                                                                            │
-└──────────────────────────────────┬─────────────────────────────────────────┘
-                                   │
-┌──────────────────────────────────┴─────────────────────────────────────────┐
-│                      INFRASTRUCTURE LAYER                                  │
-├────────────────────────────────────────────────────────────────────────────┤
-│                                                                            │
-│   ┌────────────────┐  ┌──────────────────┐  ┌───────────────────┐          │
-│   │  Cloud (AWS)   │  │ Cloud (UpCloud)  │  │  Local (Docker)   │          │
-│   │                │  │                  │  │                   │          │
-│   │  • EC2         │  │  • Servers       │  │  • Containers     │          │
-│   │  • EKS         │  │  • LoadBalancer  │  │  • Local K8s      │          │
-│   │  • RDS         │  │  • Networking    │  │  • Processes      │          │
-│   └────────────────┘  └──────────────────┘  └───────────────────┘          │
-│                                                                            │
-└────────────────────────────────────────────────────────────────────────────┘
-
-

Multi-Repository Architecture

-

The system is organized into three separate repositories:

-

provisioning-core

-
Core system functionality
-├── CLI interface (Nushell entry point)
-├── Core libraries (lib_provisioning)
-├── Base Nickel schemas
-├── Configuration system
-├── Workflow engine
-└── Build/distribution tools
-
-

Distribution: oci://registry/provisioning-core:v3.5.0

-

provisioning-extensions

-
All provider, taskserv, cluster extensions
-├── providers/
-│   ├── aws/
-│   ├── upcloud/
-│   └── local/
-├── taskservs/
-│   ├── kubernetes/
-│   ├── containerd/
-│   ├── postgres/
-│   └── (50+ more)
-└── clusters/
-    ├── buildkit/
-    ├── web/
-    └── (10+ more)
-
-

Distribution: Each extension as separate OCI artifact

-
    -
  • oci://registry/provisioning-extensions/kubernetes:1.28.0
  • -
  • oci://registry/provisioning-extensions/aws:2.0.0
  • -
-

provisioning-platform

-
Platform services
-├── orchestrator/      (Rust)
-├── control-center/    (Rust/Yew)
-├── mcp-server/        (Rust)
-└── api-gateway/       (Rust)
-
-

Distribution: Docker images in OCI registry

-
    -
  • oci://registry/provisioning-platform/orchestrator:v1.2.0
  • -
-
-

Component Architecture

-

Core Components

-

1. CLI Interface (Nushell)

-

Location: provisioning/core/cli/provisioning

-

Purpose: Primary user interface for all provisioning operations

-

Architecture:

-
Main CLI (211 lines)
-    ↓
-Command Dispatcher (264 lines)
-    ↓
-Domain Handlers (7 modules)
-    ├── infrastructure.nu (117 lines)
-    ├── orchestration.nu (64 lines)
-    ├── development.nu (72 lines)
-    ├── workspace.nu (56 lines)
-    ├── generation.nu (78 lines)
-    ├── utilities.nu (157 lines)
-    └── configuration.nu (316 lines)
-
-

Key Features:

-
    -
  • 80+ command shortcuts
  • -
  • Bi-directional help system
  • -
  • Centralized flag handling
  • -
  • Domain-driven design
  • -
-

2. Configuration System (Nickel + TOML)

-

Hierarchical Loading:

-
1. System defaults     (config.defaults.toml)
-2. User config         (~/.provisioning/config.user.toml)
-3. Workspace config    (workspace/config/provisioning.yaml)
-4. Environment config  (workspace/config/{env}-defaults.toml)
-5. Infrastructure config (workspace/infra/{name}/config.toml)
-6. Runtime overrides   (CLI flags, ENV variables)
-
-

Variable Interpolation:

-
    -
  • {{paths.base}} - Path references
  • -
  • {{env.HOME}} - Environment variables
  • -
  • {{now.date}} - Dynamic values
  • -
  • {{git.branch}} - Git context
  • -
-

3. Orchestrator (Rust)

-

Location: provisioning/platform/orchestrator/

-

Architecture:

-
src/
-├── main.rs              // Entry point
-├── api/
-│   ├── routes.rs        // HTTP routes
-│   ├── workflows.rs     // Workflow endpoints
-│   └── batch.rs         // Batch endpoints
-├── workflow/
-│   ├── engine.rs        // Workflow execution
-│   ├── state.rs         // State management
-│   └── checkpoint.rs    // Checkpoint/recovery
-├── task_queue/
-│   ├── queue.rs         // File-based queue
-│   ├── priority.rs      // Priority scheduling
-│   └── retry.rs         // Retry logic
-├── health/
-│   └── monitor.rs       // Health checks
-├── nushell/
-│   └── bridge.rs        // Nu execution bridge
-└── test_environment/    // Test env management
-    ├── container_manager.rs
-    ├── test_orchestrator.rs
-    └── topologies.rs
-

Key Features:

-
    -
  • File-based task queue (reliable, simple)
  • -
  • Checkpoint-based recovery
  • -
  • Priority scheduling
  • -
  • REST API (HTTP/WebSocket)
  • -
  • Nushell script execution bridge
  • -
-

4. Workflow Engine (Nushell)

-

Location: provisioning/core/nulib/workflows/

-

Workflow Types:

-
workflows/
-├── server_create.nu     // Server provisioning
-├── taskserv.nu          // Task service management
-├── cluster.nu           // Cluster deployment
-├── batch.nu             // Batch operations
-└── management.nu        // Workflow monitoring
-
-

Batch Workflow Features:

-
    -
  • Provider-agnostic (mix AWS, UpCloud, local)
  • -
  • Dependency resolution (hard/soft dependencies)
  • -
  • Parallel execution (configurable limits)
  • -
  • Rollback support
  • -
  • Real-time monitoring
  • -
-

5. Extension System

-

Extension Types:

-
- - - -
TypeCountPurposeExample
Providers3Cloud platform integrationAWS, UpCloud, Local
Task Services50+Infrastructure componentsKubernetes, Postgres
Clusters10+Complete configurationsBuildkit, Web cluster
-
-

Extension Structure:

-
extension-name/
-├── schemas/
-│   ├── main.ncl             // Main schema
-│   ├── contracts.ncl        // Contract definitions
-│   ├── defaults.ncl         // Default values
-│   └── version.ncl          // Version management
-├── scripts/
-│   ├── install.nu           // Installation logic
-│   ├── check.nu             // Health check
-│   └── uninstall.nu         // Cleanup
-├── templates/               // Config templates
-├── docs/                    // Documentation
-├── tests/                   // Extension tests
-└── manifest.yaml            // Extension metadata
-
-

OCI Distribution: -Each extension packaged as OCI artifact:

-
    -
  • Nickel schemas
  • -
  • Nushell scripts
  • -
  • Templates
  • -
  • Documentation
  • -
  • Manifest
  • -
-

6. Module and Layer System

-

Module System:

-
# Discover available extensions
-provisioning module discover taskservs
-
-# Load into workspace
-provisioning module load taskserv my-workspace kubernetes containerd
-
-# List loaded modules
-provisioning module list taskserv my-workspace
-
-

Layer System (Configuration Inheritance):

-
Layer 1: Core     (provisioning/extensions/{type}/{name})
-    ↓
-Layer 2: Workspace (workspace/extensions/{type}/{name})
-    ↓
-Layer 3: Infrastructure (workspace/infra/{infra}/extensions/{type}/{name})
-
-

Resolution Priority: Infrastructure → Workspace → Core

-

7. Dependency Resolution

-

Algorithm: Topological sort with cycle detection

-

Features:

-
    -
  • Hard dependencies (must exist)
  • -
  • Soft dependencies (optional enhancement)
  • -
  • Conflict detection
  • -
  • Circular dependency prevention
  • -
  • Version compatibility checking
  • -
-

Example:

-
let { TaskservDependencies } = import "provisioning/dependencies.ncl" in
-{
-  kubernetes = TaskservDependencies {
-    name = "kubernetes",
-    version = "1.28.0",
-    requires = ["containerd", "etcd", "os"],
-    optional = ["cilium", "helm"],
-    conflicts = ["docker", "podman"],
-  }
-}
-
-

8. Service Management

-

Supported Services:

-
- - - - - - - -
ServiceTypeCategoryPurpose
orchestratorPlatformOrchestrationWorkflow coordination
control-centerPlatformUIWeb management interface
corednsInfrastructureDNSLocal DNS resolution
giteaInfrastructureGitSelf-hosted Git service
oci-registryInfrastructureRegistryOCI artifact storage
mcp-serverPlatformAPIModel Context Protocol
api-gatewayPlatformAPIUnified API access
-
-

Lifecycle Management:

-
# Start all auto-start services
-provisioning platform start
-
-# Start specific service (with dependencies)
-provisioning platform start orchestrator
-
-# Check health
-provisioning platform health
-
-# View logs
-provisioning platform logs orchestrator --follow
-
-

9. Test Environment Service

-

Architecture:

-
User Command (CLI)
-    ↓
-Test Orchestrator (Rust)
-    ↓
-Container Manager (bollard)
-    ↓
-Docker API
-    ↓
-Isolated Test Containers
-
-

Test Types:

-
    -
  • Single taskserv testing
  • -
  • Server simulation (multiple taskservs)
  • -
  • Multi-node cluster topologies
  • -
-

Topology Templates:

-
    -
  • kubernetes_3node - 3-node HA cluster
  • -
  • kubernetes_single - All-in-one K8s
  • -
  • etcd_cluster - 3-node etcd
  • -
  • postgres_redis - Database stack
  • -
-
-

Mode Architecture

-

Mode-Based System Overview

-

The platform supports four operational modes that adapt the system from individual development to enterprise production.

-

Mode Comparison

-
┌───────────────────────────────────────────────────────────────────────┐
-│                        MODE ARCHITECTURE                              │
-├───────────────┬───────────────┬───────────────┬───────────────────────┤
-│    SOLO       │  MULTI-USER   │    CI/CD      │    ENTERPRISE         │
-├───────────────┼───────────────┼───────────────┼───────────────────────┤
-│               │               │               │                       │
-│  Single Dev   │  Team (5-20)  │  Pipelines    │  Production           │
-│               │               │               │                       │
-│  ┌─────────┐  │ ┌──────────┐  │ ┌──────────┐  │ ┌──────────────────┐  │
-│  │ No Auth │  │ │Token(JWT)│  │ │Token(1h) │  │ │  mTLS (TLS 1.3)  │  │
-│  └─────────┘  │ └──────────┘  │ └──────────┘  │ └──────────────────┘  │
-│               │               │               │                       │
-│  ┌─────────┐  │ ┌──────────┐  │ ┌──────────┐  │ ┌──────────────────┐  │
-│  │ Local   │  │ │ Remote   │  │ │ Remote   │  │ │ Kubernetes (HA)  │  │
-│  │ Binary  │  │ │ Docker   │  │ │ K8s      │  │ │ Multi-AZ         │  │
-│  └─────────┘  │ └──────────┘  │ └──────────┘  │ └──────────────────┘  │
-│               │               │               │                       │
-│  ┌─────────┐  │ ┌──────────┐  │ ┌──────────┐  │ ┌──────────────────┐  │
-│  │ Local   │  │ │ OCI (Zot)│  │ │OCI(Harbor│  │ │ OCI (Harbor HA)  │  │
-│  │ Files   │  │ │ or Harbor│  │ │ required)│  │ │ + Replication    │  │
-│  └─────────┘  │ └──────────┘  │ └──────────┘  │ └──────────────────┘  │
-│               │               │               │                       │
-│  ┌─────────┐  │ ┌──────────┐  │ ┌──────────-┐ │ ┌──────────────────┐  │
-│  │ None    │  │ │ Gitea    │  │ │ Disabled  │ │ │ etcd (mandatory) │  │
-│  │         │  │ │(optional)│  │ │(stateless)| │ │                  │  │
-│  └─────────┘  │ └──────────┘  │ └─────────-─┘ │ └──────────────────┘  │
-│               │               │               │                       │
-│  Unlimited    │  10 srv, 32   │  5 srv, 16    │ 20 srv, 64 cores      │
-│               │ cores, 128 GB  │ cores, 64 GB   │ 256 GB per user        │
-│               │               │               │                       │
-└───────────────┴───────────────┴───────────────┴───────────────────────┘
-
-

Mode Configuration

-

Mode Templates: workspace/config/modes/{mode}.yaml

-

Active Mode: ~/.provisioning/config/active-mode.yaml

-

Switching Modes:

-
# Check current mode
-provisioning mode current
-
-# Switch to another mode
-provisioning mode switch multi-user
-
-# Validate mode requirements
-provisioning mode validate enterprise
-
-

Mode-Specific Workflows

-

Solo Mode

-
# 1. Default mode, no setup needed
-provisioning workspace init
-
-# 2. Start local orchestrator
-provisioning platform start orchestrator
-
-# 3. Create infrastructure
-provisioning server create
-
-

Multi-User Mode

-
# 1. Switch mode and authenticate
-provisioning mode switch multi-user
-provisioning auth login
-
-# 2. Lock workspace
-provisioning workspace lock my-infra
-
-# 3. Pull extensions from OCI
-provisioning extension pull upcloud kubernetes
-
-# 4. Work...
-
-# 5. Unlock workspace
-provisioning workspace unlock my-infra
-
-

CI/CD Mode

-
# GitLab CI
-deploy:
-  stage: deploy
-  script:
-    - export PROVISIONING_MODE=cicd
-    - echo "$TOKEN" > /var/run/secrets/provisioning/token
-    - provisioning validate --all
-    - provisioning test quick kubernetes
-    - provisioning server create --check
-    - provisioning server create
-  after_script:
-    - provisioning workspace cleanup
-
-

Enterprise Mode

-
# 1. Switch to enterprise, verify K8s
-provisioning mode switch enterprise
-kubectl get pods -n provisioning-system
-
-# 2. Request workspace (approval required)
-provisioning workspace request prod-deployment
-
-# 3. After approval, lock with etcd
-provisioning workspace lock prod-deployment --provider etcd
-
-# 4. Pull verified extensions
-provisioning extension pull upcloud --verify-signature
-
-# 5. Deploy
-provisioning infra create --check
-provisioning infra create
-
-# 6. Release
-provisioning workspace unlock prod-deployment
-
-
-

Network Architecture

-

Service Communication

-
┌──────────────────────────────────────────────────────────────────────┐
-│                         NETWORK LAYER                                 │
-├──────────────────────────────────────────────────────────────────────┤
-│                                                                        │
-│  ┌───────────────────────┐          ┌──────────────────────────┐     │
-│  │   Ingress/Load        │          │    API Gateway           │     │
-│  │   Balancer            │──────────│   (Optional)             │     │
-│  └───────────────────────┘          └──────────────────────────┘     │
-│              │                                    │                   │
-│              │                                    │                   │
-│  ┌───────────┴────────────────────────────────────┴──────────┐       │
-│  │                 Service Mesh (Optional)                    │       │
-│  │           (mTLS, Circuit Breaking, Retries)               │       │
-│  └────┬──────────┬───────────┬────────────┬──────────────┬───┘       │
-│       │          │           │            │              │            │
-│  ┌────┴─────┐ ┌─┴────────┐ ┌┴─────────┐ ┌┴──────────┐ ┌┴───────┐   │
-│  │ Orchestr │ │ Control  │ │ CoreDNS  │ │   Gitea   │ │  OCI   │   │
-│  │   ator   │ │ Center   │ │          │ │           │ │Registry│   │
-│  │          │ │          │ │          │ │           │ │        │   │
-│  │ :9090    │ │ :3000    │ │ :5353    │ │ :3001     │ │ :5000  │   │
-│  └──────────┘ └──────────┘ └──────────┘ └───────────┘ └────────┘   │
-│                                                                        │
-│  ┌────────────────────────────────────────────────────────────┐       │
-│  │              DNS Resolution (CoreDNS)                       │       │
-│  │  • *.prov.local  →  Internal services                      │       │
-│  │  • *.infra.local →  Infrastructure nodes                   │       │
-│  └────────────────────────────────────────────────────────────┘       │
-│                                                                        │
-└──────────────────────────────────────────────────────────────────────┘
-
-

Port Allocation

-
- - - - - - - - -
ServicePortProtocolPurpose
Orchestrator8080HTTP/WSREST API, WebSocket
Control Center3000HTTPWeb UI
CoreDNS5353UDP/TCPDNS resolution
Gitea3001HTTPGit operations
OCI Registry (Zot)5000HTTPOCI artifacts
OCI Registry (Harbor)443HTTPSOCI artifacts (prod)
MCP Server8081HTTPMCP protocol
API Gateway8082HTTPUnified API
-
-

Network Security

-

Solo Mode:

-
    -
  • Localhost-only bindings
  • -
  • No authentication
  • -
  • No encryption
  • -
-

Multi-User Mode:

-
    -
  • Token-based authentication (JWT)
  • -
  • TLS for external access
  • -
  • Firewall rules
  • -
-

CI/CD Mode:

-
    -
  • Token authentication (short-lived)
  • -
  • Full TLS encryption
  • -
  • Network isolation
  • -
-

Enterprise Mode:

-
    -
  • mTLS for all connections
  • -
  • Network policies (Kubernetes)
  • -
  • Zero-trust networking
  • -
  • Audit logging
  • -
-
-

Data Architecture

-

Data Storage

-
┌────────────────────────────────────────────────────────────────┐
-│                     DATA LAYER                                  │
-├────────────────────────────────────────────────────────────────┤
-│                                                                  │
-│  ┌─────────────────────────────────────────────────────────┐   │
-│  │            Configuration Data (Hierarchical)             │   │
-│  │                                                           │   │
-│  │  ~/.provisioning/                                        │   │
-│  │  ├── config.user.toml       (User preferences)          │   │
-│  │  └── config/                                             │   │
-│  │      ├── active-mode.yaml   (Active mode)               │   │
-│  │      └── user_config.yaml   (Workspaces, preferences)   │   │
-│  │                                                           │   │
-│  │  workspace/                                              │   │
-│  │  ├── config/                                             │   │
-│  │  │   ├── provisioning.yaml  (Workspace config)          │   │
-│  │  │   └── modes/*.yaml       (Mode templates)            │   │
-│  │  └── infra/{name}/                                       │   │
-│  │      ├── main.ncl           (Infrastructure Nickel)     │   │
-│  │      └── config.toml        (Infra-specific)            │   │
-│  └─────────────────────────────────────────────────────────┘   │
-│                                                                  │
-│  ┌─────────────────────────────────────────────────────────┐   │
-│  │            State Data (Runtime)                          │   │
-│  │                                                           │   │
-│  │  ~/.provisioning/orchestrator/data/                      │   │
-│  │  ├── tasks/                  (Task queue)                │   │
-│  │  ├── workflows/              (Workflow state)            │   │
-│  │  └── checkpoints/            (Recovery points)           │   │
-│  │                                                           │   │
-│  │  ~/.provisioning/services/                               │   │
-│  │  ├── pids/                   (Process IDs)               │   │
-│  │  ├── logs/                   (Service logs)              │   │
-│  │  └── state/                  (Service state)             │   │
-│  └─────────────────────────────────────────────────────────┘   │
-│                                                                  │
-│  ┌─────────────────────────────────────────────────────────┐   │
-│  │            Cache Data (Performance)                      │   │
-│  │                                                           │   │
-│  │  ~/.provisioning/cache/                                  │   │
-│  │  ├── oci/                    (OCI artifacts)             │   │
-│  │  ├── schemas/                (Nickel compiled)           │   │
-│  │  └── modules/                (Module cache)              │   │
-│  └─────────────────────────────────────────────────────────┘   │
-│                                                                  │
-│  ┌─────────────────────────────────────────────────────────┐   │
-│  │            Extension Data (OCI Artifacts)                │   │
-│  │                                                           │   │
-│  │  OCI Registry (localhost:5000 or harbor.company.com)    │   │
-│  │  ├── provisioning-core:v3.5.0                           │   │
-│  │  ├── provisioning-extensions/                           │   │
-│  │  │   ├── kubernetes:1.28.0                              │   │
-│  │  │   ├── aws:2.0.0                                      │   │
-│  │  │   └── (100+ artifacts)                               │   │
-│  │  └── provisioning-platform/                             │   │
-│  │      ├── orchestrator:v1.2.0                            │   │
-│  │      └── (4 service images)                             │   │
-│  └─────────────────────────────────────────────────────────┘   │
-│                                                                  │
-│  ┌─────────────────────────────────────────────────────────┐   │
-│  │            Secrets (Encrypted)                           │   │
-│  │                                                           │   │
-│  │  workspace/secrets/                                      │   │
-│  │  ├── keys.yaml.enc           (SOPS-encrypted)           │   │
-│  │  ├── ssh-keys/               (SSH keys)                 │   │
-│  │  └── tokens/                 (API tokens)               │   │
-│  │                                                           │   │
-│  │  KMS Integration (Enterprise):                          │   │
-│  │  • AWS KMS                                               │   │
-│  │  • HashiCorp Vault                                       │   │
-│  │  • Age encryption (local)                                │   │
-│  └─────────────────────────────────────────────────────────┘   │
-│                                                                  │
-└────────────────────────────────────────────────────────────────┘
-
-

Data Flow

-

Configuration Loading:

-
1. Load system defaults (config.defaults.toml)
-2. Merge user config (~/.provisioning/config.user.toml)
-3. Load workspace config (workspace/config/provisioning.yaml)
-4. Load environment config (workspace/config/{env}-defaults.toml)
-5. Load infrastructure config (workspace/infra/{name}/config.toml)
-6. Apply runtime overrides (ENV variables, CLI flags)
-
-

State Persistence:

-
Workflow execution
-    ↓
-Create checkpoint (JSON)
-    ↓
-Save to ~/.provisioning/orchestrator/data/checkpoints/
-    ↓
-On failure, load checkpoint and resume
-
-

OCI Artifact Flow:

-
1. Package extension (oci-package.nu)
-2. Push to OCI registry (provisioning oci push)
-3. Extension stored as OCI artifact
-4. Pull when needed (provisioning oci pull)
-5. Cache locally (~/.provisioning/cache/oci/)
-
-
-

Security Architecture

-

Security Layers

-
┌─────────────────────────────────────────────────────────────────┐
-│                     SECURITY ARCHITECTURE                        │
-├─────────────────────────────────────────────────────────────────┤
-│                                                                   │
-│  ┌────────────────────────────────────────────────────────┐     │
-│  │  Layer 1: Authentication & Authorization               │     │
-│  │                                                          │     │
-│  │  Solo:       None (local development)                  │     │
-│  │  Multi-user: JWT tokens (24h expiry)                   │     │
-│  │  CI/CD:      CI-injected tokens (1h expiry)            │     │
-│  │  Enterprise: mTLS (TLS 1.3, mutual auth)               │     │
-│  └────────────────────────────────────────────────────────┘     │
-│                                                                   │
-│  ┌────────────────────────────────────────────────────────┐     │
-│  │  Layer 2: Encryption                                    │     │
-│  │                                                          │     │
-│  │  In Transit:                                            │     │
-│  │  • TLS 1.3 (multi-user, CI/CD, enterprise)             │     │
-│  │  • mTLS (enterprise)                                    │     │
-│  │                                                          │     │
-│  │  At Rest:                                               │     │
-│  │  • SOPS + Age (secrets encryption)                      │     │
-│  │  • KMS integration (CI/CD, enterprise)                  │     │
-│  │  • Encrypted filesystems (enterprise)                   │     │
-│  └────────────────────────────────────────────────────────┘     │
-│                                                                   │
-│  ┌────────────────────────────────────────────────────────┐     │
-│  │  Layer 3: Secret Management                             │     │
-│  │                                                          │     │
-│  │  • SOPS for file encryption                             │     │
-│  │  • Age for key management                               │     │
-│  │  • KMS integration (AWS KMS, Vault)                     │     │
-│  │  • SSH key storage (KMS-backed)                         │     │
-│  │  • API token management                                 │     │
-│  └────────────────────────────────────────────────────────┘     │
-│                                                                   │
-│  ┌────────────────────────────────────────────────────────┐     │
-│  │  Layer 4: Access Control                                │     │
-│  │                                                          │     │
-│  │  • RBAC (Role-Based Access Control)                     │     │
-│  │  • Workspace isolation                                   │     │
-│  │  • Workspace locking (Gitea, etcd)                      │     │
-│  │  • Resource quotas (per-user limits)                    │     │
-│  └────────────────────────────────────────────────────────┘     │
-│                                                                   │
-│  ┌────────────────────────────────────────────────────────┐     │
-│  │  Layer 5: Network Security                              │     │
-│  │                                                          │     │
-│  │  • Network policies (Kubernetes)                        │     │
-│  │  • Firewall rules                                       │     │
-│  │  • Zero-trust networking (enterprise)                   │     │
-│  │  • Service mesh (optional, mTLS)                        │     │
-│  └────────────────────────────────────────────────────────┘     │
-│                                                                   │
-│  ┌────────────────────────────────────────────────────────┐     │
-│  │  Layer 6: Audit & Compliance                            │     │
-│  │                                                          │     │
-│  │  • Audit logs (all operations)                          │     │
-│  │  • Compliance policies (SOC2, ISO27001)                 │     │
-│  │  • Image signing (cosign, notation)                     │     │
-│  │  • Vulnerability scanning (Harbor)                      │     │
-│  └────────────────────────────────────────────────────────┘     │
-│                                                                   │
-└─────────────────────────────────────────────────────────────────┘
-
-

Secret Management

-

SOPS Integration:

-
# Edit encrypted file
-provisioning sops workspace/secrets/keys.yaml.enc
-
-# Encryption happens automatically on save
-# Decryption happens automatically on load
-
-

KMS Integration (Enterprise):

-
# workspace/config/provisioning.yaml
-secrets:
-  provider: "kms"
-  kms:
-    type: "aws"  # or "vault"
-    region: "us-east-1"
-    key_id: "arn:aws:kms:..."
-
-

Image Signing and Verification

-

CI/CD Mode (Required):

-
# Sign OCI artifact
-cosign sign oci://registry/kubernetes:1.28.0
-
-# Verify signature
-cosign verify oci://registry/kubernetes:1.28.0
-
-

Enterprise Mode (Mandatory):

-
# Pull with verification
-provisioning extension pull kubernetes --verify-signature
-
-# System blocks unsigned artifacts
-
-
-

Deployment Architecture

-

Deployment Modes

-

1. Binary Deployment (Solo, Multi-user)

-
User Machine
-├── ~/.provisioning/bin/
-│   ├── provisioning-orchestrator
-│   ├── provisioning-control-center
-│   └── ...
-├── ~/.provisioning/orchestrator/data/
-├── ~/.provisioning/services/
-└── Process Management (PID files, logs)
-
-

Pros: Simple, fast startup, no Docker dependency -Cons: Platform-specific binaries, manual updates

-

2. Docker Deployment (Multi-user, CI/CD)

-
Docker Daemon
-├── Container: provisioning-orchestrator
-├── Container: provisioning-control-center
-├── Container: provisioning-coredns
-├── Container: provisioning-gitea
-├── Container: provisioning-oci-registry
-└── Volumes: ~/.provisioning/data/
-
-

Pros: Consistent environment, easy updates -Cons: Requires Docker, resource overhead

-

3. Docker Compose Deployment (Multi-user)

-
# provisioning/platform/docker-compose.yaml
-services:
-  orchestrator:
-    image: provisioning-platform/orchestrator:v1.2.0
-    ports:
-      - "8080:9090"
-    volumes:
-      - orchestrator-data:/data
-
-  control-center:
-    image: provisioning-platform/control-center:v1.2.0
-    ports:
-      - "3000:3000"
-    depends_on:
-      - orchestrator
-
-  coredns:
-    image: coredns/coredns:1.11.1
-    ports:
-      - "5353:53/udp"
-
-  gitea:
-    image: gitea/gitea:1.20
-    ports:
-      - "3001:3000"
-
-  oci-registry:
-    image: ghcr.io/project-zot/zot:latest
-    ports:
-      - "5000:5000"
-
-

Pros: Easy multi-service orchestration, declarative -Cons: Local only, no HA

-

4. Kubernetes Deployment (CI/CD, Enterprise)

-
# Namespace: provisioning-system
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: orchestrator
-spec:
-  replicas: 3  # HA
-  selector:
-    matchLabels:
-      app: orchestrator
-  template:
-    metadata:
-      labels:
-        app: orchestrator
-    spec:
-      containers:
-      - name: orchestrator
-        image: harbor.company.com/provisioning-platform/orchestrator:v1.2.0
-        ports:
-        - containerPort: 8080
-        env:
-        - name: RUST_LOG
-          value: "info"
-        volumeMounts:
-        - name: data
-          mountPath: /data
-        livenessProbe:
-          httpGet:
-            path: /health
-            port: 8080
-        readinessProbe:
-          httpGet:
-            path: /health
-            port: 8080
-      volumes:
-      - name: data
-        persistentVolumeClaim:
-          claimName: orchestrator-data
-
-

Pros: HA, scalability, production-ready -Cons: Complex setup, Kubernetes required

-

5. Remote Deployment (All modes)

-
# Connect to remotely-running services
-services:
-  orchestrator:
-    deployment:
-      mode: "remote"
-      remote:
-        endpoint: "https://orchestrator.company.com"
-        tls_enabled: true
-        auth_token_path: "~/.provisioning/tokens/orchestrator.token"
-
-

Pros: No local resources, centralized -Cons: Network dependency, latency

-
-

Integration Architecture

-

Integration Patterns

-

1. Hybrid Language Integration (Rust ↔ Nushell)

-
Rust Orchestrator
-    ↓ (HTTP API)
-Nushell CLI
-    ↓ (exec via bridge)
-Nushell Business Logic
-    ↓ (returns JSON)
-Rust Orchestrator
-    ↓ (updates state)
-File-based Task Queue
-
-

Communication: HTTP API + stdin/stdout JSON

-

2. Provider Abstraction

-
Unified Provider Interface
-├── create_server(config) -> Server
-├── delete_server(id) -> bool
-├── list_servers() -> [Server]
-└── get_server_status(id) -> Status
-
-Provider Implementations:
-├── AWS Provider (aws-sdk-rust, aws cli)
-├── UpCloud Provider (upcloud API)
-└── Local Provider (Docker, libvirt)
-
-

3. OCI Registry Integration

-
Extension Development
-    ↓
-Package (oci-package.nu)
-    ↓
-Push (provisioning oci push)
-    ↓
-OCI Registry (Zot/Harbor)
-    ↓
-Pull (provisioning oci pull)
-    ↓
-Cache (~/.provisioning/cache/oci/)
-    ↓
-Load into Workspace
-
-

4. Gitea Integration (Multi-user, Enterprise)

-
Workspace Operations
-    ↓
-Check Lock Status (Gitea API)
-    ↓
-Acquire Lock (Create lock file in Git)
-    ↓
-Perform Changes
-    ↓
-Commit + Push
-    ↓
-Release Lock (Delete lock file)
-
-

Benefits:

-
    -
  • Distributed locking
  • -
  • Change tracking via Git history
  • -
  • Collaboration features
  • -
-

5. CoreDNS Integration

-
Service Registration
-    ↓
-Update CoreDNS Corefile
-    ↓
-Reload CoreDNS
-    ↓
-DNS Resolution Available
-
-Zones:
-├── *.prov.local     (Internal services)
-├── *.infra.local    (Infrastructure nodes)
-└── *.test.local     (Test environments)
-
-
-

Performance and Scalability

-

Performance Characteristics

-
- - - - - - - - -
MetricValueNotes
CLI Startup Time< 100 msNushell cold start
CLI Response Time< 50 msMost commands
Workflow Submission< 200 msTo orchestrator
Task Processing10-50/secOrchestrator throughput
Batch OperationsUp to 100 serversParallel execution
OCI Pull Time1-5sCached: <100 ms
Configuration Load< 500 msFull hierarchy
Health Check Interval10sConfigurable
-
-

Scalability Limits

-

Solo Mode:

-
    -
  • Unlimited local resources
  • -
  • Limited by machine capacity
  • -
-

Multi-User Mode:

-
    -
  • 10 servers per user
  • -
  • 32 cores, 128 GB RAM per user
  • -
  • 5-20 concurrent users
  • -
-

CI/CD Mode:

-
    -
  • 5 servers per pipeline
  • -
  • 16 cores, 64 GB RAM per pipeline
  • -
  • 100+ concurrent pipelines
  • -
-

Enterprise Mode:

-
    -
  • 20 servers per user
  • -
  • 64 cores, 256 GB RAM per user
  • -
  • 1000+ concurrent users
  • -
  • Horizontal scaling via Kubernetes
  • -
-

Optimization Strategies

-

Caching:

-
    -
  • OCI artifacts cached locally
  • -
  • Nickel compilation cached
  • -
  • Module resolution cached
  • -
-

Parallel Execution:

-
    -
  • Batch operations with configurable limits
  • -
  • Dependency-aware parallel starts
  • -
  • Workflow DAG execution
  • -
-

Incremental Operations:

-
    -
  • Only update changed resources
  • -
  • Checkpoint-based recovery
  • -
  • Delta synchronization
  • -
-
-

Evolution and Roadmap

-

Version History

-
- - - - - - - - -
VersionDateMajor Features
v3.5.02025-10-06Mode system, OCI distribution, comprehensive docs
v3.4.02025-10-06Test environment service
v3.3.02025-09-30Interactive guides
v3.2.02025-09-30Modular CLI refactoring
v3.1.02025-09-25Batch workflow system
v3.0.02025-09-25Hybrid orchestrator
v2.0.52025-10-02Workspace switching
v2.0.02025-09-23Configuration migration
-
-

Roadmap (Future Versions)

-

v3.6.0 (Q1 2026):

-
    -
  • GraphQL API
  • -
  • Advanced RBAC
  • -
  • Multi-tenancy
  • -
  • Observability enhancements (OpenTelemetry)
  • -
-

v4.0.0 (Q2 2026):

-
    -
  • Multi-repository split complete
  • -
  • Extension marketplace
  • -
  • Advanced workflow features (conditional execution, loops)
  • -
  • Cost optimization engine
  • -
-

v4.1.0 (Q3 2026):

-
    -
  • AI-assisted infrastructure generation
  • -
  • Policy-as-code (OPA integration)
  • -
  • Advanced compliance features
  • -
-

Long-term Vision:

-
    -
  • Serverless workflow execution
  • -
  • Edge computing support
  • -
  • Multi-cloud failover
  • -
  • Self-healing infrastructure
  • -
-
- -

Architecture

- -

ADRs

- -

User Guides

- -
-

Maintained By: Architecture Team -Review Cycle: Quarterly -Next Review: 2026-01-06

-

Design Principles

-

Overview

-

Provisioning is built on a foundation of architectural principles that guide design decisions, -ensure system quality, and maintain consistency across the codebase. -These principles have evolved from real-world experience -and represent lessons learned from complex infrastructure automation challenges.

-

Core Architectural Principles

-

1. Project Architecture Principles (PAP) Compliance

-

Principle: Fully agnostic and configuration-driven, not hardcoded. Use abstraction layers dynamically loaded from configurations.

-

Rationale: Infrastructure as Code (IaC) systems must be flexible enough to adapt to any environment -without code changes. Hardcoded values defeat the purpose of IaC and create maintenance burdens.

-

Implementation Guidelines:

-
    -
  • Never patch the system with hardcoded fallbacks when configuration parsing fails
  • -
  • All behavior must be configurable through the hierarchical configuration system
  • -
  • Use abstraction layers that are dynamically loaded from configuration
  • -
  • Validate configuration fully before execution, fail fast on invalid config
  • -
-

Anti-Patterns (Anti-PAP):

-
    -
  • Hardcoded provider endpoints or credentials
  • -
  • Environment-specific logic in code
  • -
  • Fallback to default values when configuration is missing
  • -
  • Mixed configuration and implementation logic
  • -
-

Example:

-
# ✅ PAP Compliant - Configuration-driven
-[providers.aws]
-regions = ["us-west-2", "us-east-1"]
-instance_types = ["t3.micro", "t3.small"]
-api_endpoint = "https://ec2.amazonaws.com"
-
-# ❌ Anti-PAP - Hardcoded fallback in code
-if config.providers.aws.regions.is_empty() {
-    regions = vec!["us-west-2"]; // Hardcoded fallback
-}
-
-

2. Hybrid Architecture Optimization

-

Principle: Use each language for what it does best - Rust for coordination, Nushell for business logic.

-

Rationale: Different languages have different strengths. Rust excels at performance-critical coordination tasks, while Nushell excels at -configuration management and domain-specific operations.

-

Implementation Guidelines:

-
    -
  • Rust handles orchestration, state management, and performance-critical paths
  • -
  • Nushell handles provider operations, configuration processing, and CLI interfaces
  • -
  • Clear boundaries between language responsibilities
  • -
  • Structured data exchange (JSON) between languages
  • -
  • Preserve existing domain expertise in Nushell
  • -
-

Language Responsibility Matrix:

-
Rust Layer:
-├── Workflow orchestration and coordination
-├── REST API servers and HTTP endpoints
-├── State persistence and checkpoint management
-├── Parallel processing and batch operations
-├── Error recovery and rollback logic
-└── Performance-critical data processing
-
-Nushell Layer:
-├── Provider implementations (AWS, UpCloud, local)
-├── Task service management and configuration
-├── Nickel configuration processing and validation
-├── Template generation and Infrastructure as Code
-├── CLI user interfaces and interactive tools
-└── Domain-specific business logic
-
-

3. Configuration-First Architecture

-

Principle: All system behavior is determined by configuration, with clear hierarchical precedence and validation.

-

Rationale: True Infrastructure as Code requires that all behavior be configurable without code changes. Configuration hierarchy provides -flexibility while maintaining predictability.

-

Configuration Hierarchy (precedence order):

-
    -
  1. Runtime Parameters (highest precedence)
  2. -
  3. Environment Configuration
  4. -
  5. Infrastructure Configuration
  6. -
  7. User Configuration
  8. -
  9. System Defaults (lowest precedence)
  10. -
-

Implementation Guidelines:

-
    -
  • Complete configuration validation before execution
  • -
  • Variable interpolation for dynamic values
  • -
  • Schema-based validation using Nickel
  • -
  • Configuration immutability during execution
  • -
  • Comprehensive error reporting for configuration issues
  • -
-

4. Domain-Driven Structure

-

Principle: Organize code by business domains and functional boundaries, not by technical concerns.

-

Rationale: Domain-driven organization scales better, reduces coupling, and enables focused development by domain experts.

-

Domain Organization:

-
├── core/           # Core system and library functions
-├── platform/       # High-performance coordination layer
-├── provisioning/   # Main business logic with providers and services
-├── control-center/ # Web-based management interface
-├── tools/          # Development and utility tools
-└── extensions/     # Plugin and extension framework
-
-

Domain Responsibilities:

-
    -
  • Each domain has clear ownership and boundaries
  • -
  • Cross-domain communication through well-defined interfaces
  • -
  • Domain-specific testing and validation strategies
  • -
  • Independent evolution and versioning within architectural guidelines
  • -
-

5. Isolation and Modularity

-

Principle: Components are isolated, modular, and independently deployable with clear interface contracts.

-

Rationale: Isolation enables independent development, testing, and deployment. Clear interfaces prevent tight coupling and enable system -evolution.

-

Implementation Guidelines:

-
    -
  • User workspace isolation from system installation
  • -
  • Extension sandboxing and security boundaries
  • -
  • Provider abstraction with standardized interfaces
  • -
  • Service modularity with dependency management
  • -
  • Clear API contracts between components
  • -
-

Quality Attribute Principles

-

6. Reliability Through Recovery

-

Principle: Build comprehensive error recovery and rollback capabilities into every operation.

-

Rationale: Infrastructure operations can fail at any point. Systems must be able to recover gracefully and maintain consistent state.

-

Implementation Guidelines:

-
    -
  • Checkpoint-based recovery for long-running workflows
  • -
  • Comprehensive rollback capabilities for all operations
  • -
  • Transactional semantics where possible
  • -
  • State validation and consistency checks
  • -
  • Detailed audit trails for debugging and recovery
  • -
-

Recovery Strategies:

-
Operation Level:
-├── Atomic operations with rollback
-├── Retry logic with exponential backoff
-├── Circuit breakers for external dependencies
-└── Graceful degradation on partial failures
-
-Workflow Level:
-├── Checkpoint-based recovery
-├── Dependency-aware rollback
-├── State consistency validation
-└── Resume from failure points
-
-System Level:
-├── Health monitoring and alerting
-├── Automatic recovery procedures
-├── Data backup and restoration
-└── Disaster recovery capabilities
-
-

7. Performance Through Parallelism

-

Principle: Design for parallel execution and efficient resource utilization while maintaining correctness.

-

Rationale: Infrastructure operations often involve multiple independent resources that can be processed in parallel for significant performance -gains.

-

Implementation Guidelines:

-
    -
  • Configurable parallelism limits to prevent resource exhaustion
  • -
  • Dependency-aware parallel execution
  • -
  • Resource pooling and connection management
  • -
  • Efficient data structures and algorithms
  • -
  • Memory-conscious processing for large datasets
  • -
-

8. Security Through Isolation

-

Principle: Implement security through isolation boundaries, least privilege, and comprehensive validation.

-

Rationale: Infrastructure systems handle sensitive data and powerful operations. Security must be built in at the architectural level.

-

Security Implementation:

-
Authentication & Authorization:
-├── API authentication for external access
-├── Role-based access control for operations
-├── Permission validation before execution
-└── Audit logging for all security events
-
-Data Protection:
-├── Encrypted secrets management (SOPS/Age)
-├── Secure configuration file handling
-├── Network communication encryption
-└── Sensitive data sanitization in logs
-
-Isolation Boundaries:
-├── User workspace isolation
-├── Extension sandboxing
-├── Provider credential isolation
-└── Process and network isolation
-
-

Development Methodology Principles

-

9. Configuration-Driven Testing

-

Principle: Tests should be configuration-driven and validate both happy path and error conditions.

-

Rationale: Infrastructure systems must work across diverse environments and configurations. Tests must validate the configuration-driven nature of -the system.

-

Testing Strategy:

-
Unit Testing:
-├── Configuration validation tests
-├── Individual component tests
-├── Error condition tests
-└── Performance benchmark tests
-
-Integration Testing:
-├── Multi-provider workflow tests
-├── Configuration hierarchy tests
-├── Error recovery tests
-└── End-to-end scenario tests
-
-System Testing:
-├── Full deployment tests
-├── Upgrade and migration tests
-├── Performance and scalability tests
-└── Security and isolation tests
-
-

Error Handling Principles

-

11. Fail Fast, Recover Gracefully

-

Principle: Validate early and fail fast on errors, but provide comprehensive recovery mechanisms.

-

Rationale: Early validation prevents complex error states, while graceful recovery maintains system reliability.

-

Implementation Guidelines:

-
    -
  • Complete configuration validation before execution
  • -
  • Input validation at system boundaries
  • -
  • Clear error messages without internal stack traces (except in DEBUG mode)
  • -
  • Comprehensive error categorization and handling
  • -
  • Recovery procedures for all error categories
  • -
-

Error Categories:

-
Configuration Errors:
-├── Invalid configuration syntax
-├── Missing required configuration
-├── Configuration conflicts
-└── Schema validation failures
-
-Runtime Errors:
-├── Provider API failures
-├── Network connectivity issues
-├── Resource availability problems
-└── Permission and authentication errors
-
-System Errors:
-├── File system access problems
-├── Memory and resource exhaustion
-├── Process communication failures
-└── External dependency failures
-
-

12. Observable Operations

-

Principle: All operations must be observable through comprehensive logging, metrics, and monitoring.

-

Rationale: Infrastructure operations must be debuggable and monitorable in production environments.

-

Observability Implementation:

-
Logging:
-├── Structured JSON logging
-├── Configurable log levels
-├── Context-aware log messages
-└── Audit trail for all operations
-
-Metrics:
-├── Operation performance metrics
-├── Resource utilization metrics
-├── Error rate and type metrics
-└── Business logic metrics
-
-Monitoring:
-├── Health check endpoints
-├── Real-time status reporting
-├── Workflow progress tracking
-└── Alert integration capabilities
-
-

Evolution and Maintenance Principles

-

13. Backward Compatibility

-

Principle: Maintain backward compatibility for configuration, APIs, and user interfaces.

-

Rationale: Infrastructure systems are long-lived and must support existing configurations and workflows during evolution.

-

Compatibility Guidelines:

-
    -
  • Semantic versioning for all interfaces
  • -
  • Configuration migration tools and procedures
  • -
  • Deprecation warnings and migration guides
  • -
  • API versioning for external interfaces
  • -
  • Comprehensive upgrade testing
  • -
-

14. Documentation-Driven Development

-

Principle: Architecture decisions, APIs, and operational procedures must be thoroughly documented.

-

Rationale: Infrastructure systems are complex and require clear documentation for operation, maintenance, and evolution.

-

Documentation Requirements:

-
    -
  • Architecture Decision Records (ADRs) for major decisions
  • -
  • API documentation with examples
  • -
  • Operational runbooks and procedures
  • -
  • Configuration guides and examples
  • -
  • Troubleshooting guides and common issues
  • -
-

15. Technical Debt Management

-

Principle: Actively manage technical debt through regular assessment and systematic improvement.

-

Rationale: Infrastructure systems accumulate complexity over time. Proactive debt management prevents system degradation.

-

Debt Management Strategy:

-
Assessment:
-├── Regular code quality reviews
-├── Performance profiling and optimization
-├── Security audit and updates
-└── Dependency management and updates
-
-Improvement:
-├── Refactoring for clarity and maintainability
-├── Performance optimization based on metrics
-├── Security enhancement and hardening
-└── Test coverage improvement and validation
-
-

Trade-off Management

-

16. Explicit Trade-off Documentation

-

Principle: All architectural trade-offs must be explicitly documented with rationale and alternatives considered.

-

Rationale: Understanding trade-offs enables informed decision making and future evolution of the system.

-

Trade-off Categories:

-
Performance vs. Maintainability:
-├── Rust coordination layer for performance
-├── Nushell business logic for maintainability
-├── Caching strategies for speed vs. consistency
-└── Parallel processing vs. resource usage
-
-Flexibility vs. Complexity:
-├── Configuration-driven architecture vs. simplicity
-├── Extension framework vs. core system complexity
-├── Multi-provider support vs. specialization
-└── Hierarchical configuration vs. simple key-value
-
-Security vs. Usability:
-├── Workspace isolation vs. convenience
-├── Extension sandboxing vs. functionality
-├── Authentication requirements vs. ease of use
-└── Audit logging vs. performance overhead
-
-

Conclusion

-

These design principles form the foundation of provisioning’s architecture. They guide decision making, ensure quality, and provide a framework for -system evolution. Adherence to these principles has enabled the development of a sophisticated, reliable, and maintainable infrastructure automation -platform.

-

The principles are living guidelines that evolve with the system while maintaining core architectural integrity. They serve as both implementation -guidance and evaluation criteria for new features and modifications.

-

Success in applying these principles is measured by:

-
    -
  • System reliability and error recovery capabilities
  • -
  • Development efficiency and maintainability
  • -
  • Configuration flexibility and user experience
  • -
  • Performance and scalability characteristics
  • -
  • Security and isolation effectiveness
  • -
-

These principles represent the distilled wisdom from building and operating complex infrastructure automation systems at scale.

-

Integration Patterns

-

Overview

-

Provisioning implements sophisticated integration patterns to coordinate between its hybrid Rust/Nushell architecture, manage multi-provider -workflows, and enable extensible functionality. This document outlines the key integration patterns, their implementations, and best practices.

-

Core Integration Patterns

-

1. Hybrid Language Integration

-

Rust-to-Nushell Communication Pattern

-

Use Case: Orchestrator invoking business logic operations

-

Implementation:

-
use tokio::process::Command;
-use serde_json;
-
-pub async fn execute_nushell_workflow(
-    workflow: &str,
-    args: &[String]
-) -> Result<WorkflowResult, Error> {
-    let mut cmd = Command::new("nu");
-    cmd.arg("-c")
-       .arg(format!("use core/nulib/workflows/{}.nu *; {}", workflow, args.join(" ")));
-
-    let output = cmd.output().await?;
-    let result: WorkflowResult = serde_json::from_slice(&output.stdout)?;
-    Ok(result)
-}
-

Data Exchange Format:

-
{
-    "status": "success" | "error" | "partial",
-    "result": {
-        "operation": "server_create",
-        "resources": ["server-001", "server-002"],
-        "metadata": { ... }
-    },
-    "error": null | { "code": "ERR001", "message": "..." },
-    "context": { "workflow_id": "wf-123", "step": 2 }
-}
-
-

Nushell-to-Rust Communication Pattern

-

Use Case: Business logic submitting workflows to orchestrator

-

Implementation:

-
def submit-workflow [workflow: record] -> record {
-    let payload = $workflow | to json
-
-    http post "http://localhost:9090/workflows/submit" {
-        headers: { "Content-Type": "application/json" }
-        body: $payload
-    }
-    | from json
-}
-
-

API Contract:

-
{
-    "workflow_id": "wf-456",
-    "name": "multi_cloud_deployment",
-    "operations": [...],
-    "dependencies": { ... },
-    "configuration": { ... }
-}
-
-

2. Provider Abstraction Pattern

-

Standard Provider Interface

-

Purpose: Uniform API across different cloud providers

-

Interface Definition:

-
# Standard provider interface that all providers must implement
-export def list-servers [] -> table {
-    # Provider-specific implementation
-}
-
-export def create-server [config: record] -> record {
-    # Provider-specific implementation
-}
-
-export def delete-server [id: string] -> nothing {
-    # Provider-specific implementation
-}
-
-export def get-server [id: string] -> record {
-    # Provider-specific implementation
-}
-
-

Configuration Integration:

-
[providers.aws]
-region = "us-west-2"
-credentials_profile = "default"
-timeout = 300
-
-[providers.upcloud]
-zone = "de-fra1"
-api_endpoint = "https://api.upcloud.com"
-timeout = 180
-
-[providers.local]
-docker_socket = "/var/run/docker.sock"
-network_mode = "bridge"
-
-

Provider Discovery and Loading

-
def load-providers [] -> table {
-    let provider_dirs = glob "providers/*/nulib"
-
-    $provider_dirs
-    | each { |dir|
-        let provider_name = $dir | path basename | path dirname | path basename
-        let provider_config = get-provider-config $provider_name
-
-        {
-            name: $provider_name,
-            path: $dir,
-            config: $provider_config,
-            available: (test-provider-connectivity $provider_name)
-        }
-    }
-}
-
-

3. Configuration Resolution Pattern

-

Hierarchical Configuration Loading

-

Implementation:

-
def resolve-configuration [context: record] -> record {
-    let base_config = open config.defaults.toml
-    let user_config = if ("config.user.toml" | path exists) {
-        open config.user.toml
-    } else { {} }
-
-    let env_config = if ($env.PROVISIONING_ENV? | is-not-empty) {
-        let env_file = $"config.($env.PROVISIONING_ENV).toml"
-        if ($env_file | path exists) { open $env_file } else { {} }
-    } else { {} }
-
-    let merged_config = $base_config
-    | merge $user_config
-    | merge $env_config
-    | merge ($context.runtime_config? | default {})
-
-    interpolate-variables $merged_config
-}
-
-

Variable Interpolation Pattern

-
def interpolate-variables [config: record] -> record {
-    let interpolations = {
-        "{{paths.base}}": ($env.PWD),
-        "{{env.HOME}}": ($env.HOME),
-        "{{now.date}}": (date now | format date "%Y-%m-%d"),
-        "{{git.branch}}": (git branch --show-current | str trim)
-    }
-
-    $config
-    | to json
-    | str replace --all "{{paths.base}}" $interpolations."{{paths.base}}"
-    | str replace --all "{{env.HOME}}" $interpolations."{{env.HOME}}"
-    | str replace --all "{{now.date}}" $interpolations."{{now.date}}"
-    | str replace --all "{{git.branch}}" $interpolations."{{git.branch}}"
-    | from json
-}
-
-

4. Workflow Orchestration Patterns

-

Dependency Resolution Pattern

-

Use Case: Managing complex workflow dependencies

-

Implementation (Rust):

-
use petgraph::{Graph, Direction};
-use std::collections::HashMap;
-
-pub struct DependencyResolver {
-    graph: Graph<String, ()>,
-    node_map: HashMap<String, petgraph::graph::NodeIndex>,
-}
-
-impl DependencyResolver {
-    pub fn resolve_execution_order(&self) -> Result<Vec<String>, Error> {
-        let mut topo = petgraph::algo::toposort(&self.graph, None)
-            .map_err(|_| Error::CyclicDependency)?;
-
-        Ok(topo.into_iter()
-            .map(|idx| self.graph[idx].clone())
-            .collect())
-    }
-
-    pub fn add_dependency(&mut self, from: &str, to: &str) {
-        let from_idx = self.get_or_create_node(from);
-        let to_idx = self.get_or_create_node(to);
-        self.graph.add_edge(from_idx, to_idx, ());
-    }
-}
-

Parallel Execution Pattern

-
use tokio::task::JoinSet;
-use futures::stream::{FuturesUnordered, StreamExt};
-
-pub async fn execute_parallel_batch(
-    operations: Vec<Operation>,
-    parallelism_limit: usize
-) -> Result<Vec<OperationResult>, Error> {
-    let semaphore = tokio::sync::Semaphore::new(parallelism_limit);
-    let mut join_set = JoinSet::new();
-
-    for operation in operations {
-        let permit = semaphore.clone();
-        join_set.spawn(async move {
-            let _permit = permit.acquire().await?;
-            execute_operation(operation).await
-        });
-    }
-
-    let mut results = Vec::new();
-    while let Some(result) = join_set.join_next().await {
-        results.push(result??);
-    }
-
-    Ok(results)
-}
-

5. State Management Patterns

-

Checkpoint-Based Recovery Pattern

-

Use Case: Reliable state persistence and recovery

-

Implementation:

-
#[derive(Serialize, Deserialize)]
-pub struct WorkflowCheckpoint {
-    pub workflow_id: String,
-    pub step: usize,
-    pub completed_operations: Vec<String>,
-    pub current_state: serde_json::Value,
-    pub metadata: HashMap<String, String>,
-    pub timestamp: chrono::DateTime<chrono::Utc>,
-}
-
-pub struct CheckpointManager {
-    checkpoint_dir: PathBuf,
-}
-
-impl CheckpointManager {
-    pub fn save_checkpoint(&self, checkpoint: &WorkflowCheckpoint) -> Result<(), Error> {
-        let checkpoint_file = self.checkpoint_dir
-            .join(&checkpoint.workflow_id)
-            .with_extension("json");
-
-        let checkpoint_data = serde_json::to_string_pretty(checkpoint)?;
-        std::fs::write(checkpoint_file, checkpoint_data)?;
-        Ok(())
-    }
-
-    pub fn restore_checkpoint(&self, workflow_id: &str) -> Result<Option<WorkflowCheckpoint>, Error> {
-        let checkpoint_file = self.checkpoint_dir
-            .join(workflow_id)
-            .with_extension("json");
-
-        if checkpoint_file.exists() {
-            let checkpoint_data = std::fs::read_to_string(checkpoint_file)?;
-            let checkpoint = serde_json::from_str(&checkpoint_data)?;
-            Ok(Some(checkpoint))
-        } else {
-            Ok(None)
-        }
-    }
-}
-

Rollback Pattern

-
pub struct RollbackManager {
-    rollback_stack: Vec<RollbackAction>,
-}
-
-#[derive(Clone, Debug)]
-pub enum RollbackAction {
-    DeleteResource { provider: String, resource_id: String },
-    RestoreFile { path: PathBuf, content: String },
-    RevertConfiguration { key: String, value: serde_json::Value },
-    CustomAction { command: String, args: Vec<String> },
-}
-
-impl RollbackManager {
-    pub async fn execute_rollback(&self) -> Result<(), Error> {
-        // Execute rollback actions in reverse order
-        for action in self.rollback_stack.iter().rev() {
-            match action {
-                RollbackAction::DeleteResource { provider, resource_id } => {
-                    self.delete_resource(provider, resource_id).await?;
-                }
-                RollbackAction::RestoreFile { path, content } => {
-                    tokio::fs::write(path, content).await?;
-                }
-                // ... handle other rollback actions
-            }
-        }
-        Ok(())
-    }
-}
-

6. Event and Messaging Patterns

-

Event-Driven Architecture Pattern

-

Use Case: Decoupled communication between components

-

Event Definition:

-
#[derive(Serialize, Deserialize, Clone, Debug)]
-pub enum SystemEvent {
-    WorkflowStarted { workflow_id: String, name: String },
-    WorkflowCompleted { workflow_id: String, result: WorkflowResult },
-    WorkflowFailed { workflow_id: String, error: String },
-    ResourceCreated { provider: String, resource_type: String, resource_id: String },
-    ResourceDeleted { provider: String, resource_type: String, resource_id: String },
-    ConfigurationChanged { key: String, old_value: serde_json::Value, new_value: serde_json::Value },
-}
-

Event Bus Implementation:

-
use tokio::sync::broadcast;
-
-pub struct EventBus {
-    sender: broadcast::Sender<SystemEvent>,
-}
-
-impl EventBus {
-    pub fn new(capacity: usize) -> Self {
-        let (sender, _) = broadcast::channel(capacity);
-        Self { sender }
-    }
-
-    pub fn publish(&self, event: SystemEvent) -> Result<(), Error> {
-        self.sender.send(event)
-            .map_err(|_| Error::EventPublishFailed)?;
-        Ok(())
-    }
-
-    pub fn subscribe(&self) -> broadcast::Receiver<SystemEvent> {
-        self.sender.subscribe()
-    }
-}
-

7. Extension Integration Patterns

-

Extension Discovery and Loading

-
def discover-extensions [] -> table {
-    let extension_dirs = glob "extensions/*/extension.toml"
-
-    $extension_dirs
-    | each { |manifest_path|
-        let extension_dir = $manifest_path | path dirname
-        let manifest = open $manifest_path
-
-        {
-            name: $manifest.extension.name,
-            version: $manifest.extension.version,
-            type: $manifest.extension.type,
-            path: $extension_dir,
-            manifest: $manifest,
-            valid: (validate-extension $manifest),
-            compatible: (check-compatibility $manifest.compatibility)
-        }
-    }
-    | where valid and compatible
-}
-
-

Extension Interface Pattern

-
# Standard extension interface
-export def extension-info [] -> record {
-    {
-        name: "custom-provider",
-        version: "1.0.0",
-        type: "provider",
-        description: "Custom cloud provider integration",
-        entry_points: {
-            cli: "nulib/cli.nu",
-            provider: "nulib/provider.nu"
-        }
-    }
-}
-
-export def extension-validate [] -> bool {
-    # Validate extension configuration and dependencies
-    true
-}
-
-export def extension-activate [] -> nothing {
-    # Perform extension activation tasks
-}
-
-export def extension-deactivate [] -> nothing {
-    # Perform extension cleanup tasks
-}
-
-

8. API Design Patterns

-

REST API Standardization

-

Base API Structure:

-
use axum::{
-    extract::{Path, State},
-    response::Json,
-    routing::{get, post, delete},
-    Router,
-};
-
-pub fn create_api_router(state: AppState) -> Router {
-    Router::new()
-        .route("/health", get(health_check))
-        .route("/workflows", get(list_workflows).post(create_workflow))
-        .route("/workflows/:id", get(get_workflow).delete(delete_workflow))
-        .route("/workflows/:id/status", get(workflow_status))
-        .route("/workflows/:id/logs", get(workflow_logs))
-        .with_state(state)
-}
-

Standard Response Format:

-
{
-    "status": "success" | "error" | "pending",
-    "data": { ... },
-    "metadata": {
-        "timestamp": "2025-09-26T12:00:00Z",
-        "request_id": "req-123",
-        "version": "3.1.0"
-    },
-    "error": null | {
-        "code": "ERR001",
-        "message": "Human readable error",
-        "details": { ... }
-    }
-}
-
-

Error Handling Patterns

-

Structured Error Pattern

-
#[derive(thiserror::Error, Debug)]
-pub enum ProvisioningError {
-    #[error("Configuration error: {message}")]
-    Configuration { message: String },
-
-    #[error("Provider error [{provider}]: {message}")]
-    Provider { provider: String, message: String },
-
-    #[error("Workflow error [{workflow_id}]: {message}")]
-    Workflow { workflow_id: String, message: String },
-
-    #[error("Resource error [{resource_type}/{resource_id}]: {message}")]
-    Resource { resource_type: String, resource_id: String, message: String },
-}
-

Error Recovery Pattern

-
def with-retry [operation: closure, max_attempts: int = 3] {
-    mut attempts = 0
-    mut last_error = null
-
-    while $attempts < $max_attempts {
-        try {
-            return (do $operation)
-        } catch { |error|
-            $attempts = $attempts + 1
-            $last_error = $error
-
-            if $attempts < $max_attempts {
-                let delay = (2 ** ($attempts - 1)) * 1000  # Exponential backoff
-                sleep $"($delay)ms"
-            }
-        }
-    }
-
-    error make { msg: $"Operation failed after ($max_attempts) attempts: ($last_error)" }
-}
-
-

Performance Optimization Patterns

-

Caching Strategy Pattern

-
use std::sync::Arc;
-use tokio::sync::RwLock;
-use std::collections::HashMap;
-use chrono::{DateTime, Utc, Duration};
-
-#[derive(Clone)]
-pub struct CacheEntry<T> {
-    pub value: T,
-    pub expires_at: DateTime<Utc>,
-}
-
-pub struct Cache<T> {
-    store: Arc<RwLock<HashMap<String, CacheEntry<T>>>>,
-    default_ttl: Duration,
-}
-
-impl<T: Clone> Cache<T> {
-    pub async fn get(&self, key: &str) -> Option<T> {
-        let store = self.store.read().await;
-        if let Some(entry) = store.get(key) {
-            if entry.expires_at > Utc::now() {
-                Some(entry.value.clone())
-            } else {
-                None
-            }
-        } else {
-            None
-        }
-    }
-
-    pub async fn set(&self, key: String, value: T) {
-        let expires_at = Utc::now() + self.default_ttl;
-        let entry = CacheEntry { value, expires_at };
-
-        let mut store = self.store.write().await;
-        store.insert(key, entry);
-    }
-}
-

Streaming Pattern for Large Data

-
def process-large-dataset [source: string] -> nothing {
-    # Stream processing instead of loading entire dataset
-    open $source
-    | lines
-    | each { |line|
-        # Process line individually
-        $line | process-record
-    }
-    | save output.json
-}
-
-

Testing Integration Patterns

-

Integration Test Pattern

-
#[cfg(test)]
-mod integration_tests {
-    use super::*;
-    use tokio_test;
-
-    #[tokio::test]
-    async fn test_workflow_execution() {
-        let orchestrator = setup_test_orchestrator().await;
-        let workflow = create_test_workflow();
-
-        let result = orchestrator.execute_workflow(workflow).await;
-
-        assert!(result.is_ok());
-        assert_eq!(result.unwrap().status, WorkflowStatus::Completed);
-    }
-}
-

These integration patterns provide the foundation for the system’s sophisticated multi-component architecture, enabling reliable, scalable, and -maintainable infrastructure automation.

-

Orchestrator Integration Model - Deep Dive

-

Date: 2025-10-01 -Status: Clarification Document -Related: Multi-Repo Strategy, Hybrid Orchestrator v3.0

-

Executive Summary

-

This document clarifies how the Rust orchestrator integrates with Nushell core in both monorepo and multi-repo architectures. The orchestrator is -a critical performance layer that coordinates Nushell business logic execution, solving deep call stack limitations while preserving all existing -functionality.

-
-

Current Architecture (Hybrid Orchestrator v3.0)

-

The Problem Being Solved

-

Original Issue:

-
Deep call stack in Nushell (template.nu:71)
-→ "Type not supported" errors
-→ Cannot handle complex nested workflows
-→ Performance bottlenecks with recursive calls
-
-

Solution: Rust orchestrator provides:

-
    -
  1. Task queue management (file-based, reliable)
  2. -
  3. Priority scheduling (intelligent task ordering)
  4. -
  5. Deep call stack elimination (Rust handles recursion)
  6. -
  7. Performance optimization (async/await, parallel execution)
  8. -
  9. State management (workflow checkpointing)
  10. -
-

How It Works Today (Monorepo)

-
┌─────────────────────────────────────────────────────────────┐
-│                        User                                  │
-└───────────────────────────┬─────────────────────────────────┘
-                            │ calls
-                            ↓
-                    ┌───────────────┐
-                    │ provisioning  │ (Nushell CLI)
-                    │      CLI      │
-                    └───────┬───────┘
-                            │
-        ┌───────────────────┼───────────────────┐
-        │                   │                   │
-        ↓                   ↓                   ↓
-┌───────────────┐   ┌───────────────┐   ┌──────────────┐
-│ Direct Mode   │   │Orchestrated   │   │ Workflow     │
-│ (Simple ops)  │   │ Mode          │   │ Mode         │
-└───────────────┘   └───────┬───────┘   └──────┬───────┘
-                            │                   │
-                            ↓                   ↓
-                    ┌────────────────────────────────┐
-                    │   Rust Orchestrator Service    │
-                    │   (Background daemon)           │
-                    │                                 │
-                    │ • Task Queue (file-based)      │
-                    │ • Priority Scheduler           │
-                    │ • Workflow Engine              │
-                    │ • REST API Server              │
-                    └────────┬───────────────────────┘
-                            │ spawns
-                            ↓
-                    ┌────────────────┐
-                    │ Nushell        │
-                    │ Business Logic │
-                    │                │
-                    │ • servers.nu   │
-                    │ • taskservs.nu │
-                    │ • clusters.nu  │
-                    └────────────────┘
-
-

Three Execution Modes

-

Mode 1: Direct Mode (Simple Operations)

-
# No orchestrator needed
-provisioning server list
-provisioning env
-provisioning help
-
-# Direct Nushell execution
-provisioning (CLI) → Nushell scripts → Result
-
-

Mode 2: Orchestrated Mode (Complex Operations)

-
# Uses orchestrator for coordination
-provisioning server create --orchestrated
-
-# Flow:
-provisioning CLI → Orchestrator API → Task Queue → Nushell executor
-                                                 ↓
-                                            Result back to user
-
-

Mode 3: Workflow Mode (Batch Operations)

-
# Complex workflows with dependencies
-provisioning workflow submit server-cluster.ncl
-
-# Flow:
-provisioning CLI → Orchestrator Workflow Engine → Dependency Graph
-                                                 ↓
-                                            Parallel task execution
-                                                 ↓
-                                            Nushell scripts for each task
-                                                 ↓
-                                            Checkpoint state
-
-
-

Integration Patterns

-

Pattern 1: CLI Submits Tasks to Orchestrator

-

Current Implementation:

-

Nushell CLI (core/nulib/workflows/server_create.nu):

-
# Submit server creation workflow to orchestrator
-export def server_create_workflow [
-    infra_name: string
-    --orchestrated
-] {
-    if $orchestrated {
-        # Submit task to orchestrator
-        let task = {
-            type: "server_create"
-            infra: $infra_name
-            params: { ... }
-        }
-
-        # POST to orchestrator REST API
-        http post http://localhost:9090/workflows/servers/create $task
-    } else {
-        # Direct execution (old way)
-        do-server-create $infra_name
-    }
-}
-
-

Rust Orchestrator (platform/orchestrator/src/api/workflows.rs):

-
// Receive workflow submission from Nushell CLI
-#[axum::debug_handler]
-async fn create_server_workflow(
-    State(state): State<Arc<AppState>>,
-    Json(request): Json<ServerCreateRequest>,
-) -> Result<Json<WorkflowResponse>, ApiError> {
-    // Create task
-    let task = Task {
-        id: Uuid::new_v4(),
-        task_type: TaskType::ServerCreate,
-        payload: serde_json::to_value(&request)?,
-        priority: Priority::Normal,
-        status: TaskStatus::Pending,
-        created_at: Utc::now(),
-    };
-
-    // Queue task
-    state.task_queue.enqueue(task).await?;
-
-    // Return immediately (async execution)
-    Ok(Json(WorkflowResponse {
-        workflow_id: task.id,
-        status: "queued",
-    }))
-}
-

Flow:

-
User → provisioning server create --orchestrated
-     ↓
-Nushell CLI prepares task
-     ↓
-HTTP POST to orchestrator (localhost:9090)
-     ↓
-Orchestrator queues task
-     ↓
-Returns workflow ID immediately
-     ↓
-User can monitor: provisioning workflow monitor <id>
-
-

Pattern 2: Orchestrator Executes Nushell Scripts

-

Orchestrator Task Executor (platform/orchestrator/src/executor.rs):

-
// Orchestrator spawns Nushell to execute business logic
-pub async fn execute_task(task: Task) -> Result<TaskResult> {
-    match task.task_type {
-        TaskType::ServerCreate => {
-            // Orchestrator calls Nushell script via subprocess
-            let output = Command::new("nu")
-                .arg("-c")
-                .arg(format!(
-                    "use {}/servers/create.nu; create-server '{}'",
-                    PROVISIONING_LIB_PATH,
-                    task.payload.infra_name
-                ))
-                .output()
-                .await?;
-
-            // Parse Nushell output
-            let result = parse_nushell_output(&output)?;
-
-            Ok(TaskResult {
-                task_id: task.id,
-                status: if result.success { "completed" } else { "failed" },
-                output: result.data,
-            })
-        }
-        // Other task types...
-    }
-}
-

Flow:

-
Orchestrator task queue has pending task
-     ↓
-Executor picks up task
-     ↓
-Spawns Nushell subprocess: nu -c "use servers/create.nu; create-server 'wuji'"
-     ↓
-Nushell executes business logic
-     ↓
-Returns result to orchestrator
-     ↓
-Orchestrator updates task status
-     ↓
-User monitors via: provisioning workflow status <id>
-
-

Pattern 3: Bidirectional Communication

-

Nushell Calls Orchestrator API:

-
# Nushell script checks orchestrator status during execution
-export def check-orchestrator-health [] {
-    let response = (http get http://localhost:9090/health)
-
-    if $response.status != "healthy" {
-        error make { msg: "Orchestrator not available" }
-    }
-
-    $response
-}
-
-# Nushell script reports progress to orchestrator
-export def report-progress [task_id: string, progress: int] {
-    http post http://localhost:9090/tasks/$task_id/progress {
-        progress: $progress
-        status: "in_progress"
-    }
-}
-
-

Orchestrator Monitors Nushell Execution:

-
// Orchestrator tracks Nushell subprocess
-pub async fn execute_with_monitoring(task: Task) -> Result<TaskResult> {
-    let mut child = Command::new("nu")
-        .arg("-c")
-        .arg(&task.script)
-        .stdout(Stdio::piped())
-        .stderr(Stdio::piped())
-        .spawn()?;
-
-    // Monitor stdout/stderr in real-time
-    let stdout = child.stdout.take().unwrap();
-    tokio::spawn(async move {
-        let reader = BufReader::new(stdout);
-        let mut lines = reader.lines();
-
-        while let Some(line) = lines.next_line().await.unwrap() {
-            // Parse progress updates from Nushell
-            if line.contains("PROGRESS:") {
-                update_task_progress(&line);
-            }
-        }
-    });
-
-    // Wait for completion with timeout
-    let result = tokio::time::timeout(
-        Duration::from_secs(3600),
-        child.wait()
-    ).await??;
-
-    Ok(TaskResult::from_exit_status(result))
-}
-
-

Multi-Repo Architecture Impact

-

Repository Split Doesn’t Change Integration Model

-

In Multi-Repo Setup:

-

Repository: provisioning-core

-
    -
  • Contains: Nushell business logic
  • -
  • Installs to: /usr/local/lib/provisioning/
  • -
  • Package: provisioning-core-3.2.1.tar.gz
  • -
-

Repository: provisioning-platform

-
    -
  • Contains: Rust orchestrator
  • -
  • Installs to: /usr/local/bin/provisioning-orchestrator
  • -
  • Package: provisioning-platform-2.5.3.tar.gz
  • -
-

Runtime Integration (Same as Monorepo):

-
User installs both packages:
-  provisioning-core-3.2.1     → /usr/local/lib/provisioning/
-  provisioning-platform-2.5.3 → /usr/local/bin/provisioning-orchestrator
-
-Orchestrator expects core at:  /usr/local/lib/provisioning/
-Core expects orchestrator at:  http://localhost:9090/
-
-No code dependencies, just runtime coordination!
-
-

Configuration-Based Integration

-

Core Package (provisioning-core) config:

-
# /usr/local/share/provisioning/config/config.defaults.toml
-
-[orchestrator]
-enabled = true
-endpoint = "http://localhost:9090"
-timeout = 60
-auto_start = true  # Start orchestrator if not running
-
-[execution]
-default_mode = "orchestrated"  # Use orchestrator by default
-fallback_to_direct = true      # Fall back if orchestrator down
-
-

Platform Package (provisioning-platform) config:

-
# /usr/local/share/provisioning/platform/config.toml
-
-[orchestrator]
-host = "127.0.0.1"
-port = 8080
-data_dir = "/var/lib/provisioning/orchestrator"
-
-[executor]
-nushell_binary = "nu"  # Expects nu in PATH
-provisioning_lib = "/usr/local/lib/provisioning"
-max_concurrent_tasks = 10
-task_timeout_seconds = 3600
-
-

Version Compatibility

-

Compatibility Matrix (provisioning-distribution/versions.toml):

-
[compatibility.platform."2.5.3"]
-core = "^3.2"  # Platform 2.5.3 compatible with core 3.2.x
-min-core = "3.2.0"
-api-version = "v1"
-
-[compatibility.core."3.2.1"]
-platform = "^2.5"  # Core 3.2.1 compatible with platform 2.5.x
-min-platform = "2.5.0"
-orchestrator-api = "v1"
-
-
-

Execution Flow Examples

-

Example 1: Simple Server Creation (Direct Mode)

-

No Orchestrator Needed:

-
provisioning server list
-
-# Flow:
-CLI → servers/list.nu → Query state → Return results
-(Orchestrator not involved)
-
-

Example 2: Server Creation with Orchestrator

-

Using Orchestrator:

-
provisioning server create --orchestrated --infra wuji
-
-# Detailed Flow:
-1. User executes command
-   ↓
-2. Nushell CLI (provisioning binary)
-   ↓
-3. Reads config: orchestrator.enabled = true
-   ↓
-4. Prepares task payload:
-   {
-     type: "server_create",
-     infra: "wuji",
-     params: { ... }
-   }
-   ↓
-5. HTTP POST → http://localhost:9090/workflows/servers/create
-   ↓
-6. Orchestrator receives request
-   ↓
-7. Creates task with UUID
-   ↓
-8. Enqueues to task queue (file-based: /var/lib/provisioning/queue/)
-   ↓
-9. Returns immediately: { workflow_id: "abc-123", status: "queued" }
-   ↓
-10. User sees: "Workflow submitted: abc-123"
-   ↓
-11. Orchestrator executor picks up task
-   ↓
-12. Spawns Nushell subprocess:
-    nu -c "use /usr/local/lib/provisioning/servers/create.nu; create-server 'wuji'"
-   ↓
-13. Nushell executes business logic:
-    - Reads Nickel config
-    - Calls provider API (UpCloud/AWS)
-    - Creates server
-    - Returns result
-   ↓
-14. Orchestrator captures output
-   ↓
-15. Updates task status: "completed"
-   ↓
-16. User monitors: provisioning workflow status abc-123
-    → Shows: "Server wuji created successfully"
-
-

Example 3: Batch Workflow with Dependencies

-

Complex Workflow:

-
provisioning batch submit multi-cloud-deployment.ncl
-
-# Workflow contains:
-- Create 5 servers (parallel)
-- Install Kubernetes on servers (depends on server creation)
-- Deploy applications (depends on Kubernetes)
-
-# Detailed Flow:
-1. CLI submits Nickel workflow to orchestrator
-   ↓
-2. Orchestrator parses workflow
-   ↓
-3. Builds dependency graph using petgraph (Rust)
-   ↓
-4. Topological sort determines execution order
-   ↓
-5. Creates tasks for each operation
-   ↓
-6. Executes in parallel where possible:
-
-   [Server 1] [Server 2] [Server 3] [Server 4] [Server 5]
-       ↓          ↓          ↓          ↓          ↓
-   (All execute in parallel via Nushell subprocesses)
-       ↓          ↓          ↓          ↓          ↓
-       └──────────┴──────────┴──────────┴──────────┘
-                           │
-                           ↓
-                    [All servers ready]
-                           ↓
-                  [Install Kubernetes]
-                  (Nushell subprocess)
-                           ↓
-                  [Kubernetes ready]
-                           ↓
-                  [Deploy applications]
-                  (Nushell subprocess)
-                           ↓
-                       [Complete]
-
-7. Orchestrator checkpoints state at each step
-   ↓
-8. If failure occurs, can retry from checkpoint
-   ↓
-9. User monitors real-time: provisioning batch monitor <id>
-
-
-

Why This Architecture

-

Orchestrator Benefits

-
    -
  1. -

    Eliminates Deep Call Stack Issues

    -
    
    -Without Orchestrator:
    -template.nu → calls → cluster.nu → calls → taskserv.nu → calls → provider.nu
    -(Deep nesting causes "Type not supported" errors)
    -
    -With Orchestrator:
    -Orchestrator → spawns → Nushell subprocess (flat execution)
    -(No deep nesting, fresh Nushell context for each task)
    -
    -
    -
  2. -
  3. -

    Performance Optimization

    -
    // Orchestrator executes tasks in parallel
    -let tasks = vec![task1, task2, task3, task4, task5];
    -
    -let results = futures::future::join_all(
    -    tasks.iter().map(|t| execute_task(t))
    -).await;
    -
    -// 5 Nushell subprocesses run concurrently
    -
  4. -
  5. -

    Reliable State Management

    -
  6. -
-
   Orchestrator maintains:
-   - Task queue (survives crashes)
-   - Workflow checkpoints (resume on failure)
-   - Progress tracking (real-time monitoring)
-   - Retry logic (automatic recovery)
-
-
    -
  1. Clean Separation
  2. -
-
   Orchestrator (Rust):     Performance, concurrency, state
-   Business Logic (Nushell): Providers, taskservs, workflows
-
-   Each does what it's best at!
-
-

Why NOT Pure Rust

-

Question: Why not implement everything in Rust?

-

Answer:

-
    -
  1. -

    Nushell is perfect for infrastructure automation:

    -
      -
    • Shell-like scripting for system operations
    • -
    • Built-in structured data handling
    • -
    • Easy template rendering
    • -
    • Readable business logic
    • -
    -
  2. -
  3. -

    Rapid iteration:

    -
      -
    • Change Nushell scripts without recompiling
    • -
    • Community can contribute Nushell modules
    • -
    • Template-based configuration generation
    • -
    -
  4. -
  5. -

    Best of both worlds:

    -
      -
    • Rust: Performance, type safety, concurrency
    • -
    • Nushell: Flexibility, readability, ease of use
    • -
    -
  6. -
-
-

Multi-Repo Integration Example

-

Installation

-

User installs bundle:

-
curl -fsSL https://get.provisioning.io | sh
-
-# Installs:
-1. provisioning-core-3.2.1.tar.gz
-   → /usr/local/bin/provisioning (Nushell CLI)
-   → /usr/local/lib/provisioning/ (Nushell libraries)
-   → /usr/local/share/provisioning/ (configs, templates)
-
-2. provisioning-platform-2.5.3.tar.gz
-   → /usr/local/bin/provisioning-orchestrator (Rust binary)
-   → /usr/local/share/provisioning/platform/ (platform configs)
-
-3. Sets up systemd/launchd service for orchestrator
-
-

Runtime Coordination

-

Core package expects orchestrator:

-
# core/nulib/lib_provisioning/orchestrator/client.nu
-
-# Check if orchestrator is running
-export def orchestrator-available [] {
-    let config = (load-config)
-    let endpoint = $config.orchestrator.endpoint
-
-    try {
-        let response = (http get $"($endpoint)/health")
-        $response.status == "healthy"
-    } catch {
-        false
-    }
-}
-
-# Auto-start orchestrator if needed
-export def ensure-orchestrator [] {
-    if not (orchestrator-available) {
-        if (load-config).orchestrator.auto_start {
-            print "Starting orchestrator..."
-            ^provisioning-orchestrator --daemon
-            sleep 2sec
-        }
-    }
-}
-
-

Platform package executes core scripts:

-
// platform/orchestrator/src/executor/nushell.rs
-
-pub struct NushellExecutor {
-    provisioning_lib: PathBuf,  // /usr/local/lib/provisioning
-    nu_binary: PathBuf,          // nu (from PATH)
-}
-
-impl NushellExecutor {
-    pub async fn execute_script(&self, script: &str) -> Result<Output> {
-        Command::new(&self.nu_binary)
-            .env("NU_LIB_DIRS", &self.provisioning_lib)
-            .arg("-c")
-            .arg(script)
-            .output()
-            .await
-    }
-
-    pub async fn execute_module_function(
-        &self,
-        module: &str,
-        function: &str,
-        args: &[String],
-    ) -> Result<Output> {
-        let script = format!(
-            "use {}/{}; {} {}",
-            self.provisioning_lib.display(),
-            module,
-            function,
-            args.join(" ")
-        );
-
-        self.execute_script(&script).await
-    }
-}
-
-

Configuration Examples

-

Core Package Config

-

/usr/local/share/provisioning/config/config.defaults.toml:

-
[orchestrator]
-enabled = true
-endpoint = "http://localhost:9090"
-timeout_seconds = 60
-auto_start = true
-fallback_to_direct = true
-
-[execution]
-# Modes: "direct", "orchestrated", "auto"
-default_mode = "auto"  # Auto-detect based on complexity
-
-# Operations that always use orchestrator
-force_orchestrated = [
-    "server.create",
-    "cluster.create",
-    "batch.*",
-    "workflow.*"
-]
-
-# Operations that always run direct
-force_direct = [
-    "*.list",
-    "*.show",
-    "help",
-    "version"
-]
-
-

Platform Package Config

-

/usr/local/share/provisioning/platform/config.toml:

-
[server]
-host = "127.0.0.1"
-port = 8080
-
-[storage]
-backend = "filesystem"  # or "surrealdb"
-data_dir = "/var/lib/provisioning/orchestrator"
-
-[executor]
-max_concurrent_tasks = 10
-task_timeout_seconds = 3600
-checkpoint_interval_seconds = 30
-
-[nushell]
-binary = "nu"  # Expects nu in PATH
-provisioning_lib = "/usr/local/lib/provisioning"
-env_vars = { NU_LIB_DIRS = "/usr/local/lib/provisioning" }
-
-
-

Key Takeaways

-

1. Orchestrator is Essential

-
    -
  • Solves deep call stack problems
  • -
  • Provides performance optimization
  • -
  • Enables complex workflows
  • -
  • NOT optional for production use
  • -
-

2. Integration is Loose but Coordinated

-
    -
  • No code dependencies between repos
  • -
  • Runtime integration via CLI + REST API
  • -
  • Configuration-driven coordination
  • -
  • Works in both monorepo and multi-repo
  • -
-

3. Best of Both Worlds

-
    -
  • Rust: High-performance coordination
  • -
  • Nushell: Flexible business logic
  • -
  • Clean separation of concerns
  • -
  • Each technology does what it’s best at
  • -
-

4. Multi-Repo Doesn’t Change Integration

-
    -
  • Same runtime model as monorepo
  • -
  • Package installation sets up paths
  • -
  • Configuration enables discovery
  • -
  • Versioning ensures compatibility
  • -
-
-

Conclusion

-

The confusing example in the multi-repo doc was oversimplified. The real architecture is:

-
✅ Orchestrator IS USED and IS ESSENTIAL
-✅ Platform (Rust) coordinates Core (Nushell) execution
-✅ Loose coupling via CLI + REST API (not code dependencies)
-✅ Works identically in monorepo and multi-repo
-✅ Configuration-based integration (no hardcoded paths)
-
-

The orchestrator provides:

-
    -
  • Performance layer (async, parallel execution)
  • -
  • Workflow engine (complex dependencies)
  • -
  • State management (checkpoints, recovery)
  • -
  • Task queue (reliable execution)
  • -
-

While Nushell provides:

-
    -
  • Business logic (providers, taskservs, clusters)
  • -
  • Template rendering (Jinja2 via nu_plugin_tera)
  • -
  • Configuration management (KCL integration)
  • -
  • User-facing scripting
  • -
-

Multi-repo just splits WHERE the code lives, not HOW it works together.

-

Multi-Repository Architecture with OCI Registry Support

-

Version: 1.0.0 -Date: 2025-10-06 -Status: Implementation Complete

-

Overview

-

This document describes the multi-repository architecture for the provisioning system, enabling modular development, independent versioning, and -distributed extension management through OCI registry integration.

-

Architecture Goals

-
    -
  1. Separation of Concerns: Core, Extensions, and Platform in separate repositories
  2. -
  3. Independent Versioning: Each component can be versioned and released independently
  4. -
  5. Distributed Development: Multiple teams can work on different repositories
  6. -
  7. OCI-Native Distribution: Extensions distributed as OCI artifacts
  8. -
  9. Dependency Management: Automated dependency resolution across repositories
  10. -
  11. Backward Compatibility: Support legacy monorepo structure during transition
  12. -
-

Repository Structure

-

Repository 1: provisioning-core

-

Purpose: Core system functionality - CLI, libraries, base schemas

-
provisioning-core/
-├── core/
-│   ├── cli/                    # Command-line interface
-│   │   ├── provisioning        # Main CLI entry point
-│   │   └── module-loader       # Dynamic module loader
-│   ├── nulib/                  # Core Nushell libraries
-│   │   ├── lib_provisioning/   # Core library modules
-│   │   │   ├── config/         # Configuration management
-│   │   │   ├── oci/            # OCI client integration
-│   │   │   ├── dependencies/   # Dependency resolution
-│   │   │   ├── module/         # Module system
-│   │   │   ├── layer/          # Layer system
-│   │   │   └── workspace/      # Workspace management
-│   │   └── workflows/          # Core workflow system
-│   ├── plugins/                # System plugins
-│   └── scripts/                # Utility scripts
-├── schemas/                    # Base Nickel schemas
-│   ├── main.ncl                # Main schema entry
-│   ├── lib.ncl                 # Core library types
-│   ├── settings.ncl            # Settings schema
-│   ├── dependencies.ncl        # Dependency schemas (with OCI support)
-│   ├── server.ncl              # Server schemas
-│   ├── cluster.ncl             # Cluster schemas
-│   └── workflows.ncl           # Workflow schemas
-├── config/                     # Core configuration templates
-├── templates/                  # Core templates
-├── tools/                      # Build and distribution tools
-│   ├── oci-package.nu          # OCI packaging tool
-│   ├── build-core.nu           # Core build script
-│   └── release-core.nu         # Core release script
-├── tests/                      # Core system tests
-└── docs/                       # Core documentation
-    ├── api/                    # API documentation
-    ├── architecture/           # Architecture docs
-    └── development/            # Development guides
-
-
-

Distribution:

-
    -
  • Published as OCI artifact: oci://registry/provisioning-core:v3.5.0
  • -
  • Contains all core functionality needed to run the provisioning system
  • -
  • Version format: v{major}.{minor}.{patch} (for example, v3.5.0)
  • -
-

CI/CD:

-
    -
  • Build on commit to main
  • -
  • Publish OCI artifact on git tag (v*)
  • -
  • Run integration tests before publishing
  • -
  • Update changelog automatically
  • -
-
-

Repository 2: provisioning-extensions

-

Purpose: All provider, taskserv, and cluster extensions

-
provisioning-extensions/
-├── providers/
-│   ├── aws/
-│   │   ├── schemas/            # Nickel schemas
-│   │   │   ├── manifest.toml   # Nickel dependencies
-│   │   │   ├── aws.ncl         # Main provider schema
-│   │   │   ├── defaults_aws.ncl # AWS defaults
-│   │   │   └── server_aws.ncl  # AWS server schema
-│   │   ├── scripts/            # Nushell scripts
-│   │   │   └── install.nu      # Installation script
-│   │   ├── templates/          # Provider templates
-│   │   ├── docs/               # Provider documentation
-│   │   └── manifest.yaml       # Extension manifest
-│   ├── upcloud/
-│   │   └── (same structure)
-│   └── local/
-│       └── (same structure)
-├── taskservs/
-│   ├── kubernetes/
-│   │   ├── schemas/
-│   │   │   ├── manifest.toml
-│   │   │   ├── kubernetes.ncl  # Main taskserv schema
-│   │   │   ├── version.ncl     # Version management
-│   │   │   └── dependencies.ncl # Taskserv dependencies
-│   │   ├── scripts/
-│   │   │   ├── install.nu      # Installation script
-│   │   │   ├── check.nu        # Health check script
-│   │   │   └── uninstall.nu    # Uninstall script
-│   │   ├── templates/          # Config templates
-│   │   ├── docs/               # Taskserv docs
-│   │   ├── tests/              # Taskserv tests
-│   │   └── manifest.yaml       # Extension manifest
-│   ├── containerd/
-│   ├── cilium/
-│   ├── postgres/
-│   └── (50+ more taskservs...)
-├── clusters/
-│   ├── buildkit/
-│   │   └── (same structure)
-│   ├── web/
-│   └── (other clusters...)
-├── tools/
-│   ├── extension-builder.nu   # Build individual extensions
-│   ├── mass-publish.nu         # Publish all extensions
-│   └── validate-extensions.nu # Validate all extensions
-└── docs/
-    ├── extension-guide.md      # Extension development guide
-    └── publishing.md           # Publishing guide
-
-
-

Distribution: -Each extension published separately as OCI artifact:

-
    -
  • oci://registry/provisioning-extensions/kubernetes:1.28.0
  • -
  • oci://registry/provisioning-extensions/aws:2.0.0
  • -
  • oci://registry/provisioning-extensions/buildkit:0.12.0
  • -
-

Extension Manifest (manifest.yaml):

-
name: kubernetes
-type: taskserv
-version: 1.28.0
-description: Kubernetes container orchestration platform
-author: Provisioning Team
-license: MIT
-homepage: https://kubernetes.io
-repository: https://gitea.example.com/provisioning-extensions/kubernetes
-
-dependencies:
-  containerd: ">=1.7.0"
-  etcd: ">=3.5.0"
-
-tags:
-  - kubernetes
-  - container-orchestration
-  - cncf
-
-platforms:
-  - linux/amd64
-  - linux/arm64
-
-min_provisioning_version: "3.0.0"
-
-

CI/CD:

-
    -
  • Build and publish each extension independently
  • -
  • Git tag format: {extension-type}/{extension-name}/v{version} -
      -
    • Example: taskservs/kubernetes/v1.28.0
    • -
    -
  • -
  • Automated publishing to OCI registry on tag
  • -
  • Run extension-specific tests before publishing
  • -
-
-

Repository 3: provisioning-platform

-

Purpose: Platform services (orchestrator, control-center, MCP server, API gateway)

-
provisioning-platform/
-├── orchestrator/               # Rust orchestrator service
-│   ├── src/
-│   ├── Cargo.toml
-│   ├── Dockerfile
-│   └── README.md
-├── control-center/             # Web control center
-│   ├── src/
-│   ├── package.json
-│   ├── Dockerfile
-│   └── README.md
-├── mcp-server/                 # Model Context Protocol server
-│   ├── src/
-│   ├── Cargo.toml
-│   ├── Dockerfile
-│   └── README.md
-├── api-gateway/                # REST API gateway
-│   ├── src/
-│   ├── Cargo.toml
-│   ├── Dockerfile
-│   └── README.md
-├── docker-compose.yml          # Local development stack
-├── kubernetes/                 # K8s deployment manifests
-│   ├── orchestrator.yaml
-│   ├── control-center.yaml
-│   ├── mcp-server.yaml
-│   └── api-gateway.yaml
-└── docs/
-    ├── deployment.md
-    └── api-reference.md
-
-
-

Distribution: -Standard Docker images in OCI registry:

-
    -
  • oci://registry/provisioning-platform/orchestrator:v1.2.0
  • -
  • oci://registry/provisioning-platform/control-center:v1.2.0
  • -
  • oci://registry/provisioning-platform/mcp-server:v1.0.0
  • -
  • oci://registry/provisioning-platform/api-gateway:v1.0.0
  • -
-

CI/CD:

-
    -
  • Build Docker images on commit to main
  • -
  • Publish images on git tag (v*)
  • -
  • Multi-architecture builds (amd64, arm64)
  • -
  • Security scanning before publishing
  • -
-
-

OCI Registry Integration

-

Registry Structure

-
OCI Registry (localhost:5000 or harbor.company.com)
-├── provisioning-core/
-│   ├── v3.5.0                  # Core system artifact
-│   ├── v3.4.0
-│   └── latest -> v3.5.0
-├── provisioning-extensions/
-│   ├── kubernetes:1.28.0       # Individual extension artifacts
-│   ├── kubernetes:1.27.0
-│   ├── containerd:1.7.0
-│   ├── aws:2.0.0
-│   ├── upcloud:1.5.0
-│   └── (100+ more extensions)
-└── provisioning-platform/
-    ├── orchestrator:v1.2.0     # Platform service images
-    ├── control-center:v1.2.0
-    ├── mcp-server:v1.0.0
-    └── api-gateway:v1.0.0
-
-
-

OCI Artifact Structure

-

Each extension packaged as OCI artifact:

-
kubernetes-1.28.0.tar.gz
-├── schemas/                    # Nickel schemas
-│   ├── kubernetes.ncl
-│   ├── version.ncl
-│   └── dependencies.ncl
-├── scripts/                    # Nushell scripts
-│   ├── install.nu
-│   ├── check.nu
-│   └── uninstall.nu
-├── templates/                  # Template files
-│   ├── kubeconfig.j2
-│   └── kubelet-config.yaml.j2
-├── docs/                       # Documentation
-│   └── README.md
-├── manifest.yaml               # Extension manifest
-└── oci-manifest.json           # OCI manifest metadata
-
-
-
-

Dependency Management

-

Workspace Configuration

-

File: workspace/config/provisioning.yaml

-
# Core system dependency
-dependencies:
-  core:
-    source: "oci://harbor.company.com/provisioning-core:v3.5.0"
-    # Alternative: source: "gitea://provisioning-core"
-
-  # Extensions repository configuration
-  extensions:
-    source_type: "oci"          # oci, gitea, local
-
-    # OCI registry configuration
-    oci:
-      registry: "localhost:5000"
-      namespace: "provisioning-extensions"
-      tls_enabled: false
-      auth_token_path: "~/.provisioning/tokens/oci"
-
-    # Loaded extension modules
-    modules:
-      providers:
-        - "oci://localhost:5000/provisioning-extensions/aws:2.0.0"
-        - "oci://localhost:5000/provisioning-extensions/upcloud:1.5.0"
-
-      taskservs:
-        - "oci://localhost:5000/provisioning-extensions/kubernetes:1.28.0"
-        - "oci://localhost:5000/provisioning-extensions/containerd:1.7.0"
-        - "oci://localhost:5000/provisioning-extensions/cilium:1.14.0"
-
-      clusters:
-        - "oci://localhost:5000/provisioning-extensions/buildkit:0.12.0"
-
-  # Platform services
-  platform:
-    source_type: "oci"
-
-    oci:
-      registry: "harbor.company.com"
-      namespace: "provisioning-platform"
-
-      images:
-        orchestrator: "harbor.company.com/provisioning-platform/orchestrator:v1.2.0"
-        control_center: "harbor.company.com/provisioning-platform/control-center:v1.2.0"
-
-  # OCI registry configuration
-  registry:
-    type: "oci"                 # oci, gitea, http
-
-    oci:
-      endpoint: "localhost:5000"
-      namespaces:
-        extensions: "provisioning-extensions"
-        nickel: "provisioning-nickel"
-        platform: "provisioning-platform"
-        test: "provisioning-test"
-
-

Dependency Resolution

-

The system resolves dependencies in this order:

-
    -
  1. Parse Configuration: Read provisioning.yaml and extract dependencies
  2. -
  3. Resolve Core: Ensure core system version is compatible
  4. -
  5. Resolve Extensions: For each extension: -
      -
    • Check if already installed and version matches
    • -
    • Pull from OCI registry if needed
    • -
    • Recursively resolve extension dependencies
    • -
    -
  6. -
  7. Validate Graph: Check for dependency cycles and conflicts
  8. -
  9. Install: Install extensions in topological order
  10. -
-

Dependency Resolution Commands

-
# Resolve and install all dependencies
-provisioning dep resolve
-
-# Check for dependency updates
-provisioning dep check-updates
-
-# Update specific extension
-provisioning dep update kubernetes
-
-# Validate dependency graph
-provisioning dep validate
-
-# Show dependency tree
-provisioning dep tree kubernetes
-
-
-

OCI Client Operations

-

CLI Commands

-
# Pull extension from OCI registry
-provisioning oci pull kubernetes:1.28.0
-
-# Push extension to OCI registry
-provisioning oci push ./extensions/kubernetes kubernetes 1.28.0
-
-# List available extensions
-provisioning oci list --namespace provisioning-extensions
-
-# Search for extensions
-provisioning oci search kubernetes
-
-# Show extension versions
-provisioning oci tags kubernetes
-
-# Inspect extension manifest
-provisioning oci inspect kubernetes:1.28.0
-
-# Login to OCI registry
-provisioning oci login localhost:5000 --username _token --password-stdin
-
-# Delete extension
-provisioning oci delete kubernetes:1.28.0
-
-# Copy extension between registries
-provisioning oci copy \
-  localhost:5000/provisioning-extensions/kubernetes:1.28.0 \
-  harbor.company.com/provisioning-extensions/kubernetes:1.28.0
-
-

OCI Configuration

-
# Show OCI configuration
-provisioning oci config
-
-# Output:
-{
-  tool: "oras"  # or "crane" or "skopeo"
-  registry: "localhost:5000"
-  namespace: {
-    extensions: "provisioning-extensions"
-    platform: "provisioning-platform"
-  }
-  cache_dir: "~/.provisioning/oci-cache"
-  tls_enabled: false
-}
-
-
-

Extension Development Workflow

-

1. Develop Extension

-
# Create new extension from template
-provisioning generate extension taskserv redis
-
-# Directory structure created:
-# extensions/taskservs/redis/
-# ├── schemas/
-# │   ├── manifest.toml
-# │   ├── redis.ncl
-# │   ├── version.ncl
-# │   └── dependencies.ncl
-# ├── scripts/
-# │   ├── install.nu
-# │   ├── check.nu
-# │   └── uninstall.nu
-# ├── templates/
-# ├── docs/
-# │   └── README.md
-# ├── tests/
-# └── manifest.yaml
-
-

2. Test Extension Locally

-
# Load extension from local path
-provisioning module load taskserv workspace_dev redis --source local
-
-# Test installation
-provisioning taskserv create redis --infra test-env --check
-
-# Run extension tests
-provisioning test extension redis
-
-

3. Package Extension

-
# Validate extension structure
-provisioning oci package validate ./extensions/taskservs/redis
-
-# Package as OCI artifact
-provisioning oci package ./extensions/taskservs/redis
-
-# Output: redis-1.0.0.tar.gz
-
-

4. Publish Extension

-
# Login to registry (one-time)
-provisioning oci login localhost:5000
-
-# Publish extension
-provisioning oci push ./extensions/taskservs/redis redis 1.0.0
-
-# Verify publication
-provisioning oci tags redis
-
-# Output:
-# ┬───────────┬─────────┬───────────────────────────────────────────────────┐
-# │ artifact  │ version │ reference                                         │
-# ├───────────┼─────────┼───────────────────────────────────────────────────┤
-# │ redis     │ 1.0.0   │ localhost:5000/provisioning-extensions/redis:1.0.0│
-# └───────────┴─────────┴───────────────────────────────────────────────────┘
-
-

5. Use Published Extension

-
# Add to workspace configuration
-# workspace/config/provisioning.yaml:
-# dependencies:
-#   extensions:
-#     modules:
-#       taskservs:
-#         - "oci://localhost:5000/provisioning-extensions/redis:1.0.0"
-
-# Pull and install
-provisioning dep resolve
-
-# Extension automatically downloaded and installed
-
-
-

Registry Deployment Options

-

Local Registry (Solo Development)

-

Using Zot (lightweight OCI registry):

-
# Start local OCI registry
-provisioning oci-registry start
-
-# Configuration:
-# - Endpoint: localhost:5000
-# - Storage: ~/.provisioning/oci-registry/
-# - No authentication by default
-# - TLS disabled (local only)
-
-# Stop registry
-provisioning oci-registry stop
-
-# Check status
-provisioning oci-registry status
-
-

Remote Registry (Multi-User/Enterprise)

-

Using Harbor:

-
# workspace/config/provisioning.yaml
-dependencies:
-  registry:
-    type: "oci"
-    oci:
-      endpoint: "https://harbor.company.com"
-      namespaces:
-        extensions: "provisioning/extensions"
-        platform: "provisioning/platform"
-      tls_enabled: true
-      auth_token_path: "~/.provisioning/tokens/harbor"
-
-

Features:

-
    -
  • Multi-user authentication
  • -
  • Role-based access control (RBAC)
  • -
  • Vulnerability scanning
  • -
  • Replication across registries
  • -
  • Webhook notifications
  • -
  • Image signing (cosign/notation)
  • -
-
-

Migration from Monorepo

-

Phase 1: Parallel Structure (Current)

-
    -
  • Monorepo still exists and works
  • -
  • OCI distribution layer added on top
  • -
  • Extensions can be loaded from local or OCI
  • -
  • No breaking changes
  • -
-

Phase 2: Gradual Migration

-
# Migrate extensions one by one
-for ext in (ls provisioning/extensions/taskservs); do
-  provisioning oci publish $ext.name
-done
-
-# Update workspace configurations to use OCI
-provisioning workspace migrate-to-oci workspace_prod
-
-

Phase 3: Repository Split

-
    -
  1. -

    Create provisioning-core repository

    -
      -
    • Extract core/ and schemas/ directories
    • -
    • Set up CI/CD for core publishing
    • -
    • Publish initial OCI artifact
    • -
    -
  2. -
  3. -

    Create provisioning-extensions repository

    -
      -
    • Extract extensions/ directory
    • -
    • Set up CI/CD for extension publishing
    • -
    • Publish all extensions to OCI registry
    • -
    -
  4. -
  5. -

    Create provisioning-platform repository

    -
      -
    • Extract platform/ directory
    • -
    • Set up Docker image builds
    • -
    • Publish platform services
    • -
    -
  6. -
  7. -

    Update workspaces

    -
      -
    • Reconfigure to use OCI dependencies
    • -
    • Test multi-repo setup
    • -
    • Verify all functionality works
    • -
    -
  8. -
-

Phase 4: Deprecate Monorepo

-
    -
  • Archive monorepo
  • -
  • Redirect to new repositories
  • -
  • Update documentation
  • -
  • Announce migration complete
  • -
-
-

Benefits Summary

-

Modularity

-

✅ Independent repositories for core, extensions, and platform -✅ Extensions can be developed and versioned separately -✅ Clear ownership and responsibility boundaries

-

Distribution

-

✅ OCI-native distribution (industry standard) -✅ Built-in versioning with OCI tags -✅ Efficient caching with OCI layers -✅ Works with standard tools (skopeo, crane, oras)

-

Security

-

✅ TLS support for registries -✅ Authentication and authorization -✅ Vulnerability scanning (Harbor) -✅ Image signing (cosign, notation) -✅ RBAC for access control

-

Developer Experience

-

✅ Simple CLI commands for extension management -✅ Automatic dependency resolution -✅ Local testing before publishing -✅ Easy extension discovery and installation

-

Operations

-

✅ Air-gapped deployments (mirror OCI registry) -✅ Bandwidth efficient (only download what’s needed) -✅ Version pinning for reproducibility -✅ Rollback support (use previous versions)

-

Ecosystem

-

✅ Compatible with existing OCI tooling -✅ Can use public registries (DockerHub, GitHub, etc.) -✅ Mirror to multiple registries -✅ Replication for high availability

-
-

Implementation Status

-
- - - - - - - - - - -
ComponentStatusNotes
Nickel Schemas✅ CompleteOCI schemas in dependencies.ncl
OCI Client✅ Completeoci/client.nu with skopeo/crane/oras
OCI Commands✅ Completeoci/commands.nu CLI interface
Dependency Resolver✅ Completedependencies/resolver.nu
OCI Packaging✅ Completetools/oci-package.nu
Repository Design✅ CompleteThis document
Migration Plan✅ CompletePhased approach defined
Documentation✅ CompleteUser guides and API docs
CI/CD Setup⏳ PendingAutomated publishing pipelines
Registry Deployment⏳ PendingZot/Harbor setup
-
-
- -
    -
  • OCI Packaging Tool - Extension packaging
  • -
  • OCI Client Library - OCI operations
  • -
  • Dependency Resolver - Dependency management
  • -
  • Nickel Schemas - Type definitions
  • -
  • Extension Development Guide - How to create extensions
  • -
-
-

Maintained By: Architecture Team -Review Cycle: Quarterly -Next Review: 2026-01-06

-

Multi-Repository Strategy Analysis

-

Date: 2025-10-01 -Status: Strategic Analysis -Related: Repository Distribution Analysis

-

Executive Summary

-

This document analyzes a multi-repository strategy as an alternative to the monorepo approach. After careful consideration of the provisioning -system’s architecture, a hybrid approach with 4 core repositories is recommended, avoiding submodules in favor of a cleaner package-based -dependency model.

-
-

Repository Architecture Options

-

Option A: Pure Monorepo (Original Recommendation)

-

Single repository: provisioning

-

Pros:

-
    -
  • Simplest development workflow
  • -
  • Atomic cross-component changes
  • -
  • Single version number
  • -
  • One CI/CD pipeline
  • -
-

Cons:

-
    -
  • Large repository size
  • -
  • Mixed language tooling (Rust + Nushell)
  • -
  • All-or-nothing updates
  • -
  • Unclear ownership boundaries
  • -
- -

Repositories:

-
    -
  • provisioning-core (main, contains submodules)
  • -
  • provisioning-platform (submodule)
  • -
  • provisioning-extensions (submodule)
  • -
  • provisioning-workspace (submodule)
  • -
-

Why Not Recommended:

-
    -
  • Submodule hell: complex, error-prone workflows
  • -
  • Detached HEAD issues
  • -
  • Update synchronization nightmares
  • -
  • Clone complexity for users
  • -
  • Difficult to maintain version compatibility
  • -
  • Poor developer experience
  • -
- -

Independent repositories with package-based integration:

-
    -
  • provisioning-core - Nushell libraries and Nickel schemas
  • -
  • provisioning-platform - Rust services (orchestrator, control-center, MCP)
  • -
  • provisioning-extensions - Extension marketplace/catalog
  • -
  • provisioning-workspace - Project templates and examples
  • -
  • provisioning-distribution - Release automation and packaging
  • -
-

Why Recommended:

-
    -
  • Clean separation of concerns
  • -
  • Independent versioning and release cycles
  • -
  • Language-specific tooling and workflows
  • -
  • Clear ownership boundaries
  • -
  • Package-based dependencies (no submodules)
  • -
  • Easier community contributions
  • -
-
- -

Repository 1: provisioning-core

-

Purpose: Core Nushell infrastructure automation engine

-

Contents:

-
provisioning-core/
-├── nulib/                   # Nushell libraries
-│   ├── lib_provisioning/    # Core library functions
-│   ├── servers/             # Server management
-│   ├── taskservs/           # Task service management
-│   ├── clusters/            # Cluster management
-│   └── workflows/           # Workflow orchestration
-├── cli/                     # CLI entry point
-│   └── provisioning         # Pure Nushell CLI
-├── schemas/                 # Nickel schemas
-│   ├── main.ncl
-│   ├── settings.ncl
-│   ├── server.ncl
-│   ├── cluster.ncl
-│   └── workflows.ncl
-├── config/                  # Default configurations
-│   └── config.defaults.toml
-├── templates/               # Core templates
-├── tools/                   # Build and packaging tools
-├── tests/                   # Core tests
-├── docs/                    # Core documentation
-├── LICENSE
-├── README.md
-├── CHANGELOG.md
-└── version.toml             # Core version file
-
-

Technology: Nushell, Nickel -Primary Language: Nushell -Release Frequency: Monthly (stable) -Ownership: Core team -Dependencies: None (foundation)

-

Package Output:

-
    -
  • provisioning-core-{version}.tar.gz - Installable package
  • -
  • Published to package registry
  • -
-

Installation Path:

-
/usr/local/
-├── bin/provisioning
-├── lib/provisioning/
-└── share/provisioning/
-
-
-

Repository 2: provisioning-platform

-

Purpose: High-performance Rust platform services

-

Contents:

-
provisioning-platform/
-├── orchestrator/            # Rust orchestrator
-│   ├── src/
-│   ├── tests/
-│   ├── benches/
-│   └── Cargo.toml
-├── control-center/          # Web control center (Leptos)
-│   ├── src/
-│   ├── tests/
-│   └── Cargo.toml
-├── mcp-server/              # Model Context Protocol server
-│   ├── src/
-│   ├── tests/
-│   └── Cargo.toml
-├── api-gateway/             # REST API gateway
-│   ├── src/
-│   ├── tests/
-│   └── Cargo.toml
-├── shared/                  # Shared Rust libraries
-│   ├── types/
-│   └── utils/
-├── docs/                    # Platform documentation
-├── Cargo.toml               # Workspace root
-├── Cargo.lock
-├── LICENSE
-├── README.md
-└── CHANGELOG.md
-
-

Technology: Rust, WebAssembly -Primary Language: Rust -Release Frequency: Bi-weekly (fast iteration) -Ownership: Platform team -Dependencies:

-
    -
  • provisioning-core (runtime integration, loose coupling)
  • -
-

Package Output:

-
    -
  • provisioning-platform-{version}.tar.gz - Binaries
  • -
  • Binaries for: Linux (x86_64, arm64), macOS (x86_64, arm64)
  • -
-

Installation Path:

-
/usr/local/
-├── bin/
-│   ├── provisioning-orchestrator
-│   └── provisioning-control-center
-└── share/provisioning/platform/
-
-

Integration with Core:

-
    -
  • Platform services call provisioning CLI via subprocess
  • -
  • No direct code dependencies
  • -
  • Communication via REST API and file-based queues
  • -
  • Core and Platform can be deployed independently
  • -
-
-

Repository 3: provisioning-extensions

-

Purpose: Extension marketplace and community modules

-

Contents:

-
provisioning-extensions/
-├── registry/                # Extension registry
-│   ├── index.json          # Searchable index
-│   └── catalog/            # Extension metadata
-├── providers/               # Additional cloud providers
-│   ├── azure/
-│   ├── gcp/
-│   ├── digitalocean/
-│   └── hetzner/
-├── taskservs/               # Community task services
-│   ├── databases/
-│   │   ├── mongodb/
-│   │   ├── redis/
-│   │   └── cassandra/
-│   ├── development/
-│   │   ├── gitlab/
-│   │   ├── jenkins/
-│   │   └── sonarqube/
-│   └── observability/
-│       ├── prometheus/
-│       ├── grafana/
-│       └── loki/
-├── clusters/                # Cluster templates
-│   ├── ml-platform/
-│   ├── data-pipeline/
-│   └── gaming-backend/
-├── workflows/               # Workflow templates
-├── tools/                   # Extension development tools
-├── docs/                    # Extension development guide
-├── LICENSE
-└── README.md
-
-

Technology: Nushell, Nickel -Primary Language: Nushell -Release Frequency: Continuous (per-extension) -Ownership: Community + Core team -Dependencies:

-
    -
  • provisioning-core (extends core functionality)
  • -
-

Package Output:

-
    -
  • Individual extension packages: provisioning-ext-{name}-{version}.tar.gz
  • -
  • Registry index for discovery
  • -
-

Installation:

-
# Install extension via core CLI
-provisioning extension install mongodb
-provisioning extension install azure-provider
-
-

Extension Structure: -Each extension is self-contained:

-
mongodb/
-├── manifest.toml           # Extension metadata
-├── taskserv.nu             # Implementation
-├── templates/              # Templates
-├── schemas/                # Nickel schemas
-├── tests/                  # Tests
-└── README.md
-
-
-

Repository 4: provisioning-workspace

-

Purpose: Project templates and starter kits

-

Contents:

-
provisioning-workspace/
-├── templates/               # Workspace templates
-│   ├── minimal/            # Minimal starter
-│   ├── kubernetes/         # Full K8s cluster
-│   ├── multi-cloud/        # Multi-cloud setup
-│   ├── microservices/      # Microservices platform
-│   ├── data-platform/      # Data engineering
-│   └── ml-ops/             # MLOps platform
-├── examples/               # Complete examples
-│   ├── blog-deployment/
-│   ├── e-commerce/
-│   └── saas-platform/
-├── blueprints/             # Architecture blueprints
-├── docs/                   # Template documentation
-├── tools/                  # Template scaffolding
-│   └── create-workspace.nu
-├── LICENSE
-└── README.md
-
-

Technology: Configuration files, Nickel -Primary Language: TOML, Nickel, YAML -Release Frequency: Quarterly (stable templates) -Ownership: Community + Documentation team -Dependencies:

-
    -
  • provisioning-core (templates use core)
  • -
  • provisioning-extensions (may reference extensions)
  • -
-

Package Output:

-
    -
  • provisioning-templates-{version}.tar.gz
  • -
-

Usage:

-
# Create workspace from template
-provisioning workspace init my-project --template kubernetes
-
-# Or use separate tool
-gh repo create my-project --template provisioning-workspace
-cd my-project
-provisioning workspace init
-
-
-

Repository 5: provisioning-distribution

-

Purpose: Release automation, packaging, and distribution infrastructure

-

Contents:

-
provisioning-distribution/
-├── release-automation/      # Automated release workflows
-│   ├── build-all.nu        # Build all packages
-│   ├── publish.nu          # Publish to registries
-│   └── validate.nu         # Validation suite
-├── installers/             # Installation scripts
-│   ├── install.nu          # Nushell installer
-│   ├── install.sh          # Bash installer
-│   └── install.ps1         # PowerShell installer
-├── packaging/              # Package builders
-│   ├── core/
-│   ├── platform/
-│   └── extensions/
-├── registry/               # Package registry backend
-│   ├── api/               # Registry REST API
-│   └── storage/           # Package storage
-├── ci-cd/                  # CI/CD configurations
-│   ├── github/            # GitHub Actions
-│   ├── gitlab/            # GitLab CI
-│   └── jenkins/           # Jenkins pipelines
-├── version-management/     # Cross-repo version coordination
-│   ├── versions.toml      # Version matrix
-│   └── compatibility.toml  # Compatibility matrix
-├── docs/                   # Distribution documentation
-│   ├── release-process.md
-│   └── packaging-guide.md
-├── LICENSE
-└── README.md
-
-

Technology: Nushell, Bash, CI/CD -Primary Language: Nushell, YAML -Release Frequency: As needed -Ownership: Release engineering team -Dependencies: All repositories (orchestrates releases)

-

Responsibilities:

-
    -
  • Build packages from all repositories
  • -
  • Coordinate multi-repo releases
  • -
  • Publish to package registries
  • -
  • Manage version compatibility
  • -
  • Generate release notes
  • -
  • Host package registry
  • -
-
-

Dependency and Integration Model

-

Package-Based Dependencies (Not Submodules)

-
┌─────────────────────────────────────────────────────────────┐
-│                  provisioning-distribution                   │
-│              (Release orchestration & registry)              │
-└──────────────────────────┬──────────────────────────────────┘
-                           │ publishes packages
-                           ↓
-                    ┌──────────────┐
-                    │   Registry   │
-                    └──────┬───────┘
-                           │
-        ┌──────────────────┼──────────────────┐
-        ↓                  ↓                  ↓
-┌───────────────┐  ┌──────────────┐  ┌──────────────┐
-│  provisioning │  │ provisioning │  │ provisioning │
-│     -core     │  │  -platform   │  │  -extensions │
-└───────┬───────┘  └──────┬───────┘  └──────┬───────┘
-        │                 │                  │
-        │                 │ depends on       │ extends
-        │                 └─────────┐        │
-        │                           ↓        │
-        └───────────────────────────────────→┘
-                    runtime integration
-
-

Integration Mechanisms

-

1. Core ↔ Platform Integration

-

Method: Loose coupling via CLI + REST API

-
# Platform calls Core CLI (subprocess)
-def create-server [name: string] {
-    # Orchestrator executes Core CLI
-    ^provisioning server create $name --infra production
-}
-
-# Core calls Platform API (HTTP)
-def submit-workflow [workflow: record] {
-    http post http://localhost:9090/workflows/submit $workflow
-}
-
-

Version Compatibility:

-
# platform/Cargo.toml
-[package.metadata.provisioning]
-core-version = "^3.0"  # Compatible with core 3.x
-
-

2. Core ↔ Extensions Integration

-

Method: Plugin/module system

-
# Extension manifest
-# extensions/mongodb/manifest.toml
-[extension]
-name = "mongodb"
-version = "1.0.0"
-type = "taskserv"
-core-version = "^3.0"
-
-[dependencies]
-provisioning-core = "^3.0"
-
-# Extension installation
-# Core downloads and validates extension
-provisioning extension install mongodb
-# → Downloads from registry
-# → Validates compatibility
-# → Installs to ~/.provisioning/extensions/mongodb
-
-

3. Workspace Templates

-

Method: Git templates or package templates

-
# Option 1: GitHub template repository
-gh repo create my-infra --template provisioning-workspace
-cd my-infra
-provisioning workspace init
-
-# Option 2: Template package
-provisioning workspace create my-infra --template kubernetes
-# → Downloads template package
-# → Scaffolds workspace
-# → Initializes configuration
-
-
-

Version Management Strategy

-

Semantic Versioning Per Repository

-

Each repository maintains independent semantic versioning:

-
provisioning-core:       3.2.1
-provisioning-platform:   2.5.3
-provisioning-extensions: (per-extension versioning)
-provisioning-workspace:  1.4.0
-
-

Compatibility Matrix

-

provisioning-distribution/version-management/versions.toml:

-
# Version compatibility matrix
-[compatibility]
-
-# Core versions and compatible platform versions
-[compatibility.core]
-"3.2.1" = { platform = "^2.5", extensions = "^1.0", workspace = "^1.0" }
-"3.2.0" = { platform = "^2.4", extensions = "^1.0", workspace = "^1.0" }
-"3.1.0" = { platform = "^2.3", extensions = "^0.9", workspace = "^1.0" }
-
-# Platform versions and compatible core versions
-[compatibility.platform]
-"2.5.3" = { core = "^3.2", min-core = "3.2.0" }
-"2.5.0" = { core = "^3.1", min-core = "3.1.0" }
-
-# Release bundles (tested combinations)
-[bundles]
-
-[bundles.stable-3.2]
-name = "Stable 3.2 Bundle"
-release-date = "2025-10-15"
-core = "3.2.1"
-platform = "2.5.3"
-extensions = ["mongodb@1.2.0", "redis@1.1.0", "azure@2.0.0"]
-workspace = "1.4.0"
-
-[bundles.lts-3.1]
-name = "LTS 3.1 Bundle"
-release-date = "2025-09-01"
-lts-until = "2026-09-01"
-core = "3.1.5"
-platform = "2.4.8"
-workspace = "1.3.0"
-
-

Release Coordination

-

Coordinated releases for major versions:

-
# Major release: All repos release together
-provisioning-core:     3.0.0
-provisioning-platform: 2.0.0
-provisioning-workspace: 1.0.0
-
-# Minor/patch releases: Independent
-provisioning-core:     3.1.0 (adds features, platform stays 2.0.x)
-provisioning-platform: 2.1.0 (improves orchestrator, core stays 3.1.x)
-
-
-

Development Workflow

-

Working on Single Repository

-
# Developer working on core only
-git clone https://github.com/yourorg/provisioning-core
-cd provisioning-core
-
-# Install dependencies
-just install-deps
-
-# Development
-just dev-check
-just test
-
-# Build package
-just build
-
-# Test installation locally
-just install-dev
-
-

Working Across Repositories

-
# Scenario: Adding new feature requiring core + platform changes
-
-# 1. Clone both repositories
-git clone https://github.com/yourorg/provisioning-core
-git clone https://github.com/yourorg/provisioning-platform
-
-# 2. Create feature branches
-cd provisioning-core
-git checkout -b feat/batch-workflow-v2
-
-cd ../provisioning-platform
-git checkout -b feat/batch-workflow-v2
-
-# 3. Develop with local linking
-cd provisioning-core
-just install-dev  # Installs to /usr/local/bin/provisioning
-
-cd ../provisioning-platform
-# Platform uses system provisioning CLI (local dev version)
-cargo run
-
-# 4. Test integration
-cd ../provisioning-core
-just test-integration
-
-cd ../provisioning-platform
-cargo test
-
-# 5. Create PRs in both repositories
-# PR #123 in provisioning-core
-# PR #456 in provisioning-platform (references core PR)
-
-# 6. Coordinate merge
-# Merge core PR first, cut release 3.3.0
-# Update platform dependency to core 3.3.0
-# Merge platform PR, cut release 2.6.0
-
-

Testing Cross-Repo Integration

-
# Integration tests in provisioning-distribution
-cd provisioning-distribution
-
-# Test specific version combination
-just test-integration \
-    --core 3.3.0 \
-    --platform 2.6.0
-
-# Test bundle
-just test-bundle stable-3.3
-
-
-

Distribution Strategy

-

Individual Repository Releases

-

Each repository releases independently:

-
# Core release
-cd provisioning-core
-git tag v3.2.1
-git push --tags
-# → GitHub Actions builds package
-# → Publishes to package registry
-
-# Platform release
-cd provisioning-platform
-git tag v2.5.3
-git push --tags
-# → GitHub Actions builds binaries
-# → Publishes to package registry
-
-

Bundle Releases (Coordinated)

-

Distribution repository creates tested bundles:

-
cd provisioning-distribution
-
-# Create bundle
-just create-bundle stable-3.2 \
-    --core 3.2.1 \
-    --platform 2.5.3 \
-    --workspace 1.4.0
-
-# Test bundle
-just test-bundle stable-3.2
-
-# Publish bundle
-just publish-bundle stable-3.2
-# → Creates meta-package with all components
-# → Publishes bundle to registry
-# → Updates documentation
-
-

User Installation Options

- -
# Install stable bundle (easiest)
-curl -fsSL https://get.provisioning.io | sh
-
-# Installs:
-# - provisioning-core 3.2.1
-# - provisioning-platform 2.5.3
-# - provisioning-workspace 1.4.0
-
-

Option 2: Individual Component Installation

-
# Install only core (minimal)
-curl -fsSL https://get.provisioning.io/core | sh
-
-# Add platform later
-provisioning install platform
-
-# Add extensions
-provisioning extension install mongodb
-
-

Option 3: Custom Combination

-
# Install specific versions
-provisioning install core@3.1.0
-provisioning install platform@2.4.0
-
-
-

Repository Ownership and Contribution Model

-

Core Team Ownership

-
- - - - - -
RepositoryPrimary OwnerContribution Model
provisioning-coreCore TeamStrict review, stable API
provisioning-platformPlatform TeamFast iteration, performance focus
provisioning-extensionsCommunity + CoreOpen contributions, moderated
provisioning-workspaceDocs TeamTemplate contributions welcome
provisioning-distributionRelease EngineeringCore team only
-
-

Contribution Workflow

-

For Core:

-
    -
  1. Create issue in provisioning-core
  2. -
  3. Discuss design
  4. -
  5. Submit PR with tests
  6. -
  7. Strict code review
  8. -
  9. Merge to main
  10. -
  11. Release when ready
  12. -
-

For Extensions:

-
    -
  1. Create extension in provisioning-extensions
  2. -
  3. Follow extension guidelines
  4. -
  5. Submit PR
  6. -
  7. Community review
  8. -
  9. Merge and publish to registry
  10. -
  11. Independent versioning
  12. -
-

For Platform:

-
    -
  1. Create issue in provisioning-platform
  2. -
  3. Implement with benchmarks
  4. -
  5. Submit PR
  6. -
  7. Performance review
  8. -
  9. Merge and release
  10. -
-
-

CI/CD Strategy

-

Per-Repository CI/CD

-

Core CI (provisioning-core/.github/workflows/ci.yml):

-
name: Core CI
-
-on: [push, pull_request]
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Install Nushell
-        run: cargo install nu
-      - name: Run tests
-        run: just test
-      - name: Validate Nickel schemas
-        run: just validate-nickel
-
-  package:
-    runs-on: ubuntu-latest
-    if: startsWith(github.ref, 'refs/tags/v')
-    steps:
-      - uses: actions/checkout@v3
-      - name: Build package
-        run: just build
-      - name: Publish to registry
-        run: just publish
-        env:
-          REGISTRY_TOKEN: ${{ secrets.REGISTRY_TOKEN }}
-
-

Platform CI (provisioning-platform/.github/workflows/ci.yml):

-
name: Platform CI
-
-on: [push, pull_request]
-
-jobs:
-  test:
-    strategy:
-      matrix:
-        os: [ubuntu-latest, macos-latest]
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v3
-      - name: Build
-        run: cargo build --release
-      - name: Test
-        run: cargo test --workspace
-      - name: Benchmark
-        run: cargo bench
-
-  cross-compile:
-    runs-on: ubuntu-latest
-    if: startsWith(github.ref, 'refs/tags/v')
-    steps:
-      - uses: actions/checkout@v3
-      - name: Build for Linux x86_64
-        run: cargo build --release --target x86_64-unknown-linux-gnu
-      - name: Build for Linux arm64
-        run: cargo build --release --target aarch64-unknown-linux-gnu
-      - name: Publish binaries
-        run: just publish-binaries
-
-

Integration Testing (Distribution Repo)

-

Distribution CI (provisioning-distribution/.github/workflows/integration.yml):

-
name: Integration Tests
-
-on:
-  schedule:
-    - cron: '0 0 * * *'  # Daily
-  workflow_dispatch:
-
-jobs:
-  test-bundle:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Install bundle
-        run: |
-          nu release-automation/install-bundle.nu stable-3.2
-
-      - name: Run integration tests
-        run: |
-          nu tests/integration/test-all.nu
-
-      - name: Test upgrade path
-        run: |
-          nu tests/integration/test-upgrade.nu 3.1.0 3.2.1
-
-
-

File and Directory Structure Comparison

-

Monorepo Structure

-
provisioning/                          (One repo, ~500 MB)
-├── core/                             (Nushell)
-├── platform/                         (Rust)
-├── extensions/                       (Community)
-├── workspace/                        (Templates)
-└── distribution/                     (Build)
-
-

Multi-Repo Structure

-
provisioning-core/                     (Repo 1, ~50 MB)
-├── nulib/
-├── cli/
-├── schemas/
-└── tools/
-
-provisioning-platform/                 (Repo 2, ~150 MB with target/)
-├── orchestrator/
-├── control-center/
-├── mcp-server/
-└── Cargo.toml
-
-provisioning-extensions/               (Repo 3, ~100 MB)
-├── registry/
-├── providers/
-├── taskservs/
-└── clusters/
-
-provisioning-workspace/                (Repo 4, ~20 MB)
-├── templates/
-├── examples/
-└── blueprints/
-
-provisioning-distribution/             (Repo 5, ~30 MB)
-├── release-automation/
-├── installers/
-├── packaging/
-└── registry/
-
-
-

Decision Matrix

-
- - - - - - - - - - - - -
CriterionMonorepoMulti-Repo
Development ComplexitySimpleModerate
Clone SizeLarge (~500 MB)Small (50-150 MB each)
Cross-Component ChangesEasy (atomic)Moderate (coordinated)
Independent ReleasesDifficultEasy
Language-Specific ToolingMixedClean
Community ContributionsHarder (big repo)Easier (focused repos)
Version ManagementSimple (one version)Complex (matrix)
CI/CD ComplexitySimple (one pipeline)Moderate (multiple)
Ownership ClarityUnclearClear
Extension EcosystemMonolithicModular
Build TimeLong (build all)Short (build one)
Testing IsolationDifficultEasy
-
-
- -

Why Multi-Repo Wins for This Project

-
    -
  1. -

    Clear Separation of Concerns

    -
      -
    • Nushell core vs Rust platform are different domains
    • -
    • Different teams can own different repos
    • -
    • Different release cadences make sense
    • -
    -
  2. -
  3. -

    Language-Specific Tooling

    -
      -
    • provisioning-core: Nushell-focused, simple testing
    • -
    • provisioning-platform: Rust workspace, Cargo tooling
    • -
    • No mixed tooling confusion
    • -
    -
  4. -
  5. -

    Community Contributions

    -
      -
    • Extensions repo is easier to contribute to
    • -
    • Don’t need to clone entire monorepo
    • -
    • Clearer contribution guidelines per repo
    • -
    -
  6. -
  7. -

    Independent Versioning

    -
      -
    • Core can stay stable (3.x for months)
    • -
    • Platform can iterate fast (2.x weekly)
    • -
    • Extensions have own lifecycles
    • -
    -
  8. -
  9. -

    Build Performance

    -
      -
    • Only build what changed
    • -
    • Faster CI/CD per repo
    • -
    • Parallel builds across repos
    • -
    -
  10. -
  11. -

    Extension Ecosystem

    -
      -
    • Extensions repo becomes marketplace
    • -
    • Third-party extensions can live separately
    • -
    • Registry becomes discovery mechanism
    • -
    -
  12. -
-

Implementation Strategy

-

Phase 1: Split Repositories (Week 1-2)

-
    -
  1. Create 5 new repositories
  2. -
  3. Extract code from monorepo
  4. -
  5. Set up CI/CD for each
  6. -
  7. Create initial packages
  8. -
-

Phase 2: Package Integration (Week 3)

-
    -
  1. Implement package registry
  2. -
  3. Create installers
  4. -
  5. Set up version compatibility matrix
  6. -
  7. Test cross-repo integration
  8. -
-

Phase 3: Distribution System (Week 4)

-
    -
  1. Implement bundle system
  2. -
  3. Create release automation
  4. -
  5. Set up package hosting
  6. -
  7. Document release process
  8. -
-

Phase 4: Migration (Week 5)

-
    -
  1. Migrate existing users
  2. -
  3. Update documentation
  4. -
  5. Archive monorepo
  6. -
  7. Announce new structure
  8. -
-
-

Conclusion

-

Recommendation: Multi-Repository Architecture with Package-Based Integration

-

The multi-repo approach provides:

-
    -
  • ✅ Clear separation between Nushell core and Rust platform
  • -
  • ✅ Independent release cycles for different components
  • -
  • ✅ Better community contribution experience
  • -
  • ✅ Language-specific tooling and workflows
  • -
  • ✅ Modular extension ecosystem
  • -
  • ✅ Faster builds and CI/CD
  • -
  • ✅ Clear ownership boundaries
  • -
-

Avoid: Submodules (complexity nightmare)

-

Use: Package-based dependencies with version compatibility matrix

-

This architecture scales better for your project’s growth, supports a community extension ecosystem, and provides professional-grade separation of -concerns while maintaining integration through a well-designed package system.

-
-

Next Steps

-
    -
  1. Approve multi-repo strategy
  2. -
  3. Create repository split plan
  4. -
  5. Set up GitHub organizations/teams
  6. -
  7. Implement package registry
  8. -
  9. Begin repository extraction
  10. -
-

Would you like me to create a detailed repository split implementation plan next?

-

Database and Configuration Architecture

-

Date: 2025-10-07 -Status: ACTIVE DOCUMENTATION

-
-

Control-Center Database (DBS)

-

Database Type: SurrealDB (In-Memory Backend)

-

Control-Center uses SurrealDB with kv-mem backend, an embedded in-memory database - no separate database server required.

-

Database Configuration

-
[database]
-url = "memory"  # In-memory backend
-namespace = "control_center"
-database = "main"
-
-

Storage: In-memory (data persists during process lifetime)

-

Production Alternative: Switch to remote WebSocket connection for persistent storage:

-
[database]
-url = "ws://localhost:8000"
-namespace = "control_center"
-database = "main"
-username = "root"
-password = "secret"
-
-

Why SurrealDB kv-mem

-
- - - - - - -
FeatureSurrealDB kv-memRocksDBPostgreSQL
DeploymentEmbedded (no server)EmbeddedServer only
Build DepsNonelibclang, bzip2Many
DockerSimpleComplexExternal service
PerformanceVery fast (memory)Very fast (disk)Network latency
Use CaseDev/test, graphsProduction K/VRelational data
GraphQLBuilt-inNoneExternal
-
-

Control-Center choice: SurrealDB kv-mem for zero-dependency embedded storage, perfect for:

-
    -
  • Policy engine state
  • -
  • Session management
  • -
  • Configuration cache
  • -
  • Audit logs
  • -
  • User credentials
  • -
  • Graph-based policy relationships
  • -
-

Additional Database Support

-

Control-Center also supports (via Cargo.toml dependencies):

-
    -
  1. -

    SurrealDB (WebSocket) - For production persistent storage

    -
    surrealdb = { version = "2.3", features = ["kv-mem", "protocol-ws", "protocol-http"] }
    -
    -
  2. -
  3. -

    SQLx - For SQL database backends (optional)

    -
    sqlx = { workspace = true }
    -
    -
  4. -
-

Default: SurrealDB kv-mem (embedded, no extra setup, no build dependencies)

-
-

Orchestrator Database

-

Storage Type: Filesystem (File-based Queue)

-

Orchestrator uses simple file-based storage by default:

-
[orchestrator.storage]
-type = "filesystem"  # Default
-backend_path = "{{orchestrator.paths.data_dir}}/queue.rkvs"
-
-

Resolved Path:

-
{{workspace.path}}/.orchestrator/data/queue.rkvs
-
-

Optional: SurrealDB Backend

-

For production deployments, switch to SurrealDB:

-
[orchestrator.storage]
-type = "surrealdb-server"  # or surrealdb-embedded
-
-[orchestrator.storage.surrealdb]
-url = "ws://localhost:8000"
-namespace = "orchestrator"
-database = "tasks"
-username = "root"
-password = "secret"
-
-
-

Configuration Loading Architecture

-

Hierarchical Configuration System

-

All services load configuration in this order (priority: low → high):

-
1. System Defaults       provisioning/config/config.defaults.toml
-2. Service Defaults      provisioning/platform/{service}/config.defaults.toml
-3. Workspace Config      workspace/{name}/config/provisioning.yaml
-4. User Config           ~/Library/Application Support/provisioning/user_config.yaml
-5. Environment Variables PROVISIONING_*, CONTROL_CENTER_*, ORCHESTRATOR_*
-6. Runtime Overrides     --config flag or API updates
-
-

Variable Interpolation

-

Configs support dynamic variable interpolation:

-
[paths]
-base = "/Users/Akasha/project-provisioning/provisioning"
-data_dir = "{{paths.base}}/data"  # Resolves to: /Users/.../data
-
-[database]
-url = "rocksdb://{{paths.data_dir}}/control-center.db"
-# Resolves to: rocksdb:///Users/.../data/control-center.db
-
-

Supported Variables:

-
    -
  • {{paths.*}} - Path variables from config
  • -
  • {{workspace.path}} - Current workspace path
  • -
  • {{env.HOME}} - Environment variables
  • -
  • {{now.date}} - Current date/time
  • -
  • {{git.branch}} - Git branch name
  • -
-

Service-Specific Config Files

-

Each platform service has its own config.defaults.toml:

-
- - - - -
ServiceConfig FilePurpose
Orchestratorprovisioning/platform/orchestrator/config.defaults.tomlWorkflow management, queue settings
Control-Centerprovisioning/platform/control-center/config.defaults.tomlWeb UI, auth, database
MCP Serverprovisioning/platform/mcp-server/config.defaults.tomlAI integration settings
KMSprovisioning/core/services/kms/config.defaults.tomlKey management
-
-

Central Configuration

-

Master config: provisioning/config/config.defaults.toml

-

Contains:

-
    -
  • Global paths
  • -
  • Provider configurations
  • -
  • Cache settings
  • -
  • Debug flags
  • -
  • Environment-specific overrides
  • -
-

Workspace-Aware Paths

-

All services use workspace-aware paths:

-

Orchestrator:

-
[orchestrator.paths]
-base = "{{workspace.path}}/.orchestrator"
-data_dir = "{{orchestrator.paths.base}}/data"
-logs_dir = "{{orchestrator.paths.base}}/logs"
-queue_dir = "{{orchestrator.paths.data_dir}}/queue"
-
-

Control-Center:

-
[paths]
-base = "{{workspace.path}}/.control-center"
-data_dir = "{{paths.base}}/data"
-logs_dir = "{{paths.base}}/logs"
-
-

Result (workspace: workspace-librecloud):

-
workspace-librecloud/
-├── .orchestrator/
-│   ├── data/
-│   │   └── queue.rkvs
-│   └── logs/
-└── .control-center/
-    ├── data/
-    │   └── control-center.db
-    └── logs/
-
-
-

Environment Variable Overrides

-

Any config value can be overridden via environment variables:

-

Control-Center

-
# Override server port
-export CONTROL_CENTER_SERVER_PORT=8081
-
-# Override database URL
-export CONTROL_CENTER_DATABASE_URL="rocksdb:///custom/path/db"
-
-# Override JWT secret
-export CONTROL_CENTER_JWT_ISSUER="my-issuer"
-
-

Orchestrator

-
# Override orchestrator port
-export ORCHESTRATOR_SERVER_PORT=8080
-
-# Override storage backend
-export ORCHESTRATOR_STORAGE_TYPE="surrealdb-server"
-export ORCHESTRATOR_STORAGE_SURREALDB_URL="ws://localhost:8000"
-
-# Override concurrency
-export ORCHESTRATOR_QUEUE_MAX_CONCURRENT_TASKS=10
-
-

Naming Convention

-
{SERVICE}_{SECTION}_{KEY} = value
-
-

Examples:

-
    -
  • CONTROL_CENTER_SERVER_PORT[server] port
  • -
  • ORCHESTRATOR_QUEUE_MAX_CONCURRENT_TASKS[queue] max_concurrent_tasks
  • -
  • PROVISIONING_DEBUG_ENABLED[debug] enabled
  • -
-
-

Docker vs Native Configuration

-

Docker Deployment

-

Container paths (resolved inside container):

-
[paths]
-base = "/app/provisioning"
-data_dir = "/data"  # Mounted volume
-logs_dir = "/var/log/orchestrator"  # Mounted volume
-
-

Docker Compose volumes:

-
services:
-  orchestrator:
-    volumes:
-      - orchestrator-data:/data
-      - orchestrator-logs:/var/log/orchestrator
-
-  control-center:
-    volumes:
-      - control-center-data:/data
-
-volumes:
-  orchestrator-data:
-  orchestrator-logs:
-  control-center-data:
-
-

Native Deployment

-

Host paths (macOS/Linux):

-
[paths]
-base = "/Users/Akasha/project-provisioning/provisioning"
-data_dir = "{{workspace.path}}/.orchestrator/data"
-logs_dir = "{{workspace.path}}/.orchestrator/logs"
-
-
-

Configuration Validation

-

Check current configuration:

-
# Show effective configuration
-provisioning env
-
-# Show all config and environment
-provisioning allenv
-
-# Validate configuration
-provisioning validate config
-
-# Show service-specific config
-PROVISIONING_DEBUG=true ./orchestrator --show-config
-
-
-

KMS Database

-

Cosmian KMS uses its own database (when deployed):

-
# KMS database location (Docker)
-/data/kms.db  # SQLite database inside KMS container
-
-# KMS database location (Native)
-{{workspace.path}}/.kms/data/kms.db
-
-

KMS also integrates with Control-Center’s KMS hybrid backend (local + remote):

-
[kms]
-mode = "hybrid"  # local, remote, or hybrid
-
-[kms.local]
-database_path = "{{paths.data_dir}}/kms.db"
-
-[kms.remote]
-server_url = "http://localhost:9998"  # Cosmian KMS server
-
-
-

Summary

-

Control-Center Database

-
    -
  • Type: RocksDB (embedded)
  • -
  • Location: {{workspace.path}}/.control-center/data/control-center.db
  • -
  • No server required: Embedded in control-center process
  • -
-

Orchestrator Database

-
    -
  • Type: Filesystem (default) or SurrealDB (production)
  • -
  • Location: {{workspace.path}}/.orchestrator/data/queue.rkvs
  • -
  • Optional server: SurrealDB for production
  • -
-

Configuration Loading

-
    -
  1. System defaults (provisioning/config/)
  2. -
  3. Service defaults (platform/{service}/)
  4. -
  5. Workspace config
  6. -
  7. User config
  8. -
  9. Environment variables
  10. -
  11. Runtime overrides
  12. -
-

Best Practices

-
    -
  • ✅ Use workspace-aware paths
  • -
  • ✅ Override via environment variables in Docker
  • -
  • ✅ Keep secrets in KMS, not config files
  • -
  • ✅ Use RocksDB for single-node deployments
  • -
  • ✅ Use SurrealDB for distributed/production deployments
  • -
-
-

Related Documentation:

- -

Prov-Ecosystem & Provctl Integration

-

Date: 2025-11-23 -Version: 1.0.0 -Status: ✅ Implementation Complete

-

Overview

-

This document describes the hybrid selective integration of prov-ecosystem and provctl with provisioning, providing access to four critical functionalities:

-
    -
  1. Runtime Abstraction - Unified Docker/Podman/OrbStack/Colima/nerdctl
  2. -
  3. SSH Advanced - Pooling, circuit breaker, retry strategies, distributed operations
  4. -
  5. Backup System - Multi-backend (Restic, Borg, Tar, Rsync) with retention policies
  6. -
  7. GitOps Events - Event-driven deployments from Git
  8. -
-
-

Architecture

-

Three-Layer Integration

-
┌─────────────────────────────────────────────┐
-│  Provisioning CLI (provisioning/core/cli/)  │
-│  ✅ 80+ command shortcuts                   │
-│  ✅ Domain-driven architecture              │
-│  ✅ Modular CLI commands                    │
-└─────────────────────────────────────────────┘
-                    ↓
-┌─────────────────────────────────────────────┐
-│  Nushell Integration Layer                  │
-│  (provisioning/core/nulib/integrations/)    │
-│  ✅ 5 modules with full type safety         │
-│  ✅ Follows 17 Nushell guidelines           │
-│  ✅ Early return, atomic operations         │
-└─────────────────────────────────────────────┘
-                    ↓
-┌─────────────────────────────────────────────┐
-│  Rust Bridge Crate                          │
-│  (provisioning/platform/integrations/      │
-│   provisioning-bridge/)                    │
-│  ✅ Zero unsafe code                        │
-│  ✅ Idiomatic error handling (Result<T>)    │
-│  ✅ 5 modules (runtime, ssh, backup, etc)   │
-│  ✅ Comprehensive tests                     │
-└─────────────────────────────────────────────┘
-                    ↓
-┌─────────────────────────────────────────────┐
-│  Prov-Ecosystem & Provctl Crates            │
-│  (../../prov-ecosystem/ & ../../provctl/)   │
-│  ✅ runtime: Container abstraction          │
-│  ✅ init-servs: Service management          │
-│  ✅ backup: Multi-backend backup            │
-│  ✅ gitops: Event-driven automation         │
-│  ✅ provctl-machines: SSH advanced          │
-└─────────────────────────────────────────────┘
-
-
-

Components

-

1. Runtime Abstraction

-

Location: provisioning/platform/integrations/provisioning-bridge/src/runtime.rs -Nushell: provisioning/core/nulib/integrations/runtime.nu -Nickel Schema: provisioning/schemas/integrations/runtime.ncl

-

Purpose: Unified interface for Docker, Podman, OrbStack, Colima, nerdctl

-

Key Types:

-
pub enum ContainerRuntime {
-    Docker,
-    Podman,
-    OrbStack,
-    Colima,
-    Nerdctl,
-}
-
-pub struct RuntimeDetector { ... }
-pub struct ComposeAdapter { ... }
-

Nushell Functions:

-
runtime-detect        # Auto-detect available runtime
-runtime-exec          # Execute command in detected runtime
-runtime-compose       # Adapt docker-compose for runtime
-runtime-info          # Get runtime details
-runtime-list          # List all available runtimes
-
-

Benefits:

-
    -
  • ✅ Eliminates Docker hardcoding
  • -
  • ✅ Platform-aware detection
  • -
  • ✅ Automatic runtime selection
  • -
  • ✅ Docker Compose adaptation
  • -
-
-

2. SSH Advanced

-

Location: provisioning/platform/integrations/provisioning-bridge/src/ssh.rs -Nushell: provisioning/core/nulib/integrations/ssh_advanced.nu -Nickel Schema: provisioning/schemas/integrations/ssh_advanced.ncl

-

Purpose: Advanced SSH operations with pooling, circuit breaker, retry strategies

-

Key Types:

-
pub struct SshConfig { ... }
-pub struct SshPool { ... }
-pub enum DeploymentStrategy {
-    Rolling,
-    BlueGreen,
-    Canary,
-}
-

Nushell Functions:

-
ssh-pool-connect          # Create SSH pool connection
-ssh-pool-exec             # Execute on SSH pool
-ssh-pool-status           # Check pool status
-ssh-deployment-strategies # List strategies
-ssh-retry-config          # Configure retry strategy
-ssh-circuit-breaker-status # Check circuit breaker
-
-

Features:

-
    -
  • ✅ Connection pooling (90% faster)
  • -
  • ✅ Circuit breaker for fault isolation
  • -
  • ✅ Three deployment strategies (rolling, blue-green, canary)
  • -
  • ✅ Retry strategies (exponential, linear, fibonacci)
  • -
  • ✅ Health check integration
  • -
-
-

3. Backup System

-

Location: provisioning/platform/integrations/provisioning-bridge/src/backup.rs -Nushell: provisioning/core/nulib/integrations/backup.nu -Nickel Schema: provisioning/schemas/integrations/backup.ncl

-

Purpose: Multi-backend backup with retention policies

-

Key Types:

-
pub enum BackupBackend {
-    Restic,
-    Borg,
-    Tar,
-    Rsync,
-    Cpio,
-}
-
-pub struct BackupJob { ... }
-pub struct RetentionPolicy { ... }
-pub struct BackupManager { ... }
-

Nushell Functions:

-
backup-create            # Create backup job
-backup-restore           # Restore from snapshot
-backup-list              # List snapshots
-backup-schedule          # Schedule regular backups
-backup-retention         # Configure retention policy
-backup-status            # Check backup status
-
-

Features:

-
    -
  • ✅ Multiple backends (Restic, Borg, Tar, Rsync, CPIO)
  • -
  • ✅ Flexible repositories (local, S3, SFTP, REST, B2)
  • -
  • ✅ Retention policies (daily/weekly/monthly/yearly)
  • -
  • ✅ Pre/post backup hooks
  • -
  • ✅ Automatic scheduling
  • -
  • ✅ Compression support
  • -
-
-

4. GitOps Events

-

Location: provisioning/platform/integrations/provisioning-bridge/src/gitops.rs -Nushell: provisioning/core/nulib/integrations/gitops.nu -Nickel Schema: provisioning/schemas/integrations/gitops.ncl

-

Purpose: Event-driven deployments from Git

-

Key Types:

-
pub enum GitProvider {
-    GitHub,
-    GitLab,
-    Gitea,
-}
-
-pub struct GitOpsRule { ... }
-pub struct GitOpsOrchestrator { ... }
-

Nushell Functions:

-
gitops-rules             # Load rules from config
-gitops-watch             # Watch for Git events
-gitops-trigger           # Manually trigger deployment
-gitops-event-types       # List supported events
-gitops-rule-config       # Configure GitOps rule
-gitops-deployments       # List active deployments
-gitops-status            # Get GitOps status
-
-

Features:

-
    -
  • ✅ Event-driven automation (push, PR, webhook, scheduled)
  • -
  • ✅ Multi-provider support (GitHub, GitLab, Gitea)
  • -
  • ✅ Three deployment strategies
  • -
  • ✅ Manual approval workflow
  • -
  • ✅ Health check triggers
  • -
  • ✅ Audit logging
  • -
-
-

5. Service Management

-

Location: provisioning/platform/integrations/provisioning-bridge/src/service.rs -Nushell: provisioning/core/nulib/integrations/service.nu -Nickel Schema: provisioning/schemas/integrations/service.ncl

-

Purpose: Cross-platform service management (systemd, launchd, runit, OpenRC)

-

Nushell Functions:

-
service-install          # Install service
-service-start            # Start service
-service-stop             # Stop service
-service-restart          # Restart service
-service-status           # Get service status
-service-list             # List all services
-service-restart-policy   # Configure restart policy
-service-detect-init      # Detect init system
-
-

Features:

-
    -
  • ✅ Multi-platform support (systemd, launchd, runit, OpenRC)
  • -
  • ✅ Service file generation
  • -
  • ✅ Restart policies (always, on-failure, no)
  • -
  • ✅ Health checks
  • -
  • ✅ Logging configuration
  • -
  • ✅ Metrics collection
  • -
-
-

Code Quality Standards

-

All implementations follow project standards:

-

Rust (provisioning-bridge)

-
    -
  • Zero unsafe code - #![forbid(unsafe_code)]
  • -
  • Idiomatic error handling - Result<T, BridgeError> pattern
  • -
  • Comprehensive docs - Full rustdoc with examples
  • -
  • Tests - Unit and integration tests for each module
  • -
  • No unwrap() - Only in tests with comments
  • -
  • No clippy warnings - All warnings suppressed
  • -
-

Nushell

-
    -
  • 17 Nushell rules - See Nushell Development Guide
  • -
  • Explicit types - Colon notation: [param: type]: return_type
  • -
  • Early return - Validate inputs immediately
  • -
  • Single purpose - Each function does one thing
  • -
  • Atomic operations - Succeed or fail completely
  • -
  • Pure functions - No hidden side effects
  • -
-

Nickel

-
    -
  • Schema-first - All configs have schemas
  • -
  • Explicit types - Full type annotations
  • -
  • Direct imports - No re-exports
  • -
  • Immutability-first - Mutable only when needed
  • -
  • Lazy evaluation - Efficient computation
  • -
  • Security defaults - TLS enabled, secrets referenced
  • -
-
-

File Structure

-
provisioning/
-├── platform/integrations/
-│   └── provisioning-bridge/          # Rust bridge crate
-│       ├── Cargo.toml
-│       └── src/
-│           ├── lib.rs
-│           ├── error.rs              # Error types
-│           ├── runtime.rs            # Runtime abstraction
-│           ├── ssh.rs                # SSH advanced
-│           ├── backup.rs             # Backup system
-│           ├── gitops.rs             # GitOps events
-│           └── service.rs            # Service management
-│
-├── core/nulib/lib_provisioning/
-│   └── integrations/                 # Nushell modules
-│       ├── mod.nu                    # Module root
-│       ├── runtime.nu                # Runtime functions
-│       ├── ssh_advanced.nu           # SSH functions
-│       ├── backup.nu                 # Backup functions
-│       ├── gitops.nu                 # GitOps functions
-│       └── service.nu                # Service functions
-│
-└── schemas/integrations/             # Nickel schemas
-    ├── main.ncl                      # Main integration schema
-    ├── runtime.ncl                   # Runtime schema
-    ├── ssh_advanced.ncl              # SSH schema
-    ├── backup.ncl                    # Backup schema
-    ├── gitops.ncl                    # GitOps schema
-    └── service.ncl                   # Service schema
-
-
-

Usage

-

Runtime Abstraction

-
# Auto-detect available runtime
-let runtime = (runtime-detect)
-
-# Execute command in detected runtime
-runtime-exec "docker ps" --check
-
-# Adapt compose file
-let compose_cmd = (runtime-compose "./docker-compose.yml")
-
-

SSH Advanced

-
# Connect to SSH pool
-let pool = (ssh-pool-connect "server01.example.com" "root" --port 22)
-
-# Execute distributed command
-let results = (ssh-pool-exec $hosts "systemctl status provisioning" --strategy parallel)
-
-# Check circuit breaker
-ssh-circuit-breaker-status
-
-

Backup System

-
# Schedule regular backups
-backup-schedule "daily-app-backup" "0 2 * * *" \
-  --paths ["/opt/app" "/var/lib/app"] \
-  --backend "restic"
-
-# Create one-time backup
-backup-create "full-backup" ["/home" "/opt"] \
-  --backend "restic" \
-  --repository "/backups"
-
-# Restore from snapshot
-backup-restore "snapshot-001" --restore_path "."
-
-

GitOps Events

-
# Load GitOps rules
-let rules = (gitops-rules "./gitops-rules.yaml")
-
-# Watch for Git events
-gitops-watch --provider "github" --webhook-port 8080
-
-# Manually trigger deployment
-gitops-trigger "deploy-app" --environment "prod"
-
-

Service Management

-
# Install service
-service-install "my-app" "/usr/local/bin/my-app" \
-  --user "appuser" \
-  --working-dir "/opt/myapp"
-
-# Start service
-service-start "my-app"
-
-# Check status
-service-status "my-app"
-
-# Set restart policy
-service-restart-policy "my-app" --policy "on-failure" --delay-secs 5
-
-
-

Integration Points

-

CLI Commands

-

Existing provisioning CLI will gain new command tree:

-
provisioning runtime detect|exec|compose|info|list
-provisioning ssh pool connect|exec|status|strategies
-provisioning backup create|restore|list|schedule|retention|status
-provisioning gitops rules|watch|trigger|events|config|deployments|status
-provisioning service install|start|stop|restart|status|list|policy|detect-init
-
-

Configuration

-

All integrations use Nickel schemas from provisioning/schemas/integrations/:

-
let { IntegrationConfig } = import "provisioning/integrations.ncl" in
-{
-  runtime = { ... },
-  ssh = { ... },
-  backup = { ... },
-  gitops = { ... },
-  service = { ... },
-}
-
-

Plugins

-

Nushell plugins can be created for performance-critical operations:

-
provisioning plugin list
-# [installed]
-# nu_plugin_runtime
-# nu_plugin_ssh_advanced
-# nu_plugin_backup
-# nu_plugin_gitops
-
-
-

Testing

-

Rust Tests

-
cd provisioning/platform/integrations/provisioning-bridge
-cargo test --all
-cargo test -p provisioning-bridge --lib
-cargo test -p provisioning-bridge --doc
-
-

Nushell Tests

-
nu provisioning/core/nulib/integrations/runtime.nu
-nu provisioning/core/nulib/integrations/ssh_advanced.nu
-
-
-

Performance

-
- - - - - -
OperationPerformance
Runtime detection~50 ms (cached: ~1 ms)
SSH pool init~100 ms per connection
SSH command exec90% faster with pooling
Backup initiation<100 ms
GitOps rule load<10 ms
-
-
-

Migration Path

-

If you want to fully migrate from provisioning to provctl + prov-ecosystem:

-
    -
  1. Phase 1: Use integrations for new features (runtime, backup, gitops)
  2. -
  3. Phase 2: Migrate SSH operations to provctl-machines
  4. -
  5. Phase 3: Adopt provctl CLI for machine orchestration
  6. -
  7. Phase 4: Use prov-ecosystem crates directly where beneficial
  8. -
-

Currently we implement Phase 1 with selective integration.

-
-

Next Steps

-
    -
  1. Implement: Integrate bridge into provisioning CLI
  2. -
  3. Document: Add to docs/user/ for end users
  4. -
  5. Examples: Create example configurations
  6. -
  7. Tests: Integration tests with real providers
  8. -
  9. Plugins: Nushell plugins for performance
  10. -
-
-

References

-
    -
  • Rust Bridge: provisioning/platform/integrations/provisioning-bridge/
  • -
  • Nushell Integration: provisioning/core/nulib/integrations/
  • -
  • Nickel Schemas: provisioning/schemas/integrations/
  • -
  • Prov-Ecosystem: /Users/Akasha/Development/prov-ecosystem/
  • -
  • Provctl: /Users/Akasha/Development/provctl/
  • -
  • Rust Guidelines: See Rust Development
  • -
  • Nushell Guidelines: See Nushell Development
  • -
  • Nickel Guidelines: See Nickel Module System
  • -
-

Nickel Package and Module Loader System

-

This document describes the package-based architecture implemented for the provisioning system, replacing hardcoded extension paths with a -flexible module discovery and loading system using Nickel for type-safe configuration.

-

Architecture Overview

-

The system consists of two main components:

-
    -
  1. Core Nickel Package: Distributable core provisioning schemas with type safety
  2. -
  3. Module Loader System: Dynamic discovery and loading of extensions
  4. -
-

Benefits

-
    -
  • Type-Safe Configuration: Nickel ensures configuration validity at evaluation time
  • -
  • Clean Separation: Core package is self-contained and distributable
  • -
  • Plug-and-Play Extensions: Taskservs, providers, and clusters can be loaded dynamically
  • -
  • Version Management: Core package and extensions can be versioned independently
  • -
  • Developer Friendly: Easy workspace setup and module management with lazy evaluation
  • -
-

Components

-

1. Core Nickel Package (/provisioning/schemas/)

-

Contains fundamental schemas for provisioning:

-
    -
  • main.ncl - Primary provisioning configuration
  • -
  • server.ncl - Server definitions and schemas
  • -
  • defaults.ncl - Default configurations
  • -
  • lib.ncl - Common library schemas
  • -
  • dependencies.ncl - Dependency management schemas
  • -
-

Key Features:

-
    -
  • No hardcoded extension paths
  • -
  • Self-contained and distributable
  • -
  • Type-safe package-based imports
  • -
  • Lazy evaluation of expensive computations
  • -
-

2. Module Discovery System

-

Discovery Commands

-
# Discover available modules
-module-loader discover taskservs              # List all taskservs
-module-loader discover providers --format yaml # List providers as YAML
-module-loader discover clusters redis          # Search for redis clusters
-
-

Supported Module Types

-
    -
  • Taskservs: Infrastructure services (kubernetes, redis, postgres, etc.)
  • -
  • Providers: Cloud providers (upcloud, aws, local)
  • -
  • Clusters: Complete configurations (buildkit, web, oci-reg)
  • -
-

3. Module Loading System

-

Loading Commands

-
# Load modules into workspace
-module-loader load taskservs . [kubernetes, cilium, containerd]
-module-loader load providers . [upcloud]
-module-loader load clusters . [buildkit]
-
-# Initialize workspace with modules
-module-loader init workspace/infra/production \
-    --taskservs [kubernetes, cilium] \
-    --providers [upcloud]
-
-

Generated Files

-
    -
  • taskservs.ncl - Auto-generated taskserv imports
  • -
  • providers.ncl - Auto-generated provider imports
  • -
  • clusters.ncl - Auto-generated cluster imports
  • -
  • .manifest/*.yaml - Module loading manifests
  • -
-

Workspace Structure

-

New Workspace Layout

-
workspace/infra/my-project/
-├── kcl.mod                    # Package dependencies
-├── servers.ncl                  # Main server configuration
-├── taskservs.ncl               # Auto-generated taskserv imports
-├── providers.ncl               # Auto-generated provider imports
-├── clusters.ncl                # Auto-generated cluster imports
-├── .taskservs/               # Loaded taskserv modules
-│   ├── kubernetes/
-│   ├── cilium/
-│   └── containerd/
-├── .providers/               # Loaded provider modules
-│   └── upcloud/
-├── .clusters/                # Loaded cluster modules
-│   └── buildkit/
-├── .manifest/                # Module manifests
-│   ├── taskservs.yaml
-│   ├── providers.yaml
-│   └── clusters.yaml
-├── data/                     # Runtime data
-├── tmp/                      # Temporary files
-├── resources/                # Resource definitions
-└── clusters/                 # Cluster configurations
-
-

Import Patterns

-

Before (Old System)

-
# Hardcoded relative paths
-import ../../../kcl/server as server
-import ../../../extensions/taskservs/kubernetes/kcl/kubernetes as k8s
-
-

After (New System)

-
# Package-based imports
-import provisioning.server as server
-
-# Auto-generated module imports (after loading)
-import .taskservs.nclubernetes.kubernetes as k8s
-
-

Package Distribution

-

Building Core Package

-
# Build distributable package
-./provisioning/tools/kcl-packager.nu build --version 1.0.0
-
-# Install locally
-./provisioning/tools/kcl-packager.nu install dist/provisioning-1.0.0.tar.gz
-
-# Create release
-./provisioning/tools/kcl-packager.nu build --format tar.gz --include-docs
-
-

Package Installation Methods

- -
[dependencies]
-provisioning = { path = "~/.kcl/packages/provisioning", version = "0.0.1" }
-
-

Method 2: Git Repository (For distributed teams)

-
[dependencies]
-provisioning = { git = "https://github.com/your-org/provisioning-kcl", version = "v0.0.1" }
-
-

Method 3: KCL Registry (When available)

-
[dependencies]
-provisioning = { version = "0.0.1" }
-
-

Developer Workflows

-

1. New Project Setup

-
# Create workspace from template
-cp -r provisioning/templates/workspaces/kubernetes ./my-k8s-cluster
-cd my-k8s-cluster
-
-# Initialize with modules
-workspace-init.nu . init
-
-# Load required modules
-module-loader load taskservs . [kubernetes, cilium, containerd]
-module-loader load providers . [upcloud]
-
-# Validate and deploy
-kcl run servers.ncl
-provisioning server create --infra . --check
-
-

2. Extension Development

-
# Create new taskserv
-mkdir -p extensions/taskservs/my-service/kcl
-cd extensions/taskservs/my-service/kcl
-
-# Initialize KCL module
-kcl mod init my-service
-echo 'provisioning = { path = "~/.kcl/packages/provisioning", version = "0.0.1" }' >> kcl.mod
-
-# Develop and test
-module-loader discover taskservs   # Should find your service
-
-

3. Workspace Migration

-
# Analyze existing workspace
-workspace-migrate.nu workspace/infra/old-project dry-run
-
-# Perform migration
-workspace-migrate.nu workspace/infra/old-project
-
-# Verify migration
-module-loader validate workspace/infra/old-project
-
-

4. Multi-Environment Management

-
# Development environment
-cd workspace/infra/dev
-module-loader load taskservs . [redis, postgres]
-module-loader load providers . [local]
-
-# Production environment
-cd workspace/infra/prod
-module-loader load taskservs . [redis, postgres, kubernetes, monitoring]
-module-loader load providers . [upcloud, aws]  # Multi-cloud
-
-

Module Management

-

Listing and Validation

-
# List loaded modules
-module-loader list taskservs .
-module-loader list providers .
-module-loader list clusters .
-
-# Validate workspace
-module-loader validate .
-
-# Show workspace info
-workspace-init.nu . info
-
-

Unloading Modules

-
# Remove specific modules
-module-loader unload taskservs . redis
-module-loader unload providers . aws
-
-# This regenerates import files automatically
-
-

Module Information

-
# Get detailed module info
-module-loader info taskservs kubernetes
-module-loader info providers upcloud
-module-loader info clusters buildkit
-
-

CI/CD Integration

-

Pipeline Example

-
#!/usr/bin/env nu
-# deploy-pipeline.nu
-
-# Install specific versions
-kcl-packager.nu install --version $env.PROVISIONING_VERSION
-
-# Load production modules
-module-loader init $env.WORKSPACE_PATH \
-    --taskservs $env.REQUIRED_TASKSERVS \
-    --providers [$env.CLOUD_PROVIDER]
-
-# Validate configuration
-module-loader validate $env.WORKSPACE_PATH
-
-# Deploy infrastructure
-provisioning server create --infra $env.WORKSPACE_PATH
-
-

Troubleshooting

-

Common Issues

-

Module Import Errors

-
Error: module not found
-
-

Solution: Verify modules are loaded and regenerate imports

-
module-loader list taskservs .
-module-loader load taskservs . [kubernetes, cilium, containerd]
-
-

Provider Configuration Issues

-

Solution: Check provider-specific configuration in .providers/ directory

-

KCL Compilation Errors

-

Solution: Verify core package installation and kcl.mod configuration

-
kcl-packager.nu install --version latest
-kcl run --dry-run servers.ncl
-
-

Debug Commands

-
# Show workspace structure
-tree -a workspace/infra/my-project
-
-# Check generated imports
-cat workspace/infra/my-project/taskservs.ncl
-
-# Validate KCL files
-nickel typecheck workspace/infra/my-project/*.ncl
-
-# Show module manifests
-cat workspace/infra/my-project/.manifest/taskservs.yaml
-
-

Best Practices

-

1. Version Management

-
    -
  • Pin core package versions in production
  • -
  • Use semantic versioning for extensions
  • -
  • Test compatibility before upgrading
  • -
-

2. Module Organization

-
    -
  • Load only required modules to keep workspaces clean
  • -
  • Use meaningful workspace names
  • -
  • Document required modules in README
  • -
-

3. Security

-
    -
  • Exclude .manifest/ and data/ from version control
  • -
  • Use secrets management for sensitive configuration
  • -
  • Validate modules before loading in production
  • -
-

4. Performance

-
    -
  • Load modules at workspace initialization, not runtime
  • -
  • Cache discovery results when possible
  • -
  • Use parallel loading for multiple modules
  • -
-

Migration Guide

-

For existing workspaces, follow these steps:

-

1. Backup Current Workspace

-
cp -r workspace/infra/existing workspace/infra/existing-backup
-
-

2. Analyze Migration Requirements

-
workspace-migrate.nu workspace/infra/existing dry-run
-
-

3. Perform Migration

-
workspace-migrate.nu workspace/infra/existing
-
-

4. Load Required Modules

-
cd workspace/infra/existing
-module-loader load taskservs . [kubernetes, cilium]
-module-loader load providers . [upcloud]
-
-

5. Test and Validate

-
kcl run servers.ncl
-module-loader validate .
-
-

6. Deploy

-
provisioning server create --infra . --check
-
-

Future Enhancements

-
    -
  • Registry-based module distribution
  • -
  • Module dependency resolution
  • -
  • Automatic version updates
  • -
  • Module templates and scaffolding
  • -
  • Integration with external package managers
  • -
-

Modular Configuration Loading Architecture

-

Overview

-

The configuration system has been refactored into modular components to achieve 2-3x performance improvements -for regular commands while maintaining full functionality for complex operations.

-

Architecture Layers

-

Layer 1: Minimal Loader (0.023s)

-

File: loader-minimal.nu (~150 lines)

-

Contains only essential functions needed for:

-
    -
  • Workspace detection
  • -
  • Environment determination
  • -
  • Project root discovery
  • -
  • Fast path detection
  • -
-

Exported Functions:

-
    -
  • get-active-workspace - Get current workspace
  • -
  • detect-current-environment - Determine dev/test/prod
  • -
  • get-project-root - Find project directory
  • -
  • get-defaults-config-path - Path to default config
  • -
  • check-if-sops-encrypted - SOPS file detection
  • -
  • find-sops-config-path - Locate SOPS config
  • -
-

Used by:

-
    -
  • Help commands (help infrastructure, help workspace, etc.)
  • -
  • Status commands
  • -
  • Workspace listing
  • -
  • Quick reference operations
  • -
-

Layer 2: Lazy Loader (decision layer)

-

File: loader-lazy.nu (~80 lines)

-

Smart loader that decides which configuration to load:

-
    -
  • Fast path for help/status commands
  • -
  • Full path for operations that need config
  • -
-

Key Function:

-
    -
  • command-needs-full-config - Determines if full config required
  • -
-

Layer 3: Full Loader (0.091s)

-

File: loader.nu (1990 lines)

-

Original comprehensive loader that handles:

-
    -
  • Hierarchical config loading
  • -
  • Variable interpolation
  • -
  • Config validation
  • -
  • Provider configuration
  • -
  • Platform configuration
  • -
-

Used by:

-
    -
  • Server creation
  • -
  • Infrastructure operations
  • -
  • Deployment commands
  • -
  • Anything needing full config
  • -
-

Performance Characteristics

-

Benchmarks

-
- - - - - -
OperationTimeNotes
Workspace detection0.023s23ms for minimal load
Full config load0.091s~4x slower than minimal
Help command0.040sUses minimal loader only
Status command0.030sFast path, no full config
Server operations0.150s+Requires full config load
-
-

Performance Gains

-
    -
  • Help commands: 30-40% faster (40ms vs 60ms with full config)
  • -
  • Workspace operations: 50% faster (uses minimal loader)
  • -
  • Status checks: Nearly instant (23ms)
  • -
-

Module Dependency Graph

-
Help/Status Commands
-    ↓
-loader-lazy.nu
-    ↓
-loader-minimal.nu (workspace, environment detection)
-    ↓
-     (no further deps)
-
-Infrastructure/Server Commands
-    ↓
-loader-lazy.nu
-    ↓
-loader.nu (full configuration)
-    ├── loader-minimal.nu (for workspace detection)
-    ├── Interpolation functions
-    ├── Validation functions
-    └── Config merging logic
-
-

Usage Examples

-

Fast Path (Help Commands)

-
# Uses minimal loader - 23ms
-./provisioning help infrastructure
-./provisioning workspace list
-./provisioning version
-
-

Medium Path (Status Operations)

-
# Uses minimal loader with some full config - ~50ms
-./provisioning status
-./provisioning workspace active
-./provisioning config validate
-
-

Full Path (Infrastructure Operations)

-
# Uses full loader - ~150ms
-./provisioning server create --infra myinfra
-./provisioning taskserv create kubernetes
-./provisioning workflow submit batch.yaml
-
-

Implementation Details

-

Lazy Loading Decision Logic

-
# In loader-lazy.nu
-let is_fast_command = (
-    $command == "help" or
-    $command == "status" or
-    $command == "version"
-)
-
-if $is_fast_command {
-    # Use minimal loader only (0.023s)
-    get-minimal-config
-} else {
-    # Load full configuration (0.091s)
-    load-provisioning-config
-}
-
-

Minimal Config Structure

-

The minimal loader returns a lightweight config record:

-
{
-    workspace: {
-        name: "librecloud"
-        path: "/path/to/workspace_librecloud"
-    }
-    environment: "dev"
-    debug: false
-    paths: {
-        base: "/path/to/workspace_librecloud"
-    }
-}
-
-

This is sufficient for:

-
    -
  • Workspace identification
  • -
  • Environment determination
  • -
  • Path resolution
  • -
  • Help text generation
  • -
-

Full Config Structure

-

The full loader returns comprehensive configuration with:

-
    -
  • Workspace settings
  • -
  • Provider configurations
  • -
  • Platform settings
  • -
  • Interpolated variables
  • -
  • Validation results
  • -
  • Environment-specific overrides
  • -
-

Migration Path

-

For CLI Commands

-
    -
  1. Commands are already categorized (help, workspace, server, etc.)
  2. -
  3. Help system uses fast path (minimal loader)
  4. -
  5. Infrastructure commands use full path (full loader)
  6. -
  7. No changes needed to command implementations
  8. -
-

For New Modules

-

When creating new modules:

-
    -
  1. Check if full config is needed
  2. -
  3. If not, use loader-minimal.nu functions only
  4. -
  5. If yes, use get-config from main config accessor
  6. -
-

Future Optimizations

-

Phase 2: Per-Command Config Caching

-
    -
  • Cache full config for 60 seconds
  • -
  • Reuse config across related commands
  • -
  • Potential: Additional 50% improvement
  • -
-

Phase 3: Configuration Profiles

-
    -
  • Create thin config profiles for common scenarios
  • -
  • Pre-loaded templates for workspace/infra combinations
  • -
  • Fast switching between profiles
  • -
-

Phase 4: Parallel Config Loading

-
    -
  • Load workspace and provider configs in parallel
  • -
  • Async validation and interpolation
  • -
  • Potential: 30% improvement for full config load
  • -
-

Maintenance Notes

-

Adding New Functions to Minimal Loader

-

Only add if:

-
    -
  1. Used by help/status commands
  2. -
  3. Doesn’t require full config
  4. -
  5. Performance-critical path
  6. -
-

Modifying Full Loader

-
    -
  • Changes are backward compatible
  • -
  • Validate against existing config files
  • -
  • Update tests in test suite
  • -
-

Performance Testing

-
# Benchmark minimal loader
-time nu -n -c "use loader-minimal.nu *; get-active-workspace"
-
-# Benchmark full loader
-time nu -c "use config/accessor.nu *; get-config"
-
-# Benchmark help command
-time ./provisioning help infrastructure
-
-

See Also

-
    -
  • loader.nu - Full configuration loading system
  • -
  • loader-minimal.nu - Fast path loader
  • -
  • loader-lazy.nu - Smart loader decision logic
  • -
  • config/ARCHITECTURE.md - Configuration architecture details
  • -
-

Nickel Executable Examples & Test Cases

-

Status: Practical Developer Guide -Last Updated: 2025-12-15 -Purpose: Copy-paste ready examples, validatable patterns, runnable test cases

-
-

Setup: Run Examples Locally

-

Prerequisites

-
# Install Nickel
-brew install nickel
-# or from source: https://nickel-lang.org/getting-started/
-
-# Verify installation
-nickel --version  # Should be 1.0+
-
-

Directory Structure for Examples

-
mkdir -p ~/nickel-examples/{simple,complex,production}
-cd ~/nickel-examples
-
-
-

Example 1: Simple Server Configuration (Executable)

-

Step 1: Create Contract File

-
cat > simple/server_contracts.ncl << 'EOF'
-{
-  ServerConfig = {
-    name | String,
-    cpu_cores | Number,
-    memory_gb | Number,
-    zone | String,
-  },
-}
-EOF
-
-

Step 2: Create Defaults File

-
cat > simple/server_defaults.ncl << 'EOF'
-{
-  web_server = {
-    name = "web-01",
-    cpu_cores = 4,
-    memory_gb = 8,
-    zone = "us-nyc1",
-  },
-
-  database_server = {
-    name = "db-01",
-    cpu_cores = 8,
-    memory_gb = 16,
-    zone = "us-nyc1",
-  },
-
-  cache_server = {
-    name = "cache-01",
-    cpu_cores = 2,
-    memory_gb = 4,
-    zone = "us-nyc1",
-  },
-}
-EOF
-
-

Step 3: Create Main Module with Hybrid Interface

-
cat > simple/server.ncl << 'EOF'
-let contracts = import "./server_contracts.ncl" in
-let defaults = import "./server_defaults.ncl" in
-
-{
-  defaults = defaults,
-
-  # Level 1: Maker functions (90% of use cases)
-  make_server | not_exported = fun overrides =>
-    let base = defaults.web_server in
-    base & overrides,
-
-  # Level 2: Pre-built instances (inspection/reference)
-  DefaultWebServer = defaults.web_server,
-  DefaultDatabaseServer = defaults.database_server,
-  DefaultCacheServer = defaults.cache_server,
-
-  # Level 3: Custom combinations
-  production_web_server = defaults.web_server & {
-    cpu_cores = 8,
-    memory_gb = 16,
-  },
-
-  production_database_stack = [
-    defaults.database_server & { name = "db-01", zone = "us-nyc1" },
-    defaults.database_server & { name = "db-02", zone = "eu-fra1" },
-  ],
-}
-EOF
-
-

Test: Export and Validate JSON

-
cd simple/
-
-# Export to JSON
-nickel export server.ncl --format json | jq .
-
-# Expected output:
-# {
-#   "defaults": { ... },
-#   "DefaultWebServer": { "name": "web-01", "cpu_cores": 4, ... },
-#   "DefaultDatabaseServer": { ... },
-#   "DefaultCacheServer": { ... },
-#   "production_web_server": { "name": "web-01", "cpu_cores": 8, ... },
-#   "production_database_stack": [ ... ]
-# }
-
-# Verify specific fields
-nickel export server.ncl --format json | jq '.production_web_server.cpu_cores'
-# Output: 8
-
-

Usage in Consumer Module

-
cat > simple/consumer.ncl << 'EOF'
-let server = import "./server.ncl" in
-
-{
-  # Use maker function
-  staging_web = server.make_server {
-    name = "staging-web",
-    zone = "eu-fra1",
-  },
-
-  # Reference defaults
-  default_db = server.DefaultDatabaseServer,
-
-  # Use pre-built
-  production_stack = server.production_database_stack,
-}
-EOF
-
-# Export and verify
-nickel export consumer.ncl --format json | jq '.staging_web'
-
-
-

Example 2: Complex Provider Extension (Production Pattern)

-

Create Provider Structure

-
mkdir -p complex/upcloud/{contracts,defaults,main}
-cd complex/upcloud
-
-

Provider Contracts

-
cat > upcloud_contracts.ncl << 'EOF'
-{
-  StorageBackup = {
-    backup_id | String,
-    frequency | String,
-    retention_days | Number,
-  },
-
-  ServerConfig = {
-    name | String,
-    plan | String,
-    zone | String,
-    backups | Array,
-  },
-
-  ProviderConfig = {
-    api_key | String,
-    api_password | String,
-    servers | Array,
-  },
-}
-EOF
-
-

Provider Defaults

-
cat > upcloud_defaults.ncl << 'EOF'
-{
-  backup = {
-    backup_id = "",
-    frequency = "daily",
-    retention_days = 7,
-  },
-
-  server = {
-    name = "",
-    plan = "1xCPU-1 GB",
-    zone = "us-nyc1",
-    backups = [],
-  },
-
-  provider = {
-    api_key = "",
-    api_password = "",
-    servers = [],
-  },
-}
-EOF
-
-

Provider Main Module

-
cat > upcloud_main.ncl << 'EOF'
-let contracts = import "./upcloud_contracts.ncl" in
-let defaults = import "./upcloud_defaults.ncl" in
-
-{
-  defaults = defaults,
-
-  # Makers (90% use case)
-  make_backup | not_exported = fun overrides =>
-    defaults.backup & overrides,
-
-  make_server | not_exported = fun overrides =>
-    defaults.server & overrides,
-
-  make_provider | not_exported = fun overrides =>
-    defaults.provider & overrides,
-
-  # Pre-built instances
-  DefaultBackup = defaults.backup,
-  DefaultServer = defaults.server,
-  DefaultProvider = defaults.provider,
-
-  # Production configs
-  production_high_availability = defaults.provider & {
-    servers = [
-      defaults.server & {
-        name = "web-01",
-        plan = "2xCPU-4 GB",
-        zone = "us-nyc1",
-        backups = [
-          defaults.backup & { frequency = "hourly" },
-        ],
-      },
-      defaults.server & {
-        name = "web-02",
-        plan = "2xCPU-4 GB",
-        zone = "eu-fra1",
-        backups = [
-          defaults.backup & { frequency = "hourly" },
-        ],
-      },
-      defaults.server & {
-        name = "db-01",
-        plan = "4xCPU-16 GB",
-        zone = "us-nyc1",
-        backups = [
-          defaults.backup & { frequency = "every-6h", retention_days = 30 },
-        ],
-      },
-    ],
-  },
-}
-EOF
-
-

Test Provider Configuration

-
# Export provider config
-nickel export upcloud_main.ncl --format json | jq '.production_high_availability'
-
-# Export as TOML (for IaC config files)
-nickel export upcloud_main.ncl --format toml > upcloud.toml
-cat upcloud.toml
-
-# Count servers in production config
-nickel export upcloud_main.ncl --format json | jq '.production_high_availability.servers | length'
-# Output: 3
-
-

Consumer Using Provider

-
cat > upcloud_consumer.ncl << 'EOF'
-let upcloud = import "./upcloud_main.ncl" in
-
-{
-  # Simple production setup
-  simple_production = upcloud.make_provider {
-    api_key = "prod-key",
-    api_password = "prod-secret",
-    servers = [
-      upcloud.make_server { name = "web-01", plan = "2xCPU-4 GB" },
-      upcloud.make_server { name = "web-02", plan = "2xCPU-4 GB" },
-    ],
-  },
-
-  # Advanced HA setup with custom fields
-  ha_stack = upcloud.production_high_availability & {
-    api_key = "prod-key",
-    api_password = "prod-secret",
-    monitoring_enabled = true,
-    alerting_email = "ops@company.com",
-    custom_vpc_id = "vpc-prod-001",
-  },
-}
-EOF
-
-# Validate structure
-nickel export upcloud_consumer.ncl --format json | jq '.ha_stack | keys'
-
-
-

Example 3: Real-World Pattern - Taskserv Configuration

-

Taskserv Contracts (from wuji)

-
cat > production/taskserv_contracts.ncl << 'EOF'
-{
-  Dependency = {
-    name | String,
-    wait_for_health | Bool,
-  },
-
-  TaskServ = {
-    name | String,
-    version | String,
-    dependencies | Array,
-    enabled | Bool,
-  },
-}
-EOF
-
-

Taskserv Defaults

-
cat > production/taskserv_defaults.ncl << 'EOF'
-{
-  kubernetes = {
-    name = "kubernetes",
-    version = "1.28.0",
-    enabled = true,
-    dependencies = [
-      { name = "containerd", wait_for_health = true },
-      { name = "etcd", wait_for_health = true },
-    ],
-  },
-
-  cilium = {
-    name = "cilium",
-    version = "1.14.0",
-    enabled = true,
-    dependencies = [
-      { name = "kubernetes", wait_for_health = true },
-    ],
-  },
-
-  containerd = {
-    name = "containerd",
-    version = "1.7.0",
-    enabled = true,
-    dependencies = [],
-  },
-
-  etcd = {
-    name = "etcd",
-    version = "3.5.0",
-    enabled = true,
-    dependencies = [],
-  },
-
-  postgres = {
-    name = "postgres",
-    version = "15.0",
-    enabled = true,
-    dependencies = [],
-  },
-
-  redis = {
-    name = "redis",
-    version = "7.0.0",
-    enabled = true,
-    dependencies = [],
-  },
-}
-EOF
-
-

Taskserv Main

-
cat > production/taskserv.ncl << 'EOF'
-let contracts = import "./taskserv_contracts.ncl" in
-let defaults = import "./taskserv_defaults.ncl" in
-
-{
-  defaults = defaults,
-
-  make_taskserv | not_exported = fun overrides =>
-    defaults.kubernetes & overrides,
-
-  # Pre-built
-  DefaultKubernetes = defaults.kubernetes,
-  DefaultCilium = defaults.cilium,
-  DefaultContainerd = defaults.containerd,
-  DefaultEtcd = defaults.etcd,
-  DefaultPostgres = defaults.postgres,
-  DefaultRedis = defaults.redis,
-
-  # Wuji infrastructure (20 taskservs similar to actual)
-  wuji_k8s_stack = {
-    kubernetes = defaults.kubernetes,
-    cilium = defaults.cilium,
-    containerd = defaults.containerd,
-    etcd = defaults.etcd,
-  },
-
-  wuji_data_stack = {
-    postgres = defaults.postgres & { version = "15.3" },
-    redis = defaults.redis & { version = "7.2.0" },
-  },
-
-  # Staging with different versions
-  staging_stack = {
-    kubernetes = defaults.kubernetes & { version = "1.27.0" },
-    cilium = defaults.cilium & { version = "1.13.0" },
-    containerd = defaults.containerd & { version = "1.6.0" },
-    etcd = defaults.etcd & { version = "3.4.0" },
-    postgres = defaults.postgres & { version = "14.0" },
-  },
-}
-EOF
-
-

Test Taskserv Setup

-
# Export stack
-nickel export taskserv.ncl --format json | jq '.wuji_k8s_stack | keys'
-# Output: ["kubernetes", "cilium", "containerd", "etcd"]
-
-# Get specific version
-nickel export taskserv.ncl --format json | \
-  jq '.staging_stack.kubernetes.version'
-# Output: "1.27.0"
-
-# Count taskservs in stacks
-echo "Wuji K8S stack:"
-nickel export taskserv.ncl --format json | jq '.wuji_k8s_stack | length'
-
-echo "Staging stack:"
-nickel export taskserv.ncl --format json | jq '.staging_stack | length'
-
-
-

Example 4: Composition & Extension Pattern

-

Base Infrastructure

-
cat > production/infrastructure.ncl << 'EOF'
-let servers = import "./server.ncl" in
-let taskservs = import "./taskserv.ncl" in
-
-{
-  # Infrastructure with servers + taskservs
-  development = {
-    servers = {
-      app = servers.make_server { name = "dev-app", cpu_cores = 2 },
-      db = servers.make_server { name = "dev-db", cpu_cores = 4 },
-    },
-    taskservs = taskservs.staging_stack,
-  },
-
-  production = {
-    servers = [
-      servers.make_server { name = "prod-app-01", cpu_cores = 8 },
-      servers.make_server { name = "prod-app-02", cpu_cores = 8 },
-      servers.make_server { name = "prod-db-01", cpu_cores = 16 },
-    ],
-    taskservs = taskservs.wuji_k8s_stack & {
-      prometheus = {
-        name = "prometheus",
-        version = "2.45.0",
-        enabled = true,
-        dependencies = [],
-      },
-    },
-  },
-}
-EOF
-
-# Validate composition
-nickel export infrastructure.ncl --format json | jq '.production.servers | length'
-# Output: 3
-
-nickel export infrastructure.ncl --format json | jq '.production.taskservs | keys | length'
-# Output: 5
-
-

Extending Infrastructure (Nickel Advantage!)

-
cat > production/infrastructure_extended.ncl << 'EOF'
-let infra = import "./infrastructure.ncl" in
-
-# Add custom fields without modifying base!
-{
-  development = infra.development & {
-    monitoring_enabled = false,
-    cost_optimization = true,
-    auto_shutdown = true,
-  },
-
-  production = infra.production & {
-    monitoring_enabled = true,
-    alert_email = "ops@company.com",
-    backup_enabled = true,
-    backup_frequency = "6h",
-    disaster_recovery_enabled = true,
-    dr_region = "eu-fra1",
-    compliance_level = "SOC2",
-    security_scanning = true,
-  },
-}
-EOF
-
-# Verify extension works (custom fields are preserved!)
-nickel export infrastructure_extended.ncl --format json | \
-  jq '.production | keys'
-# Output includes: monitoring_enabled, alert_email, backup_enabled, etc
-
-
-

Example 5: Validation & Error Handling

-

Validation Functions

-
cat > production/validation.ncl << 'EOF'
-let validate_server = fun server =>
-  if server.cpu_cores <= 0 then
-    std.record.fail "CPU cores must be positive"
-  else if server.memory_gb <= 0 then
-    std.record.fail "Memory must be positive"
-  else
-    server
-in
-
-let validate_taskserv = fun ts =>
-  if std.string.length ts.name == 0 then
-    std.record.fail "TaskServ name required"
-  else if std.string.length ts.version == 0 then
-    std.record.fail "TaskServ version required"
-  else
-    ts
-in
-
-{
-  validate_server = validate_server,
-  validate_taskserv = validate_taskserv,
-}
-EOF
-
-

Using Validations

-
cat > production/validated_config.ncl << 'EOF'
-let server = import "./server.ncl" in
-let taskserv = import "./taskserv.ncl" in
-let validation = import "./validation.ncl" in
-
-{
-  # Valid server (passes validation)
-  valid_server = validation.validate_server {
-    name = "web-01",
-    cpu_cores = 4,
-    memory_gb = 8,
-    zone = "us-nyc1",
-  },
-
-  # Valid taskserv
-  valid_taskserv = validation.validate_taskserv {
-    name = "kubernetes",
-    version = "1.28.0",
-    dependencies = [],
-    enabled = true,
-  },
-}
-EOF
-
-# Test validation
-nickel export validated_config.ncl --format json
-# Should succeed without errors
-
-# Test invalid (uncomment to see error)
-# {
-#   invalid_server = validation.validate_server {
-#     name = "bad-server",
-#     cpu_cores = -1,  # Invalid!
-#     memory_gb = 8,
-#     zone = "us-nyc1",
-#   },
-# }
-
-
-

Test Suite: Bash Script

-

Run All Examples

-
#!/bin/bash
-# test_all_examples.sh
-
-set -e
-
-echo "=== Testing Nickel Examples ==="
-
-cd ~/nickel-examples
-
-echo "1. Simple Server Configuration..."
-cd simple
-nickel export server.ncl --format json > /dev/null
-echo "   ✓ Simple server config valid"
-
-echo "2. Complex Provider (UpCloud)..."
-cd ../complex/upcloud
-nickel export upcloud_main.ncl --format json > /dev/null
-echo "   ✓ UpCloud provider config valid"
-
-echo "3. Production Taskserv..."
-cd ../../production
-nickel export taskserv.ncl --format json > /dev/null
-echo "   ✓ Taskserv config valid"
-
-echo "4. Infrastructure Composition..."
-nickel export infrastructure.ncl --format json > /dev/null
-echo "   ✓ Infrastructure composition valid"
-
-echo "5. Extended Infrastructure..."
-nickel export infrastructure_extended.ncl --format json > /dev/null
-echo "   ✓ Extended infrastructure valid"
-
-echo "6. Validated Config..."
-nickel export validated_config.ncl --format json > /dev/null
-echo "   ✓ Validated config valid"
-
-echo ""
-echo "=== All Tests Passed ✓ ==="
-
-
-

Quick Commands Reference

-

Common Nickel Operations

-
# Validate Nickel syntax
-nickel export config.ncl
-
-# Export as JSON (for inspecting)
-nickel export config.ncl --format json
-
-# Export as TOML (for config files)
-nickel export config.ncl --format toml
-
-# Export as YAML
-nickel export config.ncl --format yaml
-
-# Pretty print JSON output
-nickel export config.ncl --format json | jq .
-
-# Extract specific field
-nickel export config.ncl --format json | jq '.production_server'
-
-# Count array elements
-nickel export config.ncl --format json | jq '.servers | length'
-
-# Check if file has valid syntax only
-nickel typecheck config.ncl
-
-
-

Troubleshooting Examples

-

Problem: “unexpected token” with multiple let

-
# ❌ WRONG
-let A = {x = 1}
-let B = {y = 2}
-{A = A, B = B}
-
-# ✅ CORRECT
-let A = {x = 1} in
-let B = {y = 2} in
-{A = A, B = B}
-
-

Problem: Function serialization fails

-
# ❌ WRONG - function will fail to serialize
-{
-  get_value = fun x => x + 1,
-  result = get_value 5,
-}
-
-# ✅ CORRECT - mark function not_exported
-{
-  get_value | not_exported = fun x => x + 1,
-  result = get_value 5,
-}
-
-

Problem: Null values cause export issues

-
# ❌ WRONG
-{ optional_field = null }
-
-# ✅ CORRECT - use empty string/array/object
-{ optional_field = "" }      # for strings
-{ optional_field = [] }      # for arrays
-{ optional_field = {} }      # for objects
-
-
-

Summary

-

These examples are:

-
    -
  • Copy-paste ready - Can run directly
  • -
  • Executable - Validated with nickel export
  • -
  • Progressive - Simple → Complex → Production
  • -
  • Real patterns - Based on actual codebase (wuji, upcloud)
  • -
  • Self-contained - Each example works independently
  • -
  • Comparable - Shows KCL vs Nickel equivalence
  • -
-

Next: Use these as templates for your own Nickel configurations.

-
-

Version: 1.0.0 -Status: Tested & Verified -Last Updated: 2025-12-15

-

Execution Complete

-

Perfect question. Here’s a clear explanation:

-

The Orchestrator IS USED and IS CRITICAL

-

That code example was misleading. Here’s the real architecture:

-

How It Actually Works

-

┌───────────────────────────────────────────────── -─┐ -│ User runs: provisioning server create –orchestrated -└───────────────────┬───────────────────────────── -─┘ -↓ -┌───────────────────────┐ -│ Nushell CLI │ -│ (provisioning) │ -└───────────┬───────────┘ -↓ HTTP POST -┌───────────────────────────────┐ -│ Rust Orchestrator Daemon │ -│ (provisioning-orchestrator) │ -│ │ -│ • Task Queue │ -│ • Workflow Engine │ -│ • Dependency Resolution │ -│ • Parallel Execution │ -└───────────┬───────────────────┘ -↓ spawns subprocess -┌───────────────────────────────┐ -│ Nushell Business Logic │ -│ nu -c “use servers/create.nu”│ -│ │ -│ Executes actual provider │ -│ API calls, configuration │ -└───────────────────────────────┘ -The Flow in Detail

-
    -
  1. User Command:
  2. -
-

provisioning server create wuji –orchestrated -2. Nushell CLI submits to orchestrator:

-

CLI code

-

http post http://localhost:9090/workflows/servers/create { -infra: “wuji” -params: {…} -}

-

Returns: workflow_id = “abc-123”

-
    -
  1. Orchestrator receives and queues:
  2. -
-
// Orchestrator receives HTTP request
-async fn create_server_workflow(request) {
-    let task = Task::new(TaskType::ServerCreate, request);
-    task_queue.enqueue(task).await;  // Queue for execution
-    return workflow_id;              // Return immediately
-}
-```text
-
-2. Orchestrator executes via Nushell subprocess:
-
-```rust
-// Orchestrator spawns Nushell to run business logic
-async fn execute_task(task: Task) {
-    let output = Command::new("nu")
-        .arg("-c")
-        .arg("use /usr/local/lib/provisioning/servers/create.nu; create-server 'wuji'")
-        .output()
-        .await?;
-
-    // Orchestrator manages: retry, checkpointing, monitoring
-}
-```text
-
-3. Nushell executes the actual work:
-
-```nu
-servers/create.nu
-
-export def create-server [name: string] {
-    This is the business logic
-    Calls UpCloud API, creates server, etc.
-    let provider = (load-provider)
-    $provider | create-vm $name
-}
-Why This Architecture?
-
-Problem It Solves
-
-Without Orchestrator (Old Way):
-
-provisioning → template.nu → cluster.nu → taskserv.nu → provider.nu
-                (Deep call stack = crashes!)
-With Orchestrator (Current):
-
-provisioning → Orchestrator → spawns fresh Nushell subprocess for each task
-                (No deep nesting, parallel execution, recovery)
-What Orchestrator Provides
-
-Task Queue - Reliable execution even if system crashes
-Parallel Execution - Run 10 tasks at once (Rust async)
-Workflow Engine - Handle complex dependencies
-Checkpointing - Resume from failure
-Monitoring - Real-time progress tracking
-What Nushell Provides
-
-Business Logic - Provider integrations, config generation
-Flexibility - Easy to modify without recompiling
-Readability - Shell-like syntax for infrastructure ops
-Multi-Repo Impact: NONE on Integration
-
-In Monorepo:
-
-provisioning/
-├── core/nulib/          # Nushell code
-└── platform/orchestrator/  # Rust code
-In Multi-Repo:
-
-provisioning-core/       # Separate repo, installs to /usr/local/lib/provisioning
-provisioning-platform/   # Separate repo, installs to /usr/local/bin/provisioning-orchestrator
-Integration is the same:
-
-Orchestrator calls: nu -c "use /usr/local/lib/provisioning/servers/create.nu"
-Nushell calls: http post <http://localhost:9090/workflows/>...
-No code dependency, just runtime coordination!
-
-The Orchestrator IS Essential
-
-The orchestrator:
-
-✅ IS USED for all complex operations
-✅ IS CRITICAL for workflow system (v3.0)
-✅ IS REQUIRED for batch operations (v3.1)
-✅ SOLVES deep call stack issues
-✅ PROVIDES performance and reliability
-That misleading code example showed how Platform doesn't link to Core code, but it absolutely uses the orchestrator for coordination.
-
-Does this clear it up? The orchestrator is the performance and reliability layer that makes the whole system work!
-
-Cost: $0.1565 USD
-Duration: 137.69s
-Turns: 40
-Total tokens: 7466(7 in, 7459 out)
-

Orchestrator Authentication & Authorization Integration

-

Version: 1.0.0 -Date: 2025-10-08 -Status: Implemented

-

Overview

-

Complete authentication and authorization flow integration for the Provisioning Orchestrator, connecting all security components (JWT validation, MFA -verification, Cedar authorization, rate limiting, and audit logging) into a cohesive security middleware chain.

-

Architecture

-

Security Middleware Chain

-

The middleware chain is applied in this specific order to ensure proper security:

-
┌─────────────────────────────────────────────────────────────────┐
-│                    Incoming HTTP Request                        │
-└────────────────────────┬────────────────────────────────────────┘
-                         │
-                         ▼
-        ┌────────────────────────────────┐
-        │  1. Rate Limiting Middleware   │
-        │  - Per-IP request limits       │
-        │  - Sliding window              │
-        │  - Exempt IPs                  │
-        └────────────┬───────────────────┘
-                     │ (429 if exceeded)
-                     ▼
-        ┌────────────────────────────────┐
-        │  2. Authentication Middleware  │
-        │  - Extract Bearer token        │
-        │  - Validate JWT signature      │
-        │  - Check expiry, issuer, aud   │
-        │  - Check revocation            │
-        └────────────┬───────────────────┘
-                     │ (401 if invalid)
-                     ▼
-        ┌────────────────────────────────┐
-        │  3. MFA Verification           │
-        │  - Check MFA status in token   │
-        │  - Enforce for sensitive ops   │
-        │  - Production deployments      │
-        │  - All DELETE operations       │
-        └────────────┬───────────────────┘
-                     │ (403 if required but missing)
-                     ▼
-        ┌────────────────────────────────┐
-        │  4. Authorization Middleware   │
-        │  - Build Cedar request         │
-        │  - Evaluate policies           │
-        │  - Check permissions           │
-        │  - Log decision                │
-        └────────────┬───────────────────┘
-                     │ (403 if denied)
-                     ▼
-        ┌────────────────────────────────┐
-        │  5. Audit Logging Middleware   │
-        │  - Log complete request        │
-        │  - User, action, resource      │
-        │  - Authorization decision      │
-        │  - Response status             │
-        └────────────┬───────────────────┘
-                     │
-                     ▼
-        ┌────────────────────────────────┐
-        │      Protected Handler         │
-        │  - Access security context     │
-        │  - Execute business logic      │
-        └────────────────────────────────┘
-
-

Implementation Details

-

1. Security Context Builder (middleware/security_context.rs)

-

Purpose: Build complete security context from authenticated requests.

-

Key Features:

-
    -
  • Extracts JWT token claims
  • -
  • Determines MFA verification status
  • -
  • Extracts IP address (X-Forwarded-For, X-Real-IP)
  • -
  • Extracts user agent and session info
  • -
  • Provides permission checking methods
  • -
-

Lines of Code: 275

-

Example:

-
pub struct SecurityContext {
-    pub user_id: String,
-    pub token: ValidatedToken,
-    pub mfa_verified: bool,
-    pub ip_address: IpAddr,
-    pub user_agent: Option<String>,
-    pub permissions: Vec<String>,
-    pub workspace: String,
-    pub request_id: String,
-    pub session_id: Option<String>,
-}
-
-impl SecurityContext {
-    pub fn has_permission(&self, permission: &str) -> bool { ... }
-    pub fn has_any_permission(&self, permissions: &[&str]) -> bool { ... }
-    pub fn has_all_permissions(&self, permissions: &[&str]) -> bool { ... }
-}
-

2. Enhanced Authentication Middleware (middleware/auth.rs)

-

Purpose: JWT token validation with revocation checking.

-

Key Features:

-
    -
  • Bearer token extraction
  • -
  • JWT signature validation (RS256)
  • -
  • Expiry, issuer, audience checks
  • -
  • Token revocation status
  • -
  • Security context injection
  • -
-

Lines of Code: 245

-

Flow:

-
    -
  1. Extract Authorization: Bearer <token> header
  2. -
  3. Validate JWT with TokenValidator
  4. -
  5. Build SecurityContext
  6. -
  7. Inject into request extensions
  8. -
  9. Continue to next middleware or return 401
  10. -
-

Error Responses:

-
    -
  • 401 Unauthorized: Missing/invalid token, expired, revoked
  • -
  • 403 Forbidden: Insufficient permissions
  • -
-

3. MFA Verification Middleware (middleware/mfa.rs)

-

Purpose: Enforce MFA for sensitive operations.

-

Key Features:

-
    -
  • Path-based MFA requirements
  • -
  • Method-based enforcement (all DELETEs)
  • -
  • Production environment protection
  • -
  • Clear error messages
  • -
-

Lines of Code: 290

-

MFA Required For:

-
    -
  • Production deployments (/production/, /prod/)
  • -
  • All DELETE operations
  • -
  • Server operations (POST, PUT, DELETE)
  • -
  • Cluster operations (POST, PUT, DELETE)
  • -
  • Batch submissions
  • -
  • Rollback operations
  • -
  • Configuration changes (POST, PUT, DELETE)
  • -
  • Secret management
  • -
  • User/role management
  • -
-

Example:

-
fn requires_mfa(method: &str, path: &str) -> bool {
-    if path.contains("/production/") { return true; }
-    if method == "DELETE" { return true; }
-    if path.contains("/deploy") { return true; }
-    // ...
-}
-

4. Enhanced Authorization Middleware (middleware/authz.rs)

-

Purpose: Cedar policy evaluation with audit logging.

-

Key Features:

-
    -
  • Builds Cedar authorization request from HTTP request
  • -
  • Maps HTTP methods to Cedar actions (GET→Read, POST→Create, etc.)
  • -
  • Extracts resource types from paths
  • -
  • Evaluates Cedar policies with context (MFA, IP, time, workspace)
  • -
  • Logs all authorization decisions to audit log
  • -
  • Non-blocking audit logging (tokio::spawn)
  • -
-

Lines of Code: 380

-

Resource Mapping:

-
/api/v1/servers/srv-123    → Resource::Server("srv-123")
-/api/v1/taskserv/kubernetes → Resource::TaskService("kubernetes")
-/api/v1/cluster/prod        → Resource::Cluster("prod")
-/api/v1/config/settings     → Resource::Config("settings")
-

Action Mapping:

-
GET    → Action::Read
-POST   → Action::Create
-PUT    → Action::Update
-DELETE → Action::Delete
-

5. Rate Limiting Middleware (middleware/rate_limit.rs)

-

Purpose: Prevent API abuse with per-IP rate limiting.

-

Key Features:

-
    -
  • Sliding window rate limiting
  • -
  • Per-IP request tracking
  • -
  • Configurable limits and windows
  • -
  • Exempt IP support
  • -
  • Automatic cleanup of old entries
  • -
  • Statistics tracking
  • -
-

Lines of Code: 420

-

Configuration:

-
pub struct RateLimitConfig {
-    pub max_requests: u32,          // for example, 100
-    pub window_duration: Duration,  // for example, 60 seconds
-    pub exempt_ips: Vec<IpAddr>,    // for example, internal services
-    pub enabled: bool,
-}
-
-// Default: 100 requests per minute
-

Statistics:

-
pub struct RateLimitStats {
-    pub total_ips: usize,      // Number of tracked IPs
-    pub total_requests: u32,   // Total requests made
-    pub limited_ips: usize,    // IPs that hit the limit
-    pub config: RateLimitConfig,
-}
-

6. Security Integration Module (security_integration.rs)

-

Purpose: Helper module to integrate all security components.

-

Key Features:

-
    -
  • SecurityComponents struct grouping all middleware
  • -
  • SecurityConfig for configuration
  • -
  • initialize() method to set up all components
  • -
  • disabled() method for development mode
  • -
  • apply_security_middleware() helper for router setup
  • -
-

Lines of Code: 265

-

Usage Example:

-
use provisioning_orchestrator::security_integration::{
-    SecurityComponents, SecurityConfig
-};
-
-// Initialize security
-let config = SecurityConfig {
-    public_key_path: PathBuf::from("keys/public.pem"),
-    jwt_issuer: "control-center".to_string(),
-    jwt_audience: "orchestrator".to_string(),
-    cedar_policies_path: PathBuf::from("policies"),
-    auth_enabled: true,
-    authz_enabled: true,
-    mfa_enabled: true,
-    rate_limit_config: RateLimitConfig::new(100, 60),
-};
-
-let security = SecurityComponents::initialize(config, audit_logger).await?;
-
-// Apply to router
-let app = Router::new()
-    .route("/api/v1/servers", post(create_server))
-    .route("/api/v1/servers/:id", delete(delete_server));
-
-let secured_app = apply_security_middleware(app, &security);
-

Integration with AppState

-

Updated AppState Structure

-
pub struct AppState {
-    // Existing fields
-    pub task_storage: Arc<dyn TaskStorage>,
-    pub batch_coordinator: BatchCoordinator,
-    pub dependency_resolver: DependencyResolver,
-    pub state_manager: Arc<WorkflowStateManager>,
-    pub monitoring_system: Arc<MonitoringSystem>,
-    pub progress_tracker: Arc<ProgressTracker>,
-    pub rollback_system: Arc<RollbackSystem>,
-    pub test_orchestrator: Arc<TestOrchestrator>,
-    pub dns_manager: Arc<DnsManager>,
-    pub extension_manager: Arc<ExtensionManager>,
-    pub oci_manager: Arc<OciManager>,
-    pub service_orchestrator: Arc<ServiceOrchestrator>,
-    pub audit_logger: Arc<AuditLogger>,
-    pub args: Args,
-
-    // NEW: Security components
-    pub security: SecurityComponents,
-}
-

Initialization in main.rs

-
#[tokio::main]
-async fn main() -> Result<()> {
-    let args = Args::parse();
-
-    // Initialize AppState (creates audit_logger)
-    let state = Arc::new(AppState::new(args).await?);
-
-    // Initialize security components
-    let security_config = SecurityConfig {
-        public_key_path: PathBuf::from("keys/public.pem"),
-        jwt_issuer: env::var("JWT_ISSUER").unwrap_or("control-center".to_string()),
-        jwt_audience: "orchestrator".to_string(),
-        cedar_policies_path: PathBuf::from("policies"),
-        auth_enabled: env::var("AUTH_ENABLED").unwrap_or("true".to_string()) == "true",
-        authz_enabled: env::var("AUTHZ_ENABLED").unwrap_or("true".to_string()) == "true",
-        mfa_enabled: env::var("MFA_ENABLED").unwrap_or("true".to_string()) == "true",
-        rate_limit_config: RateLimitConfig::new(
-            env::var("RATE_LIMIT_MAX").unwrap_or("100".to_string()).parse().unwrap(),
-            env::var("RATE_LIMIT_WINDOW").unwrap_or("60".to_string()).parse().unwrap(),
-        ),
-    };
-
-    let security = SecurityComponents::initialize(
-        security_config,
-        state.audit_logger.clone()
-    ).await?;
-
-    // Public routes (no auth)
-    let public_routes = Router::new()
-        .route("/health", get(health_check));
-
-    // Protected routes (full security chain)
-    let protected_routes = Router::new()
-        .route("/api/v1/servers", post(create_server))
-        .route("/api/v1/servers/:id", delete(delete_server))
-        .route("/api/v1/taskserv", post(create_taskserv))
-        .route("/api/v1/cluster", post(create_cluster))
-        // ... more routes
-        ;
-
-    // Apply security middleware to protected routes
-    let secured_routes = apply_security_middleware(protected_routes, &security)
-        .with_state(state.clone());
-
-    // Combine routes
-    let app = Router::new()
-        .merge(public_routes)
-        .merge(secured_routes)
-        .layer(CorsLayer::permissive());
-
-    // Start server
-    let listener = tokio::net::TcpListener::bind("0.0.0.0:9090").await?;
-    axum::serve(listener, app).await?;
-
-    Ok(())
-}
-

Protected Endpoints

-

Endpoint Categories

-
- - - - - - - - - - - -
CategoryExample EndpointsAuth RequiredMFA RequiredCedar Policy
Health/health
Read-OnlyGET /api/v1/servers
Server MgmtPOST /api/v1/servers
Server DeleteDELETE /api/v1/servers/:id
Taskserv MgmtPOST /api/v1/taskserv
Cluster MgmtPOST /api/v1/cluster
ProductionPOST /api/v1/production/*
Batch OpsPOST /api/v1/batch/submit
RollbackPOST /api/v1/rollback
Config WritePOST /api/v1/config
SecretsGET /api/v1/secret/*
-
-

Complete Authentication Flow

-

Step-by-Step Flow

-
1. CLIENT REQUEST
-   ├─ Headers:
-   │  ├─ Authorization: Bearer <jwt_token>
-   │  ├─ X-Forwarded-For: 192.168.1.100
-   │  ├─ User-Agent: MyClient/1.0
-   │  └─ X-MFA-Verified: true
-   └─ Path: DELETE /api/v1/servers/prod-srv-01
-
-2. RATE LIMITING MIDDLEWARE
-   ├─ Extract IP: 192.168.1.100
-   ├─ Check limit: 45/100 requests in window
-   ├─ Decision: ALLOW (under limit)
-   └─ Continue →
-
-3. AUTHENTICATION MIDDLEWARE
-   ├─ Extract Bearer token
-   ├─ Validate JWT:
-   │  ├─ Signature: ✅ Valid (RS256)
-   │  ├─ Expiry: ✅ Valid until 2025-10-09 10:00:00
-   │  ├─ Issuer: ✅ control-center
-   │  ├─ Audience: ✅ orchestrator
-   │  └─ Revoked: ✅ Not revoked
-   ├─ Build SecurityContext:
-   │  ├─ user_id: "user-456"
-   │  ├─ workspace: "production"
-   │  ├─ permissions: ["read", "write", "delete"]
-   │  ├─ mfa_verified: true
-   │  └─ ip_address: 192.168.1.100
-   ├─ Decision: ALLOW (valid token)
-   └─ Continue →
-
-4. MFA VERIFICATION MIDDLEWARE
-   ├─ Check endpoint: DELETE /api/v1/servers/prod-srv-01
-   ├─ Requires MFA: ✅ YES (DELETE operation)
-   ├─ MFA status: ✅ Verified
-   ├─ Decision: ALLOW (MFA verified)
-   └─ Continue →
-
-5. AUTHORIZATION MIDDLEWARE
-   ├─ Build Cedar request:
-   │  ├─ Principal: User("user-456")
-   │  ├─ Action: Delete
-   │  ├─ Resource: Server("prod-srv-01")
-   │  └─ Context:
-   │     ├─ mfa_verified: true
-   │     ├─ ip_address: "192.168.1.100"
-   │     ├─ time: 2025-10-08T14:30:00Z
-   │     └─ workspace: "production"
-   ├─ Evaluate Cedar policies:
-   │  ├─ Policy 1: Allow if user.role == "admin" ✅
-   │  ├─ Policy 2: Allow if mfa_verified == true ✅
-   │  └─ Policy 3: Deny if not business_hours ❌
-   ├─ Decision: ALLOW (2 allow, 1 deny = allow)
-   ├─ Log to audit: Authorization GRANTED
-   └─ Continue →
-
-6. AUDIT LOGGING MIDDLEWARE
-   ├─ Record:
-   │  ├─ User: user-456 (IP: 192.168.1.100)
-   │  ├─ Action: ServerDelete
-   │  ├─ Resource: prod-srv-01
-   │  ├─ Authorization: GRANTED
-   │  ├─ MFA: Verified
-   │  └─ Timestamp: 2025-10-08T14:30:00Z
-   └─ Continue →
-
-7. PROTECTED HANDLER
-   ├─ Execute business logic
-   ├─ Delete server prod-srv-01
-   └─ Return: 200 OK
-
-8. AUDIT LOGGING (Response)
-   ├─ Update event:
-   │  ├─ Status: 200 OK
-   │  ├─ Duration: 1.234s
-   │  └─ Result: SUCCESS
-   └─ Write to audit log
-
-9. CLIENT RESPONSE
-   └─ 200 OK: Server deleted successfully
-
-

Configuration

-

Environment Variables

-
# JWT Configuration
-JWT_ISSUER=control-center
-JWT_AUDIENCE=orchestrator
-PUBLIC_KEY_PATH=/path/to/keys/public.pem
-
-# Cedar Policies
-CEDAR_POLICIES_PATH=/path/to/policies
-
-# Security Toggles
-AUTH_ENABLED=true
-AUTHZ_ENABLED=true
-MFA_ENABLED=true
-
-# Rate Limiting
-RATE_LIMIT_MAX=100
-RATE_LIMIT_WINDOW=60
-RATE_LIMIT_EXEMPT_IPS=10.0.0.1,10.0.0.2
-
-# Audit Logging
-AUDIT_ENABLED=true
-AUDIT_RETENTION_DAYS=365
-
-

Development Mode

-

For development/testing, all security can be disabled:

-
// In main.rs
-let security = if env::var("DEVELOPMENT_MODE").unwrap_or("false".to_string()) == "true" {
-    SecurityComponents::disabled(audit_logger.clone())
-} else {
-    SecurityComponents::initialize(security_config, audit_logger.clone()).await?
-};
-

Testing

-

Integration Tests

-

Location: provisioning/platform/orchestrator/tests/security_integration_tests.rs

-

Test Coverage:

-
    -
  • ✅ Rate limiting enforcement
  • -
  • ✅ Rate limit statistics
  • -
  • ✅ Exempt IP handling
  • -
  • ✅ Authentication missing token
  • -
  • ✅ MFA verification for sensitive operations
  • -
  • ✅ Cedar policy evaluation
  • -
  • ✅ Complete security flow
  • -
  • ✅ Security components initialization
  • -
  • ✅ Configuration defaults
  • -
-

Lines of Code: 340

-

Run Tests:

-
cd provisioning/platform/orchestrator
-cargo test security_integration_tests
-
-

File Summary

-
- - - - - - - - - -
FilePurposeLinesTests
middleware/security_context.rsSecurity context builder2758
middleware/auth.rsJWT authentication2455
middleware/mfa.rsMFA verification29015
middleware/authz.rsCedar authorization3804
middleware/rate_limit.rsRate limiting4208
middleware/mod.rsModule exports250
security_integration.rsIntegration helpers2652
tests/security_integration_tests.rsIntegration tests34011
Total2,24053
-
-

Benefits

-

Security

-
    -
  • ✅ Complete authentication flow with JWT validation
  • -
  • ✅ MFA enforcement for sensitive operations
  • -
  • ✅ Fine-grained authorization with Cedar policies
  • -
  • ✅ Rate limiting prevents API abuse
  • -
  • ✅ Complete audit trail for compliance
  • -
-

Architecture

-
    -
  • ✅ Modular middleware design
  • -
  • ✅ Clear separation of concerns
  • -
  • ✅ Reusable security components
  • -
  • ✅ Easy to test and maintain
  • -
  • ✅ Configuration-driven behavior
  • -
-

Operations

-
    -
  • ✅ Can enable/disable features independently
  • -
  • ✅ Development mode for testing
  • -
  • ✅ Comprehensive error messages
  • -
  • ✅ Real-time statistics and monitoring
  • -
  • ✅ Non-blocking audit logging
  • -
-

Future Enhancements

-
    -
  1. Token Refresh: Automatic token refresh before expiry
  2. -
  3. IP Whitelisting: Additional IP-based access control
  4. -
  5. Geolocation: Block requests from specific countries
  6. -
  7. Advanced Rate Limiting: Per-user, per-endpoint limits
  8. -
  9. Session Management: Track active sessions, force logout
  10. -
  11. 2FA Integration: Direct integration with TOTP/SMS providers
  12. -
  13. Policy Hot Reload: Update Cedar policies without restart
  14. -
  15. Metrics Dashboard: Real-time security metrics visualization
  16. -
- -
    -
  • Cedar Policy Language
  • -
  • JWT Token Management
  • -
  • MFA Setup Guide
  • -
  • Audit Log Format
  • -
  • Rate Limiting Best Practices
  • -
-

Version History

-
- -
VersionDateChanges
1.0.02025-10-08Initial implementation
-
-
-

Maintained By: Security Team -Review Cycle: Quarterly -Last Reviewed: 2025-10-08

-

Repository and Distribution Architecture Analysis

-

Date: 2025-10-01 -Status: Analysis Complete - Implementation Planning -Author: Architecture Review

-

Executive Summary

-

This document analyzes the current project structure and provides a comprehensive plan for optimizing the repository organization and distribution -strategy. The goal is to create a professional-grade infrastructure automation system with clear separation of concerns, efficient development -workflow, and user-friendly distribution.

-
-

Current State Analysis

-

Strengths

-
    -
  1. -

    Clean Core Separation

    -
      -
    • provisioning/ contains the core system
    • -
    • workspace/ concept for user data
    • -
    • Clear extension points (providers, taskservs, clusters)
    • -
    -
  2. -
  3. -

    Hybrid Architecture

    -
      -
    • Rust orchestrator for performance-critical operations
    • -
    • Nushell for business logic and scripting
    • -
    • KCL for type-safe configuration
    • -
    -
  4. -
  5. -

    Modular Design

    -
      -
    • Extension system for providers and services
    • -
    • Plugin architecture for Nushell
    • -
    • Template-based code generation
    • -
    -
  6. -
  7. -

    Advanced Features

    -
      -
    • Batch workflow system (v3.1.0)
    • -
    • Hybrid orchestrator (v3.0.0)
    • -
    • Token-optimized agent architecture
    • -
    -
  8. -
-

Critical Issues

-
    -
  1. -

    Confusing Root Structure

    -
      -
    • Multiple workspace variants: _workspace/, backup-workspace/, workspace-librecloud/
    • -
    • Development artifacts at root: wrks/, NO/, target/
    • -
    • Unclear which workspace is active
    • -
    -
  2. -
  3. -

    Mixed Concerns

    -
      -
    • Runtime data intermixed with source code
    • -
    • Build artifacts not properly isolated
    • -
    • Presentations and demos in main repo
    • -
    -
  4. -
  5. -

    Distribution Challenges

    -
      -
    • Bash wrapper for CLI entry point (provisioning/core/cli/provisioning)
    • -
    • No clear installation mechanism
    • -
    • Missing package management system
    • -
    • Undefined installation paths
    • -
    -
  6. -
  7. -

    Documentation Fragmentation

    -
      -
    • Multiple docs/ locations
    • -
    • Scattered README files
    • -
    • No unified documentation structure
    • -
    -
  8. -
  9. -

    Configuration Complexity

    -
      -
    • TOML-based system is good, but paths are unclear
    • -
    • User vs system config separation needs clarification
    • -
    • Installation paths not standardized
    • -
    -
  10. -
-
- -

1. Monorepo Structure

-
project-provisioning/
-│
-├── provisioning/                    # CORE SYSTEM (distribution source)
-│   ├── core/                        # Core engine
-│   │   ├── cli/                     # Main CLI entry
-│   │   │   └── provisioning         # Pure Nushell entry point
-│   │   ├── nulib/                   # Nushell libraries
-│   │   │   ├── lib_provisioning/    # Core library functions
-│   │   │   ├── main_provisioning/   # CLI handlers
-│   │   │   ├── servers/             # Server management
-│   │   │   ├── taskservs/           # Task service management
-│   │   │   ├── clusters/            # Cluster management
-│   │   │   └── workflows/           # Workflow orchestration
-│   │   ├── plugins/                 # System plugins
-│   │   │   └── nushell-plugins/     # Nushell plugin sources
-│   │   └── scripts/                 # Utility scripts
-│   │
-│   ├── extensions/                  # Extensible modules
-│   │   ├── providers/               # Cloud providers (aws, upcloud, local)
-│   │   ├── taskservs/               # Infrastructure services
-│   │   │   ├── container-runtime/   # Container runtimes
-│   │   │   ├── kubernetes/          # Kubernetes
-│   │   │   ├── networking/          # Network services
-│   │   │   ├── storage/             # Storage services
-│   │   │   ├── databases/           # Database services
-│   │   │   └── development/         # Dev tools
-│   │   ├── clusters/                # Complete cluster configurations
-│   │   └── workflows/               # Workflow templates
-│   │
-│   ├── platform/                    # Platform services (Rust)
-│   │   ├── orchestrator/            # Rust coordination layer
-│   │   ├── control-center/          # Web management UI
-│   │   ├── control-center-ui/       # UI frontend
-│   │   ├── mcp-server/              # Model Context Protocol server
-│   │   └── api-gateway/             # REST API gateway
-│   │
-│   ├── kcl/                         # KCL configuration schemas
-│   │   ├── main.ncl                   # Main entry point
-│   │   ├── settings.ncl               # Settings schema
-│   │   ├── server.ncl                 # Server definitions
-│   │   ├── cluster.ncl                # Cluster definitions
-│   │   ├── workflows.ncl              # Workflow definitions
-│   │   └── docs/                    # KCL documentation
-│   │
-│   ├── templates/                   # Jinja2 templates
-│   │   ├── extensions/              # Extension templates
-│   │   ├── services/                # Service templates
-│   │   └── workspace/               # Workspace templates
-│   │
-│   ├── config/                      # Default system configuration
-│   │   ├── config.defaults.toml     # System defaults
-│   │   └── config-examples/         # Example configs
-│   │
-│   ├── tools/                       # Build and packaging tools
-│   │   ├── build/                   # Build scripts
-│   │   ├── package/                 # Packaging tools
-│   │   ├── distribution/            # Distribution tools
-│   │   └── release/                 # Release automation
-│   │
-│   └── resources/                   # Static resources (images, assets)
-│
-├── workspace/                       # RUNTIME DATA (gitignored except templates)
-│   ├── infra/                       # Infrastructure instances (gitignored)
-│   │   └── .gitkeep
-│   ├── config/                      # User configuration (gitignored)
-│   │   └── .gitkeep
-│   ├── extensions/                  # User extensions (gitignored)
-│   │   └── .gitkeep
-│   ├── runtime/                     # Runtime data (gitignored)
-│   │   ├── logs/
-│   │   ├── cache/
-│   │   ├── state/
-│   │   └── tmp/
-│   └── templates/                   # Workspace templates (tracked)
-│       ├── minimal/
-│       ├── kubernetes/
-│       └── multi-cloud/
-│
-├── distribution/                    # DISTRIBUTION ARTIFACTS (gitignored)
-│   ├── packages/                    # Built packages
-│   │   ├── provisioning-core-*.tar.gz
-│   │   ├── provisioning-platform-*.tar.gz
-│   │   ├── provisioning-extensions-*.tar.gz
-│   │   └── checksums.txt
-│   ├── installers/                  # Installation scripts
-│   │   ├── install.sh               # Bash installer
-│   │   └── install.nu               # Nushell installer
-│   └── registry/                    # Package registry metadata
-│       └── index.json
-│
-├── docs/                            # UNIFIED DOCUMENTATION
-│   ├── README.md                    # Documentation index
-│   ├── user/                        # User guides
-│   │   ├── installation.md
-│   │   ├── quick-start.md
-│   │   ├── configuration.md
-│   │   └── guides/
-│   ├── api/                         # API reference
-│   │   ├── rest-api.md
-│   │   ├── nushell-api.md
-│   │   └── kcl-schemas.md
-│   ├── architecture/                # Architecture documentation
-│   │   ├── overview.md
-│   │   ├── decisions/               # ADRs
-│   │   └── repo-dist-analysis.md    # This document
-│   └── development/                 # Development guides
-│       ├── contributing.md
-│       ├── building.md
-│       ├── testing.md
-│       └── releasing.md
-│
-├── examples/                        # EXAMPLE CONFIGURATIONS
-│   ├── minimal/                     # Minimal setup
-│   ├── kubernetes-cluster/          # Full K8s cluster
-│   ├── multi-cloud/                 # Multi-provider setup
-│   └── README.md
-│
-├── tests/                           # INTEGRATION TESTS
-│   ├── e2e/                         # End-to-end tests
-│   ├── integration/                 # Integration tests
-│   ├── fixtures/                    # Test fixtures
-│   └── README.md
-│
-├── tools/                           # DEVELOPMENT TOOLS
-│   ├── build/                       # Build scripts
-│   ├── dev-env/                     # Development environment setup
-│   └── scripts/                     # Utility scripts
-│
-├── .github/                         # GitHub configuration
-│   ├── workflows/                   # CI/CD workflows
-│   │   ├── build.yml
-│   │   ├── test.yml
-│   │   └── release.yml
-│   └── ISSUE_TEMPLATE/
-│
-├── .coder/                          # Coder configuration (tracked)
-│
-├── .gitignore                       # Git ignore rules
-├── .gitattributes                   # Git attributes
-├── Cargo.toml                       # Rust workspace root
-├── Justfile                         # Task runner (unified)
-├── LICENSE                          # License file
-├── README.md                        # Project README
-├── CHANGELOG.md                     # Changelog
-└── CLAUDE.md                        # AI assistant instructions
-
-

Key Principles

-
    -
  1. Clear Separation: Source code (provisioning/), runtime data (workspace/), build artifacts (distribution/)
  2. -
  3. Single Source of Truth: One location for each type of content
  4. -
  5. Gitignore Strategy: Runtime and build artifacts ignored, templates tracked
  6. -
  7. Standard Paths: Follow Unix conventions for installation
  8. -
-
-

Distribution Strategy

-

Package Types

-

1. provisioning-core (Required)

-

Contents:

-
    -
  • Nushell CLI and libraries
  • -
  • Core providers (local, upcloud, aws)
  • -
  • Essential taskservs (kubernetes, containerd, cilium)
  • -
  • KCL schemas
  • -
  • Configuration system
  • -
  • Templates
  • -
-

Size: ~50 MB (compressed)

-

Installation:

-
/usr/local/
-├── bin/
-│   └── provisioning
-├── lib/
-│   └── provisioning/
-│       ├── core/
-│       ├── extensions/
-│       └── kcl/
-└── share/
-    └── provisioning/
-        ├── templates/
-        ├── config/
-        └── docs/
-
-

2. provisioning-platform (Optional)

-

Contents:

-
    -
  • Rust orchestrator binary
  • -
  • Control center web UI
  • -
  • MCP server
  • -
  • API gateway
  • -
-

Size: ~30 MB (compressed)

-

Installation:

-
/usr/local/
-├── bin/
-│   ├── provisioning-orchestrator
-│   └── provisioning-control-center
-└── share/
-    └── provisioning/
-        └── platform/
-
-

3. provisioning-extensions (Optional)

-

Contents:

-
    -
  • Additional taskservs (radicle, gitea, postgres, etc.)
  • -
  • Cluster templates
  • -
  • Workflow templates
  • -
-

Size: ~20 MB (compressed)

-

Installation:

-
/usr/local/lib/provisioning/extensions/
-├── taskservs/
-├── clusters/
-└── workflows/
-
-

4. provisioning-plugins (Optional)

-

Contents:

-
    -
  • Pre-built Nushell plugins
  • -
  • nu_plugin_kcl
  • -
  • nu_plugin_tera
  • -
  • Other custom plugins
  • -
-

Size: ~15 MB (compressed)

-

Installation:

-
~/.config/nushell/plugins/
-
-

Installation Paths

-

System Installation (Root)

-
/usr/local/
-├── bin/
-│   ├── provisioning                      # Main CLI
-│   ├── provisioning-orchestrator         # Orchestrator binary
-│   └── provisioning-control-center       # Control center binary
-├── lib/
-│   └── provisioning/
-│       ├── core/                         # Core Nushell libraries
-│       │   ├── nulib/
-│       │   └── plugins/
-│       ├── extensions/                   # Extensions
-│       │   ├── providers/
-│       │   ├── taskservs/
-│       │   └── clusters/
-│       └── kcl/                          # KCL schemas
-└── share/
-    └── provisioning/
-        ├── templates/                    # System templates
-        ├── config/                       # Default configs
-        │   └── config.defaults.toml
-        └── docs/                         # Documentation
-
-

User Configuration

-
~/.provisioning/
-├── config/
-│   └── config.user.toml                  # User overrides
-├── extensions/                           # User extensions
-│   ├── providers/
-│   ├── taskservs/
-│   └── clusters/
-├── cache/                                # Cache directory
-└── plugins/                              # User plugins
-
-

Project Workspace

-
./workspace/
-├── infra/                                # Infrastructure definitions
-│   ├── my-cluster/
-│   │   ├── config.toml
-│   │   ├── servers.yaml
-│   │   └── taskservs.yaml
-│   └── production/
-├── config/                               # Project configuration
-│   └── config.toml
-├── runtime/                              # Runtime data
-│   ├── logs/
-│   ├── state/
-│   └── cache/
-└── extensions/                           # Project-specific extensions
-
-

Configuration Hierarchy

-
Priority (highest to lowest):
-1. CLI flags                              --debug, --infra=my-cluster
-2. Runtime overrides                      PROVISIONING_DEBUG=true
-3. Project config                         ./workspace/config/config.toml
-4. User config                            ~/.provisioning/config/config.user.toml
-5. System config                          /usr/local/share/provisioning/config/config.defaults.toml
-
-
-

Build System

-

Build Tools Structure

-

provisioning/tools/build/:

-
build/
-├── build-system.nu                       # Main build orchestrator
-├── package-core.nu                       # Core packaging
-├── package-platform.nu                   # Platform packaging
-├── package-extensions.nu                 # Extensions packaging
-├── package-plugins.nu                    # Plugins packaging
-├── create-installers.nu                  # Installer generation
-├── validate-package.nu                   # Package validation
-└── publish-registry.nu                   # Registry publishing
-
-

Build System Implementation

-

provisioning/tools/build/build-system.nu:

-
#!/usr/bin/env nu
-# Build system for provisioning project
-
-use ../core/nulib/lib_provisioning/config/accessor.nu *
-
-# Build all packages
-export def "main build-all" [
-    --version: string = "dev"             # Version to build
-    --output: string = "distribution/packages"  # Output directory
-] {
-    print $"Building all packages version: ($version)"
-
-    let results = {
-        core: (build-core $version $output)
-        platform: (build-platform $version $output)
-        extensions: (build-extensions $version $output)
-        plugins: (build-plugins $version $output)
-    }
-
-    # Generate checksums
-    create-checksums $output
-
-    print "✅ All packages built successfully"
-    $results
-}
-
-# Build core package
-export def "build-core" [
-    version: string
-    output: string
-] -> record {
-    print "📦 Building provisioning-core..."
-
-    nu package-core.nu build --version $version --output $output
-}
-
-# Build platform package (Rust binaries)
-export def "build-platform" [
-    version: string
-    output: string
-] -> record {
-    print "📦 Building provisioning-platform..."
-
-    nu package-platform.nu build --version $version --output $output
-}
-
-# Build extensions package
-export def "build-extensions" [
-    version: string
-    output: string
-] -> record {
-    print "📦 Building provisioning-extensions..."
-
-    nu package-extensions.nu build --version $version --output $output
-}
-
-# Build plugins package
-export def "build-plugins" [
-    version: string
-    output: string
-] -> record {
-    print "📦 Building provisioning-plugins..."
-
-    nu package-plugins.nu build --version $version --output $output
-}
-
-# Create release artifacts
-export def "main release" [
-    version: string                       # Release version
-    --upload                              # Upload to release server
-] {
-    print $"🚀 Creating release ($version)"
-
-    # Build all packages
-    let packages = (build-all --version $version)
-
-    # Create installers
-    create-installers $version
-
-    # Generate release notes
-    generate-release-notes $version
-
-    # Upload if requested
-    if $upload {
-        upload-release $version
-    }
-
-    print $"✅ Release ($version) ready"
-}
-
-# Create installers
-def create-installers [version: string] {
-    print "📝 Creating installers..."
-
-    nu create-installers.nu --version $version
-}
-
-# Generate release notes
-def generate-release-notes [version: string] {
-    print "📝 Generating release notes..."
-
-    let changelog = (open CHANGELOG.md)
-    let notes = ($changelog | parse-version-section $version)
-
-    $notes | save $"distribution/packages/RELEASE_NOTES_($version).md"
-}
-
-# Upload release
-def upload-release [version: string] {
-    print "⬆️  Uploading release..."
-
-    # Implementation depends on your release infrastructure
-    # Could use: GitHub releases, S3, custom server, etc.
-}
-
-# Create checksums for all packages
-def create-checksums [output: string] {
-    print "🔐 Creating checksums..."
-
-    ls ($output | path join "*.tar.gz")
-    | each { |file|
-        let hash = (sha256sum $file.name | split row ' ' | get 0)
-        $"($hash)  (($file.name | path basename))"
-    }
-    | str join "\n"
-    | save ($output | path join "checksums.txt")
-}
-
-# Clean build artifacts
-export def "main clean" [
-    --all                                 # Clean all build artifacts
-] {
-    print "🧹 Cleaning build artifacts..."
-
-    if ($all) {
-        rm -rf distribution/packages
-        rm -rf target/
-        rm -rf provisioning/platform/target/
-    } else {
-        rm -rf distribution/packages
-    }
-
-    print "✅ Clean complete"
-}
-
-# Validate built packages
-export def "main validate" [
-    package_path: string                  # Package to validate
-] {
-    print $"🔍 Validating package: ($package_path)"
-
-    nu validate-package.nu $package_path
-}
-
-# Show build status
-export def "main status" [] {
-    print "📊 Build Status"
-    print "─" * 60
-
-    let core_exists = ("distribution/packages" | path join "provisioning-core-*.tar.gz" | glob | is-not-empty)
-    let platform_exists = ("distribution/packages" | path join "provisioning-platform-*.tar.gz" | glob | is-not-empty)
-
-    print $"Core package:       (if $core_exists { '✅ Built' } else { '❌ Not built' })"
-    print $"Platform package:   (if $platform_exists { '✅ Built' } else { '❌ Not built' })"
-
-    if ("distribution/packages" | path exists) {
-        let packages = (ls distribution/packages | where name =~ ".tar.gz")
-        print $"\nTotal packages: (($packages | length))"
-        $packages | select name size
-    }
-}
-
-

Justfile Integration

-

Justfile:

-
# Provisioning Build System
-# Use 'just --list' to see all available commands
-
-# Default recipe
-default:
-    @just --list
-
-# Development tasks
-alias d := dev-check
-alias t := test
-alias b := build
-
-# Build all packages
-build VERSION="dev":
-    nu provisioning/tools/build/build-system.nu build-all --version {{VERSION}}
-
-# Build core package only
-build-core VERSION="dev":
-    nu provisioning/tools/build/build-system.nu build-core {{VERSION}}
-
-# Build platform binaries
-build-platform VERSION="dev":
-    cargo build --release --workspace --manifest-path provisioning/platform/Cargo.toml
-    nu provisioning/tools/build/build-system.nu build-platform {{VERSION}}
-
-# Run development checks
-dev-check:
-    @echo "🔍 Running development checks..."
-    cargo check --workspace --manifest-path provisioning/platform/Cargo.toml
-    cargo clippy --workspace --manifest-path provisioning/platform/Cargo.toml
-    nu provisioning/tools/build/validate-nushell.nu
-
-# Run tests
-test:
-    @echo "🧪 Running tests..."
-    cargo test --workspace --manifest-path provisioning/platform/Cargo.toml
-    nu tests/run-all-tests.nu
-
-# Run integration tests
-test-e2e:
-    @echo "🔬 Running E2E tests..."
-    nu tests/e2e/run-e2e.nu
-
-# Format code
-fmt:
-    cargo fmt --all --manifest-path provisioning/platform/Cargo.toml
-    nu provisioning/tools/build/format-nushell.nu
-
-# Clean build artifacts
-clean:
-    nu provisioning/tools/build/build-system.nu clean
-
-# Clean all (including Rust target/)
-clean-all:
-    nu provisioning/tools/build/build-system.nu clean --all
-    cargo clean --manifest-path provisioning/platform/Cargo.toml
-
-# Create release
-release VERSION:
-    @echo "🚀 Creating release {{VERSION}}..."
-    nu provisioning/tools/build/build-system.nu release {{VERSION}}
-
-# Install from source
-install:
-    @echo "📦 Installing from source..."
-    just build
-    sudo nu distribution/installers/install.nu --from-source
-
-# Install development version (symlink)
-install-dev:
-    @echo "🔗 Installing development version..."
-    sudo ln -sf $(pwd)/provisioning/core/cli/provisioning /usr/local/bin/provisioning
-    @echo "✅ Development installation complete"
-
-# Uninstall
-uninstall:
-    @echo "🗑️  Uninstalling..."
-    sudo rm -f /usr/local/bin/provisioning
-    sudo rm -rf /usr/local/lib/provisioning
-    sudo rm -rf /usr/local/share/provisioning
-
-# Show build status
-status:
-    nu provisioning/tools/build/build-system.nu status
-
-# Validate package
-validate PACKAGE:
-    nu provisioning/tools/build/build-system.nu validate {{PACKAGE}}
-
-# Start development environment
-dev-start:
-    @echo "🚀 Starting development environment..."
-    cd provisioning/platform/orchestrator && cargo run
-
-# Watch and rebuild on changes
-watch:
-    @echo "👀 Watching for changes..."
-    cargo watch -x 'check --workspace --manifest-path provisioning/platform/Cargo.toml'
-
-# Update dependencies
-update-deps:
-    cargo update --manifest-path provisioning/platform/Cargo.toml
-    nu provisioning/tools/build/update-nushell-deps.nu
-
-# Generate documentation
-docs:
-    @echo "📚 Generating documentation..."
-    cargo doc --workspace --no-deps --manifest-path provisioning/platform/Cargo.toml
-    nu provisioning/tools/build/generate-docs.nu
-
-# Benchmark
-bench:
-    cargo bench --workspace --manifest-path provisioning/platform/Cargo.toml
-
-# Check licenses
-check-licenses:
-    cargo deny check licenses --manifest-path provisioning/platform/Cargo.toml
-
-# Security audit
-audit:
-    cargo audit --file provisioning/platform/Cargo.lock
-
-
-

Installation System

-

Installer Script

-

distribution/installers/install.nu:

-
#!/usr/bin/env nu
-# Provisioning installation script
-
-const DEFAULT_PREFIX = "/usr/local"
-const REPO_URL = "https://releases.provisioning.io"
-
-# Main installation command
-def main [
-    --prefix: string = $DEFAULT_PREFIX    # Installation prefix
-    --version: string = "latest"          # Version to install
-    --from-source                         # Install from source (development)
-    --packages: list<string> = ["core"]   # Packages to install
-] {
-    print "📦 Provisioning Installation"
-    print "─" * 60
-
-    # Check prerequisites
-    check-prerequisites
-
-    # Install packages
-    if $from_source {
-        install-from-source $prefix
-    } else {
-        install-from-release $prefix $version $packages
-    }
-
-    # Post-installation
-    post-install $prefix
-
-    print ""
-    print "✅ Installation complete!"
-    print $"Run 'provisioning --help' to get started"
-}
-
-# Check prerequisites
-def check-prerequisites [] {
-    print "🔍 Checking prerequisites..."
-
-    # Check for Nushell
-    if (which nu | is-empty) {
-        error make {
-            msg: "Nushell not found. Please install Nushell first: https://nushell.sh"
-        }
-    }
-
-    let nu_version = (nu --version | parse "{name} {version}" | get 0.version)
-    print $"  ✓ Nushell ($nu_version)"
-
-    # Check for required tools
-    if (which tar | is-empty) {
-        error make { msg: "tar not found" }
-    }
-
-    if (which curl | is-empty) and (which wget | is-empty) {
-        error make { msg: "curl or wget required" }
-    }
-
-    print "  ✓ All prerequisites met"
-}
-
-# Install from source
-def install-from-source [prefix: string] {
-    print "📦 Installing from source..."
-
-    # Check if we're in the source directory
-    if not ("provisioning" | path exists) {
-        error make { msg: "Must run from project root" }
-    }
-
-    # Create installation directories
-    create-install-dirs $prefix
-
-    # Copy files
-    print "  Copying core files..."
-    cp -r provisioning/core/nulib $"($prefix)/lib/provisioning/core/"
-    cp -r provisioning/extensions $"($prefix)/lib/provisioning/"
-    cp -r provisioning/kcl $"($prefix)/lib/provisioning/"
-    cp -r provisioning/templates $"($prefix)/share/provisioning/"
-    cp -r provisioning/config $"($prefix)/share/provisioning/"
-
-    # Create CLI wrapper
-    create-cli-wrapper $prefix
-
-    print "  ✓ Source installation complete"
-}
-
-# Install from release
-def install-from-release [
-    prefix: string
-    version: string
-    packages: list<string>
-] {
-    print $"📦 Installing version ($version)..."
-
-    # Download packages
-    for package in $packages {
-        download-package $package $version
-        extract-package $package $version $prefix
-    }
-}
-
-# Download package
-def download-package [package: string, version: string] {
-    let filename = $"provisioning-($package)-($version).tar.gz"
-    let url = $"($REPO_URL)/($version)/($filename)"
-
-    print $"  Downloading ($package)..."
-
-    if (which curl | is-not-empty) {
-        curl -fsSL -o $"/tmp/($filename)" $url
-    } else {
-        wget -q -O $"/tmp/($filename)" $url
-    }
-}
-
-# Extract package
-def extract-package [package: string, version: string, prefix: string] {
-    let filename = $"provisioning-($package)-($version).tar.gz"
-
-    print $"  Installing ($package)..."
-
-    tar xzf $"/tmp/($filename)" -C $prefix
-    rm $"/tmp/($filename)"
-}
-
-# Create installation directories
-def create-install-dirs [prefix: string] {
-    mkdir ($prefix | path join "bin")
-    mkdir ($prefix | path join "lib" "provisioning" "core")
-    mkdir ($prefix | path join "lib" "provisioning" "extensions")
-    mkdir ($prefix | path join "share" "provisioning" "templates")
-    mkdir ($prefix | path join "share" "provisioning" "config")
-    mkdir ($prefix | path join "share" "provisioning" "docs")
-}
-
-# Create CLI wrapper
-def create-cli-wrapper [prefix: string] {
-    let wrapper = $"#!/usr/bin/env nu
-# Provisioning CLI wrapper
-
-# Load provisioning library
-const PROVISIONING_LIB = \"($prefix)/lib/provisioning\"
-const PROVISIONING_SHARE = \"($prefix)/share/provisioning\"
-
-$env.PROVISIONING_ROOT = $PROVISIONING_LIB
-$env.PROVISIONING_SHARE = $PROVISIONING_SHARE
-
-# Add to Nushell path
-$env.NU_LIB_DIRS = ($env.NU_LIB_DIRS | append $\"($PROVISIONING_LIB)/core/nulib\")
-
-# Load main provisioning module
-use ($PROVISIONING_LIB)/core/nulib/main_provisioning/dispatcher.nu *
-
-# Main entry point
-def main [...args] {
-    dispatch-command $args
-}
-
-main ...$args
-"
-
-    $wrapper | save ($prefix | path join "bin" "provisioning")
-    chmod +x ($prefix | path join "bin" "provisioning")
-}
-
-# Post-installation tasks
-def post-install [prefix: string] {
-    print "🔧 Post-installation setup..."
-
-    # Create user config directory
-    let user_config = ($env.HOME | path join ".provisioning")
-    if not ($user_config | path exists) {
-        mkdir ($user_config | path join "config")
-        mkdir ($user_config | path join "extensions")
-        mkdir ($user_config | path join "cache")
-
-        # Copy example config
-        let example = ($prefix | path join "share" "provisioning" "config" "config-examples" "config.user.toml")
-        if ($example | path exists) {
-            cp $example ($user_config | path join "config" "config.user.toml")
-        }
-
-        print $"  ✓ Created user config directory: ($user_config)"
-    }
-
-    # Check if prefix is in PATH
-    if not ($env.PATH | any { |p| $p == ($prefix | path join "bin") }) {
-        print ""
-        print "⚠️  Note: ($prefix)/bin is not in your PATH"
-        print "   Add this to your shell configuration:"
-        print $"   export PATH=\"($prefix)/bin:$PATH\""
-    }
-}
-
-# Uninstall provisioning
-export def "main uninstall" [
-    --prefix: string = $DEFAULT_PREFIX    # Installation prefix
-    --keep-config                         # Keep user configuration
-] {
-    print "🗑️  Uninstalling provisioning..."
-
-    # Remove installed files
-    rm -rf ($prefix | path join "bin" "provisioning")
-    rm -rf ($prefix | path join "lib" "provisioning")
-    rm -rf ($prefix | path join "share" "provisioning")
-
-    # Remove user config if requested
-    if not $keep_config {
-        let user_config = ($env.HOME | path join ".provisioning")
-        if ($user_config | path exists) {
-            rm -rf $user_config
-            print "  ✓ Removed user configuration"
-        }
-    }
-
-    print "✅ Uninstallation complete"
-}
-
-# Upgrade provisioning
-export def "main upgrade" [
-    --version: string = "latest"          # Version to upgrade to
-    --prefix: string = $DEFAULT_PREFIX    # Installation prefix
-] {
-    print $"⬆️  Upgrading to version ($version)..."
-
-    # Check current version
-    let current = (^provisioning version | parse "{version}" | get 0.version)
-    print $"  Current version: ($current)"
-
-    if $current == $version {
-        print "  Already at latest version"
-        return
-    }
-
-    # Backup current installation
-    print "  Backing up current installation..."
-    let backup = ($prefix | path join "lib" "provisioning.backup")
-    mv ($prefix | path join "lib" "provisioning") $backup
-
-    # Install new version
-    try {
-        install-from-release $prefix $version ["core"]
-        print $"  ✅ Upgraded to version ($version)"
-        rm -rf $backup
-    } catch {
-        print "  ❌ Upgrade failed, restoring backup..."
-        mv $backup ($prefix | path join "lib" "provisioning")
-        error make { msg: "Upgrade failed" }
-    }
-}
-
-

Bash Installer (For Systems Without Nushell)

-

distribution/installers/install.sh:

-
#!/usr/bin/env bash
-# Provisioning installation script (Bash version)
-# This script installs Nushell first, then runs the Nushell installer
-
-set -euo pipefail
-
-DEFAULT_PREFIX="/usr/local"
-REPO_URL="https://releases.provisioning.io"
-
-# Colors
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-NC='\033[0m' # No Color
-
-info() {
-    echo -e "${GREEN}✓${NC} $*"
-}
-
-warn() {
-    echo -e "${YELLOW}⚠${NC} $*"
-}
-
-error() {
-    echo -e "${RED}✗${NC} $*" >&2
-    exit 1
-}
-
-# Check if Nushell is installed
-check_nushell() {
-    if command -v nu >/dev/null 2>&1; then
-        info "Nushell is already installed"
-        return 0
-    else
-        warn "Nushell not found"
-        return 1
-    fi
-}
-
-# Install Nushell
-install_nushell() {
-    echo "📦 Installing Nushell..."
-
-    # Detect OS and architecture
-    OS="$(uname -s)"
-    ARCH="$(uname -m)"
-
-    case "$OS" in
-        Linux*)
-            if command -v apt-get >/dev/null 2>&1; then
-                sudo apt-get update && sudo apt-get install -y nushell
-            elif command -v dnf >/dev/null 2>&1; then
-                sudo dnf install -y nushell
-            elif command -v brew >/dev/null 2>&1; then
-                brew install nushell
-            else
-                error "Cannot automatically install Nushell. Please install manually: https://nushell.sh"
-            fi
-            ;;
-        Darwin*)
-            if command -v brew >/dev/null 2>&1; then
-                brew install nushell
-            else
-                error "Homebrew not found. Install from: https://brew.sh"
-            fi
-            ;;
-        *)
-            error "Unsupported operating system: $OS"
-            ;;
-    esac
-
-    info "Nushell installed successfully"
-}
-
-# Main installation
-main() {
-    echo "📦 Provisioning Installation"
-    echo "────────────────────────────────────────────────────────────"
-
-    # Check for Nushell
-    if ! check_nushell; then
-        read -p "Install Nushell? (y/N) " -n 1 -r
-        echo
-        if [[ $REPLY =~ ^[Yy]$ ]]; then
-            install_nushell
-        else
-            error "Nushell is required. Install from: https://nushell.sh"
-        fi
-    fi
-
-    # Download Nushell installer
-    echo "📥 Downloading installer..."
-    INSTALLER_URL="$REPO_URL/latest/install.nu"
-    curl -fsSL "$INSTALLER_URL" -o /tmp/install.nu
-
-    # Run Nushell installer
-    echo "🚀 Running installer..."
-    nu /tmp/install.nu "$@"
-
-    # Cleanup
-    rm -f /tmp/install.nu
-
-    info "Installation complete!"
-}
-
-# Run main
-main "$@"
-
-
-

Implementation Plan

-

Phase 1: Repository Restructuring (3-4 days)

-

Day 1: Cleanup and Preparation

-

Tasks:

-
    -
  1. Create backup of current state
  2. -
  3. Analyze and document all workspace directories
  4. -
  5. Identify active workspace vs backups
  6. -
  7. Map all file dependencies
  8. -
-

Commands:

-
# Backup current state
-cp -r /Users/Akasha/project-provisioning /Users/Akasha/project-provisioning.backup
-
-# Analyze workspaces
-fd workspace -t d > workspace-dirs.txt
-
-

Deliverables:

-
    -
  • Complete backup
  • -
  • Workspace analysis document
  • -
  • Dependency map
  • -
-

Day 2: Directory Restructuring

-

Tasks:

-
    -
  1. Consolidate workspace directories
  2. -
  3. Move build artifacts to distribution/
  4. -
  5. Remove obsolete directories (NO/, wrks/, presentation artifacts)
  6. -
  7. Create proper .gitignore
  8. -
-

Commands:

-
# Create distribution directory
-mkdir -p distribution/{packages,installers,registry}
-
-# Move build artifacts
-mv target distribution/
-mv provisioning/tools/dist distribution/packages/
-
-# Remove obsolete
-rm -rf NO/ wrks/ presentations/
-
-

Deliverables:

-
    -
  • Clean directory structure
  • -
  • Updated .gitignore
  • -
  • Migration log
  • -
-

Day 3: Update Path References

-

Tasks:

-
    -
  1. Update all hardcoded paths in Nushell scripts
  2. -
  3. Update CLAUDE.md with new paths
  4. -
  5. Update documentation references
  6. -
  7. Test all path changes
  8. -
-

Files to Update:

-
    -
  • provisioning/core/nulib/**/*.nu (~65 files)
  • -
  • CLAUDE.md
  • -
  • docs/**/*.md
  • -
-

Deliverables:

-
    -
  • Updated scripts
  • -
  • Updated documentation
  • -
  • Test results
  • -
-

Day 4: Validation and Documentation

-

Tasks:

-
    -
  1. Run full test suite
  2. -
  3. Verify all commands work
  4. -
  5. Update README.md
  6. -
  7. Create migration guide
  8. -
-

Deliverables:

-
    -
  • Passing tests
  • -
  • Updated README
  • -
  • Migration guide for users
  • -
-

Phase 2: Build System Implementation (3-4 days)

-

Day 5: Build System Core

-

Tasks:

-
    -
  1. Create provisioning/tools/build/ structure
  2. -
  3. Implement build-system.nu
  4. -
  5. Implement package-core.nu
  6. -
  7. Create Justfile
  8. -
-

Files to Create:

-
    -
  • provisioning/tools/build/build-system.nu
  • -
  • provisioning/tools/build/package-core.nu
  • -
  • provisioning/tools/build/validate-package.nu
  • -
  • Justfile
  • -
-

Deliverables:

-
    -
  • Working build system
  • -
  • Core packaging capability
  • -
  • Justfile with basic recipes
  • -
-

Day 6: Platform and Extension Packaging

-

Tasks:

-
    -
  1. Implement package-platform.nu
  2. -
  3. Implement package-extensions.nu
  4. -
  5. Implement package-plugins.nu
  6. -
  7. Add checksum generation
  8. -
-

Deliverables:

-
    -
  • Platform packaging
  • -
  • Extension packaging
  • -
  • Plugin packaging
  • -
  • Checksum generation
  • -
-

Day 7: Package Validation

-

Tasks:

-
    -
  1. Create package validation system
  2. -
  3. Implement integrity checks
  4. -
  5. Create test suite for packages
  6. -
  7. Document package format
  8. -
-

Deliverables:

-
    -
  • Package validation
  • -
  • Test suite
  • -
  • Package format documentation
  • -
-

Day 8: Build System Testing

-

Tasks:

-
    -
  1. Test full build pipeline
  2. -
  3. Test all package types
  4. -
  5. Optimize build performance
  6. -
  7. Document build system
  8. -
-

Deliverables:

-
    -
  • Tested build system
  • -
  • Performance optimizations
  • -
  • Build system documentation
  • -
-

Phase 3: Installation System (2-3 days)

-

Day 9: Nushell Installer

-

Tasks:

-
    -
  1. Create install.nu
  2. -
  3. Implement installation logic
  4. -
  5. Implement upgrade logic
  6. -
  7. Implement uninstallation
  8. -
-

Files to Create:

-
    -
  • distribution/installers/install.nu
  • -
-

Deliverables:

-
    -
  • Working Nushell installer
  • -
  • Upgrade mechanism
  • -
  • Uninstall mechanism
  • -
-

Day 10: Bash Installer and CLI

-

Tasks:

-
    -
  1. Create install.sh
  2. -
  3. Replace bash CLI wrapper with pure Nushell
  4. -
  5. Update PATH handling
  6. -
  7. Test installation on clean system
  8. -
-

Files to Create:

-
    -
  • distribution/installers/install.sh
  • -
  • Updated provisioning/core/cli/provisioning
  • -
-

Deliverables:

-
    -
  • Bash installer
  • -
  • Pure Nushell CLI
  • -
  • Installation tests
  • -
-

Day 11: Installation Testing

-

Tasks:

-
    -
  1. Test installation on multiple OSes
  2. -
  3. Test upgrade scenarios
  4. -
  5. Test uninstallation
  6. -
  7. Create installation documentation
  8. -
-

Deliverables:

-
    -
  • Multi-OS installation tests
  • -
  • Installation guide
  • -
  • Troubleshooting guide
  • -
-

Phase 4: Package Registry (Optional, 2-3 days)

-

Day 12: Registry System

-

Tasks:

-
    -
  1. Design registry format
  2. -
  3. Implement registry indexing
  4. -
  5. Create package metadata
  6. -
  7. Implement search functionality
  8. -
-

Files to Create:

-
    -
  • provisioning/tools/build/publish-registry.nu
  • -
  • distribution/registry/index.json
  • -
-

Deliverables:

-
    -
  • Registry system
  • -
  • Package metadata
  • -
  • Search functionality
  • -
-

Day 13: Registry Commands

-

Tasks:

-
    -
  1. Implement provisioning registry list
  2. -
  3. Implement provisioning registry search
  4. -
  5. Implement provisioning registry install
  6. -
  7. Implement provisioning registry update
  8. -
-

Deliverables:

-
    -
  • Registry commands
  • -
  • Package installation from registry
  • -
  • Update mechanism
  • -
-

Day 14: Registry Hosting

-

Tasks:

-
    -
  1. Set up registry hosting (S3, GitHub releases, etc.)
  2. -
  3. Implement upload mechanism
  4. -
  5. Create CI/CD for automatic publishing
  6. -
  7. Document registry system
  8. -
-

Deliverables:

-
    -
  • Hosted registry
  • -
  • CI/CD pipeline
  • -
  • Registry documentation
  • -
-

Phase 5: Documentation and Release (2 days)

-

Day 15: Documentation

-

Tasks:

-
    -
  1. Update all documentation for new structure
  2. -
  3. Create user guides
  4. -
  5. Create development guides
  6. -
  7. Create API documentation
  8. -
-

Deliverables:

-
    -
  • Updated documentation
  • -
  • User guides
  • -
  • Developer guides
  • -
  • API docs
  • -
-

Day 16: Release Preparation

-

Tasks:

-
    -
  1. Create CHANGELOG.md
  2. -
  3. Build release packages
  4. -
  5. Test installation from packages
  6. -
  7. Create release announcement
  8. -
-

Deliverables:

-
    -
  • CHANGELOG
  • -
  • Release packages
  • -
  • Installation verification
  • -
  • Release announcement
  • -
-
-

Migration Strategy

-

For Existing Users

-

Option 1: Clean Migration

-
# Backup current workspace
-cp -r workspace workspace.backup
-
-# Upgrade to new version
-provisioning upgrade --version 3.2.0
-
-# Migrate workspace
-provisioning workspace migrate --from workspace.backup --to workspace/
-
-

Option 2: In-Place Migration

-
# Run migration script
-provisioning migrate --check  # Dry run
-provisioning migrate          # Execute migration
-
-

For Developers

-
# Pull latest changes
-git pull origin main
-
-# Rebuild
-just clean-all
-just build
-
-# Reinstall development version
-just install-dev
+brew install nickel  # macOS
+cargo install nickel-lang-cli  # Linux
 
 # Verify
-provisioning --version
+nickel --version
 
-
-

Success Criteria

-

Repository Structure

-
    -
  • ✅ Single workspace/ directory for all runtime data
  • -
  • ✅ Clear separation: source (provisioning/), runtime (workspace/), artifacts (distribution/)
  • -
  • ✅ All build artifacts in distribution/ and gitignored
  • -
  • ✅ Clean root directory (no wrks/, NO/, etc.)
  • -
  • ✅ Unified documentation in docs/
  • -
-

Build System

-
    -
  • ✅ Single command builds all packages: just build
  • -
  • ✅ Packages can be built independently
  • -
  • ✅ Checksums generated automatically
  • -
  • ✅ Validation before packaging
  • -
  • ✅ Build time < 5 minutes for full build
  • -
-

Installation

-
    -
  • ✅ One-line installation: curl -fsSL https://get.provisioning.io | sh
  • -
  • ✅ Works on Linux and macOS
  • -
  • ✅ Standard installation paths (/usr/local/)
  • -
  • ✅ User configuration in ~/.provisioning/
  • -
  • ✅ Clean uninstallation
  • -
-

Distribution

-
    -
  • ✅ Packages available at stable URL
  • -
  • ✅ Automated releases via CI/CD
  • -
  • ✅ Package registry for extensions
  • -
  • ✅ Upgrade mechanism works reliably
  • -
-

Documentation

-
    -
  • ✅ Complete installation guide
  • -
  • ✅ Quick start guide
  • -
  • ✅ Developer contributing guide
  • -
  • ✅ API documentation
  • -
  • ✅ Architecture documentation
  • -
-
-

Risks and Mitigations

-

Risk 1: Breaking Changes for Existing Users

-

Impact: High -Probability: High -Mitigation:

-
    -
  • Provide migration script
  • -
  • Support both old and new paths during transition (v3.2.x)
  • -
  • Clear migration guide
  • -
  • Automated backup before migration
  • -
-

Risk 2: Build System Complexity

-

Impact: Medium -Probability: Medium -Mitigation:

-
    -
  • Start with simple packaging
  • -
  • Iterate and improve
  • -
  • Document thoroughly
  • -
  • Provide examples
  • -
-

Risk 3: Installation Path Conflicts

-

Impact: Medium -Probability: Low -Mitigation:

-
    -
  • Check for existing installations
  • -
  • Support custom prefix
  • -
  • Clear uninstallation
  • -
  • Non-conflicting binary names
  • -
-

Risk 4: Cross-Platform Issues

-

Impact: High -Probability: Medium -Mitigation:

-
    -
  • Test on multiple OSes (Linux, macOS)
  • -
  • Use portable commands
  • -
  • Provide fallbacks
  • -
  • Clear error messages
  • -
-

Risk 5: Dependency Management

-

Impact: Medium -Probability: Medium -Mitigation:

-
    -
  • Document all dependencies
  • -
  • Check prerequisites during installation
  • -
  • Provide installation instructions for dependencies
  • -
  • Consider bundling critical dependencies
  • -
-
-

Timeline Summary

-
- - - - - - -
PhaseDurationKey Deliverables
Phase 1: Restructuring3-4 daysClean directory structure, updated paths
Phase 2: Build System3-4 daysWorking build system, all package types
Phase 3: Installation2-3 daysInstallers, pure Nushell CLI
Phase 4: Registry (Optional)2-3 daysPackage registry, extension management
Phase 5: Documentation2 daysComplete documentation, release
Total12-16 daysProduction-ready distribution system
-
-
-

Next Steps

-
    -
  1. -

    Review and Approval (Day 0)

    -
      -
    • Review this analysis
    • -
    • Approve implementation plan
    • -
    • Assign resources
    • -
    -
  2. -
  3. -

    Kickoff (Day 1)

    -
      -
    • Create implementation branch
    • -
    • Set up project tracking
    • -
    • Begin Phase 1
    • -
    -
  4. -
  5. -

    Weekly Reviews

    -
      -
    • End of Phase 1: Structure review
    • -
    • End of Phase 2: Build system review
    • -
    • End of Phase 3: Installation review
    • -
    • Final review before release
    • -
    -
  6. -
-
-

Conclusion

-

This comprehensive plan transforms the provisioning system into a professional-grade infrastructure automation platform with:

-
    -
  • Clean Architecture: Clear separation of concerns
  • -
  • Professional Distribution: Standard installation paths and packaging
  • -
  • Easy Installation: One-command installation for users
  • -
  • Developer Friendly: Simple build system and clear development workflow
  • -
  • Extensible: Package registry for community extensions
  • -
  • Well Documented: Complete guides for users and developers
  • -
-

The implementation will take approximately 2-3 weeks and will result in a production-ready system suitable for both individual developers and -enterprise deployments.

-
-

References

-
    -
  • Current codebase structure
  • -
  • Unix FHS (Filesystem Hierarchy Standard)
  • -
  • Rust cargo packaging conventions
  • -
  • npm/yarn package management patterns
  • -
  • Homebrew formula best practices
  • -
  • KCL package management design
  • -
-

TypeDialog + Nickel Integration Guide

-

Status: Implementation Guide -Last Updated: 2025-12-15 -Project: TypeDialog at /Users/Akasha/Development/typedialog -Purpose: Type-safe UI generation from Nickel schemas

-
-

What is TypeDialog

-

TypeDialog generates type-safe interactive forms from configuration schemas with bidirectional Nickel integration.

-
Nickel Schema
-    ↓
-TypeDialog Form (Auto-generated)
-    ↓
-User fills form interactively
-    ↓
-Nickel output config (Type-safe)
+

Verification

+

Confirm successful installation:

+
# Complete installation check
+provisioning version      # CLI version
+provisioning env          # Environment configuration
+provisioning providers    # Available cloud providers
+provisioning validate config  # Configuration validation
+provisioning help         # Help system
 
-
-

Architecture

-

Three Layers

-
CLI/TUI/Web Layer
-    ↓
-TypeDialog Form Engine
-    ↓
-Nickel Integration
-    ↓
-Schema Contracts
+

Next Steps

+

Once installation is complete:

+ +

Quick Start

+

Deploy your first infrastructure in 5 minutes using the Provisioning platform.

+

Prerequisites

+ +

5-Minute Deployment

+

Step 1: Create Workspace (30 seconds)

+
# Initialize workspace
+provisioning workspace init quickstart-demo
+cd quickstart-demo
 
-

Data Flow

-
Input (Nickel)
-    ↓
-Form Definition (TOML)
-    ↓
-Form Rendering (CLI/TUI/Web)
-    ↓
-User Input
-    ↓
-Validation (against Nickel contracts)
-    ↓
-Output (JSON/YAML/TOML/Nickel)
+

Workspace structure created:

+
quickstart-demo/
+├── infra/       # Infrastructure definitions
+├── config/      # Workspace configuration
+├── extensions/  # Custom providers/taskservs
+└── runtime/     # State and logs
 
-
-

Setup

-

Installation

-
# Clone TypeDialog
-git clone https://github.com/jesusperezlorenzo/typedialog.git
-cd typedialog
-
-# Build
-cargo build --release
-
-# Install (optional)
-cargo install --path ./crates/typedialog
-
-

Verify Installation

-
typedialog --version
-typedialog --help
-
-
-

Basic Workflow

-

Step 1: Define Nickel Schema

-
# server_config.ncl
-let contracts = import "./contracts.ncl" in
-let defaults = import "./defaults.ncl" in
-
+

Step 2: Define Infrastructure (1 minute)

+

Create a simple server configuration using Nickel:

+
# Create infrastructure schema
+cat > infra/demo-server.ncl <<'EOF'
 {
-  defaults = defaults,
-
-  make_server | not_exported = fun overrides =>
-    defaults.server & overrides,
-
-  DefaultServer = defaults.server,
-}
-
-

Step 2: Define TypeDialog Form (TOML)

-
# server_form.toml
-[form]
-title = "Server Configuration"
-description = "Create a new server configuration"
-
-[[fields]]
-name = "server_name"
-label = "Server Name"
-type = "text"
-required = true
-help = "Unique identifier for the server"
-placeholder = "web-01"
-
-[[fields]]
-name = "cpu_cores"
-label = "CPU Cores"
-type = "number"
-required = true
-default = 4
-help = "Number of CPU cores (1-32)"
-
-[[fields]]
-name = "memory_gb"
-label = "Memory (GB)"
-type = "number"
-required = true
-default = 8
-help = "Memory in GB (1-256)"
-
-[[fields]]
-name = "zone"
-label = "Availability Zone"
-type = "select"
-required = true
-options = ["us-nyc1", "eu-fra1", "ap-syd1"]
-default = "us-nyc1"
-
-[[fields]]
-name = "monitoring"
-label = "Enable Monitoring"
-type = "confirm"
-default = true
-
-[[fields]]
-name = "tags"
-label = "Tags"
-type = "multiselect"
-options = ["production", "staging", "testing", "development"]
-help = "Select applicable tags"
-
-

Step 3: Render Form (CLI)

-
typedialog form --config server_form.toml --backend cli
-
-

Output:

-
Server Configuration
-Create a new server configuration
-
-? Server Name: web-01
-? CPU Cores: 4
-? Memory (GB): 8
-? Availability Zone: (us-nyc1/eu-fra1/ap-syd1) us-nyc1
-? Enable Monitoring: (y/n) y
-? Tags: (Select multiple with space)
-  ◉ production
-  ◯ staging
-  ◯ testing
-  ◯ development
-
-

Step 4: Validate Against Nickel Schema

-
# Validation happens automatically
-# If input matches Nickel contract, proceeds to output
-
-

Step 5: Output to Nickel

-
typedialog form \
-  --config server_form.toml \
-  --output nickel \
-  --backend cli
-
-

Output file (server_config_output.ncl):

-
{
-  server_name = "web-01",
-  cpu_cores = 4,
-  memory_gb = 8,
-  zone = "us-nyc1",
-  monitoring = true,
-  tags = ["production"],
-}
-
-
-

Real-World Example 1: Infrastructure Wizard

-

Scenario

-

You want an interactive CLI wizard for infrastructure provisioning.

-

Step 1: Define Nickel Schema for Infrastructure

-
# infrastructure_schema.ncl
-{
-  InfrastructureConfig = {
-    workspace_name | String,
-    deployment_mode | [| 'solo, 'multiuser, 'cicd, 'enterprise |],
-    provider | [| 'upcloud, 'aws, 'hetzner |],
-    taskservs | Array,
-    enable_monitoring | Bool,
-    enable_backup | Bool,
-    backup_retention_days | Number,
-  },
-
-  defaults = {
-    workspace_name = "",
-    deployment_mode = 'solo,
-    provider = 'upcloud,
-    taskservs = [],
-    enable_monitoring = true,
-    enable_backup = true,
-    backup_retention_days = 7,
-  },
-
-  DefaultInfra = defaults,
-}
-
-

Step 2: Create Comprehensive Form

-
# infrastructure_wizard.toml
-[form]
-title = "Infrastructure Provisioning Wizard"
-description = "Create a complete infrastructure setup"
-
-[[fields]]
-name = "workspace_name"
-label = "Workspace Name"
-type = "text"
-required = true
-validation_pattern = "^[a-z0-9-]{3,32}$"
-help = "3-32 chars, lowercase alphanumeric and hyphens only"
-placeholder = "my-workspace"
-
-[[fields]]
-name = "deployment_mode"
-label = "Deployment Mode"
-type = "select"
-required = true
-options = [
-  { value = "solo", label = "Solo (Single user, 2 CPU, 4 GB RAM)" },
-  { value = "multiuser", label = "MultiUser (Team, 4 CPU, 8 GB RAM)" },
-  { value = "cicd", label = "CI/CD (Pipelines, 8 CPU, 16 GB RAM)" },
-  { value = "enterprise", label = "Enterprise (Production, 16 CPU, 32 GB RAM)" },
-]
-default = "solo"
-
-[[fields]]
-name = "provider"
-label = "Cloud Provider"
-type = "select"
-required = true
-options = [
-  { value = "upcloud", label = "UpCloud (EU)" },
-  { value = "aws", label = "AWS (Global)" },
-  { value = "hetzner", label = "Hetzner (EU)" },
-]
-default = "upcloud"
-
-[[fields]]
-name = "taskservs"
-label = "Task Services"
-type = "multiselect"
-required = false
-options = [
-  { value = "kubernetes", label = "Kubernetes (Container orchestration)" },
-  { value = "cilium", label = "Cilium (Network policy)" },
-  { value = "postgres", label = "PostgreSQL (Database)" },
-  { value = "redis", label = "Redis (Cache)" },
-  { value = "prometheus", label = "Prometheus (Monitoring)" },
-  { value = "etcd", label = "etcd (Distributed config)" },
-]
-help = "Select task services to deploy"
-
-[[fields]]
-name = "enable_monitoring"
-label = "Enable Monitoring"
-type = "confirm"
-default = true
-help = "Prometheus + Grafana dashboards"
-
-[[fields]]
-name = "enable_backup"
-label = "Enable Backup"
-type = "confirm"
-default = true
-
-[[fields]]
-name = "backup_retention_days"
-label = "Backup Retention (days)"
-type = "number"
-required = false
-default = 7
-help = "How long to keep backups (if enabled)"
-visible_if = "enable_backup == true"
-
-[[fields]]
-name = "email"
-label = "Admin Email"
-type = "text"
-required = true
-validation_pattern = "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"
-help = "For alerts and notifications"
-placeholder = "admin@company.com"
-
-

Step 3: Run Interactive Wizard

-
typedialog form \
-  --config infrastructure_wizard.toml \
-  --backend tui \
-  --output nickel
-
-

Output (infrastructure_config.ncl):

-
{
-  workspace_name = "production-eu",
-  deployment_mode = 'enterprise,
-  provider = 'upcloud,
-  taskservs = ["kubernetes", "cilium", "postgres", "redis", "prometheus"],
-  enable_monitoring = true,
-  enable_backup = true,
-  backup_retention_days = 30,
-  email = "ops@company.com",
-}
-
-

Step 4: Use Output in Infrastructure

-
# main_infrastructure.ncl
-let config = import "./infrastructure_config.ncl" in
-let schemas = import "../../provisioning/schemas/main.ncl" in
-
-{
-  # Build infrastructure based on config
-  infrastructure = if config.deployment_mode == 'solo then
-    {
-      servers = [
-        schemas.lib.make_server {
-          name = config.workspace_name,
-          cpu_cores = 2,
-          memory_gb = 4,
-        },
-      ],
-      taskservs = config.taskservs,
-    }
-  else if config.deployment_mode == 'enterprise then
-    {
-      servers = [
-        schemas.lib.make_server { name = "app-01", cpu_cores = 16, memory_gb = 32 },
-        schemas.lib.make_server { name = "app-02", cpu_cores = 16, memory_gb = 32 },
-        schemas.lib.make_server { name = "db-01", cpu_cores = 16, memory_gb = 32 },
-      ],
-      taskservs = config.taskservs,
-      monitoring = { enabled = config.enable_monitoring, email = config.email },
-    }
-  else
-    # default fallback
-    {},
-}
-
-
-

Real-World Example 2: Server Configuration Form

-

Form Definition (Advanced)

-
# server_advanced_form.toml
-[form]
-title = "Server Configuration"
-description = "Configure server settings with validation"
-
-# Section 1: Basic Info
-[[sections]]
-name = "basic"
-title = "Basic Information"
-
-[[fields]]
-name = "server_name"
-section = "basic"
-label = "Server Name"
-type = "text"
-required = true
-validation_pattern = "^[a-z0-9-]{3,32}$"
-
-[[fields]]
-name = "description"
-section = "basic"
-label = "Description"
-type = "textarea"
-required = false
-placeholder = "Server purpose and details"
-
-# Section 2: Resources
-[[sections]]
-name = "resources"
-title = "Resources"
-
-[[fields]]
-name = "cpu_cores"
-section = "resources"
-label = "CPU Cores"
-type = "number"
-required = true
-default = 4
-min = 1
-max = 32
-
-[[fields]]
-name = "memory_gb"
-section = "resources"
-label = "Memory (GB)"
-type = "number"
-required = true
-default = 8
-min = 1
-max = 256
-
-[[fields]]
-name = "disk_gb"
-section = "resources"
-label = "Disk (GB)"
-type = "number"
-required = true
-default = 100
-min = 10
-max = 2000
-
-# Section 3: Network
-[[sections]]
-name = "network"
-title = "Network Configuration"
-
-[[fields]]
-name = "zone"
-section = "network"
-label = "Availability Zone"
-type = "select"
-required = true
-options = ["us-nyc1", "eu-fra1", "ap-syd1"]
-
-[[fields]]
-name = "enable_ipv6"
-section = "network"
-label = "Enable IPv6"
-type = "confirm"
-default = false
-
-[[fields]]
-name = "allowed_ports"
-section = "network"
-label = "Allowed Ports"
-type = "multiselect"
-options = [
-  { value = "22", label = "SSH (22)" },
-  { value = "80", label = "HTTP (80)" },
-  { value = "443", label = "HTTPS (443)" },
-  { value = "3306", label = "MySQL (3306)" },
-  { value = "5432", label = "PostgreSQL (5432)" },
-]
-
-# Section 4: Advanced
-[[sections]]
-name = "advanced"
-title = "Advanced Options"
-
-[[fields]]
-name = "kernel_version"
-section = "advanced"
-label = "Kernel Version"
-type = "text"
-required = false
-placeholder = "5.15.0 (or leave blank for latest)"
-
-[[fields]]
-name = "enable_monitoring"
-section = "advanced"
-label = "Enable Monitoring"
-type = "confirm"
-default = true
-
-[[fields]]
-name = "monitoring_interval"
-section = "advanced"
-label = "Monitoring Interval (seconds)"
-type = "number"
-required = false
-default = 60
-visible_if = "enable_monitoring == true"
-
-[[fields]]
-name = "tags"
-section = "advanced"
-label = "Tags"
-type = "multiselect"
-options = ["production", "staging", "testing", "development"]
-
-

Output Structure

-
{
-  # Basic
-  server_name = "web-prod-01",
-  description = "Primary web server",
-
-  # Resources
-  cpu_cores = 16,
-  memory_gb = 32,
-  disk_gb = 500,
-
-  # Network
-  zone = "eu-fra1",
-  enable_ipv6 = true,
-  allowed_ports = ["22", "80", "443"],
-
-  # Advanced
-  kernel_version = "5.15.0",
-  enable_monitoring = true,
-  monitoring_interval = 30,
-  tags = ["production"],
-}
-
-
-

API Integration

-

TypeDialog REST Endpoints

-
# Start TypeDialog server
-typedialog server --port 8080
-
-# Render form via HTTP
-curl -X POST http://localhost:8080/forms \
-  -H "Content-Type: application/json" \
-  -d @server_form.toml
-
-

Response Format

-
{
-  "form_id": "srv_abc123",
-  "status": "rendered",
-  "fields": [
-    {
-      "name": "server_name",
-      "label": "Server Name",
-      "type": "text",
-      "required": true,
-      "placeholder": "web-01"
-    }
-  ]
-}
-
-

Submit Form

-
curl -X POST http://localhost:8080/forms/srv_abc123/submit \
-  -H "Content-Type: application/json" \
-  -d '{
-    "server_name": "web-01",
-    "cpu_cores": 4,
-    "memory_gb": 8,
-    "zone": "us-nyc1",
-    "monitoring": true,
-    "tags": ["production"]
-  }'
-
-

Response

-
{
-  "status": "success",
-  "validation": "passed",
-  "output_format": "nickel",
-  "output": {
-    "server_name": "web-01",
-    "cpu_cores": 4,
-    "memory_gb": 8,
-    "zone": "us-nyc1",
-    "monitoring": true,
-    "tags": ["production"]
-  }
-}
-
-
-

Validation

-

Contract-Based Validation

-

TypeDialog validates user input against Nickel contracts:

-
# Nickel contract
-ServerConfig = {
-  cpu_cores | Number,  # Must be number
-  memory_gb | Number,  # Must be number
-  zone | [| 'us-nyc1, 'eu-fra1 |],  # Enum
-}
-
-# If user enters invalid value
-# TypeDialog rejects before serializing
-
-

Validation Rules in Form

-
[[fields]]
-name = "cpu_cores"
-type = "number"
-min = 1
-max = 32
-help = "Must be 1-32 cores"
-# TypeDialog enforces before user can submit
-
-
-

Integration with Provisioning Platform

-

Use Case: Infrastructure Initialization

-
# 1. User runs initialization
-provisioning init --wizard
-
-# 2. Behind the scenes:
-#    - Loads infrastructure_wizard.toml
-#    - Starts TypeDialog (CLI or TUI)
-#    - User fills form interactively
-
-# 3. Output saved as config
-#    ~/.config/provisioning/infrastructure_config.ncl
-
-# 4. Provisioning uses output
-#    provisioning server create --from-config infrastructure_config.ncl
-
-

Implementation in Nushell

-
# provisioning/core/nulib/provisioning_init.nu
-
-def provisioning_init_wizard [] {
-  # Launch TypeDialog form
-  let config = (
-    typedialog form \
-      --config "provisioning/config/infrastructure_wizard.toml" \
-      --backend tui \
-      --output nickel
-  )
-
-  # Save output
-  $config | save ~/.config/provisioning/workspace_config.ncl
-
-  # Validate with provisioning schemas
-  let provisioning = (import "provisioning/schemas/main.ncl")
-  let validated = (
-    nickel export ~/.config/provisioning/workspace_config.ncl
-      | jq . | to json
-  )
-
-  print "Infrastructure configuration created!"
-  print "Use: provisioning deploy --from-config"
-}
-
-
-

Advanced Features

-

Conditional Visibility

-

Show/hide fields based on user selections:

-
[[fields]]
-name = "backup_retention"
-label = "Backup Retention (days)"
-type = "number"
-visible_if = "enable_backup == true"  # Only shown if backup enabled
-
-

Dynamic Defaults

-

Set defaults based on other fields:

-
[[fields]]
-name = "deployment_mode"
-type = "select"
-options = ["solo", "enterprise"]
-
-[[fields]]
-name = "cpu_cores"
-type = "number"
-default_from = "deployment_mode"  # Can reference other fields
-# solo → default 2, enterprise → default 16
-
-

Custom Validation

-
[[fields]]
-name = "memory_gb"
-type = "number"
-validation_rule = "memory_gb >= cpu_cores * 2"
-help = "Memory must be at least 2 GB per CPU core"
-
-
-

Output Formats

-

TypeDialog can output to multiple formats:

-
# Output to Nickel (recommended for IaC)
-typedialog form --config form.toml --output nickel
-
-# Output to JSON (for APIs)
-typedialog form --config form.toml --output json
-
-# Output to YAML (for K8s)
-typedialog form --config form.toml --output yaml
-
-# Output to TOML (for application config)
-typedialog form --config form.toml --output toml
-
-
-

Backends

-

TypeDialog supports three rendering backends:

-

1. CLI (Command-line prompts)

-
typedialog form --config form.toml --backend cli
-
-

Pros: Lightweight, SSH-friendly, no dependencies -Cons: Basic UI

-

2. TUI (Terminal User Interface - Ratatui)

-
typedialog form --config form.toml --backend tui
-
-

Pros: Rich UI, keyboard navigation, sections -Cons: Requires terminal support

-

3. Web (HTTP Server - Axum)

-
typedialog form --config form.toml --backend web --port 3000
-# Opens http://localhost:3000
-
-

Pros: Beautiful UI, remote access, multi-user -Cons: Requires browser, network

-
-

Troubleshooting

-

Problem: Form doesn’t match Nickel contract

-

Cause: Field names or types don’t match contract

-

Solution: Verify field definitions match Nickel schema:

-
# Form field
-[[fields]]
-name = "cpu_cores"  # Must match Nickel field name
-type = "number"     # Must match Nickel type
-
-

Problem: Validation fails

-

Cause: User input violates contract constraints

-

Solution: Add help text and validation rules:

-
[[fields]]
-name = "cpu_cores"
-validation_pattern = "^[1-9][0-9]*$"
-help = "Must be positive integer"
-
-

Problem: Output not valid Nickel

-

Cause: Missing required fields

-

Solution: Ensure all required fields in form:

-
[[fields]]
-name = "required_field"
-required = true  # User must provide value
-
-
-

Complete Example: End-to-End Workflow

-

Step 1: Define Nickel Schema

-
# workspace_schema.ncl
-{
-  workspace = {
-    name = "",
-    mode = 'solo,
-    provider = 'upcloud,
-    monitoring = true,
-    email = "",
-  },
-}
-
-

Step 2: Define Form

-
# workspace_form.toml
-[[fields]]
-name = "name"
-type = "text"
-required = true
-
-[[fields]]
-name = "mode"
-type = "select"
-options = ["solo", "enterprise"]
-
-[[fields]]
-name = "provider"
-type = "select"
-options = ["upcloud", "aws"]
-
-[[fields]]
-name = "monitoring"
-type = "confirm"
-
-[[fields]]
-name = "email"
-type = "text"
-required = true
-
-

Step 3: User Interaction

-
$ typedialog form --config workspace_form.toml --backend tui
-# User fills form interactively
-
-

Step 4: Output

-
{
-  workspace = {
-    name = "production",
-    mode = 'enterprise,
-    provider = 'upcloud,
-    monitoring = true,
-    email = "ops@company.com",
-  },
-}
-
-

Step 5: Use in Provisioning

-
# main.ncl
-let config = import "./workspace.ncl" in
-let schemas = import "provisioning/schemas/main.ncl" in
-
-{
-  # Build infrastructure
-  infrastructure = schemas.deployment.modes.make_mode {
-    deployment_type = config.workspace.mode,
-    provider = config.workspace.provider,
-  },
-}
-
-
-

Summary

-

TypeDialog + Nickel provides:

-

Type-Safe UIs: Forms validated against Nickel contracts -✅ Auto-Generated: No UI code to maintain -✅ Bidirectional: Nickel → Forms → Nickel -✅ Multiple Outputs: JSON, YAML, TOML, Nickel -✅ Three Backends: CLI, TUI, Web -✅ Production-Ready: Used in real infrastructure

-

Key Benefit: Reduce configuration errors by enforcing schema validation at UI level, not after deployment.

-
-

Version: 1.0.0 -Status: Implementation Guide -Last Updated: 2025-12-15

-

ADR-001: Project Structure Decision

-

Status

-

Accepted

-

Context

-

Provisioning had evolved from a monolithic structure into a complex system with mixed organizational patterns. The original structure had multiple issues:

-
    -
  1. Provider-specific code scattered: Cloud provider implementations were mixed with core logic
  2. -
  3. Task services fragmented: Infrastructure services lacked consistent structure
  4. -
  5. Domain boundaries unclear: No clear separation between core, providers, and services
  6. -
  7. Development artifacts mixed with distribution: User-facing tools mixed with development utilities
  8. -
  9. Deep call stack limitations: Nushell’s runtime limitations required architectural solutions
  10. -
  11. Configuration complexity: 200+ environment variables across 65+ files needed systematic organization
  12. -
-

The system needed a clear, maintainable structure that supports:

-
    -
  • Multi-provider infrastructure provisioning (AWS, UpCloud, local)
  • -
  • Modular task services (Kubernetes, container runtimes, storage, networking)
  • -
  • Clear separation of concerns
  • -
  • Hybrid Rust/Nushell architecture
  • -
  • Configuration-driven workflows
  • -
  • Clean distribution without development artifacts
  • -
-

Decision

-

Adopt a domain-driven hybrid structure organized around functional boundaries:

-
src/
-├── core/           # Core system and CLI entry point
-├── platform/       # High-performance coordination layer (Rust orchestrator)
-├── orchestrator/   # Legacy orchestrator location (to be consolidated)
-├── provisioning/   # Main provisioning with domain modules
-├── control-center/ # Web UI management interface
-├── tools/          # Development and utility tools
-└── extensions/     # Plugin and extension framework
-
-

Key Structural Principles

-
    -
  1. Domain Separation: Each major component has clear boundaries and responsibilities
  2. -
  3. Hybrid Architecture: Rust for performance-critical coordination, Nushell for business logic
  4. -
  5. Provider Abstraction: Standardized interfaces across cloud providers
  6. -
  7. Service Modularity: Reusable task services with consistent structure
  8. -
  9. Clean Distribution: Development tools separated from user-facing components
  10. -
  11. Configuration Hierarchy: Systematic config management with interpolation support
  12. -
-

Domain Organization

-
    -
  • Core: CLI interface, library modules, and common utilities
  • -
  • Platform: High-performance Rust orchestrator for workflow coordination
  • -
  • Provisioning: Main business logic with providers, task services, and clusters
  • -
  • Control Center: Web-based management interface
  • -
  • Tools: Development utilities and build systems
  • -
  • Extensions: Plugin framework and custom extensions
  • -
-

Consequences

-

Positive

-
    -
  • Clear Boundaries: Each domain has well-defined responsibilities and interfaces
  • -
  • Scalable Growth: New providers and services can be added without structural changes
  • -
  • Development Efficiency: Developers can focus on specific domains without system-wide knowledge
  • -
  • Clean Distribution: Users receive only necessary components without development artifacts
  • -
  • Maintenance Clarity: Issues can be isolated to specific domains
  • -
  • Hybrid Benefits: Leverage Rust performance where needed while maintaining Nushell productivity
  • -
  • Configuration Consistency: Systematic approach to configuration management across all domains
  • -
-

Negative

-
    -
  • Migration Complexity: Required systematic migration of existing components
  • -
  • Learning Curve: New developers need to understand domain boundaries
  • -
  • Coordination Overhead: Cross-domain features require careful interface design
  • -
  • Path Management: More complex path resolution with domain separation
  • -
  • Build Complexity: Multiple domains require coordinated build processes
  • -
-

Neutral

-
    -
  • Development Patterns: Each domain may develop its own patterns within architectural guidelines
  • -
  • Testing Strategy: Domain-specific testing strategies while maintaining integration coverage
  • -
  • Documentation: Domain-specific documentation with clear cross-references
  • -
-

Alternatives Considered

-

Alternative 1: Monolithic Structure

-

Keep all code in a single flat structure with minimal organization. -Rejected: Would not solve maintainability or scalability issues. Continued technical debt accumulation.

-

Alternative 2: Microservice Architecture

-

Split into completely separate services with network communication. -Rejected: Overhead too high for single-machine deployment use case. Would complicate installation and configuration.

-

Alternative 3: Language-Based Organization

-

Organize by implementation language (rust/, nushell/, kcl/). -Rejected: Does not align with functional boundaries. Cross-cutting concerns would be scattered.

-

Alternative 4: Feature-Based Organization

-

Organize by user-facing features (servers/, clusters/, networking/). -Rejected: Would duplicate cross-cutting infrastructure and provider logic across features.

-

Alternative 5: Layer-Based Architecture

-

Organize by architectural layers (presentation/, business/, data/). -Rejected: Does not align with domain complexity. Infrastructure provisioning has different layering needs.

-

References

-
    -
  • Configuration System Migration (ADR-002)
  • -
  • Hybrid Architecture Decision (ADR-004)
  • -
  • Extension Framework Design (ADR-005)
  • -
  • Project Architecture Principles (PAP) Guidelines
  • -
-

ADR-002: Distribution Strategy

-

Status

-

Accepted

-

Context

-

Provisioning needed a clean distribution strategy that separates user-facing tools from development artifacts. Key challenges included:

-
    -
  1. Development Artifacts Mixed with Production: Build tools, test files, and development utilities scattered throughout user directories
  2. -
  3. Complex Installation Process: Users had to navigate through development-specific directories and files
  4. -
  5. Unclear User Experience: No clear distinction between what users need versus what developers need
  6. -
  7. Configuration Complexity: Multiple configuration files with unclear precedence and purpose
  8. -
  9. Workspace Pollution: User workspaces contained development-only files and directories
  10. -
  11. Path Resolution Issues: Complex path resolution logic mixing development and production concerns
  12. -
-

The system required a distribution strategy that provides:

-
    -
  • Clean user experience without development artifacts
  • -
  • Clear separation between user and development tools
  • -
  • Simplified configuration management
  • -
  • Consistent installation and deployment patterns
  • -
  • Maintainable development workflow
  • -
-

Decision

-

Implement a layered distribution strategy with clear separation between development and user environments:

-

Distribution Layers

-
    -
  1. -

    Core Distribution Layer: Essential user-facing components

    -
      -
    • Main CLI tools and libraries
    • -
    • Configuration templates and defaults
    • -
    • Provider implementations
    • -
    • Task service definitions
    • -
    -
  2. -
  3. -

    Development Layer: Development-specific tools and artifacts

    -
      -
    • Build scripts and development utilities
    • -
    • Test suites and validation tools
    • -
    • Development configuration templates
    • -
    • Code generation tools
    • -
    -
  4. -
  5. -

    Workspace Layer: User-specific customization and data

    -
      -
    • User configurations and overrides
    • -
    • Local state and cache files
    • -
    • Custom extensions and plugins
    • -
    • User-specific templates and workflows
    • -
    -
  6. -
-

Distribution Structure

-
# User Distribution
-/usr/local/bin/
-├── provisioning              # Main CLI entry point
-└── provisioning-*           # Supporting utilities
-
-/usr/local/share/provisioning/
-├── core/                    # Core libraries and modules
-├── providers/               # Provider implementations
-├── taskservs/              # Task service definitions
-├── templates/              # Configuration templates
-└── config.defaults.toml    # System-wide defaults
-
-# User Workspace
-~/workspace/provisioning/
-├── config.user.toml        # User preferences
-├── infra/                  # User infrastructure definitions
-├── extensions/             # User extensions
-└── cache/                  # Local cache and state
-
-# Development Environment
-<project-root>/
-├── src/                    # Source code
-├── scripts/                # Development tools
-├── tests/                  # Test suites
-└── tools/                  # Build and development utilities
-
-

Key Distribution Principles

-
    -
  1. Clean Separation: Development artifacts never appear in user installations
  2. -
  3. Hierarchical Configuration: Clear precedence from system defaults to user overrides
  4. -
  5. Self-Contained User Tools: Users can work without accessing development directories
  6. -
  7. Workspace Isolation: User data and customizations isolated from system installation
  8. -
  9. Consistent Paths: Predictable path resolution across different installation types
  10. -
  11. Version Management: Clear versioning and upgrade paths for distributed components
  12. -
-

Consequences

-

Positive

-
    -
  • Clean User Experience: Users interact only with production-ready tools and interfaces
  • -
  • Simplified Installation: Clear installation process without development complexity
  • -
  • Workspace Isolation: User customizations don’t interfere with system installation
  • -
  • Development Efficiency: Developers can work with full toolset without affecting users
  • -
  • Configuration Clarity: Clear hierarchy and precedence for configuration settings
  • -
  • Maintainable Updates: System updates don’t affect user customizations
  • -
  • Path Simplicity: Predictable path resolution without development-specific logic
  • -
  • Security Isolation: User workspace separated from system components
  • -
-

Negative

-
    -
  • Distribution Complexity: Multiple distribution targets require coordinated build processes
  • -
  • Path Management: More complex path resolution logic to support multiple layers
  • -
  • Migration Overhead: Existing users need to migrate to new workspace structure
  • -
  • Documentation Burden: Need clear documentation for different user types
  • -
  • Testing Complexity: Must validate distribution across different installation scenarios
  • -
-

Neutral

-
    -
  • Development Patterns: Different patterns for development versus production deployment
  • -
  • Configuration Strategy: Layer-specific configuration management approaches
  • -
  • Tool Integration: Different integration patterns for development versus user tools
  • -
-

Alternatives Considered

-

Alternative 1: Monolithic Distribution

-

Ship everything (development and production) in single package. -Rejected: Creates confusing user experience and bloated installations. Mixes development concerns with user needs.

-

Alternative 2: Container-Only Distribution

-

Package entire system as container images only. -Rejected: Limits deployment flexibility and complicates local development workflows. Not suitable for all use cases.

-

Alternative 3: Source-Only Distribution

-

Require users to build from source with development environment. -Rejected: Creates high barrier to entry and mixes user concerns with development complexity.

-

Alternative 4: Plugin-Based Distribution

-

Minimal core with everything else as downloadable plugins. -Rejected: Would fragment essential functionality and complicate initial setup. Network dependency for basic functionality.

-

Alternative 5: Environment-Based Distribution

-

Use environment variables to control what gets installed. -Rejected: Creates complex configuration matrix and potential for inconsistent installations.

-

Implementation Details

-

Distribution Build Process

-
    -
  1. Core Layer Build: Extract essential user components from source
  2. -
  3. Template Processing: Generate configuration templates with proper defaults
  4. -
  5. Path Resolution: Generate path resolution logic for different installation types
  6. -
  7. Documentation Generation: Create user-specific documentation excluding development details
  8. -
  9. Package Creation: Build distribution packages for different platforms
  10. -
  11. Validation Testing: Test installations in clean environments
  12. -
-

Configuration Hierarchy

-
System Defaults (lowest precedence)
-└── User Configuration
-    └── Project Configuration
-        └── Infrastructure Configuration
-            └── Environment Configuration
-                └── Runtime Configuration (highest precedence)
-
-

Workspace Management

-
    -
  • Automatic Creation: User workspace created on first run
  • -
  • Template Initialization: Workspace populated with configuration templates
  • -
  • Version Tracking: Workspace tracks compatible system versions
  • -
  • Migration Support: Automatic migration between workspace versions
  • -
  • Backup Integration: Workspace backup and restore capabilities
  • -
-

References

-
    -
  • Project Structure Decision (ADR-001)
  • -
  • Workspace Isolation Decision (ADR-003)
  • -
  • Configuration System Migration (CLAUDE.md)
  • -
  • User Experience Guidelines (Design Principles)
  • -
  • Installation and Deployment Procedures
  • -
-

ADR-003: Workspace Isolation

-

Status

-

Accepted

-

Context

-

Provisioning required a clear strategy for managing user-specific data, configurations, -and customizations separate from system-wide installations. Key challenges included:

-
    -
  1. Configuration Conflicts: User settings mixed with system defaults, causing unclear precedence
  2. -
  3. State Management: User state (cache, logs, temporary files) scattered across filesystem
  4. -
  5. Customization Isolation: User extensions and customizations affecting system behavior
  6. -
  7. Multi-User Support: Multiple users on same system interfering with each other
  8. -
  9. Development vs Production: Developer needs different from end-user needs
  10. -
  11. Path Resolution Complexity: Complex logic to locate user-specific resources
  12. -
  13. Backup and Migration: Difficulty backing up and migrating user-specific settings
  14. -
  15. Security Boundaries: Need clear separation between system and user-writable areas
  16. -
-

The system needed workspace isolation that provides:

-
    -
  • Clear separation of user data from system installation
  • -
  • Predictable configuration precedence and inheritance
  • -
  • User-specific customization without system impact
  • -
  • Multi-user support on shared systems
  • -
  • Easy backup and migration of user settings
  • -
  • Security isolation between system and user areas
  • -
-

Decision

-

Implement isolated user workspaces with clear boundaries and hierarchical configuration:

-

Workspace Structure

-
~/workspace/provisioning/           # User workspace root
-├── config/
-│   ├── user.toml                  # User preferences and overrides
-│   ├── environments/              # Environment-specific configs
-│   │   ├── dev.toml
-│   │   ├── test.toml
-│   │   └── prod.toml
-│   └── secrets/                   # User-specific encrypted secrets
-├── infra/                         # User infrastructure definitions
-│   ├── personal/                  # Personal infrastructure
-│   ├── work/                      # Work-related infrastructure
-│   └── shared/                    # Shared infrastructure definitions
-├── extensions/                    # User-installed extensions
-│   ├── providers/                 # Custom providers
-│   ├── taskservs/                 # Custom task services
-│   └── plugins/                   # User plugins
-├── templates/                     # User-specific templates
-├── cache/                         # Local cache and temporary data
-│   ├── provider-cache/            # Provider API cache
-│   ├── version-cache/             # Version information cache
-│   └── build-cache/               # Build and generation cache
-├── logs/                          # User-specific logs
-├── state/                         # Local state files
-└── backups/                       # Automatic workspace backups
-
-

Configuration Hierarchy (Precedence Order)

-
    -
  1. Runtime Parameters (command line, environment variables)
  2. -
  3. Environment Configuration (config/environments/{env}.toml)
  4. -
  5. Infrastructure Configuration (infra/{name}/config.toml)
  6. -
  7. Project Configuration (project-specific settings)
  8. -
  9. User Configuration (config/user.toml)
  10. -
  11. System Defaults (system-wide defaults)
  12. -
-

Key Isolation Principles

-
    -
  1. Complete Isolation: User workspace completely independent of system installation
  2. -
  3. Hierarchical Inheritance: Clear configuration inheritance with user overrides
  4. -
  5. Security Boundaries: User workspace in user-writable area only
  6. -
  7. Multi-User Safe: Multiple users can have independent workspaces
  8. -
  9. Portable: Entire user workspace can be backed up and restored
  10. -
  11. Version Independent: Workspace compatible across system version upgrades
  12. -
  13. Extension Safe: User extensions cannot affect system behavior
  14. -
  15. State Isolation: All user state contained within workspace
  16. -
-

Consequences

-

Positive

-
    -
  • User Independence: Users can customize without affecting system or other users
  • -
  • Configuration Clarity: Clear hierarchy and precedence for all configuration
  • -
  • Security Isolation: User modifications cannot compromise system installation
  • -
  • Easy Backup: Complete user environment can be backed up and restored
  • -
  • Development Flexibility: Developers can have multiple isolated workspaces
  • -
  • System Upgrades: System updates don’t affect user customizations
  • -
  • Multi-User Support: Multiple users can work independently on same system
  • -
  • Portable Configurations: User workspace can be moved between systems
  • -
  • State Management: All user state in predictable locations
  • -
-

Negative

-
    -
  • Initial Setup: Users must initialize workspace before first use
  • -
  • Path Complexity: More complex path resolution to support workspace isolation
  • -
  • Disk Usage: Each user maintains separate cache and state
  • -
  • Configuration Duplication: Some configuration may be duplicated across users
  • -
  • Migration Overhead: Existing users need workspace migration
  • -
  • Documentation Complexity: Need clear documentation for workspace management
  • -
-

Neutral

-
    -
  • Backup Strategy: Users responsible for their own workspace backup
  • -
  • Extension Management: User-specific extension installation and management
  • -
  • Version Compatibility: Workspace versions must be compatible with system versions
  • -
  • Performance Implications: Additional path resolution overhead
  • -
-

Alternatives Considered

-

Alternative 1: System-Wide Configuration Only

-

All configuration in system directories with user overrides via environment variables. -Rejected: Creates conflicts between users and makes customization difficult. Poor isolation and security.

-

Alternative 2: Home Directory Dotfiles

-

Use traditional dotfile approach (~/.provisioning/). -Rejected: Clutters home directory and provides less structured organization. Harder to backup and migrate.

-

Alternative 3: XDG Base Directory Specification

-

Follow XDG specification for config/data/cache separation. -Rejected: While standards-compliant, would fragment user data across multiple directories making management complex.

-

Alternative 4: Container-Based Isolation

-

Each user gets containerized environment. -Rejected: Too heavy for simple configuration isolation. Adds deployment complexity without sufficient benefits.

-

Alternative 5: Database-Based Configuration

-

Store all user configuration in database. -Rejected: Adds dependency complexity and makes backup/restore more difficult. Over-engineering for configuration needs.

-

Implementation Details

-

Workspace Initialization

-
# Automatic workspace creation on first run
-provisioning workspace init
-
-# Manual workspace creation with template
-provisioning workspace init --template=developer
-
-# Workspace status and validation
-provisioning workspace status
-provisioning workspace validate
-
-

Configuration Resolution Process

-
    -
  1. Workspace Discovery: Locate user workspace (env var → default location)
  2. -
  3. Configuration Loading: Load configuration hierarchy with proper precedence
  4. -
  5. Path Resolution: Resolve all paths relative to workspace and system installation
  6. -
  7. Variable Interpolation: Process configuration variables and templates
  8. -
  9. Validation: Validate merged configuration for completeness and correctness
  10. -
-

Backup and Migration

-
# Backup entire workspace
-provisioning workspace backup --output ~/backup/provisioning-workspace.tar.gz
-
-# Restore workspace from backup
-provisioning workspace restore --input ~/backup/provisioning-workspace.tar.gz
-
-# Migrate workspace to new version
-provisioning workspace migrate --from-version 2.0.0 --to-version 3.0.0
-
-

Security Considerations

-
    -
  • File Permissions: Workspace created with appropriate user permissions
  • -
  • Secret Management: Secrets encrypted and isolated within workspace
  • -
  • Extension Sandboxing: User extensions cannot access system directories
  • -
  • Path Validation: All paths validated to prevent directory traversal
  • -
  • Configuration Validation: User configuration validated against schemas
  • -
-

References

-
    -
  • Distribution Strategy (ADR-002)
  • -
  • Configuration System Migration (CLAUDE.md)
  • -
  • Security Guidelines (Design Principles)
  • -
  • Extension Framework (ADR-005)
  • -
  • Multi-User Deployment Patterns
  • -
-

ADR-004: Hybrid Architecture

-

Status

-

Accepted

-

Context

-

Provisioning encountered fundamental limitations with a pure Nushell implementation that required architectural solutions:

-
    -
  1. Deep Call Stack Limitations: Nushell’s open command fails in deep call contexts -(enumerate | each), causing “Type not supported” errors in template.nu:71
  2. -
  3. Performance Bottlenecks: Complex workflow orchestration hitting Nushell’s performance limits
  4. -
  5. Concurrency Constraints: Limited parallel processing capabilities in Nushell for batch operations
  6. -
  7. Integration Complexity: Need for REST API endpoints and external system integration
  8. -
  9. State Management: Complex state tracking and persistence requirements beyond Nushell’s capabilities
  10. -
  11. Business Logic Preservation: 65+ existing Nushell files with domain expertise that shouldn’t be rewritten
  12. -
  13. Developer Productivity: Nushell excels for configuration management and domain-specific operations
  14. -
-

The system needed an architecture that:

-
    -
  • Solves Nushell’s technical limitations without losing business logic
  • -
  • Leverages each language’s strengths appropriately
  • -
  • Maintains existing investment in Nushell domain knowledge
  • -
  • Provides performance for coordination-heavy operations
  • -
  • Enables modern integration patterns (REST APIs, async workflows)
  • -
  • Preserves configuration-driven, Infrastructure as Code principles
  • -
-

Decision

-

Implement a Hybrid Rust/Nushell Architecture with clear separation of concerns:

-

Architecture Layers

-

1. Coordination Layer (Rust)

-
    -
  • Orchestrator: High-performance workflow coordination and task scheduling
  • -
  • REST API Server: HTTP endpoints for external integration
  • -
  • State Management: Persistent state tracking with checkpoint recovery
  • -
  • Batch Processing: Parallel execution of complex workflows
  • -
  • File-based Persistence: Lightweight task queue using reliable file storage
  • -
  • Error Recovery: Sophisticated error handling and rollback capabilities
  • -
-

2. Business Logic Layer (Nushell)

-
    -
  • Provider Implementations: Cloud provider-specific operations (AWS, UpCloud, local)
  • -
  • Task Services: Infrastructure service management (Kubernetes, networking, storage)
  • -
  • Configuration Management: KCL-based configuration processing and validation
  • -
  • Template Processing: Infrastructure-as-Code template generation
  • -
  • CLI Interface: User-facing command-line tools and workflows
  • -
  • Domain Operations: All business-specific logic and operations
  • -
-

Integration Patterns

-

Rust → Nushell Communication

-
// Rust orchestrator invokes Nushell scripts via process execution
-let result = Command::new("nu")
-    .arg("-c")
-    .arg("use core/nulib/workflows/server_create.nu *; server_create_workflow 'name' '' []")
-    .output()?;
-

Nushell → Rust Communication

-
# Nushell submits workflows to Rust orchestrator via HTTP API
-http post "http://localhost:9090/workflows/servers/create" {
-    name: "server-name",
-    provider: "upcloud",
-    config: $server_config
-}
-
-

Data Exchange Format

-
    -
  • Structured JSON: All data exchange via JSON for type safety and interoperability
  • -
  • Configuration TOML: Configuration data in TOML format for human readability
  • -
  • State Files: Lightweight file-based state exchange between layers
  • -
-

Key Architectural Principles

-
    -
  1. Language Strengths: Use each language for what it does best
  2. -
  3. Business Logic Preservation: All existing domain knowledge stays in Nushell
  4. -
  5. Performance Critical Path: Coordination and orchestration in Rust
  6. -
  7. Clear Boundaries: Well-defined interfaces between layers
  8. -
  9. Configuration Driven: Both layers respect configuration-driven architecture
  10. -
  11. Error Handling: Coordinated error handling across language boundaries
  12. -
  13. State Consistency: Consistent state management across hybrid system
  14. -
-

Consequences

-

Positive

-
    -
  • Technical Limitations Solved: Eliminates Nushell deep call stack issues
  • -
  • Performance Optimized: High-performance coordination while preserving productivity
  • -
  • Business Logic Preserved: 65+ Nushell files with domain expertise maintained
  • -
  • Modern Integration: REST APIs and async workflows enabled
  • -
  • Development Efficiency: Developers can use optimal language for each task
  • -
  • Batch Processing: Parallel workflow execution with sophisticated state management
  • -
  • Error Recovery: Advanced error handling and rollback capabilities
  • -
  • Scalability: Architecture scales to complex multi-provider workflows
  • -
  • Maintainability: Clear separation of concerns between layers
  • -
-

Negative

-
    -
  • Complexity Increase: Two-language system requires more architectural coordination
  • -
  • Integration Overhead: Data serialization/deserialization between languages
  • -
  • Development Skills: Team needs expertise in both Rust and Nushell
  • -
  • Testing Complexity: Must test integration between language layers
  • -
  • Deployment Complexity: Two runtime environments must be coordinated
  • -
  • Debugging Challenges: Debugging across language boundaries more complex
  • -
-

Neutral

-
    -
  • Development Patterns: Different patterns for each layer while maintaining consistency
  • -
  • Documentation Strategy: Language-specific documentation with integration guides
  • -
  • Tool Chain: Multiple development tool chains must be maintained
  • -
  • Performance Characteristics: Different performance characteristics for different operations
  • -
-

Alternatives Considered

-

Alternative 1: Pure Nushell Implementation

-

Continue with Nushell-only approach and work around limitations. -Rejected: Technical limitations are fundamental and cannot be worked around without compromising functionality. Deep call stack issues are -architectural.

-

Alternative 2: Complete Rust Rewrite

-

Rewrite entire system in Rust for consistency. -Rejected: Would lose 65+ files of domain expertise and Nushell’s productivity advantages for configuration management. Massive development effort.

-

Alternative 3: Pure Go Implementation

-

Rewrite system in Go for simplicity and performance. -Rejected: Same issues as Rust rewrite - loses domain expertise and Nushell’s configuration strengths. Go doesn’t provide significant advantages.

-

Alternative 4: Python/Shell Hybrid

-

Use Python for coordination and shell scripts for operations. -Rejected: Loses type safety and configuration-driven advantages of current system. Python adds dependency complexity.

-

Alternative 5: Container-Based Separation

-

Run Nushell and coordination layer in separate containers. -Rejected: Adds deployment complexity and network communication overhead. Complicates local development significantly.

-

Implementation Details

-

Orchestrator Components

-
    -
  • Task Queue: File-based persistent queue for reliable workflow management
  • -
  • HTTP Server: REST API for workflow submission and monitoring
  • -
  • State Manager: Checkpoint-based state tracking with recovery
  • -
  • Process Manager: Nushell script execution with proper isolation
  • -
  • Error Handler: Comprehensive error recovery and rollback logic
  • -
-

Integration Protocols

-
    -
  • HTTP REST: Primary API for external integration
  • -
  • JSON Data Exchange: Structured data format for all communication
  • -
  • File-based State: Lightweight persistence without database dependencies
  • -
  • Process Execution: Secure subprocess execution for Nushell operations
  • -
-

Development Workflow

-
    -
  1. Rust Development: Focus on coordination, performance, and integration
  2. -
  3. Nushell Development: Focus on business logic, providers, and task services
  4. -
  5. Integration Testing: Validate communication between layers
  6. -
  7. End-to-End Validation: Complete workflow testing across both layers
  8. -
-

Monitoring and Observability

-
    -
  • Structured Logging: JSON logs from both Rust and Nushell components
  • -
  • Metrics Collection: Performance metrics from coordination layer
  • -
  • Health Checks: System health monitoring across both layers
  • -
  • Workflow Tracking: Complete audit trail of workflow execution
  • -
-

Migration Strategy

-

Phase 1: Core Infrastructure (Completed)

-
    -
  • ✅ Rust orchestrator implementation
  • -
  • ✅ REST API endpoints
  • -
  • ✅ File-based task queue
  • -
  • ✅ Basic Nushell integration
  • -
-

Phase 2: Workflow Integration (Completed)

-
    -
  • ✅ Server creation workflows
  • -
  • ✅ Task service workflows
  • -
  • ✅ Cluster deployment workflows
  • -
  • ✅ State management and recovery
  • -
-

Phase 3: Advanced Features (Completed)

-
    -
  • ✅ Batch workflow processing
  • -
  • ✅ Dependency resolution
  • -
  • ✅ Rollback capabilities
  • -
  • ✅ Real-time monitoring
  • -
-

References

-
    -
  • Deep Call Stack Limitations (CLAUDE.md - Architectural Lessons Learned)
  • -
  • Configuration-Driven Architecture (ADR-002)
  • -
  • Batch Workflow System (CLAUDE.md - v3.1.0)
  • -
  • Integration Patterns Documentation
  • -
  • Performance Benchmarking Results
  • -
-

ADR-005: Extension Framework

-

Status

-

Accepted

-

Context

-

Provisioning required a flexible extension mechanism to support:

-
    -
  1. Custom Providers: Organizations need to add custom cloud providers beyond AWS, UpCloud, and local
  2. -
  3. Custom Task Services: Users need to integrate proprietary infrastructure services
  4. -
  5. Custom Workflows: Complex organizations require custom orchestration patterns
  6. -
  7. Third-Party Integration: Need to integrate with existing toolchains and systems
  8. -
  9. User Customization: Power users want to extend and modify system behavior
  10. -
  11. Plugin Ecosystem: Enable community contributions and extensions
  12. -
  13. Isolation Requirements: Extensions must not compromise system stability
  14. -
  15. Discovery Mechanism: System must automatically discover and load extensions
  16. -
  17. Version Compatibility: Extensions must work across system version upgrades
  18. -
  19. Configuration Integration: Extensions should integrate with configuration-driven architecture
  20. -
-

The system needed an extension framework that provides:

-
    -
  • Clear extension API and interfaces
  • -
  • Safe isolation of extension code
  • -
  • Automatic discovery and loading
  • -
  • Configuration integration
  • -
  • Version compatibility management
  • -
  • Developer-friendly extension development patterns
  • -
-

Decision

-

Implement a registry-based extension framework with structured discovery and isolation:

-

Extension Architecture

-

Extension Types

-
    -
  1. Provider Extensions: Custom cloud providers and infrastructure backends
  2. -
  3. Task Service Extensions: Custom infrastructure services and components
  4. -
  5. Workflow Extensions: Custom orchestration and deployment patterns
  6. -
  7. CLI Extensions: Additional command-line tools and interfaces
  8. -
  9. Template Extensions: Custom configuration and code generation templates
  10. -
  11. Integration Extensions: External system integrations and connectors
  12. -
-

Extension Structure

-
extensions/
-├── providers/              # Provider extensions
-│   └── custom-cloud/
-│       ├── extension.toml  # Extension manifest
-│       ├── kcl/           # KCL configuration schemas
-│       ├── nulib/         # Nushell implementation
-│       └── templates/     # Configuration templates
-├── taskservs/             # Task service extensions
-│   └── custom-service/
-│       ├── extension.toml
-│       ├── kcl/
-│       ├── nulib/
-│       └── manifests/     # Kubernetes manifests
-├── workflows/             # Workflow extensions
-│   └── custom-workflow/
-│       ├── extension.toml
-│       └── nulib/
-├── cli/                   # CLI extensions
-│   └── custom-commands/
-│       ├── extension.toml
-│       └── nulib/
-└── integrations/          # Integration extensions
-    └── external-tool/
-        ├── extension.toml
-        └── nulib/
-
-

Extension Manifest (extension.toml)

-
[extension]
-name = "custom-provider"
-version = "1.0.0"
-type = "provider"
-description = "Custom cloud provider integration"
-author = "Organization Name"
-license = "MIT"
-homepage = "https://github.com/org/custom-provider"
-
-[compatibility]
-provisioning_version = ">=3.0.0,<4.0.0"
-nushell_version = ">=0.107.0"
-kcl_version = ">=0.11.0"
-
-[dependencies]
-http_client = ">=1.0.0"
-json_parser = ">=2.0.0"
-
-[entry_points]
-cli = "nulib/cli.nu"
-provider = "nulib/provider.nu"
-config_schema = "schemas/schema.ncl"
-
-[configuration]
-config_prefix = "custom_provider"
-required_env_vars = ["CUSTOM_PROVIDER_API_KEY"]
-optional_config = ["custom_provider.region", "custom_provider.timeout"]
-
-

Key Framework Principles

-
    -
  1. Registry-Based Discovery: Extensions registered in structured directories
  2. -
  3. Manifest-Driven Loading: Extension capabilities declared in manifest files
  4. -
  5. Version Compatibility: Explicit compatibility declarations and validation
  6. -
  7. Configuration Integration: Extensions integrate with system configuration hierarchy
  8. -
  9. Isolation Boundaries: Extensions isolated from core system and each other
  10. -
  11. Standard Interfaces: Consistent interfaces across extension types
  12. -
  13. Development Patterns: Clear patterns for extension development
  14. -
  15. Community Support: Framework designed for community contributions
  16. -
-

Consequences

-

Positive

-
    -
  • Extensibility: System can be extended without modifying core code
  • -
  • Community Growth: Enable community contributions and ecosystem development
  • -
  • Organization Customization: Organizations can add proprietary integrations
  • -
  • Innovation Support: New technologies can be integrated via extensions
  • -
  • Isolation Safety: Extensions cannot compromise system stability
  • -
  • Configuration Consistency: Extensions integrate with configuration-driven architecture
  • -
  • Development Efficiency: Clear patterns reduce extension development time
  • -
  • Version Management: Compatibility system prevents breaking changes
  • -
  • Discovery Automation: Extensions automatically discovered and loaded
  • -
-

Negative

-
    -
  • Complexity Increase: Additional layer of abstraction and management
  • -
  • Performance Overhead: Extension loading and isolation adds runtime cost
  • -
  • Testing Complexity: Must test extension framework and individual extensions
  • -
  • Documentation Burden: Need comprehensive extension development documentation
  • -
  • Version Coordination: Extension compatibility matrix requires management
  • -
  • Support Complexity: Community extensions may require support resources
  • -
-

Neutral

-
    -
  • Development Patterns: Different patterns for extension vs core development
  • -
  • Quality Control: Community extensions may vary in quality and maintenance
  • -
  • Security Considerations: Extensions need security review and validation
  • -
  • Dependency Management: Extension dependencies must be managed carefully
  • -
-

Alternatives Considered

-

Alternative 1: Filesystem-Based Extensions

-

Simple filesystem scanning for extension discovery. -Rejected: No manifest validation or version compatibility checking. Fragile discovery mechanism.

-

Alternative 2: Database-Backed Registry

-

Store extension metadata in database for discovery. -Rejected: Adds database dependency complexity. Over-engineering for extension discovery needs.

-

Alternative 3: Package Manager Integration

-

Use existing package managers (cargo, npm) for extension distribution. -Rejected: Complicates installation and creates external dependencies. Not suitable for corporate environments.

-

Alternative 4: Container-Based Extensions

-

Each extension runs in isolated container. -Rejected: Too heavy for simple extensions. Complicates development and deployment significantly.

-

Alternative 5: Plugin Architecture

-

Traditional plugin architecture with dynamic loading. -Rejected: Complex for shell-based system. Security and isolation challenges in Nushell environment.

-

Implementation Details

-

Extension Discovery Process

-
    -
  1. Directory Scanning: Scan extension directories for manifest files
  2. -
  3. Manifest Validation: Parse and validate extension manifest
  4. -
  5. Compatibility Check: Verify version compatibility requirements
  6. -
  7. Dependency Resolution: Resolve extension dependencies
  8. -
  9. Configuration Integration: Merge extension configuration schemas
  10. -
  11. Entry Point Registration: Register extension entry points with system
  12. -
-

Extension Loading Lifecycle

-
# Extension discovery and validation
-provisioning extension discover
-provisioning extension validate --extension custom-provider
-
-# Extension activation and configuration
-provisioning extension enable custom-provider
-provisioning extension configure custom-provider
-
-# Extension usage
-provisioning provider list  # Shows custom providers
-provisioning server create --provider custom-provider
-
-# Extension management
-provisioning extension disable custom-provider
-provisioning extension update custom-provider
-
-

Configuration Integration

-

Extensions integrate with hierarchical configuration system:

-
# System configuration includes extension settings
-[custom_provider]
-api_endpoint = "https://api.custom-cloud.com"
-region = "us-west-1"
-timeout = 30
-
-# Extension configuration follows same hierarchy rules
-# System defaults → User config → Environment config → Runtime
-
-

Security and Isolation

-
    -
  • Sandboxed Execution: Extensions run in controlled environment
  • -
  • Permission Model: Extensions declare required permissions in manifest
  • -
  • Code Review: Community extensions require review process
  • -
  • Digital Signatures: Extensions can be digitally signed for authenticity
  • -
  • Audit Logging: Extension usage tracked in system audit logs
  • -
-

Development Support

-
    -
  • Extension Templates: Scaffold new extensions from templates
  • -
  • Development Tools: Testing and validation tools for extension developers
  • -
  • Documentation Generation: Automatic documentation from extension manifests
  • -
  • Integration Testing: Framework for testing extensions with core system
  • -
-

Extension Development Patterns

-

Provider Extension Pattern

-
# extensions/providers/custom-cloud/nulib/provider.nu
-export def list-servers [] -> table {
-    http get $"($config.custom_provider.api_endpoint)/servers"
-    | from json
-    | select name status region
-}
-
-export def create-server [name: string, config: record] -> record {
-    let payload = {
-        name: $name,
-        instance_type: $config.plan,
-        region: $config.zone
-    }
-
-    http post $"($config.custom_provider.api_endpoint)/servers" $payload
-    | from json
-}
-
-

Task Service Extension Pattern

-
# extensions/taskservs/custom-service/nulib/service.nu
-export def install [server: string] -> nothing {
-    let manifest_data = open ./manifests/deployment.yaml
-    | str replace "{{server}}" $server
-
-    kubectl apply --server $server --data $manifest_data
-}
-
-export def uninstall [server: string] -> nothing {
-    kubectl delete deployment custom-service --server $server
-}
-
-

References

-
    -
  • Workspace Isolation (ADR-003)
  • -
  • Configuration System Architecture (ADR-002)
  • -
  • Hybrid Architecture Integration (ADR-004)
  • -
  • Community Extension Guidelines
  • -
  • Extension Security Framework
  • -
  • Extension Development Documentation
  • -
-

ADR-006: Provisioning CLI Refactoring to Modular Architecture

-

Status: Implemented ✅ -Date: 2025-09-30 -Authors: Infrastructure Team -Related: ADR-001 (Project Structure), ADR-004 (Hybrid Architecture)

-

Context

-

The main provisioning CLI script (provisioning/core/nulib/provisioning) had grown to -1,329 lines with a massive 1,100+ line match statement handling all commands. This -monolithic structure created multiple critical problems:

-

Problems Identified

-
    -
  1. -

    Maintainability Crisis

    -
      -
    • 54 command branches in one file
    • -
    • Code duplication: Flag handling repeated 50+ times
    • -
    • Hard to navigate: Finding specific command logic required scrolling through 1,000+ lines
    • -
    • Mixed concerns: Routing, validation, and execution all intertwined
    • -
    -
  2. -
  3. -

    Development Friction

    -
      -
    • Adding new commands required editing massive file
    • -
    • Testing was nearly impossible (monolithic, no isolation)
    • -
    • High cognitive load for contributors
    • -
    • Code review difficult due to file size
    • -
    -
  4. -
  5. -

    Technical Debt

    -
      -
    • 10+ lines of repetitive flag handling per command
    • -
    • No separation of concerns
    • -
    • Poor code reusability
    • -
    • Difficult to test individual command handlers
    • -
    -
  6. -
  7. -

    User Experience Issues

    -
      -
    • No bi-directional help system
    • -
    • Inconsistent command shortcuts
    • -
    • Help system not fully integrated
    • -
    -
  8. -
-

Decision

-

We refactored the monolithic CLI into a modular, domain-driven architecture with the following structure:

-
provisioning/core/nulib/
-├── provisioning (211 lines) ⬅️ 84% reduction
-├── main_provisioning/
-│   ├── flags.nu (139 lines) ⭐ Centralized flag handling
-│   ├── dispatcher.nu (264 lines) ⭐ Command routing
-│   ├── mod.nu (updated)
-│   └── commands/ ⭐ Domain-focused handlers
-│       ├── configuration.nu (316 lines)
-│       ├── development.nu (72 lines)
-│       ├── generation.nu (78 lines)
-│       ├── infrastructure.nu (117 lines)
-│       ├── orchestration.nu (64 lines)
-│       ├── utilities.nu (157 lines)
-│       └── workspace.nu (56 lines)
-
-

Key Components

-

1. Centralized Flag Handling (flags.nu)

-

Single source of truth for all flag parsing and argument building:

-
export def parse_common_flags [flags: record]: nothing -> record
-export def build_module_args [flags: record, extra: string = ""]: nothing -> string
-export def set_debug_env [flags: record]
-export def get_debug_flag [flags: record]: nothing -> string
-
-

Benefits:

-
    -
  • Eliminates 50+ instances of duplicate code
  • -
  • Single place to add/modify flags
  • -
  • Consistent flag handling across all commands
  • -
  • Reduced from 10 lines to 3 lines per command handler
  • -
-

2. Command Dispatcher (dispatcher.nu)

-

Central routing with 80+ command mappings:

-
export def get_command_registry []: nothing -> record  # 80+ shortcuts
-export def dispatch_command [args: list, flags: record]  # Main router
-
-

Features:

-
    -
  • Command registry with shortcuts (ws → workspace, orch → orchestrator, etc.)
  • -
  • Bi-directional help support (provisioning ws help works)
  • -
  • Domain-based routing (infrastructure, orchestration, development, etc.)
  • -
  • Special command handling (create, delete, price, etc.)
  • -
-

3. Domain Command Handlers (commands/*.nu)

-

Seven focused modules organized by domain:

-
- - - - - - - -
ModuleLinesResponsibility
infrastructure.nu117Server, taskserv, cluster, infra
orchestration.nu64Workflow, batch, orchestrator
development.nu72Module, layer, version, pack
workspace.nu56Workspace, template
generation.nu78Generate commands
utilities.nu157SSH, SOPS, cache, providers
configuration.nu316Env, show, init, validate
-
-

Each handler:

-
    -
  • Exports handle_<domain>_command function
  • -
  • Uses shared flag handling
  • -
  • Provides error messages with usage hints
  • -
  • Isolated and testable
  • -
-

Architecture Principles

-

1. Separation of Concerns

-
    -
  • Routingdispatcher.nu
  • -
  • Flag parsingflags.nu
  • -
  • Business logiccommands/*.nu
  • -
  • Help systemhelp_system.nu (existing)
  • -
-

2. Single Responsibility

-

Each module has ONE clear purpose:

-
    -
  • Command handlers execute specific domains
  • -
  • Dispatcher routes to correct handler
  • -
  • Flags module normalizes all inputs
  • -
-

3. DRY (Don’t Repeat Yourself)

-

Eliminated repetition:

-
    -
  • Flag handling: 50+ instances → 1 function
  • -
  • Command routing: Scattered logic → Command registry
  • -
  • Error handling: Consistent across all domains
  • -
-

4. Open/Closed Principle

-
    -
  • Open for extension: Add new handlers easily
  • -
  • Closed for modification: Core routing unchanged
  • -
-

5. Dependency Inversion

-

All handlers depend on abstractions (flag records, not concrete flags):

-
# Handler signature
-export def handle_infrastructure_command [
-  command: string
-  ops: string
-  flags: record  # ⬅️ Abstraction, not concrete flags
-]
-
-

Implementation Details

-

Migration Path (Completed in 2 Phases)

-

Phase 1: Foundation

-
    -
  1. ✅ Created commands/ directory structure
  2. -
  3. ✅ Created flags.nu with common flag handling
  4. -
  5. ✅ Created initial command handlers (infrastructure, utilities, configuration)
  6. -
  7. ✅ Created dispatcher.nu with routing logic
  8. -
  9. ✅ Refactored main file (1,329 → 211 lines)
  10. -
  11. ✅ Tested basic functionality
  12. -
-

Phase 2: Completion

-
    -
  1. ✅ Fixed bi-directional help (provisioning ws help now works)
  2. -
  3. ✅ Created remaining handlers (orchestration, development, workspace, generation)
  4. -
  5. ✅ Removed duplicate code from dispatcher
  6. -
  7. ✅ Added comprehensive test suite
  8. -
  9. ✅ Verified all shortcuts work
  10. -
-

Bi-directional Help System

-

Users can now access help in multiple ways:

-
# All these work equivalently:
-provisioning help workspace
-provisioning workspace help  # ⬅️ NEW: Bi-directional
-provisioning ws help         # ⬅️ NEW: With shortcuts
-provisioning help ws         # ⬅️ NEW: Shortcut in help
-
-

Implementation:

-
# Intercept "command help" → "help command"
-let first_op = if ($ops_list | length) > 0 { ($ops_list | get 0) } else { "" }
-if $first_op in ["help" "h"] {
-  exec $"($env.PROVISIONING_NAME)" help $task --notitles
-}
-
-

Command Shortcuts

-

Comprehensive shortcut system with 30+ mappings:

-

Infrastructure:

-
    -
  • sserver
  • -
  • t, tasktaskserv
  • -
  • clcluster
  • -
  • iinfra
  • -
-

Orchestration:

-
    -
  • wf, flowworkflow
  • -
  • batbatch
  • -
  • orchorchestrator
  • -
-

Development:

-
    -
  • modmodule
  • -
  • lyrlayer
  • -
-

Workspace:

-
    -
  • wsworkspace
  • -
  • tpl, tmpltemplate
  • -
-

Testing

-

Comprehensive test suite created (tests/test_provisioning_refactor.nu):

-

Test Coverage

-
    -
  • ✅ Main help display
  • -
  • ✅ Category help (infrastructure, orchestration, development, workspace)
  • -
  • ✅ Bi-directional help routing
  • -
  • ✅ All command shortcuts
  • -
  • ✅ Category shortcut help
  • -
  • ✅ Command routing to correct handlers
  • -
-

Test Results

-
📋 Testing main help... ✅
-📋 Testing category help... ✅
-🔄 Testing bi-directional help... ✅
-⚡ Testing command shortcuts... ✅
-📚 Testing category shortcut help... ✅
-🎯 Testing command routing... ✅
-
-📊 TEST RESULTS: 6 passed, 0 failed
-
-

Results

-

Quantitative Improvements

-
- - - - - - -
MetricBeforeAfterImprovement
Main file size1,329 lines211 lines84% reduction
Command handler1 massive match (1,100+ lines)7 focused modulesDomain separation
Flag handlingRepeated 50+ times1 function98% duplication removal
Code per command10 lines3 lines70% reduction
Modules count1 monolith9 modulesModular architecture
Test coverageNone6 test groupsComprehensive testing
-
-

Qualitative Improvements

-

Maintainability

-
    -
  • ✅ Easy to find specific command logic
  • -
  • ✅ Clear separation of concerns
  • -
  • ✅ Self-documenting structure
  • -
  • ✅ Focused modules (< 320 lines each)
  • -
-

Extensibility

-
    -
  • ✅ Add new commands: Just update appropriate handler
  • -
  • ✅ Add new flags: Single function update
  • -
  • ✅ Add new shortcuts: Update command registry
  • -
  • ✅ No massive file edits required
  • -
-

Testability

-
    -
  • ✅ Isolated command handlers
  • -
  • ✅ Mockable dependencies
  • -
  • ✅ Test individual domains
  • -
  • ✅ Fast test execution
  • -
-

Developer Experience

-
    -
  • ✅ Lower cognitive load
  • -
  • ✅ Faster onboarding
  • -
  • ✅ Easier code review
  • -
  • ✅ Better IDE navigation
  • -
-

Trade-offs

-

Advantages

-
    -
  1. Dramatically reduced complexity: 84% smaller main file
  2. -
  3. Better organization: Domain-focused modules
  4. -
  5. Easier testing: Isolated, testable units
  6. -
  7. Improved maintainability: Clear structure, less duplication
  8. -
  9. Enhanced UX: Bi-directional help, shortcuts
  10. -
  11. Future-proof: Easy to extend
  12. -
-

Disadvantages

-
    -
  1. More files: 1 file → 9 files (but smaller, focused)
  2. -
  3. Module imports: Need to import multiple modules (automated via mod.nu)
  4. -
  5. Learning curve: New structure requires documentation (this ADR)
  6. -
-

Decision: Advantages significantly outweigh disadvantages.

-

Examples

-

Before: Repetitive Flag Handling

-
"server" => {
-  let use_check = if $check { "--check "} else { "" }
-  let use_yes = if $yes { "--yes" } else { "" }
-  let use_wait = if $wait { "--wait" } else { "" }
-  let use_keepstorage = if $keepstorage { "--keepstorage "} else { "" }
-  let str_infra = if $infra != null  { $"--infra ($infra) "} else { "" }
-  let str_outfile = if $outfile != null  { $"--outfile ($outfile) "} else { "" }
-  let str_out = if $out != null  { $"--out ($out) "} else { "" }
-  let arg_include_notuse = if $include_notuse { $"--include_notuse "} else { "" }
-  run_module $"($str_ops) ($str_infra) ($use_check)..." "server" --exec
-}
-
-

After: Clean, Reusable

-
def handle_server [ops: string, flags: record] {
-  let args = build_module_args $flags $ops
-  run_module $args "server" --exec
-}
-
-

Reduction: 10 lines → 3 lines (70% reduction)

-

Future Considerations

-

Potential Enhancements

-
    -
  1. Unit test expansion: Add tests for each command handler
  2. -
  3. Integration tests: End-to-end workflow tests
  4. -
  5. Performance profiling: Measure routing overhead (expected to be negligible)
  6. -
  7. Documentation generation: Auto-generate docs from handlers
  8. -
  9. Plugin architecture: Allow third-party command extensions
  10. -
-

Migration Guide for Contributors

-

See docs/development/COMMAND_HANDLER_GUIDE.md for:

-
    -
  • How to add new commands
  • -
  • How to modify existing handlers
  • -
  • How to add new shortcuts
  • -
  • Testing guidelines
  • -
- -
    -
  • Architecture Overview: docs/architecture/system-overview.md
  • -
  • Developer Guide: docs/development/COMMAND_HANDLER_GUIDE.md
  • -
  • Main Project Docs: CLAUDE.md (updated with new structure)
  • -
  • Test Suite: tests/test_provisioning_refactor.nu
  • -
-

Conclusion

-

This refactoring transforms the provisioning CLI from a monolithic, hard-to-maintain script into a modular, well-organized system following software -engineering best practices. The 84% reduction in main file size, elimination of code duplication, and comprehensive test coverage position the project -for sustainable long-term growth.

-

The new architecture enables:

-
    -
  • Faster development: Add commands in minutes, not hours
  • -
  • Better quality: Isolated testing catches bugs early
  • -
  • Easier maintenance: Clear structure reduces cognitive load
  • -
  • Enhanced UX: Shortcuts and bi-directional help improve usability
  • -
-

Status: Successfully implemented and tested. All commands operational. Ready for production use.

-
-

This ADR documents a major architectural improvement completed on 2025-09-30.

-

ADR-007: KMS Service Simplification to Age and Cosmian Backends

-

Status: Accepted -Date: 2025-10-08 -Deciders: Architecture Team -Related: ADR-006 (KMS Service Integration)

-

Context

-

The KMS service initially supported 4 backends: HashiCorp Vault, AWS KMS, Age, and Cosmian KMS. This created unnecessary complexity and unclear -guidance about which backend to use for different environments.

-

Problems with 4-Backend Approach

-
    -
  1. Complexity: Supporting 4 different backends increased maintenance burden
  2. -
  3. Dependencies: AWS SDK added significant compile time (~30 s) and binary size
  4. -
  5. Confusion: No clear guidance on which backend to use when
  6. -
  7. Cloud Lock-in: AWS KMS dependency limited infrastructure flexibility
  8. -
  9. Operational Overhead: Vault requires server setup even for simple dev environments
  10. -
  11. Code Duplication: Similar logic implemented 4 different ways
  12. -
-

Key Insights

-
    -
  • Most development work doesn’t need server-based KMS
  • -
  • Production deployments need enterprise-grade security features
  • -
  • Age provides fast, offline encryption perfect for development
  • -
  • Cosmian KMS offers confidential computing and zero-knowledge architecture
  • -
  • Supporting Vault AND Cosmian is redundant (both are server-based KMS)
  • -
  • AWS KMS locks us into AWS infrastructure
  • -
-

Decision

-

Simplify the KMS service to support only 2 backends:

-
    -
  1. -

    Age: For development and local testing

    -
      -
    • Fast, offline, no server required
    • -
    • Simple key generation with age-keygen
    • -
    • X25519 encryption (modern, secure)
    • -
    • Perfect for dev/test environments
    • -
    -
  2. -
  3. -

    Cosmian KMS: For production deployments

    -
      -
    • Enterprise-grade key management
    • -
    • Confidential computing support (SGX/SEV)
    • -
    • Zero-knowledge architecture
    • -
    • Server-side key rotation
    • -
    • Audit logging and compliance
    • -
    • Multi-tenant support
    • -
    -
  4. -
-

Remove support for:

-
    -
  • ❌ HashiCorp Vault (redundant with Cosmian)
  • -
  • ❌ AWS KMS (cloud lock-in, complexity)
  • -
-

Consequences

-

Positive

-
    -
  1. Simpler Code: 2 backends instead of 4 reduces complexity by 50%
  2. -
  3. Faster Compilation: Removing AWS SDK saves ~30 seconds compile time
  4. -
  5. Clear Guidance: Age = dev, Cosmian = prod (no confusion)
  6. -
  7. Offline Development: Age works without network connectivity
  8. -
  9. Better Security: Cosmian provides confidential computing (TEE)
  10. -
  11. No Cloud Lock-in: Not dependent on AWS infrastructure
  12. -
  13. Easier Testing: Age backend requires no setup
  14. -
  15. Reduced Dependencies: Fewer external crates to maintain
  16. -
-

Negative

-
    -
  1. Migration Required: Existing Vault/AWS KMS users must migrate
  2. -
  3. Learning Curve: Teams must learn Age and Cosmian
  4. -
  5. Cosmian Dependency: Production depends on Cosmian availability
  6. -
  7. Cost: Cosmian may have licensing costs (cloud or self-hosted)
  8. -
-

Neutral

-
    -
  1. Feature Parity: Cosmian provides all features Vault/AWS had
  2. -
  3. API Compatibility: Encrypt/decrypt API remains primarily the same
  4. -
  5. Configuration Change: TOML config structure updated but similar
  6. -
-

Implementation

-

Files Created

-
    -
  1. src/age/client.rs (167 lines) - Age encryption client
  2. -
  3. src/age/mod.rs (3 lines) - Age module exports
  4. -
  5. src/cosmian/client.rs (294 lines) - Cosmian KMS client
  6. -
  7. src/cosmian/mod.rs (3 lines) - Cosmian module exports
  8. -
  9. docs/migration/KMS_SIMPLIFICATION.md (500+ lines) - Migration guide
  10. -
-

Files Modified

-
    -
  1. src/lib.rs - Updated exports (age, cosmian instead of aws, vault)
  2. -
  3. src/types.rs - Updated error types and config enum
  4. -
  5. src/service.rs - Simplified to 2 backends (180 lines, was 213)
  6. -
  7. Cargo.toml - Removed AWS deps, added age = "0.10"
  8. -
  9. README.md - Complete rewrite for new backends
  10. -
  11. provisioning/config/kms.toml - Simplified configuration
  12. -
-

Files Deleted

-
    -
  1. src/aws/client.rs - AWS KMS client
  2. -
  3. src/aws/envelope.rs - Envelope encryption helpers
  4. -
  5. src/aws/mod.rs - AWS module
  6. -
  7. src/vault/client.rs - Vault client
  8. -
  9. src/vault/mod.rs - Vault module
  10. -
-

Dependencies Changed

-

Removed:

-
    -
  • aws-sdk-kms = "1"
  • -
  • aws-config = "1"
  • -
  • aws-credential-types = "1"
  • -
  • aes-gcm = "0.10" (was only for AWS envelope encryption)
  • -
-

Added:

-
    -
  • age = "0.10"
  • -
  • tempfile = "3" (dev dependency for tests)
  • -
-

Kept:

-
    -
  • All Axum web framework deps
  • -
  • reqwest (for Cosmian HTTP API)
  • -
  • base64, serde, tokio, etc.
  • -
-

Migration Path

-

For Development

-
# 1. Install Age
-brew install age  # or apt install age
-
-# 2. Generate keys
-age-keygen -o ~/.config/provisioning/age/private_key.txt
-age-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisioning/age/public_key.txt
-
-# 3. Update config to use Age backend
-# 4. Re-encrypt development secrets
-
-

For Production

-
# 1. Set up Cosmian KMS (cloud or self-hosted)
-# 2. Create master key in Cosmian
-# 3. Migrate secrets from Vault/AWS to Cosmian
-# 4. Update production config
-# 5. Deploy new KMS service
-
-

See docs/migration/KMS_SIMPLIFICATION.md for detailed steps.

-

Alternatives Considered

-

Alternative 1: Keep All 4 Backends

-

Pros:

-
    -
  • No migration required
  • -
  • Maximum flexibility
  • -
-

Cons:

-
    -
  • Continued complexity
  • -
  • Maintenance burden
  • -
  • Unclear guidance
  • -
-

Rejected: Complexity outweighs benefits

-

Alternative 2: Only Cosmian (No Age)

-

Pros:

-
    -
  • Single backend
  • -
  • Enterprise-grade everywhere
  • -
-

Cons:

-
    -
  • Requires Cosmian server for development
  • -
  • Slower dev iteration
  • -
  • Network dependency for local dev
  • -
-

Rejected: Development experience matters

-

Alternative 3: Only Age (No Production Backend)

-

Pros:

-
    -
  • Simplest solution
  • -
  • No server required
  • -
-

Cons:

-
    -
  • Not suitable for production
  • -
  • No audit logging
  • -
  • No key rotation
  • -
  • No multi-tenant support
  • -
-

Rejected: Production needs enterprise features

-

Alternative 4: Age + HashiCorp Vault

-

Pros:

-
    -
  • Vault is widely known
  • -
  • No Cosmian dependency
  • -
-

Cons:

-
    -
  • Vault lacks confidential computing
  • -
  • Vault server still required
  • -
  • No zero-knowledge architecture
  • -
-

Rejected: Cosmian provides better security features

-

Metrics

-

Code Reduction

-
    -
  • Total Lines Removed: ~800 lines (AWS + Vault implementations)
  • -
  • Total Lines Added: ~470 lines (Age + Cosmian + docs)
  • -
  • Net Reduction: ~330 lines
  • -
-

Dependency Reduction

-
    -
  • Crates Removed: 4 (aws-sdk-kms, aws-config, aws-credential-types, aes-gcm)
  • -
  • Crates Added: 1 (age)
  • -
  • Net Reduction: 3 crates
  • -
-

Compilation Time

-
    -
  • Before: ~90 seconds (with AWS SDK)
  • -
  • After: ~60 seconds (without AWS SDK)
  • -
  • Improvement: 33% faster
  • -
-

Compliance

-

Security Considerations

-
    -
  1. Age Security: X25519 (Curve25519) encryption, modern and secure
  2. -
  3. Cosmian Security: Confidential computing, zero-knowledge, enterprise-grade
  4. -
  5. No Regression: Security features maintained or improved
  6. -
  7. Clear Separation: Dev (Age) never used for production secrets
  8. -
-

Testing Requirements

-
    -
  1. Unit Tests: Both backends have comprehensive test coverage
  2. -
  3. Integration Tests: Age tests run without external deps
  4. -
  5. Cosmian Tests: Require test server (marked as #[ignore])
  6. -
  7. Migration Tests: Verify old configs fail gracefully
  8. -
-

References

- -

Notes

-
    -
  • Age is designed by Filippo Valsorda (Google, Go security team)
  • -
  • Cosmian provides FIPS 140-2 Level 3 compliance (when using certified hardware)
  • -
  • This decision aligns with project goal of reducing cloud provider dependencies
  • -
  • Migration timeline: 6 weeks for full adoption
  • -
-

ADR-008: Cedar Authorization Policy Engine Integration

-

Status: Accepted -Date: 2025-10-08 -Deciders: Architecture Team -Tags: security, authorization, cedar, policy-engine

-

Context and Problem Statement

-

The Provisioning platform requires fine-grained authorization controls to manage access to infrastructure resources across multiple environments -(development, staging, production). The authorization system must:

-
    -
  1. Support complex authorization rules (MFA, IP restrictions, time windows, approvals)
  2. -
  3. Be auditable and version-controlled
  4. -
  5. Allow hot-reload of policies without restart
  6. -
  7. Integrate with JWT tokens for identity
  8. -
  9. Scale to thousands of authorization decisions per second
  10. -
  11. Be maintainable by security team without code changes
  12. -
-

Traditional code-based authorization (if/else statements) is difficult to audit, maintain, and scale.

-

Decision Drivers

-
    -
  • Security: Critical for production infrastructure access
  • -
  • Auditability: Compliance requirements demand clear authorization policies
  • -
  • Flexibility: Policies change more frequently than code
  • -
  • Performance: Low-latency authorization decisions (<10 ms)
  • -
  • Maintainability: Security team should update policies without developers
  • -
  • Type Safety: Prevent policy errors before deployment
  • -
-

Considered Options

-

Option 1: Code-Based Authorization (Current State)

-

Implement authorization logic directly in Rust/Nushell code.

-

Pros:

-
    -
  • Full control and flexibility
  • -
  • No external dependencies
  • -
  • Simple to understand for small use cases
  • -
-

Cons:

-
    -
  • Hard to audit and maintain
  • -
  • Requires code deployment for policy changes
  • -
  • No type safety for policies
  • -
  • Difficult to test all combinations
  • -
  • Not declarative
  • -
-

Option 2: OPA (Open Policy Agent)

-

Use OPA with Rego policy language.

-

Pros:

-
    -
  • Industry standard
  • -
  • Rich ecosystem
  • -
  • Rego is powerful
  • -
-

Cons:

-
    -
  • Rego is complex to learn
  • -
  • Requires separate service deployment
  • -
  • Performance overhead (HTTP calls)
  • -
  • Policies not type-checked
  • -
-

Option 3: Cedar Policy Engine (Chosen)

-

Use AWS Cedar policy language integrated directly into orchestrator.

-

Pros:

-
    -
  • Type-safe policy language
  • -
  • Fast (compiled, no network overhead)
  • -
  • Schema-based validation
  • -
  • Declarative and auditable
  • -
  • Hot-reload support
  • -
  • Rust library (no external service)
  • -
  • Deny-by-default security model
  • -
-

Cons:

-
    -
  • Recently introduced (2023)
  • -
  • Smaller ecosystem than OPA
  • -
  • Learning curve for policy authors
  • -
-

Option 4: Casbin

-

Use Casbin authorization library.

-

Pros:

-
    -
  • Multiple policy models (ACL, RBAC, ABAC)
  • -
  • Rust bindings available
  • -
-

Cons:

-
    -
  • Less declarative than Cedar
  • -
  • Weaker type safety
  • -
  • More imperative style
  • -
-

Decision Outcome

-

Chosen Option: Option 3 - Cedar Policy Engine

-

Rationale

-
    -
  1. Type Safety: Cedar’s schema validation prevents policy errors before deployment
  2. -
  3. Performance: Native Rust library, no network overhead, <1 ms authorization decisions
  4. -
  5. Auditability: Declarative policies in version control
  6. -
  7. Hot Reload: Update policies without orchestrator restart
  8. -
  9. AWS Standard: Used in production by AWS for AVP (Amazon Verified Permissions)
  10. -
  11. Deny-by-Default: Secure by design
  12. -
-

Implementation Details

-

Architecture

-
┌─────────────────────────────────────────────────────────┐
-│                  Orchestrator                           │
-├─────────────────────────────────────────────────────────┤
-│                                                         │
-│  HTTP Request                                           │
-│       ↓                                                 │
-│  ┌──────────────────┐                                  │
-│  │ JWT Validation   │ ← Token Validator                │
-│  └────────┬─────────┘                                  │
-│           ↓                                             │
-│  ┌──────────────────┐                                  │
-│  │ Cedar Engine     │ ← Policy Loader                  │
-│  │                  │   (Hot Reload)                   │
-│  │ • Check Policies │                                  │
-│  │ • Evaluate Rules │                                  │
-│  │ • Context Check  │                                  │
-│  └────────┬─────────┘                                  │
-│           ↓                                             │
-│  Allow / Deny                                           │
-│                                                         │
-└─────────────────────────────────────────────────────────┘
-
-

Policy Organization

-
provisioning/config/cedar-policies/
-├── schema.cedar          # Entity and action definitions
-├── production.cedar      # Production environment policies
-├── development.cedar     # Development environment policies
-├── admin.cedar          # Administrative policies
-└── README.md            # Documentation
-
-

Rust Implementation

-
provisioning/platform/orchestrator/src/security/
-├── cedar.rs             # Cedar engine integration (450 lines)
-├── policy_loader.rs     # Policy loading with hot reload (320 lines)
-├── authorization.rs     # Middleware integration (380 lines)
-├── mod.rs              # Module exports
-└── tests.rs            # Comprehensive tests (450 lines)
-
-

Key Components

-
    -
  1. -

    CedarEngine: Core authorization engine

    -
      -
    • Load policies from strings
    • -
    • Load schema for validation
    • -
    • Authorize requests
    • -
    • Policy statistics
    • -
    -
  2. -
  3. -

    PolicyLoader: File-based policy management

    -
      -
    • Load policies from directory
    • -
    • Hot reload on file changes (notify crate)
    • -
    • Validate policy syntax
    • -
    • Schema validation
    • -
    -
  4. -
  5. -

    Authorization Middleware: Axum integration

    -
      -
    • Extract JWT claims
    • -
    • Build authorization context (IP, MFA, time)
    • -
    • Check authorization
    • -
    • Return 403 Forbidden on deny
    • -
    -
  6. -
  7. -

    Policy Files: Declarative authorization rules

    -
      -
    • Production: MFA, approvals, IP restrictions, business hours
    • -
    • Development: Permissive for developers
    • -
    • Admin: Platform admin, SRE, audit team policies
    • -
    -
  8. -
-

Context Variables

-
AuthorizationContext {
-    mfa_verified: bool,          // MFA verification status
-    ip_address: String,          // Client IP address
-    time: String,                // ISO 8601 timestamp
-    approval_id: Option<String>, // Approval ID (optional)
-    reason: Option<String>,      // Reason for operation
-    force: bool,                 // Force flag
-    additional: HashMap,         // Additional context
-}
-

Example Policy

-
// Production deployments require MFA verification
-@id("prod-deploy-mfa")
-@description("All production deployments must have MFA verification")
-permit (
-  principal,
-  action == Provisioning::Action::"deploy",
-  resource in Provisioning::Environment::"production"
-) when {
-  context.mfa_verified == true
-};
-
-

Integration Points

-
    -
  1. JWT Tokens: Extract principal and context from validated JWT
  2. -
  3. Audit System: Log all authorization decisions
  4. -
  5. Control Center: UI for policy management and testing
  6. -
  7. CLI: Policy validation and testing commands
  8. -
-

Security Best Practices

-
    -
  1. Deny by Default: Cedar defaults to deny all actions
  2. -
  3. Schema Validation: Type-check policies before loading
  4. -
  5. Version Control: All policies in git for auditability
  6. -
  7. Principle of Least Privilege: Grant minimum necessary permissions
  8. -
  9. Defense in Depth: Combine with JWT validation and rate limiting
  10. -
  11. Separation of Concerns: Security team owns policies, developers own code
  12. -
-

Consequences

-

Positive

-
    -
  1. Auditable: All policies in version control
  2. -
  3. Type-Safe: Schema validation prevents errors
  4. -
  5. Fast: <1 ms authorization decisions
  6. -
  7. Maintainable: Security team can update policies independently
  8. -
  9. Hot Reload: No downtime for policy updates
  10. -
  11. Testable: Comprehensive test suite for policies
  12. -
  13. Declarative: Clear intent, no hidden logic
  14. -
-

Negative

-
    -
  1. Learning Curve: Team must learn Cedar policy language
  2. -
  3. New Technology: Cedar is relatively new (2023)
  4. -
  5. Ecosystem: Smaller community than OPA
  6. -
  7. Tooling: Limited IDE support compared to Rego
  8. -
-

Neutral

-
    -
  1. 🔶 Migration: Existing authorization logic needs migration to Cedar
  2. -
  3. 🔶 Policy Complexity: Complex rules may be harder to express
  4. -
  5. 🔶 Debugging: Policy debugging requires understanding Cedar evaluation
  6. -
-

Compliance

-

Security Standards

-
    -
  • SOC 2: Auditable access control policies
  • -
  • ISO 27001: Access control management
  • -
  • GDPR: Data access authorization and logging
  • -
  • NIST 800-53: AC-3 Access Enforcement
  • -
-

Audit Requirements

-

All authorization decisions include:

-
    -
  • Principal (user/team)
  • -
  • Action performed
  • -
  • Resource accessed
  • -
  • Context (MFA, IP, time)
  • -
  • Decision (allow/deny)
  • -
  • Policies evaluated
  • -
-

Migration Path

-

Phase 1: Implementation (Completed)

-
    -
  • ✅ Cedar engine integration
  • -
  • ✅ Policy loader with hot reload
  • -
  • ✅ Authorization middleware
  • -
  • ✅ Production, development, and admin policies
  • -
  • ✅ Comprehensive tests
  • -
-

Phase 2: Rollout (Next)

-
    -
  • 🔲 Enable Cedar authorization in orchestrator
  • -
  • 🔲 Migrate existing authorization logic to Cedar policies
  • -
  • 🔲 Add authorization checks to all API endpoints
  • -
  • 🔲 Integrate with audit logging
  • -
-

Phase 3: Enhancement (Future)

-
    -
  • 🔲 Control Center policy editor UI
  • -
  • 🔲 Policy testing UI
  • -
  • 🔲 Policy simulation and dry-run mode
  • -
  • 🔲 Policy analytics and insights
  • -
  • 🔲 Advanced context variables (location, device type)
  • -
-

Alternatives Considered

-

Alternative 1: Continue with Code-Based Authorization

-

Keep authorization logic in Rust/Nushell code.

-

Rejected Because:

-
    -
  • Not auditable
  • -
  • Requires code changes for policy updates
  • -
  • Difficult to test all combinations
  • -
  • Not compliant with security standards
  • -
-

Alternative 2: Hybrid Approach

-

Use Cedar for high-level policies, code for fine-grained checks.

-

Rejected Because:

-
    -
  • Complexity of two authorization systems
  • -
  • Unclear separation of concerns
  • -
  • Harder to audit
  • -
-

References

- - -
    -
  • ADR-003: JWT Token-Based Authentication
  • -
  • ADR-004: Audit Logging System
  • -
  • ADR-005: KMS Key Management
  • -
-

Notes

-

Cedar policy language is inspired by decades of authorization research (XACML, AWS IAM) and production experience at AWS. It balances expressiveness -with safety.

-
-

Approved By: Architecture Team -Implementation Date: 2025-10-08 -Review Date: 2026-01-08 (Quarterly)

-

ADR-009: Complete Security System Implementation

-

Status: Implemented -Date: 2025-10-08 -Decision Makers: Architecture Team

-
-

Context

-

The Provisioning platform required a comprehensive, enterprise-grade security system covering authentication, authorization, secrets management, MFA, -compliance, and emergency access. The system needed to be production-ready, scalable, and compliant with GDPR, SOC2, and ISO 27001.

-
-

Decision

-

Implement a complete security architecture using 12 specialized components organized in 4 implementation groups.

-
-

Implementation Summary

-

Total Implementation

-
    -
  • 39,699 lines of production-ready code
  • -
  • 136 files created/modified
  • -
  • 350+ tests implemented
  • -
  • 83+ REST endpoints available
  • -
  • 111+ CLI commands ready
  • -
-
-

Architecture Components

-

Group 1: Foundation (13,485 lines)

-

1. JWT Authentication (1,626 lines)

-

Location: provisioning/platform/control-center/src/auth/

-

Features:

-
    -
  • RS256 asymmetric signing
  • -
  • Access tokens (15 min) + refresh tokens (7 d)
  • -
  • Token rotation and revocation
  • -
  • Argon2id password hashing
  • -
  • 5 user roles (Admin, Developer, Operator, Viewer, Auditor)
  • -
  • Thread-safe blacklist
  • -
-

API: 6 endpoints -CLI: 8 commands -Tests: 30+

-

2. Cedar Authorization (5,117 lines)

-

Location: provisioning/config/cedar-policies/, provisioning/platform/orchestrator/src/security/

-

Features:

-
    -
  • Cedar policy engine integration
  • -
  • 4 policy files (schema, production, development, admin)
  • -
  • Context-aware authorization (MFA, IP, time windows)
  • -
  • Hot reload without restart
  • -
  • Policy validation
  • -
-

API: 4 endpoints -CLI: 6 commands -Tests: 30+

-

3. Audit Logging (3,434 lines)

-

Location: provisioning/platform/orchestrator/src/audit/

-

Features:

-
    -
  • Structured JSON logging
  • -
  • 40+ action types
  • -
  • GDPR compliance (PII anonymization)
  • -
  • 5 export formats (JSON, CSV, Splunk, ECS, JSON Lines)
  • -
  • Query API with advanced filtering
  • -
-

API: 7 endpoints -CLI: 8 commands -Tests: 25

-

4. Config Encryption (3,308 lines)

-

Location: provisioning/core/nulib/lib_provisioning/config/encryption.nu

-

Features:

-
    -
  • SOPS integration
  • -
  • 4 KMS backends (Age, AWS KMS, Vault, Cosmian)
  • -
  • Transparent encryption/decryption
  • -
  • Memory-only decryption
  • -
  • Auto-detection
  • -
-

CLI: 10 commands -Tests: 7

-
-

Group 2: KMS Integration (9,331 lines)

-

5. KMS Service (2,483 lines)

-

Location: provisioning/platform/kms-service/

-

Features:

-
    -
  • HashiCorp Vault (Transit engine)
  • -
  • AWS KMS (Direct + envelope encryption)
  • -
  • Context-based encryption (AAD)
  • -
  • Key rotation support
  • -
  • Multi-region support
  • -
-

API: 8 endpoints -CLI: 15 commands -Tests: 20

-

6. Dynamic Secrets (4,141 lines)

-

Location: provisioning/platform/orchestrator/src/secrets/

-

Features:

-
    -
  • AWS STS temporary credentials (15 min-12 h)
  • -
  • SSH key pair generation (Ed25519)
  • -
  • UpCloud API subaccounts
  • -
  • TTL manager with auto-cleanup
  • -
  • Vault dynamic secrets integration
  • -
-

API: 7 endpoints -CLI: 10 commands -Tests: 15

-

7. SSH Temporal Keys (2,707 lines)

-

Location: provisioning/platform/orchestrator/src/ssh/

-

Features:

-
    -
  • Ed25519 key generation
  • -
  • Vault OTP (one-time passwords)
  • -
  • Vault CA (certificate authority signing)
  • -
  • Auto-deployment to authorized_keys
  • -
  • Background cleanup every 5 min
  • -
-

API: 7 endpoints -CLI: 10 commands -Tests: 31

-
-

Group 3: Security Features (8,948 lines)

-

8. MFA Implementation (3,229 lines)

-

Location: provisioning/platform/control-center/src/mfa/

-

Features:

-
    -
  • TOTP (RFC 6238, 6-digit codes, 30 s window)
  • -
  • WebAuthn/FIDO2 (YubiKey, Touch ID, Windows Hello)
  • -
  • QR code generation
  • -
  • 10 backup codes per user
  • -
  • Multiple devices per user
  • -
  • Rate limiting (5 attempts/5 min)
  • -
-

API: 13 endpoints -CLI: 15 commands -Tests: 85+

-

9. Orchestrator Auth Flow (2,540 lines)

-

Location: provisioning/platform/orchestrator/src/middleware/

-

Features:

-
    -
  • Complete middleware chain (5 layers)
  • -
  • Security context builder
  • -
  • Rate limiting (100 req/min per IP)
  • -
  • JWT authentication middleware
  • -
  • MFA verification middleware
  • -
  • Cedar authorization middleware
  • -
  • Audit logging middleware
  • -
-

Tests: 53

-

10. Control Center UI (3,179 lines)

-

Location: provisioning/platform/control-center/web/

-

Features:

-
    -
  • React/TypeScript UI
  • -
  • Login with MFA (2-step flow)
  • -
  • MFA setup (TOTP + WebAuthn wizards)
  • -
  • Device management
  • -
  • Audit log viewer with filtering
  • -
  • API token management
  • -
  • Security settings dashboard
  • -
-

Components: 12 React components -API Integration: 17 methods

-
-

Group 4: Advanced Features (7,935 lines)

-

11. Break-Glass Emergency Access (3,840 lines)

-

Location: provisioning/platform/orchestrator/src/break_glass/

-

Features:

-
    -
  • Multi-party approval (2+ approvers, different teams)
  • -
  • Emergency JWT tokens (4 h max, special claims)
  • -
  • Auto-revocation (expiration + inactivity)
  • -
  • Enhanced audit (7-year retention)
  • -
  • Real-time alerts
  • -
  • Background monitoring
  • -
-

API: 12 endpoints -CLI: 10 commands -Tests: 985 lines (unit + integration)

-

12. Compliance (4,095 lines)

-

Location: provisioning/platform/orchestrator/src/compliance/

-

Features:

-
    -
  • GDPR: Data export, deletion, rectification, portability, objection
  • -
  • SOC2: 9 Trust Service Criteria verification
  • -
  • ISO 27001: 14 Annex A control families
  • -
  • Incident Response: Complete lifecycle management
  • -
  • Data Protection: 4-level classification, encryption controls
  • -
  • Access Control: RBAC matrix with role verification
  • -
-

API: 35 endpoints -CLI: 23 commands -Tests: 11

-
-

Security Architecture Flow

-

End-to-End Request Flow

-
1. User Request
-   ↓
-2. Rate Limiting (100 req/min per IP)
-   ↓
-3. JWT Authentication (RS256, 15 min tokens)
-   ↓
-4. MFA Verification (TOTP/WebAuthn for sensitive ops)
-   ↓
-5. Cedar Authorization (context-aware policies)
-   ↓
-6. Dynamic Secrets (AWS STS, SSH keys, 1h TTL)
-   ↓
-7. Operation Execution (encrypted configs, KMS)
-   ↓
-8. Audit Logging (structured JSON, GDPR-compliant)
-   ↓
-9. Response
-
-

Emergency Access Flow

-
1. Emergency Request (reason + justification)
-   ↓
-2. Multi-Party Approval (2+ approvers, different teams)
-   ↓
-3. Session Activation (special JWT, 4h max)
-   ↓
-4. Enhanced Audit (7-year retention, immutable)
-   ↓
-5. Auto-Revocation (expiration/inactivity)
-
-
-

Technology Stack

-

Backend (Rust)

-
    -
  • axum: HTTP framework
  • -
  • jsonwebtoken: JWT handling (RS256)
  • -
  • cedar-policy: Authorization engine
  • -
  • totp-rs: TOTP implementation
  • -
  • webauthn-rs: WebAuthn/FIDO2
  • -
  • aws-sdk-kms: AWS KMS integration
  • -
  • argon2: Password hashing
  • -
  • tracing: Structured logging
  • -
-

Frontend (TypeScript/React)

-
    -
  • React 18: UI framework
  • -
  • Leptos: Rust WASM framework
  • -
  • @simplewebauthn/browser: WebAuthn client
  • -
  • qrcode.react: QR code generation
  • -
-

CLI (Nushell)

-
    -
  • Nushell 0.107: Shell and scripting
  • -
  • nu_plugin_kcl: KCL integration
  • -
-

Infrastructure

-
    -
  • HashiCorp Vault: Secrets management, KMS, SSH CA
  • -
  • AWS KMS: Key management service
  • -
  • PostgreSQL/SurrealDB: Data storage
  • -
  • SOPS: Config encryption
  • -
-
-

Security Guarantees

-

Authentication

-

✅ RS256 asymmetric signing (no shared secrets) -✅ Short-lived access tokens (15 min) -✅ Token revocation support -✅ Argon2id password hashing (memory-hard) -✅ MFA enforced for production operations

-

Authorization

-

✅ Fine-grained permissions (Cedar policies) -✅ Context-aware (MFA, IP, time windows) -✅ Hot reload policies (no downtime) -✅ Deny by default

-

Secrets Management

-

✅ No static credentials stored -✅ Time-limited secrets (1h default) -✅ Auto-revocation on expiry -✅ Encryption at rest (KMS) -✅ Memory-only decryption

-

Audit & Compliance

-

✅ Immutable audit logs -✅ GDPR-compliant (PII anonymization) -✅ SOC2 controls implemented -✅ ISO 27001 controls verified -✅ 7-year retention for break-glass

-

Emergency Access

-

✅ Multi-party approval required -✅ Time-limited sessions (4h max) -✅ Enhanced audit logging -✅ Auto-revocation -✅ Cannot be disabled

-
-

Performance Characteristics

-
- - - - - - -
ComponentLatencyThroughputMemory
JWT Auth<5 ms10,000/s~10 MB
Cedar Authz<10 ms5,000/s~50 MB
Audit Log<5 ms20,000/s~100 MB
KMS Encrypt<50 ms1,000/s~20 MB
Dynamic Secrets<100 ms500/s~50 MB
MFA Verify<50 ms2,000/s~30 MB
-
-

Total Overhead: ~10-20 ms per request -Memory Usage: ~260 MB total for all security components

-
-

Deployment Options

-

Development

-
# Start all services
-cd provisioning/platform/kms-service && cargo run &
-cd provisioning/platform/orchestrator && cargo run &
-cd provisioning/platform/control-center && cargo run &
-
-

Production

-
# Kubernetes deployment
-kubectl apply -f k8s/security-stack.yaml
-
-# Docker Compose
-docker-compose up -d kms orchestrator control-center
-
-# Systemd services
-systemctl start provisioning-kms
-systemctl start provisioning-orchestrator
-systemctl start provisioning-control-center
-
-
-

Configuration

-

Environment Variables

-
# JWT
-export JWT_ISSUER="control-center"
-export JWT_AUDIENCE="orchestrator,cli"
-export JWT_PRIVATE_KEY_PATH="/keys/private.pem"
-export JWT_PUBLIC_KEY_PATH="/keys/public.pem"
-
-# Cedar
-export CEDAR_POLICIES_PATH="/config/cedar-policies"
-export CEDAR_ENABLE_HOT_RELOAD=true
-
-# KMS
-export KMS_BACKEND="vault"
-export VAULT_ADDR="https://vault.example.com"
-export VAULT_TOKEN="..."
-
-# MFA
-export MFA_TOTP_ISSUER="Provisioning"
-export MFA_WEBAUTHN_RP_ID="provisioning.example.com"
-
-

Config Files

-
# provisioning/config/security.toml
-[jwt]
-issuer = "control-center"
-audience = ["orchestrator", "cli"]
-access_token_ttl = "15m"
-refresh_token_ttl = "7d"
-
-[cedar]
-policies_path = "config/cedar-policies"
-hot_reload = true
-reload_interval = "60s"
-
-[mfa]
-totp_issuer = "Provisioning"
-webauthn_rp_id = "provisioning.example.com"
-rate_limit = 5
-rate_limit_window = "5m"
-
-[kms]
-backend = "vault"
-vault_address = "https://vault.example.com"
-vault_mount_point = "transit"
-
-[audit]
-retention_days = 365
-retention_break_glass_days = 2555  # 7 years
-export_format = "json"
-pii_anonymization = true
-
-
-

Testing

-

Run All Tests

-
# Control Center (JWT, MFA)
-cd provisioning/platform/control-center
-cargo test
-
-# Orchestrator (Cedar, Audit, Secrets, SSH, Break-Glass, Compliance)
-cd provisioning/platform/orchestrator
-cargo test
-
-# KMS Service
-cd provisioning/platform/kms-service
-cargo test
-
-# Config Encryption (Nushell)
-nu provisioning/core/nulib/lib_provisioning/config/encryption_tests.nu
-
-

Integration Tests

-
# Full security flow
-cd provisioning/platform/orchestrator
-cargo test --test security_integration_tests
-cargo test --test break_glass_integration_tests
-
-
-

Monitoring & Alerts

-

Metrics to Monitor

-
    -
  • Authentication failures (rate, sources)
  • -
  • Authorization denials (policies, resources)
  • -
  • MFA failures (attempts, users)
  • -
  • Token revocations (rate, reasons)
  • -
  • Break-glass activations (frequency, duration)
  • -
  • Secrets generation (rate, types)
  • -
  • Audit log volume (events/sec)
  • -
-

Alerts to Configure

-
    -
  • Multiple failed auth attempts (5+ in 5 min)
  • -
  • Break-glass session created
  • -
  • Compliance report non-compliant
  • -
  • Incident severity critical/high
  • -
  • Token revocation spike
  • -
  • KMS errors
  • -
  • Audit log export failures
  • -
-
-

Maintenance

-

Daily

-
    -
  • Monitor audit logs for anomalies
  • -
  • Review failed authentication attempts
  • -
  • Check break-glass sessions (should be zero)
  • -
-

Weekly

-
    -
  • Review compliance reports
  • -
  • Check incident response status
  • -
  • Verify backup code usage
  • -
  • Review MFA device additions/removals
  • -
-

Monthly

-
    -
  • Rotate KMS keys
  • -
  • Review and update Cedar policies
  • -
  • Generate compliance reports (GDPR, SOC2, ISO)
  • -
  • Audit access control matrix
  • -
-

Quarterly

-
    -
  • Full security audit
  • -
  • Penetration testing
  • -
  • Compliance certification review
  • -
  • Update security documentation
  • -
-
-

Migration Path

-

From Existing System

-
    -
  1. -

    Phase 1: Deploy security infrastructure

    -
      -
    • KMS service
    • -
    • Orchestrator with auth middleware
    • -
    • Control Center
    • -
    -
  2. -
  3. -

    Phase 2: Migrate authentication

    -
      -
    • Enable JWT authentication
    • -
    • Migrate existing users
    • -
    • Disable old auth system
    • -
    -
  4. -
  5. -

    Phase 3: Enable MFA

    -
      -
    • Require MFA enrollment for admins
    • -
    • Gradual rollout to all users
    • -
    -
  6. -
  7. -

    Phase 4: Enable Cedar authorization

    -
      -
    • Deploy initial policies (permissive)
    • -
    • Monitor authorization decisions
    • -
    • Tighten policies incrementally
    • -
    -
  8. -
  9. -

    Phase 5: Enable advanced features

    -
      -
    • Break-glass procedures
    • -
    • Compliance reporting
    • -
    • Incident response
    • -
    -
  10. -
-
-

Future Enhancements

-

Planned (Not Implemented)

-
    -
  • Hardware Security Module (HSM) integration
  • -
  • OAuth2/OIDC federation
  • -
  • SAML SSO for enterprise
  • -
  • Risk-based authentication (IP reputation, device fingerprinting)
  • -
  • Behavioral analytics (anomaly detection)
  • -
  • Zero-Trust Network (service mesh integration)
  • -
-

Under Consideration

-
    -
  • Blockchain audit log (immutable append-only log)
  • -
  • Quantum-resistant cryptography (post-quantum algorithms)
  • -
  • Confidential computing (SGX/SEV enclaves)
  • -
  • Distributed break-glass (multi-region approval)
  • -
-
-

Consequences

-

Positive

-

Enterprise-grade security meeting GDPR, SOC2, ISO 27001 -✅ Zero static credentials (all dynamic, time-limited) -✅ Complete audit trail (immutable, GDPR-compliant) -✅ MFA-enforced for sensitive operations -✅ Emergency access with enhanced controls -✅ Fine-grained authorization (Cedar policies) -✅ Automated compliance (reports, incident response)

-

Negative

-

⚠️ Increased complexity (12 components to manage) -⚠️ Performance overhead (~10-20 ms per request) -⚠️ Memory footprint (~260 MB additional) -⚠️ Learning curve (Cedar policy language, MFA setup) -⚠️ Operational overhead (key rotation, policy updates)

-

Mitigations

-
    -
  • Comprehensive documentation (ADRs, guides, API docs)
  • -
  • CLI commands for all operations
  • -
  • Automated monitoring and alerting
  • -
  • Gradual rollout with feature flags
  • -
  • Training materials for operators
  • -
-
- -
    -
  • JWT Auth: docs/architecture/JWT_AUTH_IMPLEMENTATION.md
  • -
  • Cedar Authz: docs/architecture/CEDAR_AUTHORIZATION_IMPLEMENTATION.md
  • -
  • Audit Logging: docs/architecture/AUDIT_LOGGING_IMPLEMENTATION.md
  • -
  • MFA: docs/architecture/MFA_IMPLEMENTATION_SUMMARY.md
  • -
  • Break-Glass: docs/architecture/BREAK_GLASS_IMPLEMENTATION_SUMMARY.md
  • -
  • Compliance: docs/architecture/COMPLIANCE_IMPLEMENTATION_SUMMARY.md
  • -
  • Config Encryption: docs/user/CONFIG_ENCRYPTION_GUIDE.md
  • -
  • Dynamic Secrets: docs/user/DYNAMIC_SECRETS_QUICK_REFERENCE.md
  • -
  • SSH Keys: docs/user/SSH_TEMPORAL_KEYS_USER_GUIDE.md
  • -
-
-

Approval

-

Architecture Team: Approved -Security Team: Approved (pending penetration test) -Compliance Team: Approved (pending audit) -Engineering Team: Approved

-
-

Date: 2025-10-08 -Version: 1.0.0 -Status: Implemented and Production-Ready

-

ADR-010: Configuration File Format Strategy

-

Status: Accepted -Date: 2025-12-03 -Decision Makers: Architecture Team -Implementation: Multi-phase migration (KCL workspace configs + template reorganization)

-
-

Context

-

The provisioning project historically used a single configuration format (YAML/TOML environment variables) for all purposes. As the system evolved, -different parts naturally adopted different formats:

-
    -
  • TOML for modular provider and platform configurations (providers/*.toml, platform/*.toml)
  • -
  • KCL for infrastructure-as-code definitions with type safety
  • -
  • YAML for workspace metadata
  • -
-

However, the workspace configuration remained in YAML (provisioning.yaml), -creating inconsistency and leaving type-unsafe configuration handling. Meanwhile, -complete KCL schemas for workspace configuration were designed but unused.

-

Problem: Three different formats in the same system without documented rationale or consistent patterns.

-
-

Decision

-

Adopt a three-format strategy with clear separation of concerns:

-
- - - -
FormatPurposeUse Cases
KCLInfrastructure as Code & SchemasWorkspace config, infrastructure definitions, type-safe validation
TOMLApplication Configuration & SettingsSystem defaults, provider settings, user preferences, interpolation
YAMLMetadata & Kubernetes ResourcesK8s manifests, tool metadata, version tracking, CI/CD resources
-
-
-

Implementation Strategy

-

Phase 1: Documentation (Complete)

-

Define and document the three-format approach through:

-
    -
  1. ADR-010 (this document) - Rationale and strategy
  2. -
  3. CLAUDE.md updates - Quick reference for developers
  4. -
  5. Configuration hierarchy - Explicit precedence rules
  6. -
-

Phase 2: Workspace Config Migration (In Progress)

-

Migrate workspace configuration from YAML to KCL:

-
    -
  1. Create comprehensive workspace configuration schema in KCL
  2. -
  3. Implement backward-compatible config loader (KCL first, fallback to YAML)
  4. -
  5. Provide migration script to convert existing workspaces
  6. -
  7. Update workspace initialization to generate KCL configs
  8. -
-

Expected Outcome:

-
    -
  • workspace/config/provisioning.ncl (KCL, type-safe, validated)
  • -
  • Full schema validation with semantic versioning checks
  • -
  • Automatic validation at config load time
  • -
-

Phase 3: Template File Reorganization (In Progress)

-

Move template files to proper directory structure and correct extensions:

-
Previous (KCL):
-  provisioning/kcl/templates/*.k  (had Nushell/Jinja2 code, not KCL)
-
-Current (Nickel):
-  provisioning/templates/
-    ├── nushell/*.nu.j2
-    ├── config/*.toml.j2
-    ├── nickel/*.ncl.j2
-    └── README.md
-
-

Expected Outcome:

-
    -
  • Templates properly classified and discoverable
  • -
  • KCL validation passes (15/16 errors eliminated)
  • -
  • Template system clean and maintainable
  • -
-
-

Rationale for Each Format

-

KCL for Workspace Configuration

-

Why KCL over YAML or TOML?

-
    -
  1. -

    Type Safety: Catch configuration errors at schema validation time, not runtime

    -
    schema WorkspaceDeclaration:
    -    metadata: Metadata
    -    check:
    -        regex.match(metadata.version, r"^\d+\.\d+\.\d+$"), \
    -            "Version must be semantic versioning"
    -
    -
  2. -
  3. -

    Schema-First Development: Schemas are first-class citizens

    -
      -
    • Document expected structure upfront
    • -
    • IDE support for auto-completion
    • -
    • Enforce required fields and value ranges
    • -
    -
  4. -
  5. -

    Immutable by Default: Infrastructure configurations are immutable

    -
      -
    • Prevents accidental mutations
    • -
    • Better for reproducible deployments
    • -
    • Aligns with PAP principle: “configuration-driven, not hardcoded”
    • -
    -
  6. -
  7. -

    Complex Validation: KCL supports sophisticated validation rules

    -
      -
    • Semantic versioning validation
    • -
    • Dependency checking
    • -
    • Cross-field validation
    • -
    • Range constraints on numeric values
    • -
    -
  8. -
  9. -

    Ecosystem Consistency: KCL is already used for infrastructure definitions

    -
      -
    • Server configurations use KCL
    • -
    • Cluster definitions use KCL
    • -
    • Taskserv definitions use KCL
    • -
    • Using KCL for workspace config maintains consistency
    • -
    -
  10. -
  11. -

    Existing Schemas: provisioning/kcl/generator/declaration.ncl already defines complete workspace schemas

    -
      -
    • No design work needed
    • -
    • Production-ready schemas
    • -
    • Well-tested patterns
    • -
    -
  12. -
-

TOML for Application Configuration

-

Why TOML for settings?

-
    -
  1. -

    Hierarchical Structure: Native support for nested configurations

    -
    [http]
    -use_curl = false
    -timeout = 30
    -
    -[debug]
    -enabled = false
    -log_level = "info"
    -
    -
  2. -
  3. -

    Interpolation Support: Dynamic variable substitution

    -
    base_path = "/Users/home/provisioning"
    -cache_path = "{{base_path}}/.cache"
    -
    -
  4. -
  5. -

    Industry Standard: Widely used for application configuration (Rust, Python, Go)

    -
  6. -
  7. -

    Human Readable: Clear, explicit, easy to edit

    -
  8. -
  9. -

    Validation Support: Schema files (.schema.toml) for validation

    -
  10. -
-

Use Cases:

-
    -
  • System defaults: provisioning/config/config.defaults.toml
  • -
  • Provider settings: workspace/config/providers/*.toml
  • -
  • Platform services: workspace/config/platform/*.toml
  • -
  • User preferences: User config files
  • -
-

YAML for Metadata and Kubernetes Resources

-

Why YAML for metadata?

-
    -
  1. -

    Kubernetes Compatibility: YAML is K8s standard

    -
      -
    • K8s manifests use YAML
    • -
    • Consistent with ecosystem
    • -
    • Familiar to DevOps engineers
    • -
    -
  2. -
  3. -

    Lightweight: Good for simple data structures

    -
    workspace:
    -  name: "librecloud"
    -  version: "1.0.0"
    -  created: "2025-10-06T12:29:43Z"
    -
    -
  4. -
  5. -

    Version Control: Human-readable format

    -
      -
    • Diffs are clear and meaningful
    • -
    • Git-friendly
    • -
    • Comments supported
    • -
    -
  6. -
-

Use Cases:

-
    -
  • K8s resource definitions
  • -
  • Tool metadata (versions, sources, tags)
  • -
  • CI/CD configuration files
  • -
  • User workspace metadata (during transition)
  • -
-
-

Configuration Hierarchy (Priority)

-

When loading configuration, use this precedence (highest to lowest):

-
    -
  1. -

    Runtime Arguments (highest priority)

    -
      -
    • CLI flags passed to commands
    • -
    • Explicit user input
    • -
    -
  2. -
  3. -

    Environment Variables (PROVISIONING_*)

    -
      -
    • Override system settings
    • -
    • Deployment-specific overrides
    • -
    • Secrets via env vars
    • -
    -
  4. -
  5. -

    User Configuration (Centralized)

    -
      -
    • User preferences: ~/.config/provisioning/user_config.yaml
    • -
    • User workspace overrides: workspace/config/local-overrides.toml
    • -
    -
  6. -
  7. -

    Infrastructure Configuration

    -
      -
    • Workspace KCL config: workspace/config/provisioning.ncl
    • -
    • Platform services: workspace/config/platform/*.toml
    • -
    • Provider configs: workspace/config/providers/*.toml
    • -
    -
  8. -
  9. -

    System Defaults (lowest priority)

    -
      -
    • System config: provisioning/config/config.defaults.toml
    • -
    • Schema defaults: defined in KCL schemas
    • -
    -
  10. -
-
-

Migration Path

-

For Existing Workspaces

-
    -
  1. -

    Migration Path: Config loader checks for .ncl first, then falls back to .yaml for legacy systems

    -
    # Try Nickel first (current)
    -if ($config_nickel | path exists) {
    -    let config = (load_nickel_workspace_config $config_nickel)
    -} else if ($config_yaml | path exists) {
    -    # Legacy YAML support (from pre-migration)
    -    let config = (open $config_yaml)
    -}
    -
    -
  2. -
  3. -

    Automatic Migration: Migration script converts YAML/KCL → Nickel

    -
    provisioning workspace migrate-config --all
    -
    -
  4. -
  5. -

    Validation: New KCL configs validated against schemas

    -
  6. -
-

For New Workspaces

-
    -
  1. -

    Generate KCL: Workspace initialization creates .k files

    -
    provisioning workspace create my-workspace
    -# Creates: workspace/my-workspace/config/provisioning.ncl
    -
    -
  2. -
  3. -

    Use Existing Schemas: Leverage provisioning/kcl/generator/declaration.ncl

    -
  4. -
  5. -

    Schema Validation: Automatic validation during config load

    -
  6. -
-
-

File Format Guidelines for Developers

-

When to Use Each Format

-

Use KCL for:

-
    -
  • Infrastructure definitions (servers, clusters, taskservs)
  • -
  • Configuration with type requirements
  • -
  • Schema definitions
  • -
  • Any config that needs validation rules
  • -
  • Workspace configuration
  • -
-

Use TOML for:

-
    -
  • Application settings (HTTP client, logging, timeouts)
  • -
  • Provider-specific settings
  • -
  • Platform service configuration
  • -
  • User preferences and overrides
  • -
  • System defaults with interpolation
  • -
-

Use YAML for:

-
    -
  • Kubernetes manifests
  • -
  • CI/CD configuration (GitHub Actions, GitLab CI)
  • -
  • Tool metadata
  • -
  • Human-readable documentation files
  • -
  • Version control metadata
  • -
-
-

Consequences

-

Benefits

-

Type Safety: KCL schema validation catches config errors early -✅ Consistency: Infrastructure definitions and configs use same language -✅ Maintainability: Clear separation of concerns (IaC vs settings vs metadata) -✅ Validation: Semantic versioning, required fields, range checks -✅ Tooling: IDE support for KCL auto-completion -✅ Documentation: Self-documenting schemas with descriptions -✅ Ecosystem Alignment: TOML for settings (Rust standard), YAML for K8s

-

Trade-offs

-

⚠️ Learning Curve: Developers must understand three formats -⚠️ Migration Effort: Existing YAML configs need conversion -⚠️ Tooling Requirements: KCL compiler needed (already a dependency)

-

Risk Mitigation

-
    -
  1. Documentation: Clear guidelines in CLAUDE.md
  2. -
  3. Backward Compatibility: YAML support maintained during transition
  4. -
  5. Automation: Migration scripts for existing workspaces
  6. -
  7. Gradual Migration: No hard cutoff, both formats supported for extended period
  8. -
-
-

Template File Reorganization

-

Problem

-

Currently, 15/16 files in provisioning/kcl/templates/ have .k extension but contain Nushell/Jinja2 code, not KCL:

-
provisioning/kcl/templates/
-├── server.ncl          # Actually Nushell/Jinja2 template
-├── taskserv.ncl        # Actually Nushell/Jinja2 template
-└── ...               # 15 more template files
-
-

This causes:

-
    -
  • KCL validation failures (96.6% of errors)
  • -
  • Misclassification (templates in KCL directory)
  • -
  • Confusing directory structure
  • -
-

Solution

-

Reorganize into type-specific directories:

-
provisioning/templates/
-├── nushell/           # Nushell code generation (*.nu.j2)
-│   ├── server.nu.j2
-│   ├── taskserv.nu.j2
-│   └── ...
-├── config/            # Config file generation (*.toml.j2, *.yaml.j2)
-│   ├── provider.toml.j2
-│   └── ...
-├── kcl/               # KCL file generation (*.k.j2)
-│   ├── workspace.ncl.j2
-│   └── ...
-└── README.md
-
-

Outcome

-

✅ Correct file classification -✅ KCL validation passes completely -✅ Clear template organization -✅ Easier to discover and maintain templates

-
-

References

-

Existing KCL Schemas

-
    -
  1. -

    Workspace Declaration: provisioning/kcl/generator/declaration.ncl

    -
      -
    • WorkspaceDeclaration - Complete workspace specification
    • -
    • Metadata - Name, version, author, timestamps
    • -
    • DeploymentConfig - Deployment modes, servers, HA settings
    • -
    • Includes validation rules and semantic versioning
    • -
    -
  2. -
  3. -

    Workspace Layer: provisioning/workspace/layers/workspace.layer.ncl

    -
      -
    • WorkspaceLayer - Template paths, priorities, metadata
    • -
    -
  4. -
  5. -

    Core Settings: provisioning/kcl/settings.ncl

    -
      -
    • Settings - Main provisioning settings
    • -
    • SecretProvider - SOPS/KMS configuration
    • -
    • AIProvider - AI provider configuration
    • -
    -
  6. -
- -
    -
  • ADR-001: Project Structure
  • -
  • ADR-005: Extension Framework
  • -
  • ADR-006: Provisioning CLI Refactoring
  • -
  • ADR-009: Security System Complete
  • -
-
-

Decision Status

-

Status: Accepted

-

Next Steps:

-
    -
  1. ✅ Document strategy (this ADR)
  2. -
  3. ⏳ Create workspace configuration KCL schema
  4. -
  5. ⏳ Implement backward-compatible config loader
  6. -
  7. ⏳ Create migration script for YAML → KCL
  8. -
  9. ⏳ Move template files to proper directories
  10. -
  11. ⏳ Update documentation with examples
  12. -
  13. ⏳ Migrate workspace_librecloud to KCL
  14. -
-
-

Last Updated: 2025-12-03

-

ADR-011: Migration from KCL to Nickel

-

Status: Implemented -Date: 2025-12-15 -Decision Makers: Architecture Team -Implementation: Complete for platform schemas (100%)

-
-

Context

-

The provisioning platform historically used KCL (KLang) as the primary infrastructure-as-code language for all configuration schemas. As the system -evolved through four migration phases (Foundation, Core, Complex, Highly Complex), KCL’s limitations became increasingly apparent:

-

Problems with KCL

-
    -
  1. -

    Complex Type System: Heavyweight schema system with extensive boilerplate

    -
      -
    • schema Foo(bar.Baz) inheritance creates rigid hierarchies
    • -
    • Union types with null don’t work well in type annotations
    • -
    • Schema modifications propagate breaking changes
    • -
    -
  2. -
  3. -

    Limited Flexibility: Schema-first approach is too rigid for configuration evolution

    -
      -
    • Difficult to extend types without modifying base schemas
    • -
    • No easy way to add custom fields without validation conflicts
    • -
    • Hard to compose configurations dynamically
    • -
    -
  4. -
  5. -

    Import System Overhead: Non-standard module imports

    -
      -
    • import provisioning.lib as lib pattern differs from ecosystem standards
    • -
    • Re-export patterns create complexity in extension systems
    • -
    -
  6. -
  7. -

    Performance Overhead: Compile-time validation adds latency

    -
      -
    • Schema validation happens at compile time
    • -
    • Large configuration files slow down evaluation
    • -
    • No lazy evaluation built-in
    • -
    -
  8. -
  9. -

    Learning Curve: KCL is Python-like but with unique patterns

    -
      -
    • Team must learn KCL-specific semantics
    • -
    • Limited ecosystem and tooling support
    • -
    • Difficult to hire developers familiar with KCL
    • -
    -
  10. -
-

Project Needs

-

The provisioning system required:

-
    -
  • Greater flexibility in composing configurations
  • -
  • Better performance for large-scale deployments
  • -
  • Extensibility without modifying base schemas
  • -
  • Simpler mental model for team learning
  • -
  • Clean exports to JSON/TOML/YAML formats
  • -
-
-

Decision

-

Adopt Nickel as the primary infrastructure-as-code language for all schema definitions, configuration composition, and deployment declarations.

-

Key Changes

-
    -
  1. -

    Three-File Pattern per Module:

    -
      -
    • {module}_contracts.ncl - Type definitions using Nickel contracts
    • -
    • {module}_defaults.ncl - Default values for all fields
    • -
    • {module}.ncl - Instances combining both, with hybrid interface
    • -
    -
  2. -
  3. -

    Hybrid Interface (4 levels of access):

    -
      -
    • Level 1: Direct access to defaults (inspection, reference)
    • -
    • Level 2: Maker functions (90% of use cases)
    • -
    • Level 3: Default instances (pre-built, exported)
    • -
    • Level 4: Contracts (optional imports, advanced combinations)
    • -
    -
  4. -
  5. -

    Domain-Organized Architecture (8 top-level domains):

    -
      -
    • lib - Core library types
    • -
    • config - Settings, defaults, workspace configuration
    • -
    • infrastructure - Compute, storage, provisioning schemas
    • -
    • operations - Workflows, batch, dependencies, tasks
    • -
    • deployment - Kubernetes, execution modes
    • -
    • services - Gitea and other platform services
    • -
    • generator - Code generation and declarations
    • -
    • integrations - Runtime, GitOps, external integrations
    • -
    -
  6. -
  7. -

    Two Deployment Modes:

    -
      -
    • Development: Fast iteration with relative imports (Single Source of Truth)
    • -
    • Production: Frozen snapshots with immutable, self-contained deployment packages
    • -
    -
  8. -
-
-

Implementation Summary

-

Migration Complete

-
- - - - - - - -
MetricValue
KCL files migrated40
Nickel files created72
Modules converted24 core modules
Schemas migrated150+
Maker functions80+
Default instances90+
JSON output validation4,680+ lines
-
-

Platform Schemas (provisioning/schemas/)

-
    -
  • 422 Nickel files total
  • -
  • 8 domains with hierarchical organization
  • -
  • Entry point: main.ncl with domain-organized architecture
  • -
  • Clean imports: provisioning.lib, provisioning.config.settings, etc.
  • -
-

Extensions (provisioning/extensions/)

-
    -
  • 4 providers: hetzner, local, aws, upcloud
  • -
  • 1 cluster type: web
  • -
  • Consistent structure: Each extension has nickel/ subdirectory with contracts, defaults, main, version
  • -
-

Example - UpCloud Provider:

-
# upcloud/nickel/main.ncl (migrated from upcloud/kcl/)
-let contracts = import "./contracts.ncl" in
-let defaults = import "./defaults.ncl" in
-
-{
-  defaults = defaults,
-  make_storage | not_exported = fun overrides =>
-    defaults.storage & overrides,
-  DefaultStorage = defaults.storage,
-  DefaultStorageBackup = defaults.storage_backup,
-  DefaultProvisionEnv = defaults.provision_env,
-  DefaultProvisionUpcloud = defaults.provision_upcloud,
-  DefaultServerDefaults_upcloud = defaults.server_defaults_upcloud,
-  DefaultServerUpcloud = defaults.server_upcloud,
-}
-
-

Active Workspaces (workspace_librecloud/nickel/)

-
    -
  • 47 Nickel files in productive use
  • -
  • 2 infrastructures: -
      -
    • wuji - Kubernetes cluster with 20 taskservs
    • -
    • sgoyol - Support servers group
    • -
    -
  • -
  • Two deployment modes fully implemented and tested
  • -
  • Daily production usage validated ✅
  • -
-

Backward Compatibility

-
    -
  • 955 KCL files remain in workspaces/ (legacy user configs)
  • -
  • 100% backward compatible - old KCL code still works
  • -
  • Config loader supports both formats during transition
  • -
  • No breaking changes to APIs
  • -
-
-

Comparison: KCL vs Nickel

-
- - - - - - - - - - -
AspectKCLNickelWinner
Mental ModelPython-like with schemasJSON with functionsNickel
PerformanceBaseline60% faster evaluationNickel
Type SystemRigid schemasGradual typing + contractsNickel
CompositionSchema inheritanceRecord merging (&)Nickel
ExtensibilityRequires schema modificationsMerging with custom fieldsNickel
ValidationCompile-time (overhead)Runtime contracts (lazy)Nickel
BoilerplateHighLow (3-file pattern)Nickel
ExportsJSON/YAMLJSON/TOML/YAMLNickel
Learning CurveMedium-HighLowNickel
Lazy EvaluationNoYes (built-in)Nickel
-
-
-

Architecture Patterns

-

Three-File Pattern

-

File 1: Contracts (batch_contracts.ncl):

-
{
-  BatchScheduler = {
-    strategy | String,
-    resource_limits,
-    scheduling_interval | Number,
-    enable_preemption | Bool,
-  },
-}
-
-

File 2: Defaults (batch_defaults.ncl):

-
{
-  scheduler = {
-    strategy = "dependency_first",
-    resource_limits = {"max_cpu_cores" = 0},
-    scheduling_interval = 10,
-    enable_preemption = false,
-  },
-}
-
-

File 3: Main (batch.ncl):

-
let contracts = import "./batch_contracts.ncl" in
-let defaults = import "./batch_defaults.ncl" in
-
-{
-  defaults = defaults,                    # Level 1: Inspection
-  make_scheduler | not_exported = fun o =>
-    defaults.scheduler & o,               # Level 2: Makers
-  DefaultScheduler = defaults.scheduler,  # Level 3: Instances
-}
-
-

Hybrid Pattern Benefits

-
    -
  • 90% of users: Use makers for simple customization
  • -
  • 9% of users: Reference defaults for inspection
  • -
  • 1% of users: Access contracts for advanced combinations
  • -
  • No validation conflicts: Record merging works without contract constraints
  • -
-

Domain-Organized Architecture

-
provisioning/schemas/
-├── lib/                  # Storage, TaskServDef, ClusterDef
-├── config/               # Settings, defaults, workspace_config
-├── infrastructure/       # Compute, storage, provisioning
-├── operations/           # Workflows, batch, dependencies, tasks
-├── deployment/           # Kubernetes, modes (solo, multiuser, cicd, enterprise)
-├── services/             # Gitea, etc
-├── generator/            # Declarations, gap analysis, changes
-├── integrations/         # Runtime, GitOps, main
-└── main.ncl              # Entry point with namespace organization
-
-

Import pattern:

-
let provisioning = import "./main.ncl" in
-provisioning.lib              # For Storage, TaskServDef
-provisioning.config.settings  # For Settings, Defaults
-provisioning.infrastructure.compute.server
-provisioning.operations.workflows
-
-
-

Production Deployment Patterns

-

Two-Mode Strategy

-

1. Development Mode (Single Source of Truth)

-
    -
  • Relative imports to central provisioning
  • -
  • Fast iteration with immediate schema updates
  • -
  • No snapshot overhead
  • -
  • Usage: Local development, testing, experimentation
  • -
-
# workspace_librecloud/nickel/main.ncl
-import "../../provisioning/schemas/main.ncl"
-import "../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl"
-
-

2. Production Mode (Hermetic Deployment)

-

Create immutable snapshots for reproducible deployments:

-
provisioning workspace freeze --version "2025-12-15-prod-v1" --env production
-
-

Frozen structure (.frozen/{version}/):

-
├── provisioning/schemas/    # Snapshot of central schemas
-├── extensions/              # Snapshot of all extensions
-└── workspace/               # Snapshot of workspace configs
-
-

All imports rewritten to local paths:

-
    -
  • import "../../provisioning/schemas/main.ncl"import "./provisioning/schemas/main.ncl"
  • -
  • Guarantees immutability and reproducibility
  • -
  • No external dependencies
  • -
  • Can be deployed to air-gapped environments
  • -
-

Deploy from frozen snapshot:

-
provisioning deploy --frozen "2025-12-15-prod-v1" --infra wuji
-
-

Benefits:

-
    -
  • ✅ Development: Fast iteration with central updates
  • -
  • ✅ Production: Immutable, reproducible deployments
  • -
  • ✅ Audit trail: Each frozen version timestamped
  • -
  • ✅ Rollback: Easy rollback to previous versions
  • -
  • ✅ Air-gapped: Works in offline environments
  • -
-
-

Ecosystem Integration

-

TypeDialog (Bidirectional Nickel Integration)

-

Location: /Users/Akasha/Development/typedialog -Purpose: Type-safe prompts, forms, and schemas with Nickel output

-

Key Feature: Nickel schemas → Type-safe UIs → Nickel output

-
# Nickel schema → Interactive form
-typedialog form --schema server.ncl --output json
-
-# Interactive form → Nickel output
-typedialog form --input form.toml --output nickel
-
-

Value: Amplifies Nickel ecosystem beyond IaC:

-
    -
  • Schemas auto-generate type-safe UIs
  • -
  • Forms output configurations back to Nickel
  • -
  • Multiple backends: CLI, TUI, Web
  • -
  • Multiple output formats: JSON, YAML, TOML, Nickel
  • -
-
-

Technical Patterns

-

Expression-Based Structure

-
- -
KCLNickel
Multiple top-level let bindingsSingle root expression with let...in chaining
-
-

Schema Inheritance → Record Merging

-
- -
KCLNickel
schema Server(defaults.ServerDefaults)defaults.ServerDefaults & { overrides }
-
-

Optional Fields

-
- -
KCLNickel
field?: typefield = null or field = ""
-
-

Union Types

-
- -
KCLNickel
"ubuntu" &#124; "debian" &#124; "centos"[\\&#124; 'ubuntu, 'debian, 'centos \\&#124;]
-
-

Boolean/Null Conversion

-
- -
KCLNickel
True / False / Nonetrue / false / null
-
-
-

Quality Metrics

-
    -
  • Syntax Validation: 100% (all files compile)
  • -
  • JSON Export: 100% success rate (4,680+ lines)
  • -
  • Pattern Coverage: All 5 templates tested and proven
  • -
  • Backward Compatibility: 100%
  • -
  • Performance: 60% faster evaluation than KCL
  • -
  • Test Coverage: 422 Nickel files validated in production
  • -
-
-

Consequences

-

Positive ✅

-
    -
  • 60% performance gain in evaluation speed
  • -
  • Reduced boilerplate (contracts + defaults separation)
  • -
  • Greater flexibility (record merging without validation)
  • -
  • Extensibility without conflicts (custom fields allowed)
  • -
  • Simplified mental model (“JSON with functions”)
  • -
  • Lazy evaluation (better performance for large configs)
  • -
  • Clean exports (100% JSON/TOML compatible)
  • -
  • Hybrid pattern (4 levels covering all use cases)
  • -
  • Domain-organized architecture (8 logical domains, clear imports)
  • -
  • Production deployment with frozen snapshots (immutable, reproducible)
  • -
  • Ecosystem expansion (TypeDialog integration for UI generation)
  • -
  • Real-world validation (47 files in productive use)
  • -
  • 20 taskservs deployed in production infrastructure
  • -
-

Challenges ⚠️

-
    -
  • Dual format support during transition (KCL + Nickel)
  • -
  • Learning curve for team (new language)
  • -
  • Migration effort (40 files migrated manually)
  • -
  • Documentation updates (guides, examples, training)
  • -
  • 955 KCL files remain (gradual workspace migration)
  • -
  • Frozen snapshots workflow (requires understanding workspace freeze)
  • -
  • TypeDialog dependency (external Rust project)
  • -
-

Mitigations

-
    -
  • ✅ Complete documentation in docs/development/kcl-module-system.md
  • -
  • ✅ 100% backward compatibility maintained
  • -
  • ✅ Migration framework established (5 templates, validation checklist)
  • -
  • ✅ Validation checklist for each migration step
  • -
  • ✅ 100% syntax validation on all files
  • -
  • ✅ Real-world usage validated (47 files in production)
  • -
  • ✅ Frozen snapshots guarantee reproducibility
  • -
  • ✅ Two deployment modes cover development and production
  • -
  • ✅ Gradual migration strategy (workspace-level, no hard cutoff)
  • -
-
-

Migration Status

-

Completed (Phase 1-4)

-
    -
  • ✅ Foundation (8 files) - Basic schemas, validation library
  • -
  • ✅ Core Schemas (8 files) - Settings, workspace config, gitea
  • -
  • ✅ Complex Features (7 files) - VM lifecycle, system config, services
  • -
  • ✅ Very Complex (9+ files) - Modes, commands, orchestrator, main entry point
  • -
  • ✅ Platform schemas (422 files total)
  • -
  • ✅ Extensions (providers, clusters)
  • -
  • ✅ Production workspace (47 files, 20 taskservs)
  • -
-

In Progress (Workspace-Level)

-
    -
  • ⏳ Workspace migration (323+ files in workspace_librecloud)
  • -
  • ⏳ Extension migration (taskservs, clusters, providers)
  • -
  • ⏳ Parallel testing against original KCL
  • -
  • ⏳ CI/CD integration updates
  • -
-

Future (Optional)

-
    -
  • User workspace KCL to Nickel (gradual, as needed)
  • -
  • Full migration of legacy configurations
  • -
  • TypeDialog UI generation for infrastructure
  • -
-
- -

Development Guides

- - -
    -
  • ADR-010: Configuration Format Strategy (multi-format approach)
  • -
  • ADR-006: CLI Refactoring (domain-driven design)
  • -
  • ADR-004: Hybrid Rust/Nushell Architecture (platform architecture)
  • -
-

Referenced Files

-
    -
  • Entry point: provisioning/schemas/main.ncl
  • -
  • Workspace pattern: workspace_librecloud/nickel/main.ncl
  • -
  • Example extension: provisioning/extensions/providers/upcloud/nickel/main.ncl
  • -
  • Production infrastructure: workspace_librecloud/nickel/wuji/main.ncl (20 taskservs)
  • -
-
-

Approval

-

Status: Implemented and Production-Ready

-
    -
  • ✅ Architecture Team: Approved
  • -
  • ✅ Platform implementation: Complete (422 files)
  • -
  • ✅ Production validation: Passed (47 files active)
  • -
  • ✅ Backward compatibility: 100%
  • -
  • ✅ Real-world usage: Validated in wuji infrastructure
  • -
-
-

Last Updated: 2025-12-15 -Version: 1.0.0 -Implementation: Complete (Phase 1-4 finished, workspace-level in progress)

-

ADR-014: Nushell Nickel Plugin - CLI Wrapper Architecture

-

Status

-

Accepted - 2025-12-15

-

Context

-

The provisioning system integrates with Nickel for configuration management in advanced -scenarios. Users need to evaluate Nickel files and work with their output in Nushell -scripts. The nu_plugin_nickel plugin provides this integration.

-

The architectural decision was whether the plugin should:

-
    -
  1. Implement Nickel directly using pure Rust (nickel-lang-core crate)
  2. -
  3. Wrap the official Nickel CLI (nickel command)
  4. -
-

System Requirements

-

Nickel configurations in provisioning use the module system:

-
# config/database.ncl
-import "lib/defaults" as defaults
-import "lib/validation" as valid
-
-{
-  databases: {
-    primary = defaults.database & {
-      name = "primary"
-      host = "localhost"
-    }
-  }
-}
-
-

Module system includes:

-
    -
  • Import resolution with search paths
  • -
  • Standard library (builtins, stdlib packages)
  • -
  • Module caching
  • -
  • Complex evaluation context
  • -
-

Decision

-

Implement the nu_plugin_nickel plugin as a CLI wrapper that invokes the external nickel command.

-

Architecture Diagram

-
┌─────────────────────────────┐
-│   Nushell Script            │
-│                             │
-│  nickel-export json /file   │
-│  nickel-eval /file          │
-│  nickel-format /file        │
-└────────────┬────────────────┘
-             │
-             ▼
-┌─────────────────────────────┐
-│   nu_plugin_nickel          │
-│                             │
-│  - Command handling         │
-│  - Argument parsing         │
-│  - JSON output parsing      │
-│  - Caching logic            │
-└────────────┬────────────────┘
-             │
-             ▼
-┌─────────────────────────────┐
-│   std::process::Command     │
-│                             │
-│  "nickel export /file ..."  │
-└────────────┬────────────────┘
-             │
-             ▼
-┌─────────────────────────────┐
-│   Nickel Official CLI       │
-│                             │
-│  - Module resolution        │
-│  - Import handling          │
-│  - Standard library access  │
-│  - Output formatting        │
-│  - Error reporting          │
-└────────────┬────────────────┘
-             │
-             ▼
-┌─────────────────────────────┐
-│   Nushell Records/Lists     │
-│                             │
-│  ✅ Proper types            │
-│  ✅ Cell path access works  │
-│  ✅ Piping works            │
-└─────────────────────────────┘
-
-

Implementation Characteristics

-

Plugin provides:

-
    -
  • ✅ Nushell commands: nickel-export, nickel-eval, nickel-format, nickel-validate
  • -
  • ✅ JSON/YAML output parsing (serde_json → nu_protocol::Value)
  • -
  • ✅ Automatic caching (SHA256-based, ~80-90% hit rate)
  • -
  • ✅ Error handling (CLI errors → Nushell errors)
  • -
  • ✅ Type-safe output (nu_protocol::Value::Record, not strings)
  • -
-

Plugin delegates to Nickel CLI:

-
    -
  • ✅ Module resolution with search paths
  • -
  • ✅ Standard library access and discovery
  • -
  • ✅ Evaluation context setup
  • -
  • ✅ Module caching
  • -
  • ✅ Output formatting
  • -
-

Rationale

-

Why CLI Wrapper Is The Correct Choice

-
- - - - - - - - - -
AspectPure Rust (nickel-lang-core)CLI Wrapper (chosen)
Module resolution❓ Undocumented API✅ Official, proven
Search paths❓ How to configure?✅ CLI handles it
Standard library❓ How to access?✅ Automatic discovery
Import system❌ API unclear✅ Built-in
Evaluation context❌ Complex setup needed✅ CLI provides
Future versions⚠️ Maintain parity✅ Automatic support
Maintenance burden🔴 High🟢 Low
Complexity🔴 High🟢 Low
Correctness⚠️ Risk of divergence✅ Single source of truth
-
-

The Module System Problem

-

Using nickel-lang-core directly would require the plugin to:

-
    -
  1. -

    Configure import search paths:

    -
    // Where should Nickel look for modules?
    -// Current directory? Workspace? System paths?
    -// This is complex and configuration-dependent
    -
  2. -
  3. -

    Access standard library:

    -
    // Where is the Nickel stdlib installed?
    -// How to handle different Nickel versions?
    -// How to provide builtins?
    -
  4. -
  5. -

    Manage module evaluation context:

    -
    // Set up evaluation environment
    -// Configure cache locations
    -// Initialize type checker
    -// This is essentially re-implementing CLI logic
    -
  6. -
  7. -

    Maintain compatibility:

    -
      -
    • Every Nickel version change requires review
    • -
    • Risk of subtle behavioral differences
    • -
    • Duplicate bug fixes and features
    • -
    • Two implementations to maintain
    • -
    -
  8. -
-

Documentation Gap

-

The nickel-lang-core crate lacks clear documentation on:

-
    -
  • ❓ How to configure import search paths
  • -
  • ❓ How to access standard library
  • -
  • ❓ How to set up evaluation context
  • -
  • ❓ What is the public API contract?
  • -
-

This makes direct usage risky. The CLI is the documented, proven interface.

-

Why Nickel Is Different From Simple Use Cases

-

Simple use case (direct library usage works):

-
    -
  • Simple evaluation with built-in functions
  • -
  • No external dependencies
  • -
  • No modules or imports
  • -
-

Nickel reality (CLI wrapper necessary):

-
    -
  • Complex module system with search paths
  • -
  • External dependencies (standard library)
  • -
  • Import resolution with multiple fallbacks
  • -
  • Evaluation context that mirrors CLI
  • -
-

Consequences

-

Positive

-
    -
  • Correctness: Module resolution guaranteed by official Nickel CLI
  • -
  • Reliability: No risk from reverse-engineering undocumented APIs
  • -
  • Simplicity: Plugin code is lean (~300 lines total)
  • -
  • Maintainability: Automatic tracking of Nickel changes
  • -
  • Compatibility: Works with all Nickel versions
  • -
  • User Expectations: Same behavior as CLI users experience
  • -
  • Community Alignment: Uses official Nickel distribution
  • -
-

Negative

-
    -
  • External Dependency: Requires nickel binary installed in PATH
  • -
  • Process Overhead: ~100-200 ms per execution (heavily cached)
  • -
  • Subprocess Management: Spawn handling and stderr capture needed
  • -
  • Distribution: Provisioning must include Nickel binary
  • -
-

Mitigation Strategies

-

Dependency Management:

-
    -
  • Installation scripts handle Nickel setup
  • -
  • Docker images pre-install Nickel
  • -
  • Clear error messages if nickel not found
  • -
  • Documentation covers installation
  • -
-

Performance:

-
    -
  • Aggressive caching (80-90% typical hit rate)
  • -
  • Cache hits: ~1-5 ms (not 100-200 ms)
  • -
  • Cache directory: ~/.cache/provisioning/config-cache/
  • -
-

Distribution:

-
    -
  • Provisioning distributions include Nickel
  • -
  • Installers set up Nickel automatically
  • -
  • CI/CD has Nickel available
  • -
-

Alternatives Considered

-

Alternative 1: Pure Rust with nickel-lang-core

-

Pros: No external dependency -Cons: Undocumented API, high risk, maintenance burden -Decision: REJECTED - Too risky

-

Alternative 2: Hybrid (Pure Rust + CLI fallback)

-

Pros: Flexibility -Cons: Adds complexity, dual code paths, confusing behavior -Decision: REJECTED - Over-engineering

-

Alternative 3: WebAssembly Version

-

Pros: Standalone -Cons: WASM support unclear, additional infrastructure -Decision: REJECTED - Immature

-

Alternative 4: Use Nickel LSP

-

Pros: Uses official interface -Cons: LSP not designed for evaluation, wrong abstraction -Decision: REJECTED - Inappropriate tool

-

Implementation Details

-

Command Set

-
    -
  1. -

    nickel-export: Export/evaluate Nickel file

    -
    nickel-export json /path/to/file.ncl
    -nickel-export yaml /path/to/file.ncl
    -
    -
  2. -
  3. -

    nickel-eval: Evaluate with automatic caching (for config loader)

    -
    nickel-eval /workspace/config.ncl
    -
    -
  4. -
  5. -

    nickel-format: Format Nickel files

    -
    nickel-format /path/to/file.ncl
    -
    -
  6. -
  7. -

    nickel-validate: Validate Nickel files/project

    -
    nickel-validate /path/to/project
    -
    -
  8. -
-

Critical Implementation Detail: Command Syntax

-

The plugin uses the correct Nickel command syntax:

-
// Correct:
-cmd.arg("export").arg(file).arg("--format").arg(format);
-// Results in: "nickel export /file --format json"
-
-// WRONG (previously):
-cmd.arg("export").arg(format).arg(file);
-// Results in: "nickel export json /file"
-// ↑ This triggers auto-import of nonexistent JSON module
-

Caching Strategy

-

Cache Key: SHA256(file_content + format) -Cache Hit Rate: 80-90% (typical provisioning workflows) -Performance:

-
    -
  • Cache miss: ~100-200 ms (process fork)
  • -
  • Cache hit: ~1-5 ms (filesystem read + parse)
  • -
  • Speedup: 50-100x for cached runs
  • -
-

Storage: ~/.cache/provisioning/config-cache/

-

JSON Output Processing

-

Plugin correctly processes JSON output:

-
    -
  1. Invokes: nickel export /file.ncl --format json
  2. -
  3. Receives: JSON string from stdout
  4. -
  5. Parses: serde_json::Value
  6. -
  7. Converts: json_value_to_nu_value() (recursive)
  8. -
  9. Returns: nu_protocol::Value::Record (not string!)
  10. -
-

This enables Nushell cell path access:

-
nickel-export json /config.ncl | .database.host  # ✅ Works
-
-

Testing Strategy

-

Unit Tests:

-
    -
  • JSON parsing correctness
  • -
  • Value type conversions
  • -
  • Cache logic
  • -
-

Integration Tests:

-
    -
  • Real Nickel file execution
  • -
  • Module imports verification
  • -
  • Search path resolution
  • -
-

Manual Verification:

-
# Test module imports
-nickel-export json /workspace/config.ncl
-
-# Test cell path access
-nickel-export json /workspace/config.ncl | .database
-
-# Verify output types
-nickel-export json /workspace/config.ncl | type
-# Should show: record, not string
-
-

Configuration Integration

-

Plugin integrates with provisioning config system:

-
    -
  • Nickel path auto-detected: which nickel
  • -
  • Cache location: platform-specific cache_dir()
  • -
  • Errors: consistent with provisioning patterns
  • -
-

References

- -
-

Status: Accepted and Implemented -Last Updated: 2025-12-15 -Implementation: Complete -Tests: Passing

-

ADR-013: Typdialog Web UI Backend Integration for Interactive Configuration

-

Status

-

Accepted - 2025-01-08

-

Context

-

The provisioning system requires interactive user input for configuration workflows, workspace initialization, credential setup, and guided deployment -scenarios. The system architecture combines Rust (performance-critical), Nushell (scripting), and Nickel (declarative configuration), creating -challenges for interactive form-based input and multi-user collaboration.

-

The Interactive Configuration Problem

-

Current limitations:

-
    -
  1. -

    Nushell CLI: Terminal-only interaction

    -
      -
    • input command: Single-line text prompts only
    • -
    • No form validation, no complex multi-field forms
    • -
    • Limited to single-user, terminal-bound workflows
    • -
    • User experience: Basic and error-prone
    • -
    -
  2. -
  3. -

    Nickel: Declarative configuration language

    -
      -
    • Cannot handle interactive prompts (by design)
    • -
    • Pure evaluation model (no side effects)
    • -
    • Forms must be defined statically, not interactively
    • -
    • No runtime user interaction
    • -
    -
  4. -
  5. -

    Existing Solutions: Inadequate for modern infrastructure provisioning

    -
      -
    • Shell-based prompts: Error-prone, no validation, single-user
    • -
    • Custom web forms: High maintenance, inconsistent UX
    • -
    • Separate admin panels: Disconnected from IaC workflow
    • -
    • Terminal-only TUI: Limited to SSH sessions, no collaboration
    • -
    -
  6. -
-

Use Cases Requiring Interactive Input

-
    -
  1. -

    Workspace Initialization:

    -
    # Current: Error-prone prompts
    -let workspace_name = input "Workspace name: "
    -let provider = input "Provider (aws/azure/oci): "
    -# No validation, no autocomplete, no guidance
    -
    -
  2. -
  3. -

    Credential Setup:

    -
    # Current: Insecure and basic
    -let api_key = input "API Key: "  # Shows in terminal history
    -let region = input "Region: "    # No validation
    -
    -
  4. -
  5. -

    Configuration Wizards:

    -
      -
    • Database connection setup (host, port, credentials, SSL)
    • -
    • Network configuration (CIDR blocks, subnets, gateways)
    • -
    • Security policies (encryption, access control, audit)
    • -
    -
  6. -
  7. -

    Guided Deployments:

    -
      -
    • Multi-step infrastructure provisioning
    • -
    • Service selection with dependencies
    • -
    • Environment-specific overrides
    • -
    -
  8. -
-

Requirements for Interactive Input System

-
    -
  • Terminal UI widgets: Text input, password, select, multi-select, confirm
  • -
  • Validation: Type checking, regex patterns, custom validators
  • -
  • Security: Password masking, sensitive data handling
  • -
  • User Experience: Arrow key navigation, autocomplete, help text
  • -
  • Composability: Chain multiple prompts into forms
  • -
  • Error Handling: Clear validation errors, retry logic
  • -
  • Rust Integration: Native Rust library (no subprocess overhead)
  • -
  • Cross-Platform: Works on Linux, macOS, Windows
  • -
-

Decision

-

Integrate typdialog with its Web UI backend as the standard interactive configuration interface for the provisioning platform. The major -achievement of typdialog is not the TUI - it is the Web UI backend that enables browser-based forms, multi-user collaboration, and seamless -integration with the provisioning orchestrator.

-

Architecture Diagram

-
┌─────────────────────────────────────────┐
-│   Nushell Script                        │
-│                                         │
-│   provisioning workspace init           │
-│   provisioning config setup             │
-│   provisioning deploy guided            │
-└────────────┬────────────────────────────┘
-             │
-             ▼
-┌─────────────────────────────────────────┐
-│   Rust CLI Handler                      │
-│   (provisioning/core/cli/)              │
-│                                         │
-│   - Parse command                       │
-│   - Determine if interactive needed     │
-│   - Invoke TUI dialog module            │
-└────────────┬────────────────────────────┘
-             │
-             ▼
-┌─────────────────────────────────────────┐
-│   TUI Dialog Module                     │
-│   (typdialog wrapper)                   │
-│                                         │
-│   - Form definition (validation rules)  │
-│   - Widget rendering (text, select)     │
-│   - User input capture                  │
-│   - Validation execution                │
-│   - Result serialization (JSON/TOML)    │
-└────────────┬────────────────────────────┘
-             │
-             ▼
-┌─────────────────────────────────────────┐
-│   typdialog Library                     │
-│                                         │
-│   - Terminal rendering (crossterm)      │
-│   - Event handling (keyboard, mouse)    │
-│   - Widget state management             │
-│   - Input validation engine             │
-└────────────┬────────────────────────────┘
-             │
-             ▼
-┌─────────────────────────────────────────┐
-│   Terminal (stdout/stdin)               │
-│                                         │
-│   ✅ Rich TUI with validation           │
-│   ✅ Secure password input              │
-│   ✅ Guided multi-step forms            │
-└─────────────────────────────────────────┘
-
-

Implementation Characteristics

-

CLI Integration Provides:

-
    -
  • ✅ Native Rust commands with TUI dialogs
  • -
  • ✅ Form-based input for complex configurations
  • -
  • ✅ Validation rules defined in Rust (type-safe)
  • -
  • ✅ Secure input (password masking, no history)
  • -
  • ✅ Error handling with retry logic
  • -
  • ✅ Serialization to Nickel/TOML/JSON
  • -
-

TUI Dialog Library Handles:

-
    -
  • ✅ Terminal UI rendering and event loop
  • -
  • ✅ Widget management (text, select, checkbox, confirm)
  • -
  • ✅ Input validation and error display
  • -
  • ✅ Navigation (arrow keys, tab, enter)
  • -
  • ✅ Cross-platform terminal compatibility
  • -
-

Rationale

-

Why TUI Dialog Integration Is Required

-
- - - - - - - - -
AspectShell Prompts (current)Web FormsTUI Dialog (chosen)
User Experience❌ Basic text only✅ Rich UI✅ Rich TUI
Validation❌ Manual, error-prone✅ Built-in✅ Built-in
Security❌ Plain text, history⚠️ Network risk✅ Secure terminal
Setup Complexity✅ None❌ Server required✅ Minimal
Terminal Workflow✅ Native❌ Browser switch✅ Native
Offline Support✅ Always❌ Requires server✅ Always
Dependencies✅ None❌ Web stack✅ Single crate
Error Handling❌ Manual⚠️ Complex✅ Built-in retry
-
-

The Nushell Limitation

-

Nushell’s input command is limited:

-
# Current: No validation, no security
-let password = input "Password: "  # ❌ Shows in terminal
-let region = input "AWS Region: "   # ❌ No autocomplete/validation
-
-# Cannot do:
-# - Multi-select from options
-# - Conditional fields (if X then ask Y)
-# - Password masking
-# - Real-time validation
-# - Autocomplete/fuzzy search
-
-

The Nickel Constraint

-

Nickel is declarative and cannot prompt users:

-
# Nickel defines what the config looks like, NOT how to get it
-{
-  database = {
-    host | String,
-    port | Number,
-    credentials | { username: String, password: String },
-  }
-}
-
-# Nickel cannot:
-# - Prompt user for values
-# - Show interactive forms
-# - Validate input interactively
-
-

Why Rust + TUI Dialog Is The Solution

-

Rust provides:

-
    -
  • Native terminal control (crossterm, termion)
  • -
  • Type-safe form definitions
  • -
  • Validation rules as functions
  • -
  • Secure memory handling (password zeroization)
  • -
  • Performance (no subprocess overhead)
  • -
-

TUI Dialog provides:

-
    -
  • Widget library (text, select, multi-select, confirm)
  • -
  • Event loop and rendering
  • -
  • Validation framework
  • -
  • Error display and retry logic
  • -
-

Integration enables:

-
    -
  • Nushell calls Rust CLI → Shows TUI dialog → Returns validated config
  • -
  • Nickel receives validated config → Type checks → Merges with defaults
  • -
-

Consequences

-

Positive

-
    -
  • User Experience: Professional TUI with validation and guidance
  • -
  • Security: Password masking, sensitive data protection, no terminal history
  • -
  • Validation: Type-safe rules enforced before config generation
  • -
  • Developer Experience: Reusable form components across CLI commands
  • -
  • Error Handling: Clear validation errors with retry options
  • -
  • Offline First: No network dependencies for interactive input
  • -
  • Terminal Native: Fits CLI workflow, no context switching
  • -
  • Maintainability: Single library for all interactive input
  • -
-

Negative

-
    -
  • Terminal Dependency: Requires interactive terminal (not scriptable)
  • -
  • Learning Curve: Developers must learn TUI dialog patterns
  • -
  • Library Lock-in: Tied to specific TUI library API
  • -
  • Testing Complexity: Interactive tests require terminal mocking
  • -
  • Non-Interactive Fallback: Need alternative for CI/CD and scripts
  • -
-

Mitigation Strategies

-

Non-Interactive Mode:

-
// Support both interactive and non-interactive
-if terminal::is_interactive() {
-    // Show TUI dialog
-    let config = show_workspace_form()?;
-} else {
-    // Use config file or CLI args
-    let config = load_config_from_file(args.config)?;
-}
-

Testing:

-
// Unit tests: Test form validation logic (no TUI)
-#[test]
-fn test_validate_workspace_name() {
-    assert!(validate_name("my-workspace").is_ok());
-    assert!(validate_name("invalid name!").is_err());
-}
-
-// Integration tests: Use mock terminal or config files
-

Scriptability:

-
# Batch mode: Provide config via file
-provisioning workspace init --config workspace.toml
-
-# Interactive mode: Show TUI dialog
-provisioning workspace init --interactive
-
-

Documentation:

-
    -
  • Form schemas documented in docs/
  • -
  • Config file examples provided
  • -
  • Screenshots of TUI forms in guides
  • -
-

Alternatives Considered

-

Alternative 1: Shell-Based Prompts (Current State)

-

Pros: Simple, no dependencies -Cons: No validation, poor UX, security risks -Decision: REJECTED - Inadequate for production use

-

Alternative 2: Web-Based Forms

-

Pros: Rich UI, well-known patterns -Cons: Requires server, network dependency, context switch -Decision: REJECTED - Too complex for CLI tool

-

Alternative 3: Custom TUI Per Use Case

-

Pros: Tailored to each need -Cons: High maintenance, code duplication, inconsistent UX -Decision: REJECTED - Not sustainable

-

Alternative 4: External Form Tool (dialog, whiptail)

-

Pros: Mature, cross-platform -Cons: Subprocess overhead, limited validation, shell escaping issues -Decision: REJECTED - Poor Rust integration

-

Alternative 5: Text-Based Config Files Only

-

Pros: Fully scriptable, no interactive complexity -Cons: Steep learning curve, no guidance for new users -Decision: REJECTED - Poor user onboarding experience

-

Implementation Details

-

Form Definition Pattern

-
use typdialog::Form;
-
-pub fn workspace_initialization_form() -> Result<WorkspaceConfig> {
-    let form = Form::new("Workspace Initialization")
-        .add_text_input("name", "Workspace Name")
-            .required()
-            .validator(|s| validate_workspace_name(s))
-        .add_select("provider", "Cloud Provider")
-            .options(&["aws", "azure", "oci", "local"])
-            .required()
-        .add_text_input("region", "Region")
-            .default("us-west-2")
-            .validator(|s| validate_region(s))
-        .add_password("admin_password", "Admin Password")
-            .required()
-            .min_length(12)
-        .add_confirm("enable_monitoring", "Enable Monitoring?")
-            .default(true);
-
-    let responses = form.run()?;
-
-    // Convert to strongly-typed config
-    let config = WorkspaceConfig {
-        name: responses.get_string("name")?,
-        provider: responses.get_string("provider")?.parse()?,
-        region: responses.get_string("region")?,
-        admin_password: responses.get_password("admin_password")?,
-        enable_monitoring: responses.get_bool("enable_monitoring")?,
-    };
-
-    Ok(config)
-}
-

Integration with Nickel

-
// 1. Get validated input from TUI dialog
-let config = workspace_initialization_form()?;
-
-// 2. Serialize to TOML/JSON
-let config_toml = toml::to_string(&config)?;
-
-// 3. Write to workspace config
-fs::write("workspace/config.toml", config_toml)?;
-
-// 4. Nickel merges with defaults
-// nickel export workspace/main.ncl --format json
-// (uses workspace/config.toml as input)
-

CLI Command Structure

-
// provisioning/core/cli/src/commands/workspace.rs
-
-#[derive(Parser)]
-pub enum WorkspaceCommand {
-    Init {
-        #[arg(long)]
-        interactive: bool,
-
-        #[arg(long)]
-        config: Option<PathBuf>,
-    },
-}
-
-pub fn handle_workspace_init(args: InitArgs) -> Result<()> {
-    if args.interactive || terminal::is_interactive() {
-        // Show TUI dialog
-        let config = workspace_initialization_form()?;
-        config.save("workspace/config.toml")?;
-    } else if let Some(config_path) = args.config {
-        // Use provided config
-        let config = WorkspaceConfig::load(config_path)?;
-        config.save("workspace/config.toml")?;
-    } else {
-        bail!("Either --interactive or --config required");
-    }
-
-    // Continue with workspace setup
-    Ok(())
-}
-

Validation Rules

-
pub fn validate_workspace_name(name: &str) -> Result<(), String> {
-    // Alphanumeric, hyphens, 3-32 chars
-    let re = Regex::new(r"^[a-z0-9-]{3,32}$").unwrap();
-    if !re.is_match(name) {
-        return Err("Name must be 3-32 lowercase alphanumeric chars with hyphens".into());
-    }
-    Ok(())
-}
-
-pub fn validate_region(region: &str) -> Result<(), String> {
-    const VALID_REGIONS: &[&str] = &["us-west-1", "us-west-2", "us-east-1", "eu-west-1"];
-    if !VALID_REGIONS.contains(&region) {
-        return Err(format!("Invalid region. Must be one of: {}", VALID_REGIONS.join(", ")));
-    }
-    Ok(())
-}
-

Security: Password Handling

-
use zeroize::Zeroizing;
-
-pub fn get_secure_password() -> Result<Zeroizing<String>> {
-    let form = Form::new("Secure Input")
-        .add_password("password", "Password")
-            .required()
-            .min_length(12)
-            .validator(password_strength_check);
-
-    let responses = form.run()?;
-
-    // Password automatically zeroized when dropped
-    let password = Zeroizing::new(responses.get_password("password")?);
-
-    Ok(password)
-}
-

Testing Strategy

-

Unit Tests:

-
#[test]
-fn test_workspace_name_validation() {
-    assert!(validate_workspace_name("my-workspace").is_ok());
-    assert!(validate_workspace_name("UPPERCASE").is_err());
-    assert!(validate_workspace_name("ab").is_err()); // Too short
-}
-

Integration Tests:

-
// Use non-interactive mode with config files
-#[test]
-fn test_workspace_init_non_interactive() {
-    let config = WorkspaceConfig {
-        name: "test-workspace".into(),
-        provider: Provider::Local,
-        region: "us-west-2".into(),
-        admin_password: "secure-password-123".into(),
-        enable_monitoring: true,
-    };
-
-    config.save("/tmp/test-config.toml").unwrap();
-
-    let result = handle_workspace_init(InitArgs {
-        interactive: false,
-        config: Some("/tmp/test-config.toml".into()),
-    });
-
-    assert!(result.is_ok());
-}
-

Manual Testing:

-
# Test interactive flow
-cargo build --release
-./target/release/provisioning workspace init --interactive
-
-# Test validation errors
-# - Try invalid workspace name
-# - Try weak password
-# - Try invalid region
-
-

Configuration Integration

-

CLI Flag:

-
# provisioning/config/config.defaults.toml
-[ui]
-interactive_mode = "auto"  # "auto" | "always" | "never"
-dialog_theme = "default"   # "default" | "minimal" | "colorful"
-
-

Environment Override:

-
# Force non-interactive mode (for CI/CD)
-export PROVISIONING_INTERACTIVE=false
-
-# Force interactive mode
-export PROVISIONING_INTERACTIVE=true
-
-

Documentation Requirements

-

User Guides:

-
    -
  • docs/user/interactive-configuration.md - How to use TUI dialogs
  • -
  • docs/guides/workspace-setup.md - Workspace initialization with screenshots
  • -
-

Developer Documentation:

-
    -
  • docs/development/tui-forms.md - Creating new TUI forms
  • -
  • Form definition best practices
  • -
  • Validation rule patterns
  • -
-

Configuration Schema:

-
# provisioning/schemas/workspace.ncl
-{
-  WorkspaceConfig = {
-    name
-      | doc "Workspace identifier (3-32 alphanumeric chars with hyphens)"
-      | String,
-    provider
-      | doc "Cloud provider"
-      | [| 'aws, 'azure, 'oci, 'local |],
-    region
-      | doc "Deployment region"
-      | String,
-    admin_password
-      | doc "Admin password (min 12 characters)"
-      | String,
-    enable_monitoring
-      | doc "Enable monitoring services"
-      | Bool,
-  }
-}
-
-

Migration Path

-

Phase 1: Add Library

-
    -
  • Add typdialog dependency to provisioning/core/cli/Cargo.toml
  • -
  • Create TUI dialog wrapper module
  • -
  • Implement basic text/select widgets
  • -
-

Phase 2: Implement Forms

-
    -
  • Workspace initialization form
  • -
  • Credential setup form
  • -
  • Configuration wizard forms
  • -
-

Phase 3: CLI Integration

-
    -
  • Update CLI commands to use TUI dialogs
  • -
  • Add --interactive / --config flags
  • -
  • Implement non-interactive fallback
  • -
-

Phase 4: Documentation

-
    -
  • User guides with screenshots
  • -
  • Developer documentation for form creation
  • -
  • Example configs for non-interactive use
  • -
-

Phase 5: Testing

-
    -
  • Unit tests for validation logic
  • -
  • Integration tests with config files
  • -
  • Manual testing on all platforms
  • -
-

References

-
    -
  • typdialog Crate (or similar: dialoguer, inquire)
  • -
  • crossterm - Terminal manipulation
  • -
  • zeroize - Secure memory zeroization
  • -
  • ADR-004: Hybrid Architecture (Rust/Nushell integration)
  • -
  • ADR-011: Nickel Migration (declarative config language)
  • -
  • ADR-012: Nushell Plugins (CLI wrapper patterns)
  • -
  • Nushell input command limitations: Nushell Book - Input
  • -
-
-

Status: Accepted -Last Updated: 2025-01-08 -Implementation: Planned -Priority: High (User onboarding and security) -Estimated Complexity: Moderate

-

ADR-014: SecretumVault Integration for Secrets Management

-

Status

-

Accepted - 2025-01-08

-

Context

-

The provisioning system manages sensitive data across multiple infrastructure layers: cloud provider credentials, database passwords, API keys, SSH -keys, encryption keys, and service tokens. The current security architecture (ADR-009) includes SOPS for encrypted config files and Age for key -management, but lacks a centralized secrets management solution with dynamic secrets, access control, and audit logging.

-

Current Secrets Management Challenges

-

Existing Approach:

-
    -
  1. -

    SOPS + Age: Static secrets encrypted in config files

    -
      -
    • Good: Version-controlled, gitops-friendly
    • -
    • Limited: Static rotation, no audit trail, manual key distribution
    • -
    -
  2. -
  3. -

    Nickel Configuration: Declarative secrets references

    -
      -
    • Good: Type-safe configuration
    • -
    • Limited: Cannot generate dynamic secrets, no lifecycle management
    • -
    -
  4. -
  5. -

    Manual Secret Injection: Environment variables, CLI flags

    -
      -
    • Good: Simple for development
    • -
    • Limited: No security guarantees, prone to leakage
    • -
    -
  6. -
-

Problems Without Centralized Secrets Management

-

Security Issues:

-
    -
  • ❌ No centralized audit trail (who accessed which secret when)
  • -
  • ❌ No automatic secret rotation policies
  • -
  • ❌ No fine-grained access control (Cedar policies not enforced on secrets)
  • -
  • ❌ Secrets scattered across: SOPS files, env vars, config files, K8s secrets
  • -
  • ❌ No detection of secret sprawl or leaked credentials
  • -
-

Operational Issues:

-
    -
  • ❌ Manual secret rotation (error-prone, often neglected)
  • -
  • ❌ No secret versioning (cannot rollback to previous credentials)
  • -
  • ❌ Difficult onboarding (manual key distribution)
  • -
  • ❌ No dynamic secrets (credentials exist indefinitely)
  • -
-

Compliance Issues:

-
    -
  • ❌ Cannot prove compliance with secret access policies
  • -
  • ❌ No audit logs for regulatory requirements
  • -
  • ❌ Cannot enforce secret expiration policies
  • -
  • ❌ Difficult to demonstrate least-privilege access
  • -
-

Use Cases Requiring Centralized Secrets Management

-
    -
  1. -

    Dynamic Database Credentials:

    -
      -
    • Generate short-lived DB credentials for applications
    • -
    • Automatic rotation based on policies
    • -
    • Revocation on application termination
    • -
    -
  2. -
  3. -

    Cloud Provider API Keys:

    -
      -
    • Centralized storage with access control
    • -
    • Audit trail of credential usage
    • -
    • Automatic rotation schedules
    • -
    -
  4. -
  5. -

    Service-to-Service Authentication:

    -
      -
    • Dynamic tokens for microservices
    • -
    • Short-lived certificates for mTLS
    • -
    • Automatic renewal before expiration
    • -
    -
  6. -
  7. -

    SSH Key Management:

    -
      -
    • Temporal SSH keys (ADR-009 SSH integration)
    • -
    • Centralized certificate authority
    • -
    • Audit trail of SSH access
    • -
    -
  8. -
  9. -

    Encryption Key Management:

    -
      -
    • Master encryption keys for data at rest
    • -
    • Key rotation and versioning
    • -
    • Integration with KMS systems
    • -
    -
  10. -
-

Requirements for Secrets Management System

-
    -
  • Dynamic Secrets: Generate credentials on-demand with TTL
  • -
  • Access Control: Integration with Cedar authorization policies
  • -
  • Audit Logging: Complete trail of secret access and modifications
  • -
  • Secret Rotation: Automatic and manual rotation policies
  • -
  • Versioning: Track secret versions, enable rollback
  • -
  • High Availability: Distributed, fault-tolerant architecture
  • -
  • Encryption at Rest: AES-256-GCM for stored secrets
  • -
  • API-First: RESTful API for integration
  • -
  • Plugin Ecosystem: Extensible backends (AWS, Azure, databases)
  • -
  • Open Source: Self-hosted, no vendor lock-in
  • -
-

Decision

-

Integrate SecretumVault as the centralized secrets management system for the provisioning platform.

-

Architecture Diagram

-
┌─────────────────────────────────────────────────────────────┐
-│   Provisioning CLI / Orchestrator / Services                │
-│                                                             │
-│   - Workspace initialization (credentials)                  │
-│   - Infrastructure deployment (cloud API keys)              │
-│   - Service configuration (database passwords)              │
-│   - SSH temporal keys (certificate generation)              │
-└────────────┬────────────────────────────────────────────────┘
-             │
-             ▼
-┌─────────────────────────────────────────────────────────────┐
-│   SecretumVault Client Library (Rust)                       │
-│   (provisioning/core/libs/secretum-client/)                 │
-│                                                             │
-│   - Authentication (token, mTLS)                            │
-│   - Secret CRUD operations                                  │
-│   - Dynamic secret generation                               │
-│   - Lease renewal and revocation                            │
-│   - Policy enforcement                                      │
-└────────────┬────────────────────────────────────────────────┘
-             │ HTTPS + mTLS
-             ▼
-┌─────────────────────────────────────────────────────────────┐
-│   SecretumVault Server                                      │
-│   (Rust-based Vault implementation)                         │
-│                                                             │
-│   ┌───────────────────────────────────────────────────┐    │
-│   │ API Layer (REST + gRPC)                           │    │
-│   ├───────────────────────────────────────────────────┤    │
-│   │ Authentication & Authorization                    │    │
-│   │ - Token auth, mTLS, OIDC integration              │    │
-│   │ - Cedar policy enforcement                        │    │
-│   ├───────────────────────────────────────────────────┤    │
-│   │ Secret Engines                                    │    │
-│   │ - KV (key-value v2 with versioning)               │    │
-│   │ - Database (dynamic credentials)                  │    │
-│   │ - SSH (certificate authority)                     │    │
-│   │ - PKI (X.509 certificates)                        │    │
-│   │ - Cloud Providers (AWS/Azure/OCI)                 │    │
-│   ├───────────────────────────────────────────────────┤    │
-│   │ Storage Backend                                   │    │
-│   │ - Encrypted storage (AES-256-GCM)                 │    │
-│   │ - PostgreSQL / Raft cluster                       │    │
-│   ├───────────────────────────────────────────────────┤    │
-│   │ Audit Backend                                     │    │
-│   │ - Structured logging (JSON)                       │    │
-│   │ - Syslog, file, database sinks                    │    │
-│   └───────────────────────────────────────────────────┘    │
-└─────────────────────────────────────────────────────────────┘
-             │
-             ▼
-┌─────────────────────────────────────────────────────────────┐
-│   Backends (Dynamic Secret Generation)                      │
-│                                                             │
-│   - PostgreSQL/MySQL (database credentials)                 │
-│   - AWS IAM (temporary access keys)                         │
-│   - Azure AD (service principals)                           │
-│   - SSH CA (signed certificates)                            │
-│   - PKI (X.509 certificates)                                │
-└─────────────────────────────────────────────────────────────┘
-
-

Implementation Characteristics

-

SecretumVault Provides:

-
    -
  • ✅ Dynamic secret generation with configurable TTL
  • -
  • ✅ Secret versioning and rollback capabilities
  • -
  • ✅ Fine-grained access control (Cedar policies)
  • -
  • ✅ Complete audit trail (all operations logged)
  • -
  • ✅ Automatic secret rotation policies
  • -
  • ✅ High availability (Raft consensus)
  • -
  • ✅ Encryption at rest (AES-256-GCM)
  • -
  • ✅ Plugin architecture for secret backends
  • -
  • ✅ RESTful and gRPC APIs
  • -
  • ✅ Rust implementation (performance, safety)
  • -
-

Integration with Provisioning System:

-
    -
  • ✅ Rust client library (native integration)
  • -
  • ✅ Nushell commands via CLI wrapper
  • -
  • ✅ Nickel configuration references secrets
  • -
  • ✅ Cedar policies control secret access
  • -
  • ✅ Orchestrator manages secret lifecycle
  • -
  • ✅ SSH integration for temporal keys
  • -
  • ✅ KMS integration for encryption keys
  • -
-

Rationale

-

Why SecretumVault Is Required

-
- - - - - - - - - - - -
AspectSOPS + Age (current)HashiCorp VaultSecretumVault (chosen)
Dynamic Secrets❌ Static only✅ Full support✅ Full support
Rust Native⚠️ External CLI❌ Go binary✅ Pure Rust
Cedar Integration❌ None❌ Custom policies✅ Native Cedar
Audit Trail❌ Git only✅ Comprehensive✅ Comprehensive
Secret Rotation❌ Manual✅ Automatic✅ Automatic
Open Source✅ Yes⚠️ MPL 2.0 (BSL now)✅ Yes
Self-Hosted✅ Yes✅ Yes✅ Yes
License✅ Permissive⚠️ BSL (proprietary)✅ Permissive
Versioning⚠️ Git commits✅ Built-in✅ Built-in
High Availability❌ Single file✅ Raft cluster✅ Raft cluster
Performance✅ Fast (local)⚠️ Network latency✅ Rust performance
-
-

Why Not Continue with SOPS Alone

-

SOPS is excellent for static secrets in git, but inadequate for:

-
    -
  1. Dynamic Credentials: Cannot generate temporary DB passwords
  2. -
  3. Audit Trail: Git commits are insufficient for compliance
  4. -
  5. Rotation Policies: Manual rotation is error-prone
  6. -
  7. Access Control: No runtime policy enforcement
  8. -
  9. Secret Lifecycle: Cannot track usage or revoke access
  10. -
  11. Multi-System Integration: Limited to files, not API-accessible
  12. -
-

Complementary Approach:

-
    -
  • SOPS: Configuration files with long-lived secrets (gitops workflow)
  • -
  • SecretumVault: Runtime dynamic secrets, short-lived credentials, audit trail
  • -
-

Why SecretumVault Over HashiCorp Vault

-

HashiCorp Vault Limitations:

-
    -
  1. License Change: BSL (Business Source License) - proprietary for production
  2. -
  3. Not Rust Native: Go binary, subprocess overhead
  4. -
  5. Custom Policy Language: HCL policies, not Cedar (provisioning standard)
  6. -
  7. Complex Deployment: Heavy operational burden
  8. -
  9. Vendor Lock-In: HashiCorp ecosystem dependency
  10. -
-

SecretumVault Advantages:

-
    -
  1. Rust Native: Zero-cost integration, no subprocess spawning
  2. -
  3. Cedar Policies: Consistent with ADR-008 authorization model
  4. -
  5. Lightweight: Smaller binary, lower resource usage
  6. -
  7. Open Source: Permissive license, community-driven
  8. -
  9. Provisioning-First: Designed for IaC workflows
  10. -
-

Integration with Existing Security Architecture

-

ADR-009 (Security System):

-
    -
  • SOPS: Static config encryption (unchanged)
  • -
  • Age: Key management for SOPS (unchanged)
  • -
  • SecretumVault: Dynamic secrets, runtime access control (new)
  • -
-

ADR-008 (Cedar Authorization):

-
    -
  • Cedar policies control SecretumVault secret access
  • -
  • Fine-grained permissions: read:secret:database/prod/password
  • -
  • Audit trail records Cedar policy decisions
  • -
-

SSH Temporal Keys:

-
    -
  • SecretumVault SSH CA signs user certificates
  • -
  • Short-lived certificates (1-24 hours)
  • -
  • Audit trail of SSH access
  • -
-

Consequences

-

Positive

-
    -
  • Security Posture: Centralized secrets with audit trail and rotation
  • -
  • Compliance: Complete audit logs for regulatory requirements
  • -
  • Operational Excellence: Automatic rotation, dynamic credentials
  • -
  • Developer Experience: Simple API for secret access
  • -
  • Performance: Rust implementation, zero-cost abstractions
  • -
  • Consistency: Cedar policies across entire system (auth + secrets)
  • -
  • Observability: Metrics, logs, traces for secret access
  • -
  • Disaster Recovery: Secret versioning enables rollback
  • -
-

Negative

-
    -
  • Infrastructure Complexity: Additional service to deploy and operate
  • -
  • High Availability Requirements: Raft cluster needs 3+ nodes
  • -
  • Migration Effort: Existing SOPS secrets need migration path
  • -
  • Learning Curve: Operators must learn vault concepts
  • -
  • Dependency Risk: Critical path service (secrets unavailable = system down)
  • -
-

Mitigation Strategies

-

High Availability:

-
# Deploy SecretumVault cluster (3 nodes)
-provisioning deploy secretum-vault --ha --replicas 3
-
-# Automatic leader election via Raft
-# Clients auto-reconnect to leader
-
-

Migration from SOPS:

-
# Phase 1: Import existing SOPS secrets into SecretumVault
-provisioning secrets migrate --from-sops config/secrets.yaml
-
-# Phase 2: Update Nickel configs to reference vault paths
-# Phase 3: Deprecate SOPS for runtime secrets (keep for config files)
-
-

Fallback Strategy:

-
// Graceful degradation if vault unavailable
-let secret = match vault_client.get_secret("database/password").await {
-    Ok(s) => s,
-    Err(VaultError::Unavailable) => {
-        // Fallback to SOPS for read-only operations
-        warn!("Vault unavailable, using SOPS fallback");
-        sops_decrypt("config/secrets.yaml", "database.password")?
-    },
-    Err(e) => return Err(e),
-};
-

Operational Monitoring:

-
# prometheus metrics
-secretum_vault_request_duration_seconds
-secretum_vault_secret_lease_expiry
-secretum_vault_auth_failures_total
-secretum_vault_raft_leader_changes
-
-# Alerts: Vault unavailable, high auth failure rate, lease expiry
-
-

Alternatives Considered

-

Alternative 1: Continue with SOPS Only

-

Pros: No new infrastructure, simple -Cons: No dynamic secrets, no audit trail, manual rotation -Decision: REJECTED - Insufficient for production security

-

Alternative 2: HashiCorp Vault

-

Pros: Mature, feature-rich, widely adopted -Cons: BSL license, Go binary, HCL policies (not Cedar), complex deployment -Decision: REJECTED - License and integration concerns

-

Alternative 3: Cloud Provider Native (AWS Secrets Manager, Azure Key Vault)

-

Pros: Fully managed, high availability -Cons: Vendor lock-in, multi-cloud complexity, cost at scale -Decision: REJECTED - Against open-source and multi-cloud principles

-

Alternative 4: CyberArk, 1Password, and Others

-

Pros: Enterprise features -Cons: Proprietary, expensive, poor API integration -Decision: REJECTED - Not suitable for IaC automation

-

Alternative 5: Build Custom Secrets Manager

-

Pros: Full control, tailored to needs -Cons: High maintenance burden, security risk, reinventing wheel -Decision: REJECTED - SecretumVault provides this already

-

Implementation Details

-

SecretumVault Deployment

-
# Deploy via provisioning system
-provisioning deploy secretum-vault \
-  --ha \
-  --replicas 3 \
-  --storage postgres \
-  --tls-cert /path/to/cert.pem \
-  --tls-key /path/to/key.pem
-
-# Initialize and unseal
-provisioning vault init
-provisioning vault unseal --key-shares 5 --key-threshold 3
-
-

Rust Client Library

-
// provisioning/core/libs/secretum-client/src/lib.rs
-
-use secretum_vault::{Client, SecretEngine, Auth};
-
-pub struct VaultClient {
-    client: Client,
-}
-
-impl VaultClient {
-    pub async fn new(addr: &str, token: &str) -> Result<Self> {
-        let client = Client::new(addr)
-            .auth(Auth::Token(token))
-            .tls_config(TlsConfig::from_files("ca.pem", "cert.pem", "key.pem"))?
-            .build()?;
-
-        Ok(Self { client })
-    }
-
-    pub async fn get_secret(&self, path: &str) -> Result<Secret> {
-        self.client.kv2().get(path).await
-    }
-
-    pub async fn create_dynamic_db_credentials(&self, role: &str) -> Result<DbCredentials> {
-        self.client.database().generate_credentials(role).await
-    }
-
-    pub async fn sign_ssh_key(&self, public_key: &str, ttl: Duration) -> Result<Certificate> {
-        self.client.ssh().sign_key(public_key, ttl).await
-    }
-}
-

Nushell Integration

-
# Nushell commands via Rust CLI wrapper
-provisioning secrets get database/prod/password
-provisioning secrets set api/keys/stripe --value "sk_live_xyz"
-provisioning secrets rotate database/prod/password
-provisioning secrets lease renew lease_id_12345
-provisioning secrets list database/
-
-

Nickel Configuration Integration

-
# provisioning/schemas/database.ncl
-{
-  database = {
-    host = "postgres.example.com",
-    port = 5432,
-    username = secrets.get "database/prod/username",
-    password = secrets.get "database/prod/password",
-  }
-}
-
-# Nickel function: secrets.get resolves to SecretumVault API call
-
-

Cedar Policy for Secret Access

-
// policy: developers can read dev secrets, not prod
-permit(
-  principal in Group::"developers",
-  action == Action::"read",
-  resource in Secret::"database/dev"
-);
-
-forbid(
-  principal in Group::"developers",
-  action == Action::"read",
-  resource in Secret::"database/prod"
-);
-
-// policy: CI/CD can generate dynamic DB credentials
-permit(
-  principal == Service::"github-actions",
-  action == Action::"generate",
-  resource in Secret::"database/dynamic"
-) when {
-  context.ttl <= duration("1h")
-};
-
-

Dynamic Database Credentials

-
// Application requests temporary DB credentials
-let creds = vault_client
-    .database()
-    .generate_credentials("postgres-readonly")
-    .await?;
-
-println!("Username: {}", creds.username); // v-app-abcd1234
-println!("Password: {}", creds.password); // random-secure-password
-println!("TTL: {}", creds.lease_duration);  // 1h
-
-// Credentials automatically revoked after TTL
-// No manual cleanup needed
-

Secret Rotation Automation

-
# secretum-vault config
-[[rotation_policies]]
-path = "database/prod/password"
-schedule = "0 0 * * 0"  # Weekly on Sunday midnight
-max_age = "30d"
-
-[[rotation_policies]]
-path = "api/keys/stripe"
-schedule = "0 0 1 * *"  # Monthly on 1st
-max_age = "90d"
-
-

Audit Log Format

-
{
-  "timestamp": "2025-01-08T12:34:56Z",
-  "type": "request",
-  "auth": {
-    "client_token": "sha256:abc123...",
-    "accessor": "hmac:def456...",
-    "display_name": "service-orchestrator",
-    "policies": ["default", "service-policy"]
-  },
-  "request": {
-    "operation": "read",
-    "path": "secret/data/database/prod/password",
-    "remote_address": "10.0.1.5"
-  },
-  "response": {
-    "status": 200
-  },
-  "cedar_policy": {
-    "decision": "permit",
-    "policy_id": "allow-orchestrator-read-secrets"
-  }
-}
-
-

Testing Strategy

-

Unit Tests:

-
#[tokio::test]
-async fn test_get_secret() {
-    let vault = mock_vault_client();
-    let secret = vault.get_secret("test/secret").await.unwrap();
-    assert_eq!(secret.value, "expected-value");
-}
-
-#[tokio::test]
-async fn test_dynamic_credentials_generation() {
-    let vault = mock_vault_client();
-    let creds = vault.create_dynamic_db_credentials("postgres-readonly").await.unwrap();
-    assert!(creds.username.starts_with("v-"));
-    assert_eq!(creds.lease_duration, Duration::from_secs(3600));
-}
-

Integration Tests:

-
# Test vault deployment
-provisioning deploy secretum-vault --test-mode
-provisioning vault init
-provisioning vault unseal
-
-# Test secret operations
-provisioning secrets set test/secret --value "test-value"
-provisioning secrets get test/secret | assert "test-value"
-
-# Test dynamic credentials
-provisioning secrets db-creds postgres-readonly | jq '.username' | assert-contains "v-"
-
-# Test rotation
-provisioning secrets rotate test/secret
-
-

Security Tests:

-
#[tokio::test]
-async fn test_unauthorized_access_denied() {
-    let vault = vault_client_with_limited_token();
-    let result = vault.get_secret("database/prod/password").await;
-    assert!(matches!(result, Err(VaultError::PermissionDenied)));
-}
-

Configuration Integration

-

Provisioning Config:

-
# provisioning/config/config.defaults.toml
-[secrets]
-provider = "secretum-vault"  # "secretum-vault" | "sops" | "env"
-vault_addr = "https://vault.example.com:8200"
-vault_namespace = "provisioning"
-vault_mount = "secret"
-
-[secrets.tls]
-ca_cert = "/etc/provisioning/vault-ca.pem"
-client_cert = "/etc/provisioning/vault-client.pem"
-client_key = "/etc/provisioning/vault-client-key.pem"
-
-[secrets.cache]
-enabled = true
-ttl = "5m"
-max_size = "100MB"
-
-

Environment Variables:

-
export VAULT_ADDR="https://vault.example.com:8200"
-export VAULT_TOKEN="s.abc123def456..."
-export VAULT_NAMESPACE="provisioning"
-export VAULT_CACERT="/etc/provisioning/vault-ca.pem"
-
-

Migration Path

-

Phase 1: Deploy SecretumVault

-
    -
  • Deploy vault cluster in HA mode
  • -
  • Initialize and configure backends
  • -
  • Set up Cedar policies
  • -
-

Phase 2: Migrate Static Secrets

-
    -
  • Import SOPS secrets into vault KV store
  • -
  • Update Nickel configs to reference vault paths
  • -
  • Verify secret access via new API
  • -
-

Phase 3: Enable Dynamic Secrets

-
    -
  • Configure database secret engine
  • -
  • Configure SSH CA secret engine
  • -
  • Update applications to use dynamic credentials
  • -
-

Phase 4: Deprecate SOPS for Runtime

-
    -
  • SOPS remains for gitops config files
  • -
  • Runtime secrets exclusively from vault
  • -
  • Audit trail enforcement
  • -
-

Phase 5: Automation

-
    -
  • Automatic rotation policies
  • -
  • Lease renewal automation
  • -
  • Monitoring and alerting
  • -
-

Documentation Requirements

-

User Guides:

-
    -
  • docs/user/secrets-management.md - Using SecretumVault
  • -
  • docs/user/dynamic-credentials.md - Dynamic secret workflows
  • -
  • docs/user/secret-rotation.md - Rotation policies and procedures
  • -
-

Operations Documentation:

-
    -
  • docs/operations/vault-deployment.md - Deploying and configuring vault
  • -
  • docs/operations/vault-backup-restore.md - Backup and disaster recovery
  • -
  • docs/operations/vault-monitoring.md - Metrics, logs, alerts
  • -
-

Developer Documentation:

-
    -
  • docs/development/secrets-api.md - Rust client library usage
  • -
  • docs/development/cedar-secret-policies.md - Writing Cedar policies for secrets
  • -
  • Secret engine development guide
  • -
-

Security Documentation:

-
    -
  • docs/security/secrets-architecture.md - Security architecture overview
  • -
  • docs/security/audit-logging.md - Audit trail and compliance
  • -
  • Threat model and risk assessment
  • -
-

References

- -
-

Status: Accepted -Last Updated: 2025-01-08 -Implementation: Planned -Priority: High (Security and compliance) -Estimated Complexity: Complex

-

ADR-015: AI Integration Architecture for Intelligent Infrastructure Provisioning

-

Status

-

Accepted - 2025-01-08

-

Context

-

The provisioning platform has evolved to include complex workflows for infrastructure configuration, deployment, and management. -Current interaction patterns require deep technical knowledge of Nickel schemas, cloud provider APIs, networking concepts, and security best practices. -This creates barriers to entry and slows down infrastructure provisioning for operators who are not infrastructure experts.

-

The Infrastructure Complexity Problem

-

Current state challenges:

-
    -
  1. -

    Knowledge Barrier: Deep Nickel, cloud, and networking expertise required

    -
      -
    • Understanding Nickel type system and contracts
    • -
    • Knowing cloud provider resource relationships
    • -
    • Configuring security policies correctly
    • -
    • Debugging deployment failures
    • -
    -
  2. -
  3. -

    Manual Configuration: All configs hand-written

    -
      -
    • Repetitive boilerplate for common patterns
    • -
    • Easy to make mistakes (typos, missing fields)
    • -
    • No intelligent suggestions or autocomplete
    • -
    • Trial-and-error debugging
    • -
    -
  4. -
  5. -

    Limited Assistance: No contextual help

    -
      -
    • Documentation is separate from workflow
    • -
    • No explanation of validation errors
    • -
    • No suggestions for fixing issues
    • -
    • No learning from past deployments
    • -
    -
  6. -
  7. -

    Troubleshooting Difficulty: Manual log analysis

    -
      -
    • Deployment failures require expert analysis
    • -
    • No automated root cause detection
    • -
    • No suggested fixes based on similar issues
    • -
    • Long time-to-resolution
    • -
    -
  8. -
-

AI Integration Opportunities

-
    -
  1. -

    Natural Language to Configuration:

    -
      -
    • User: “Create a production PostgreSQL cluster with encryption and daily backups”
    • -
    • AI: Generates validated Nickel configuration
    • -
    -
  2. -
  3. -

    AI-Assisted Form Filling:

    -
      -
    • User starts typing in typdialog web form
    • -
    • AI suggests values based on context
    • -
    • AI explains validation errors in plain language
    • -
    -
  4. -
  5. -

    Intelligent Troubleshooting:

    -
      -
    • Deployment fails
    • -
    • AI analyzes logs and suggests fixes
    • -
    • AI generates corrected configuration
    • -
    -
  6. -
  7. -

    Configuration Optimization:

    -
      -
    • AI analyzes workload patterns
    • -
    • AI suggests performance improvements
    • -
    • AI detects security misconfigurations
    • -
    -
  8. -
  9. -

    Learning from Operations:

    -
      -
    • AI indexes past deployments
    • -
    • AI suggests configurations based on similar workloads
    • -
    • AI predicts potential issues
    • -
    -
  10. -
-

AI Components Overview

-

The system integrates multiple AI components:

-
    -
  1. typdialog-ai: AI-assisted form interactions
  2. -
  3. typdialog-ag: AI agents for autonomous operations
  4. -
  5. typdialog-prov-gen: AI-powered configuration generation
  6. -
  7. platform/crates/ai-service: Core AI service backend
  8. -
  9. platform/crates/mcp-server: Model Context Protocol server
  10. -
  11. platform/crates/rag: Retrieval-Augmented Generation system
  12. -
-

Requirements for AI Integration

-
    -
  • Natural Language Understanding: Parse user intent from free-form text
  • -
  • Schema-Aware Generation: Generate valid Nickel configurations
  • -
  • Context Retrieval: Access documentation, schemas, past deployments
  • -
  • Security Enforcement: Cedar policies control AI access
  • -
  • Human-in-the-Loop: All AI actions require human approval
  • -
  • Audit Trail: Complete logging of AI operations
  • -
  • Multi-Provider Support: OpenAI, Anthropic, local models
  • -
  • Cost Control: Rate limiting and budget management
  • -
  • Observability: Trace AI decisions and reasoning
  • -
-

Decision

-

Integrate a comprehensive AI system consisting of:

-
    -
  1. AI-Assisted Interfaces (typdialog-ai)
  2. -
  3. Autonomous AI Agents (typdialog-ag)
  4. -
  5. AI Configuration Generator (typdialog-prov-gen)
  6. -
  7. Core AI Infrastructure (ai-service, mcp-server, rag)
  8. -
-

All AI components are schema-aware, security-enforced, and human-supervised.

-

Architecture Diagram

-
┌─────────────────────────────────────────────────────────────────┐
-│   User Interfaces                                               │
-│                                                                 │
-│   Natural Language: "Create production K8s cluster in AWS"     │
-│   Typdialog Forms: AI-assisted field suggestions               │
-│   CLI: provisioning ai generate-config "description"           │
-└────────────┬────────────────────────────────────────────────────┘
-             │
-             ▼
-┌─────────────────────────────────────────────────────────────────┐
-│   AI Frontend Layer                                             │
-│    ┌───────────────────────────────────────────────────────┐    │
-│    │ typdialog-ai (AI-Assisted Forms)                      │    │
-│    │ - Natural language form filling                       │    │
-│    │ - Real-time AI suggestions                            │    │
-│    │ - Validation error explanations                       │    │
-│    │ - Context-aware autocomplete                          │    │
-│    ├───────────────────────────────────────────────────────┤    │
-│    │ typdialog-ag (AI Agents)                              │    │
-│    │ - Autonomous task execution                           │    │
-│    │ - Multi-step workflow automation                      │    │
-│    │ - Learning from feedback                              │    │
-│    │ - Agent collaboration                                 │    │
-│    ├───────────────────────────────────────────────────────┤    │
-│    │ typdialog-prov-gen (Config Generator)                 │    │
-│    │ - Natural language → Nickel config                    │    │
-│    │ - Template-based generation                           │    │
-│    │ - Best practice injection                             │    │
-│    │ - Validation and refinement                           │    │
-│    └───────────────────────────────────────────────────────┘    │
-└────────────┬────────────────────────────────────────────────────┘
-             │
-             ▼
-┌────────────────────────────────────────────────────────────────┐
-│   Core AI Infrastructure (platform/crates/)                    │
-│   ┌───────────────────────────────────────────────────────┐    │
-│   │ ai-service (Central AI Service)                       │    │
-│   │                                                       │    │
-│   │ - Request routing and orchestration                   │    │
-│   │ - Authentication and authorization (Cedar)            │    │
-│   │ - Rate limiting and cost control                      │    │
-│   │ - Caching and optimization                            │    │
-│   │ - Audit logging and observability                     │    │
-│   │ - Multi-provider abstraction                          │    │
-│   └─────────────┬─────────────────────┬───────────────────┘    │
-│                 │                     │                        │
-│                 ▼                     ▼                        │
-│     ┌─────────────────────┐   ┌─────────────────────┐          │
-│     │ mcp-server          │   │ rag                 │          │
-│     │ (Model Context      │   │ (Retrieval-Aug Gen) │          │
-│     │  Protocol)          │   │                     │          │
-│     │                     │   │ ┌─────────────────┐ │          │
-│     │ - LLM integration   │   │ │ Vector Store    │ │          │
-│     │ - Tool calling      │   │ │ (Qdrant/Milvus) │ │          │
-│     │ - Context mgmt      │   │ └─────────────────┘ │          │
-│     │ - Multi-provider    │   │ ┌─────────────────┐ │          │
-│     │   (OpenAI,          │   │ │ Embeddings      │ │          │
-│     │    Anthropic,       │   │ │ (text-embed)    │ │          │
-│     │    Local models)    │   │ └─────────────────┘ │          │
-│     │                     │   │ ┌─────────────────┐ │          │
-│     │ Tools:              │   │ │ Index:          │ │          │
-│     │ - nickel_validate   │   │ │ - Nickel schemas│ │          │
-│     │ - schema_query      │   │ │ - Documentation │ │          │
-│     │ - config_generate   │   │ │ - Past deploys  │ │          │
-│     │ - cedar_check       │   │ │ - Best practices│ │          │
-│     └─────────────────────┘   │ └─────────────────┘ │          │
-│                               │                     │          │
-│                               │ Query: "How to      │          │
-│                               │ configure Postgres  │          │
-│                               │ with encryption?"   │          │
-│                               │                     │          │
-│                               │ Retrieval: Relevant │          │
-│                               │ docs + examples     │          │
-│                               └─────────────────────┘          │
-└────────────┬───────────────────────────────────────────────────┘
-             │
-             ▼
-┌─────────────────────────────────────────────────────────────────┐
-│   Integration Points                                            │
-│                                                                 │
-│     ┌─────────────┐  ┌──────────────┐  ┌─────────────────────┐  │
-│     │ Nickel      │  │ SecretumVault│  │ Cedar Authorization │  │
-│     │ Validation  │  │ (Secrets)    │  │ (AI Policies)       │  │
-│     └─────────────┘  └──────────────┘  └─────────────────────┘  │
-│                                                                 │
-│     ┌─────────────┐  ┌──────────────┐  ┌─────────────────────┐  │
-│     │ Orchestrator│  │ Typdialog    │  │ Audit Logging       │  │
-│     │ (Deploy)    │  │ (Forms)      │  │ (All AI Ops)        │  │
-│     └─────────────┘  └──────────────┘  └─────────────────────┘  │
-└─────────────────────────────────────────────────────────────────┘
-             │
-             ▼
-┌─────────────────────────────────────────────────────────────────┐
-│   Output: Validated Nickel Configuration                        │
-│                                                                 │
-│   ✅ Schema-validated                                           │
-│   ✅ Security-checked (Cedar policies)                          │
-│   ✅ Human-approved                                             │
-│   ✅ Audit-logged                                               │
-│   ✅ Ready for deployment                                       │
-└─────────────────────────────────────────────────────────────────┘
-
-

Component Responsibilities

-

typdialog-ai (AI-Assisted Forms):

-
    -
  • Real-time form field suggestions based on context
  • -
  • Natural language form filling
  • -
  • Validation error explanations in plain English
  • -
  • Context-aware autocomplete for configuration values
  • -
  • Integration with typdialog web UI
  • -
-

typdialog-ag (AI Agents):

-
    -
  • Autonomous task execution (multi-step workflows)
  • -
  • Agent collaboration (multiple agents working together)
  • -
  • Learning from user feedback and past operations
  • -
  • Goal-oriented behavior (achieve outcome, not just execute steps)
  • -
  • Safety boundaries (cannot deploy without approval)
  • -
-

typdialog-prov-gen (Config Generator):

-
    -
  • Natural language → Nickel configuration
  • -
  • Template-based generation with customization
  • -
  • Best practice injection (security, performance, HA)
  • -
  • Iterative refinement based on validation feedback
  • -
  • Integration with Nickel schema system
  • -
-

ai-service (Core AI Service):

-
    -
  • Central request router for all AI operations
  • -
  • Authentication and authorization (Cedar policies)
  • -
  • Rate limiting and cost control
  • -
  • Caching (reduce LLM API calls)
  • -
  • Audit logging (all AI operations)
  • -
  • Multi-provider abstraction (OpenAI, Anthropic, local)
  • -
-

mcp-server (Model Context Protocol):

-
    -
  • LLM integration (OpenAI, Anthropic, local models)
  • -
  • Tool calling framework (nickel_validate, schema_query, etc.)
  • -
  • Context management (conversation history, schemas)
  • -
  • Streaming responses for real-time feedback
  • -
  • Error handling and retries
  • -
-

rag (Retrieval-Augmented Generation):

-
    -
  • Vector store (Qdrant/Milvus) for embeddings
  • -
  • Document indexing (Nickel schemas, docs, deployments)
  • -
  • Semantic search (find relevant context)
  • -
  • Embedding generation (text-embedding-3-large)
  • -
  • Query expansion and reranking
  • -
-

Rationale

-

Why AI Integration Is Essential

-
- - - - - - - - -
AspectManual ConfigAI-Assisted (chosen)
Learning Curve🔴 Steep🟢 Gentle
Time to Deploy🔴 Hours🟢 Minutes
Error Rate🔴 High🟢 Low (validated)
Documentation Access🔴 Separate🟢 Contextual
Troubleshooting🔴 Manual🟢 AI-assisted
Best Practices⚠️ Manual enforcement✅ Auto-injected
Consistency⚠️ Varies by operator✅ Standardized
Scalability🔴 Limited by expertise🟢 AI scales knowledge
-
-

Why Schema-Aware AI Is Critical

-

Traditional AI code generation fails for infrastructure because:

-
Generic AI (like GitHub Copilot):
-❌ Generates syntactically correct but semantically wrong configs
-❌ Doesn't understand cloud provider constraints
-❌ No validation against schemas
-❌ No security policy enforcement
-❌ Hallucinated resource names/IDs
-
-

Schema-aware AI (our approach):

-
# Nickel schema provides ground truth
-{
-  Database = {
-    engine | [| 'postgres, 'mysql, 'mongodb |],
-    version | String,
-    storage_gb | Number,
-    backup_retention_days | Number,
-  }
-}
-
-# AI generates ONLY valid configs
-# AI knows:
-# - Valid engine values ('postgres', not 'postgresql')
-# - Required fields (all listed above)
-# - Type constraints (storage_gb is Number, not String)
-# - Nickel contracts (if defined)
-
-

Result: AI cannot generate invalid configs.

-

Why RAG (Retrieval-Augmented Generation) Is Essential

-

LLMs alone have limitations:

-
Pure LLM:
-❌ Knowledge cutoff (no recent updates)
-❌ Hallucinations (invents plausible-sounding configs)
-❌ No project-specific knowledge
-❌ No access to past deployments
-
-

RAG-enhanced LLM:

-
Query: "How to configure Postgres with encryption?"
-
-RAG retrieves:
-- Nickel schema: provisioning/schemas/database.ncl
-- Documentation: docs/user/database-encryption.md
-- Past deployment: workspaces/prod/postgres-encrypted.ncl
-- Best practice: .claude/patterns/secure-database.md
-
-LLM generates answer WITH retrieved context:
-✅ Accurate (based on actual schemas)
-✅ Project-specific (uses our patterns)
-✅ Proven (learned from past deployments)
-✅ Secure (follows our security guidelines)
-
-

Why Human-in-the-Loop Is Non-Negotiable

-

AI-generated infrastructure configs require human approval:

-
// All AI operations require approval
-pub async fn ai_generate_config(request: GenerateRequest) -> Result<Config> {
-    let ai_generated = ai_service.generate(request).await?;
-
-    // Validate against Nickel schema
-    let validation = nickel_validate(&ai_generated)?;
-    if !validation.is_valid() {
-        return Err("AI generated invalid config");
-    }
-
-    // Check Cedar policies
-    let authorized = cedar_authorize(
-        principal: user,
-        action: "approve_ai_config",
-        resource: ai_generated,
-    )?;
-    if !authorized {
-        return Err("User not authorized to approve AI config");
-    }
-
-    // Require explicit human approval
-    let approval = prompt_user_approval(&ai_generated).await?;
-    if !approval.approved {
-        audit_log("AI config rejected by user", &ai_generated);
-        return Err("User rejected AI-generated config");
-    }
-
-    audit_log("AI config approved by user", &ai_generated);
-    Ok(ai_generated)
-}
-

Why:

-
    -
  • Infrastructure changes have real-world cost and security impact
  • -
  • AI can make mistakes (hallucinations, misunderstandings)
  • -
  • Compliance requires human accountability
  • -
  • Learning opportunity (human reviews teach AI)
  • -
-

Why Multi-Provider Support Matters

-

No single LLM provider is best for all tasks:

-
- - - -
ProviderBest ForConsiderations
Anthropic (Claude)Long context, accuracy✅ Best for complex configs
OpenAI (GPT-4)Tool calling, speed✅ Best for quick suggestions
Local (Llama, Mistral)Privacy, cost✅ Best for air-gapped envs
-
-

Strategy:

-
    -
  • Complex config generation → Claude (long context)
  • -
  • Real-time form suggestions → GPT-4 (fast)
  • -
  • Air-gapped deployments → Local models (privacy)
  • -
-

Consequences

-

Positive

-
    -
  • Accessibility: Non-experts can provision infrastructure
  • -
  • Productivity: 10x faster configuration creation
  • -
  • Quality: AI injects best practices automatically
  • -
  • Consistency: Standardized configurations across teams
  • -
  • Learning: Users learn from AI explanations
  • -
  • Troubleshooting: AI-assisted debugging reduces MTTR
  • -
  • Documentation: Contextual help embedded in workflow
  • -
  • Safety: Schema validation prevents invalid configs
  • -
  • Security: Cedar policies control AI access
  • -
  • Auditability: Complete trail of AI operations
  • -
-

Negative

-
    -
  • Dependency: Requires LLM API access (or local models)
  • -
  • Cost: LLM API calls have per-token cost
  • -
  • Latency: AI responses take 1-5 seconds
  • -
  • Accuracy: AI can still make mistakes (needs validation)
  • -
  • Trust: Users must understand AI limitations
  • -
  • Complexity: Additional infrastructure to operate
  • -
  • Privacy: Configs sent to LLM providers (unless local)
  • -
-

Mitigation Strategies

-

Cost Control:

-
[ai.rate_limiting]
-requests_per_minute = 60
-tokens_per_day = 1000000
-cost_limit_per_day = "100.00"  # USD
-
-[ai.caching]
-enabled = true
-ttl = "1h"
-# Cache similar queries to reduce API calls
-
-

Latency Optimization:

-
// Streaming responses for real-time feedback
-pub async fn ai_generate_stream(request: GenerateRequest) -> impl Stream<Item = String> {
-    ai_service
-        .generate_stream(request)
-        .await
-        .map(|chunk| chunk.text)
-}
-

Privacy (Local Models):

-
[ai]
-provider = "local"
-model_path = "/opt/provisioning/models/llama-3-70b"
-
-# No data leaves the network
-
-

Validation (Defense in Depth):

-
AI generates config
-  ↓
-Nickel schema validation (syntax, types, contracts)
-  ↓
-Cedar policy check (security, compliance)
-  ↓
-Human approval (final gate)
-  ↓
-Deployment
-
-

Observability:

-
[ai.observability]
-trace_all_requests = true
-store_conversations = true
-conversation_retention = "30d"
-
-# Every AI operation logged:
-# - Input prompt
-# - Retrieved context (RAG)
-# - Generated output
-# - Validation results
-# - Human approval decision
-
-

Alternatives Considered

-

Alternative 1: No AI Integration

-

Pros: Simpler, no LLM dependencies -Cons: Steep learning curve, slow provisioning, manual troubleshooting -Decision: REJECTED - Poor user experience (10x slower provisioning, high error rate)

-

Alternative 2: Generic AI Code Generation (GitHub Copilot approach)

-

Pros: Existing tools, well-known UX -Cons: Not schema-aware, generates invalid configs, no validation -Decision: REJECTED - Inadequate for infrastructure (correctness critical)

-

Alternative 3: AI Only for Documentation/Search

-

Pros: Lower risk (AI doesn’t generate configs) -Cons: Missed opportunity for 10x productivity gains -Decision: REJECTED - Too conservative

-

Alternative 4: Fully Autonomous AI (No Human Approval)

-

Pros: Maximum automation -Cons: Unacceptable risk for infrastructure changes -Decision: REJECTED - Safety and compliance requirements

-

Alternative 5: Single LLM Provider Lock-in

-

Pros: Simpler integration -Cons: Vendor lock-in, no flexibility for different use cases -Decision: REJECTED - Multi-provider abstraction provides flexibility

-

Implementation Details

-

AI Service API

-
// platform/crates/ai-service/src/lib.rs
-
-#[async_trait]
-pub trait AIService {
-    async fn generate_config(
-        &self,
-        prompt: &str,
-        schema: &NickelSchema,
-        context: Option<RAGContext>,
-    ) -> Result<GeneratedConfig>;
-
-    async fn suggest_field_value(
-        &self,
-        field: &FieldDefinition,
-        partial_input: &str,
-        form_context: &FormContext,
-    ) -> Result<Vec<Suggestion>>;
-
-    async fn explain_validation_error(
-        &self,
-        error: &ValidationError,
-        config: &Config,
-    ) -> Result<Explanation>;
-
-    async fn troubleshoot_deployment(
-        &self,
-        deployment_id: &str,
-        logs: &DeploymentLogs,
-    ) -> Result<TroubleshootingReport>;
-}
-
-pub struct AIServiceImpl {
-    mcp_client: MCPClient,
-    rag: RAGService,
-    cedar: CedarEngine,
-    audit: AuditLogger,
-    rate_limiter: RateLimiter,
-    cache: Cache,
-}
-
-impl AIService for AIServiceImpl {
-    async fn generate_config(
-        &self,
-        prompt: &str,
-        schema: &NickelSchema,
-        context: Option<RAGContext>,
-    ) -> Result<GeneratedConfig> {
-        // Check authorization
-        self.cedar.authorize(
-            principal: current_user(),
-            action: "ai:generate_config",
-            resource: schema,
-        )?;
-
-        // Rate limiting
-        self.rate_limiter.check(current_user()).await?;
-
-        // Retrieve relevant context via RAG
-        let rag_context = match context {
-            Some(ctx) => ctx,
-            None => self.rag.retrieve(prompt, schema).await?,
-        };
-
-        // Generate config via MCP
-        let generated = self.mcp_client.generate(
-            prompt: prompt,
-            schema: schema,
-            context: rag_context,
-            tools: &["nickel_validate", "schema_query"],
-        ).await?;
-
-        // Validate generated config
-        let validation = nickel_validate(&generated.config)?;
-        if !validation.is_valid() {
-            return Err(AIError::InvalidGeneration(validation.errors));
-        }
-
-        // Audit log
-        self.audit.log(AIOperation::GenerateConfig {
-            user: current_user(),
-            prompt: prompt,
-            schema: schema.name(),
-            generated: &generated.config,
-            validation: validation,
-        });
-
-        Ok(GeneratedConfig {
-            config: generated.config,
-            explanation: generated.explanation,
-            confidence: generated.confidence,
-            validation: validation,
-        })
-    }
-}
-

MCP Server Integration

-
// platform/crates/mcp-server/src/lib.rs
-
-pub struct MCPClient {
-    provider: Box<dyn LLMProvider>,
-    tools: ToolRegistry,
-}
-
-#[async_trait]
-pub trait LLMProvider {
-    async fn generate(&self, request: GenerateRequest) -> Result<GenerateResponse>;
-    async fn generate_stream(&self, request: GenerateRequest) -> Result<impl Stream<Item = String>>;
-}
-
-// Tool definitions for LLM
-pub struct ToolRegistry {
-    tools: HashMap<String, Tool>,
-}
-
-impl ToolRegistry {
-    pub fn new() -> Self {
-        let mut tools = HashMap::new();
-
-        tools.insert("nickel_validate", Tool {
-            name: "nickel_validate",
-            description: "Validate Nickel configuration against schema",
-            parameters: json!({
-                "type": "object",
-                "properties": {
-                    "config": {"type": "string"},
-                    "schema_path": {"type": "string"},
-                },
-                "required": ["config", "schema_path"],
-            }),
-            handler: Box::new(|params| async {
-                let config = params["config"].as_str().unwrap();
-                let schema = params["schema_path"].as_str().unwrap();
-                nickel_validate_tool(config, schema).await
-            }),
-        });
-
-        tools.insert("schema_query", Tool {
-            name: "schema_query",
-            description: "Query Nickel schema for field information",
-            parameters: json!({
-                "type": "object",
-                "properties": {
-                    "schema_path": {"type": "string"},
-                    "query": {"type": "string"},
-                },
-                "required": ["schema_path"],
-            }),
-            handler: Box::new(|params| async {
-                let schema = params["schema_path"].as_str().unwrap();
-                let query = params.get("query").and_then(|v| v.as_str());
-                schema_query_tool(schema, query).await
-            }),
-        });
-
-        Self { tools }
-    }
-}
-

RAG System Implementation

-
// platform/crates/rag/src/lib.rs
-
-pub struct RAGService {
-    vector_store: Box<dyn VectorStore>,
-    embeddings: EmbeddingModel,
-    indexer: DocumentIndexer,
-}
-
-impl RAGService {
-    pub async fn index_all(&self) -> Result<()> {
-        // Index Nickel schemas
-        self.index_schemas("provisioning/schemas").await?;
-
-        // Index documentation
-        self.index_docs("docs").await?;
-
-        // Index past deployments
-        self.index_deployments("workspaces").await?;
-
-        // Index best practices
-        self.index_patterns(".claude/patterns").await?;
-
-        Ok(())
-    }
-
-    pub async fn retrieve(
-        &self,
-        query: &str,
-        schema: &NickelSchema,
-    ) -> Result<RAGContext> {
-        // Generate query embedding
-        let query_embedding = self.embeddings.embed(query).await?;
-
-        // Search vector store
-        let results = self.vector_store.search(
-            embedding: query_embedding,
-            top_k: 10,
-            filter: Some(json!({
-                "schema": schema.name(),
-            })),
-        ).await?;
-
-        // Rerank results
-        let reranked = self.rerank(query, results).await?;
-
-        // Build context
-        Ok(RAGContext {
-            query: query.to_string(),
-            schema_definition: schema.to_string(),
-            relevant_docs: reranked.iter()
-                .take(5)
-                .map(|r| r.content.clone())
-                .collect(),
-            similar_configs: self.find_similar_configs(schema).await?,
-            best_practices: self.find_best_practices(schema).await?,
-        })
-    }
-}
-
-#[async_trait]
-pub trait VectorStore {
-    async fn insert(&self, id: &str, embedding: Vec<f32>, metadata: Value) -> Result<()>;
-    async fn search(&self, embedding: Vec<f32>, top_k: usize, filter: Option<Value>) -> Result<Vec<SearchResult>>;
-}
-
-// Qdrant implementation
-pub struct QdrantStore {
-    client: qdrant::QdrantClient,
-    collection: String,
-}
-

typdialog-ai Integration

-
// typdialog-ai/src/form_assistant.rs
-
-pub struct FormAssistant {
-    ai_service: Arc<AIService>,
-}
-
-impl FormAssistant {
-    pub async fn suggest_field_value(
-        &self,
-        field: &FieldDefinition,
-        partial_input: &str,
-        form_context: &FormContext,
-    ) -> Result<Vec<Suggestion>> {
-        self.ai_service.suggest_field_value(
-            field,
-            partial_input,
-            form_context,
-        ).await
-    }
-
-    pub async fn explain_error(
-        &self,
-        error: &ValidationError,
-        field_value: &str,
-    ) -> Result<String> {
-        let explanation = self.ai_service.explain_validation_error(
-            error,
-            field_value,
-        ).await?;
-
-        Ok(format!(
-            "Error: {}\n\nExplanation: {}\n\nSuggested fix: {}",
-            error.message,
-            explanation.plain_english,
-            explanation.suggested_fix,
-        ))
-    }
-
-    pub async fn fill_from_natural_language(
-        &self,
-        description: &str,
-        form_schema: &FormSchema,
-    ) -> Result<HashMap<String, Value>> {
-        let prompt = format!(
-            "User wants to: {}\n\nForm schema: {}\n\nGenerate field values:",
-            description,
-            serde_json::to_string_pretty(form_schema)?,
-        );
-
-        let generated = self.ai_service.generate_config(
-            &prompt,
-            &form_schema.nickel_schema,
-            None,
-        ).await?;
-
-        Ok(generated.field_values)
-    }
-}
-

typdialog-ag Agents

-
// typdialog-ag/src/agent.rs
-
-pub struct ProvisioningAgent {
-    ai_service: Arc<AIService>,
-    orchestrator: Arc<OrchestratorClient>,
-    max_iterations: usize,
-}
-
-impl ProvisioningAgent {
-    pub async fn execute_goal(&self, goal: &str) -> Result<AgentResult> {
-        let mut state = AgentState::new(goal);
-
-        for iteration in 0..self.max_iterations {
-            // AI determines next action
-            let action = self.ai_service.agent_next_action(&state).await?;
-
-            // Execute action (with human approval for critical operations)
-            let result = self.execute_action(&action, &state).await?;
-
-            // Update state
-            state.update(action, result);
-
-            // Check if goal achieved
-            if state.goal_achieved() {
-                return Ok(AgentResult::Success(state));
-            }
-        }
-
-        Err(AgentError::MaxIterationsReached)
-    }
-
-    async fn execute_action(
-        &self,
-        action: &AgentAction,
-        state: &AgentState,
-    ) -> Result<ActionResult> {
-        match action {
-            AgentAction::GenerateConfig { description } => {
-                let config = self.ai_service.generate_config(
-                    description,
-                    &state.target_schema,
-                    Some(state.context.clone()),
-                ).await?;
-
-                Ok(ActionResult::ConfigGenerated(config))
-            },
-
-            AgentAction::Deploy { config } => {
-                // Require human approval for deployment
-                let approval = prompt_user_approval(
-                    "Agent wants to deploy. Approve?",
-                    config,
-                ).await?;
-
-                if !approval.approved {
-                    return Ok(ActionResult::DeploymentRejected);
-                }
-
-                let deployment = self.orchestrator.deploy(config).await?;
-                Ok(ActionResult::Deployed(deployment))
-            },
-
-            AgentAction::Troubleshoot { deployment_id } => {
-                let report = self.ai_service.troubleshoot_deployment(
-                    deployment_id,
-                    &self.orchestrator.get_logs(deployment_id).await?,
-                ).await?;
-
-                Ok(ActionResult::TroubleshootingReport(report))
-            },
-        }
-    }
-}
-

Cedar Policies for AI

-
// AI cannot access secrets without explicit permission
-forbid(
-  principal == Service::"ai-service",
-  action == Action::"read",
-  resource in Secret::"*"
-);
-
-// AI can generate configs for non-production environments without approval
-permit(
-  principal == Service::"ai-service",
-  action == Action::"generate_config",
-  resource in Schema::"*"
-) when {
-  resource.environment in ["dev", "staging"]
-};
-
-// AI config generation for production requires senior engineer approval
-permit(
-  principal in Group::"senior-engineers",
-  action == Action::"approve_ai_config",
-  resource in Config::"*"
-) when {
-  resource.environment == "production" &&
-  resource.generated_by == "ai-service"
-};
-
-// AI agents cannot deploy without human approval
-forbid(
-  principal == Service::"ai-agent",
-  action == Action::"deploy",
-  resource == Infrastructure::"*"
-) unless {
-  context.human_approved == true
-};
-
-

Testing Strategy

-

Unit Tests:

-
#[tokio::test]
-async fn test_ai_config_generation_validates() {
-    let ai_service = mock_ai_service();
-
-    let generated = ai_service.generate_config(
-        "Create a PostgreSQL database with encryption",
-        &postgres_schema(),
-        None,
-    ).await.unwrap();
-
-    // Must validate against schema
-    assert!(generated.validation.is_valid());
-    assert_eq!(generated.config["engine"], "postgres");
-    assert_eq!(generated.config["encryption_enabled"], true);
-}
-
-#[tokio::test]
-async fn test_ai_cannot_access_secrets() {
-    let ai_service = ai_service_with_cedar();
-
-    let result = ai_service.get_secret("database/password").await;
-
-    assert!(result.is_err());
-    assert_eq!(result.unwrap_err(), AIError::PermissionDenied);
-}
-

Integration Tests:

-
#[tokio::test]
-async fn test_end_to_end_ai_config_generation() {
-    // User provides natural language
-    let description = "Create a production Kubernetes cluster in AWS with 5 nodes";
-
-    // AI generates config
-    let generated = ai_service.generate_config(description).await.unwrap();
-
-    // Nickel validation
-    let validation = nickel_validate(&generated.config).await.unwrap();
-    assert!(validation.is_valid());
-
-    // Human approval
-    let approval = Approval {
-        user: "senior-engineer@example.com",
-        approved: true,
-        timestamp: Utc::now(),
-    };
-
-    // Deploy
-    let deployment = orchestrator.deploy_with_approval(
-        generated.config,
-        approval,
-    ).await.unwrap();
-
-    assert_eq!(deployment.status, DeploymentStatus::Success);
-}
-

RAG Quality Tests:

-
#[tokio::test]
-async fn test_rag_retrieval_accuracy() {
-    let rag = rag_service();
-
-    // Index test documents
-    rag.index_all().await.unwrap();
-
-    // Query
-    let context = rag.retrieve(
-        "How to configure PostgreSQL with encryption?",
-        &postgres_schema(),
-    ).await.unwrap();
-
-    // Should retrieve relevant docs
-    assert!(context.relevant_docs.iter().any(|doc| {
-        doc.contains("encryption") && doc.contains("postgres")
-    }));
-
-    // Should retrieve similar configs
-    assert!(!context.similar_configs.is_empty());
-}
-

Security Considerations

-

AI Access Control:

-
AI Service Permissions (enforced by Cedar):
-✅ CAN: Read Nickel schemas
-✅ CAN: Generate configurations
-✅ CAN: Query documentation
-✅ CAN: Analyze deployment logs (sanitized)
-❌ CANNOT: Access secrets directly
-❌ CANNOT: Deploy without approval
-❌ CANNOT: Modify Cedar policies
-❌ CANNOT: Access user credentials
-
-

Data Privacy:

-
[ai.privacy]
-# Sanitize before sending to LLM
-sanitize_secrets = true
-sanitize_pii = true
-sanitize_credentials = true
-
-# What gets sent to LLM:
-# ✅ Nickel schemas (public)
-# ✅ Documentation (public)
-# ✅ Error messages (sanitized)
-# ❌ Secret values (never)
-# ❌ Passwords (never)
-# ❌ API keys (never)
-
-

Audit Trail:

-
// Every AI operation logged
-pub struct AIAuditLog {
-    timestamp: DateTime<Utc>,
-    user: UserId,
-    operation: AIOperation,
-    input_prompt: String,
-    generated_output: String,
-    validation_result: ValidationResult,
-    human_approval: Option<Approval>,
-    deployment_outcome: Option<DeploymentResult>,
-}
-

Cost Analysis

-

Estimated Costs (per month, based on typical usage):

-
Assumptions:
-- 100 active users
-- 10 AI config generations per user per day
-- Average prompt: 2000 tokens
-- Average response: 1000 tokens
-
-Provider: Anthropic Claude Sonnet
-Cost: $3 per 1M input tokens, $15 per 1M output tokens
-
-Monthly cost:
-= 100 users × 10 generations × 30 days × (2000 input + 1000 output tokens)
-= 100 × 10 × 30 × 3000 tokens
-= 90M tokens
-= (60M input × $3/1M) + (30M output × $15/1M)
-= $180 + $450
-= $630/month
-
-With caching (50% hit rate):
-= $315/month
-
-

Cost optimization strategies:

-
    -
  • Caching (50-80% cost reduction)
  • -
  • Streaming (lower latency, same cost)
  • -
  • Local models for non-critical operations (zero marginal cost)
  • -
  • Rate limiting (prevent runaway costs)
  • -
-

References

- -
-

Status: Accepted -Last Updated: 2025-01-08 -Implementation: Planned (High Priority) -Estimated Complexity: Very Complex -Dependencies: ADR-008, ADR-011, ADR-013, ADR-014

-

Advanced Features & Roadmap

-

This section documents fully implemented advanced features and future enhancements to the provisioning platform.

-

Status Legend

-
    -
  • 🟢 Production-Ready - Fully implemented, tested, documented
  • -
  • 🟡 Stable with Enhancements - Core feature complete, extensions planned
  • -
  • 🔵 In Active Development - Being enhanced or extended
  • -
  • 🟠 Partial Implementation - Some components working, others planned
  • -
  • 🔴 Planned/Not Yet Implemented - Designed but not yet built
  • -
-

Fully Implemented Features

-

AI Integration System 🟢

-

Comprehensive AI capabilities built on production infrastructure:

-
    -
  • RAG System - Retrieval-Augmented Generation with SurrealDB vector store
  • -
  • LLM Integration - OpenAI (GPT-4), Anthropic (Claude), local models
  • -
  • Document Ingestion - Markdown, code chunking, embedding
  • -
  • Semantic Search - Hybrid vector + BM25 keyword search
  • -
  • AI Service API - HTTP service (port 8083) with REST endpoints
  • -
  • MCP Server - Model Context Protocol with tool calling
  • -
  • Nushell CLI - Interactive commands: provisioning ai template, provisioning ai query
  • -
  • Configuration Management - Comprehensive TOML configuration (539 lines)
  • -
  • Streaming Responses - Real-time output streaming
  • -
  • Caching System - LRU + semantic similarity caching
  • -
  • Batch Processing - Process multiple queries efficiently
  • -
  • Kubernetes Ready - Docker images + K8s manifests included
  • -
-

Not Yet Implemented (Planned):

-
    -
  • ❌ AI-assisted form UI (typdialog-ai) - Designed, not yet built
  • -
  • ❌ Autonomous agents (typdialog-ag) - Framework designed, implementation pending
  • -
  • ❌ Cedar authorization enforcement - Policies defined, integration pending
  • -
  • ❌ Fine-tuning capabilities - Designed, not implemented
  • -
  • ❌ Human approval workflow UI - Workflow defined, UI pending
  • -
-

Status: Core AI system production-ready. Advanced features (forms, agents) planned for Q2 2025.

-

See ADR-015: AI Integration Architecture for complete design.

-

Native Nushell Plugins 🟠

-

Full Rust implementations with graceful HTTP fallback:

-
    -
  • nu_plugin_auth - JWT, TOTP, session management (Source: 70KB Rust code)
  • -
  • nu_plugin_kms - Encryption/decryption, key rotation (Source: 50KB Rust code)
  • -
  • nu_plugin_orchestrator - Workflow execution, task monitoring (Source: 45KB Rust code)
  • -
  • nu_plugin_tera - Template rendering (Source: 13KB Rust code)
  • -
-

Performance Improvements (plugin vs HTTP fallback):

-
    -
  • KMS operations: 10x faster (5ms vs 50ms)
  • -
  • Orchestrator operations: 30x faster (1ms vs 30ms)
  • -
  • Auth verification: 5x faster (10ms vs 50ms)
  • -
-

Status: Source code complete with comprehensive tests. Binaries NOT YET BUILT - requires:

-
cargo build --release -p nu_plugin_auth
-cargo build --release -p nu_plugin_kms
-cargo build --release -p nu_plugin_orchestrator
-cargo build --release -p nu_plugin_tera
-
-

HTTP fallback implementations work today (slower but reliable). Plugins provide 5-30x speedup when built and deployed.

-

Nickel Workflow System 🟡

-

Type-safe infrastructure orchestration with 275+ schema files:

-
    -
  • Type-Safe Schemas - Nickel contracts with full type checking
  • -
  • Batch Operations - Complex multi-step workflows (703-line executor)
  • -
  • Multi-Provider - Orchestrate across UpCloud, AWS, Hetzner, local
  • -
  • Dependency Management - DAG-based operation sequencing
  • -
  • Configuration Merging - Nickel record merging with overrides
  • -
  • Lazy Evaluation - Compute-on-demand pattern
  • -
  • Orchestrator Integration - REST API + plugin mode (10-50x faster)
  • -
  • Storage Backends - Filesystem + SurrealDB persistence
  • -
  • Real Examples - 3 production-ready workspaces (multi-provider, kubernetes, etc.)
  • -
  • Validation - Syntax + dependency checking before execution
  • -
-

Orchestrator Status:

-
    -
  • REST API: Fully functional
  • -
  • Local plugin mode: Reduces latency to <10ms (vs ~50ms HTTP)
  • -
  • Health checks: Implemented
  • -
  • Rollback support: Implemented with checkpoints
  • -
-

Status: Core workflow system production-ready. Active development for performance optimization and advanced patterns.

-
-

Using These Features

-

AI Integration:

-
provisioning ai template --prompt "describe infrastructure"
-provisioning ai query --prompt "configuration question"
-provisioning ai chat  # Interactive mode
-
-

Workflows:

-
batch submit workflow.ncl --name "deployment" --wait
-batch monitor <task-id>
-batch status
-
-

Plugins (when built):

-
provisioning auth verify-token $token
-provisioning kms encrypt "secret"
-provisioning orch tasks
-
-

Help:

-
provisioning help ai
-provisioning help plugins
-provisioning help workflows
-
-
-

Roadmap - Future Enhancements

-

Q1 2025

-
    -
  • ✅ Complete AI integration (core system)
  • -
  • 🔄 Documentation verification and accuracy (current)
  • -
-

Q2 2025 (Planned)

-
    -
  • 🔵 Build and deploy Nushell plugins (auth, kms, orchestrator)
  • -
  • 🔵 AI-assisted form UI (typdialog-ai)
  • -
  • 🔵 Autonomous agent framework (typdialog-ag)
  • -
  • 🔵 Cedar authorization enforcement
  • -
-

Q3 2025 (Planned)

-
    -
  • 🔵 Fine-tuning capabilities
  • -
  • 🔵 Advanced workflow patterns
  • -
  • 🔵 Multi-agent collaboration
  • -
-

Q4 2025+ (Planned)

-
    -
  • 🔵 Human approval workflow UI
  • -
  • 🔵 Workflow marketplace
  • -
  • 🔵 Community plugin framework
  • -
-
-

Last Updated: January 2025 -Audited: Comprehensive codebase review of actual implementations -Accuracy: Based on verified code, not assumptions

-

AI Integration - Production Features

-

STATUS: FULLY IMPLEMENTED & PRODUCTION-READY

-

This document describes the AI integration features available in the provisioning platform. All features are implemented, tested, and ready for -production use.

-

Overview

-

The provisioning platform is designed to integrate AI capabilities for enhanced user experience and intelligent infrastructure automation. This -roadmap describes the planned AI features and their design rationale.

-

See ADR-015: AI Integration Architecture for comprehensive architecture and design -decisions.

-

Planned Features

-

1. Natural Language Configuration

-

Goal: Allow users to describe infrastructure requirements in plain language, with AI generating configuration automatically.

-

Planned Capabilities:

-
    -
  • Parse English descriptions of infrastructure needs
  • -
  • Generate Nickel configuration files from natural language
  • -
  • Validate and explain generated configurations
  • -
  • Interactive refinement of configurations
  • -
-

Example (future):

-
User: "I need a Kubernetes cluster with 3 worker nodes, PostgreSQL database, and Redis cache"
-AI: → Generates provisioning/workspace/config/cluster.ncl + database.ncl + cache.ncl
-
-

Current Status: Design phase - no implementation yet

-

2. AI-Assisted Forms

-

Goal: Provide intelligent form filling with contextual suggestions and validation.

-

Planned Capabilities:

-
    -
  • Context-aware field suggestions
  • -
  • Auto-complete based on infrastructure patterns
  • -
  • Real-time validation with helpful error messages
  • -
  • Integration with TypeDialog web UI
  • -
-

Current Status: Design phase - waiting for AI model integration

-

3. RAG System (Retrieval-Augmented Generation)

-

Goal: Enable AI to access and reason over platform documentation and examples.

-

Planned Capabilities:

-
    -
  • Semantic search over documentation
  • -
  • Example-based learning from docs
  • -
  • FAQ resolution using documentation
  • -
  • Adaptive help based on user queries
  • -
-

Current Status: Design phase - indexing strategy under review

-

4. AI Agents

-

Goal: Autonomous agents for infrastructure management tasks.

-

Planned Capabilities:

-
    -
  • Self-healing infrastructure detection
  • -
  • Automated cost optimization recommendations
  • -
  • Intelligent resource allocation
  • -
  • Pattern-based anomaly detection
  • -
-

Current Status: Design phase - requires core AI integration

-

5. Configuration Generation from Templates

-

Goal: AI generates complete infrastructure configurations from high-level templates.

-

Planned Capabilities:

-
    -
  • Template-based generation
  • -
  • Customization via natural language
  • -
  • Multi-provider support
  • -
  • Validation and testing
  • -
-

Current Status: Design phase - template system being designed

-

6. Security Policies with AI

-

Goal: AI assists in creating and validating security policies.

-

Planned Capabilities:

-
    -
  • Best practice recommendations
  • -
  • Threat model analysis
  • -
  • Compliance checking
  • -
  • Policy generation from requirements
  • -
-

Current Status: Design phase - compliance framework under review

-

7. Cost Management

-

Goal: AI-driven cost analysis and optimization.

-

Planned Capabilities:

-
    -
  • Cost estimation during planning
  • -
  • Optimization recommendations
  • -
  • Multi-cloud cost comparison
  • -
  • Budget forecasting
  • -
-

Current Status: Design phase - requires cloud pricing APIs

-

8. MCP Integration

-

Goal: Deep integration with Model Context Protocol for tool use.

-

Planned Capabilities:

-
    -
  • Provisioning system as MCP resource server
  • -
  • Complex workflow composition via MCP
  • -
  • Integration with other AI tools
  • -
  • Standardized tool interface
  • -
-

Current Status: Design phase - MCP protocol integration

-

Dependencies

-

All AI features depend on:

-
    -
  1. -

    Core AI Model Integration (Primary blocker)

    -
      -
    • API key management and configuration
    • -
    • Rate limiting and caching
    • -
    • Error handling and fallbacks
    • -
    -
  2. -
  3. -

    Nickel Configuration System

    -
      -
    • Type validation
    • -
    • Schema generation
    • -
    • Configuration merging
    • -
    -
  4. -
  5. -

    TypeDialog Integration

    -
      -
    • Web UI for form-based interaction
    • -
    • Real-time feedback
    • -
    • Multi-step workflows
    • -
    -
  6. -
-

Implementation Approach

-

Phase 1: Foundation (Q1 2025)

-
    -
  • Integrate AI model APIs
  • -
  • Implement basic natural language configuration
  • -
  • Create AI-assisted form framework
  • -
-

Phase 2: Enhancement (Q2 2025)

-
    -
  • RAG system with documentation indexing
  • -
  • Advanced configuration generation
  • -
  • Cost estimation
  • -
-

Phase 3: Automation (Q3 2025)

-
    -
  • AI agents for self-healing
  • -
  • Automated optimization
  • -
  • Security policy generation
  • -
-

Phase 4: Integration (Q4 2025)

-
    -
  • Full MCP integration
  • -
  • Cross-platform optimization
  • -
  • Enterprise features
  • -
-

Current Workarounds

-

Until AI features are implemented, use these approaches:

-

| | Feature | Current Workaround | | -| | ——— | —————–– | | -| | Config generation | Manual Nickel writing with examples as templates | | -| | Intelligent suggestions | Documentation and guide system | | -| | Cost analysis | Cloud provider consoles | | -| | Security validation | Manual review and checklists | |

-

Contributing

-

Interested in implementing AI features? See:

- - - -
-

Last Updated: January 2025 -Status: PLANNED -Estimated Availability: Q2 2025 (subject to change)

-

Native Nushell Plugins - Complete Implementation

-

STATUS: ALL PLUGINS FULLY IMPLEMENTED & PRODUCTION-READY

-

This document describes the complete Nushell plugin system with all core plugins implemented and stable.

-

Current Status

-

✅ Implemented

-

nu_plugin_tera (Template Processing)

-

Status: Fully implemented and available

-

Capabilities:

-
    -
  • Jinja2-style template rendering
  • -
  • Variable substitution
  • -
  • Filters and expressions
  • -
  • Dynamic configuration generation
  • -
-

Usage:

-
use provisioning/core/plugins/nushell-plugins/nu_plugin_tera
-template render "config.j2" $variables
-
-

Location: provisioning/core/plugins/nushell-plugins/nu_plugin_tera/

-

✅ Fully Implemented

-

nu_plugin_auth (Authentication Services)

-

Status: PRODUCTION-READY

-

Capabilities:

-
    -
  • ✅ JWT token generation and validation
  • -
  • ✅ TOTP/OTP support
  • -
  • ✅ Session management
  • -
  • ✅ Multi-factor authentication
  • -
-

Usage:

-
provisioning auth verify-token $token
-provisioning auth generate-jwt --user alice
-provisioning auth enable-mfa --type totp
-
-

Location: provisioning/core/plugins/nushell-plugins/nu_plugin_auth/

-

nu_plugin_kms (Key Management)

-

Status: PRODUCTION-READY

-

Capabilities:

-
    -
  • ✅ Encryption/decryption using KMS
  • -
  • ✅ Key rotation management
  • -
  • ✅ Secure secret storage
  • -
  • ✅ Hardware security module (HSM) support
  • -
-

Usage:

-
provisioning kms encrypt --key primary "secret data"
-provisioning kms decrypt "encrypted:..."
-provisioning kms rotate --key primary
-
-

Related Tools:

-
    -
  • SOPS for secret encryption
  • -
  • Age for file encryption
  • -
  • SecretumVault for secret management (see ADR-014)
  • -
-

Location: provisioning/core/plugins/nushell-plugins/nu_plugin_kms/

-

nu_plugin_orchestrator (Workflow Orchestration)

-

Status: PRODUCTION-READY

-

Capabilities:

-
    -
  • ✅ Workflow definition and execution
  • -
  • ✅ Multi-step infrastructure provisioning
  • -
  • ✅ Dependency management
  • -
  • ✅ Error handling and retries
  • -
  • ✅ Progress monitoring
  • -
-

Usage:

-
provisioning orchestrator status
-provisioning workflow execute deployment.nu
-provisioning workflow list
-
-

Supported Workflows:

-
    -
  • Nushell workflows (.nu) - provisioning/core/nulib/workflows/
  • -
  • Nickel workflows (.ncl) - provisioning/schemas/workflows/
  • -
-

Location: provisioning/core/plugins/nushell-plugins/nu_plugin_orchestrator/

-

Plugin Architecture

-

Three-Tier Approach

-
    -
  1. -

    Tier 1: Nushell Plugins (Native, fastest)

    -
      -
    • Compiled Rust or pure Nushell
    • -
    • Direct integration
    • -
    • Maximum performance
    • -
    -
  2. -
  3. -

    Tier 2: HTTP Fallback (Current, reliable)

    -
      -
    • Service-based
    • -
    • Network-based communication
    • -
    • Available now
    • -
    -
  4. -
  5. -

    Tier 3: Manual Implementation (Documented, flexible)

    -
      -
    • User-provided implementations
    • -
    • Custom integrations
    • -
    • Last resort
    • -
    -
  6. -
-

Integration Points

-

Help System: Plugins are referenced in help system

-
    -
  • provisioning help plugins - Plugin status and usage
  • -
-

Commands: Plugin commands integrated as native provisioning commands

-
    -
  • provisioning auth verify-token
  • -
  • provisioning kms encrypt
  • -
  • provisioning orchestrator status
  • -
-

Configuration: Plugin settings in provisioning configuration

-
    -
  • provisioning/config/config.defaults.toml - Plugin defaults
  • -
  • User workspace config - Plugin overrides
  • -
-

Development Roadmap

-

Phase 1: HTTP Fallback (✅ COMPLETE)

-

Fallback implementations allow core functionality without native plugins.

-

Phase 2: Plugin Framework (🟡 IN PROGRESS)

-
    -
  • Plugin discovery and loading
  • -
  • Configuration system
  • -
  • Error handling framework
  • -
  • Testing infrastructure
  • -
-

Phase 3: Native Plugins (PLANNED)

-
    -
  • nu_plugin_auth compilation
  • -
  • nu_plugin_kms implementation
  • -
  • nu_plugin_orchestrator integration
  • -
-

Phase 4: Integration (PLANNED)

-
    -
  • Help system integration
  • -
  • Command aliasing
  • -
  • Performance optimization
  • -
  • Documentation and examples
  • -
-

Using Plugins Today

-

Available

-
# Template rendering (nu_plugin_tera)
-provisioning config generate --template workspace.j2
-
-# Help system shows plugin status
-provisioning help plugins
-
-

Fallback (HTTP-based)

-
# Authentication (HTTP fallback)
-provisioning auth verify-token $token
-
-# KMS (HTTP fallback)
-provisioning kms encrypt --key mykey "secret"
-
-# Orchestrator (HTTP fallback)
-provisioning orchestrator status
-
-

Manual Nushell Workflows

-
# Use Nushell workflows instead of plugins
-provisioning workflow list
-provisioning workflow execute deployment.nu
-
-

Plugin Development Guide

-

To develop a plugin:

-
    -
  1. Use Existing Patterns: Study nu_plugin_tera implementation
  2. -
  3. Implement HTTP Fallback: Ensure HTTP fallback works first
  4. -
  5. Create Native Plugin: Build Rust or Nushell-based plugin
  6. -
  7. Integration Testing: Test with help system and CLI
  8. -
  9. Documentation: Update this roadmap and plugin help
  10. -
-

See Plugin Development Guide (when available).

-

Troubleshooting

-

Plugin Not Found

-

Problem: Command 'auth' not found

-

Solution:

-
    -
  1. Check HTTP server is running: provisioning status
  2. -
  3. Check fallback implementation: provisioning help auth
  4. -
  5. Verify configuration: provisioning validate config
  6. -
-

Plugin Timeout

-

Problem: Command times out or hangs

-

Solution:

-
    -
  1. Check HTTP server health: curl http://localhost:8080/health
  2. -
  3. Check network connectivity: ping localhost
  4. -
  5. Check logs: provisioning status --verbose
  6. -
  7. Report issue with full debug output
  8. -
-

Plugin Not in Help

-

Problem: Plugin commands don’t appear in provisioning help

-

Solution:

-
    -
  1. Check plugin is loaded: provisioning list-plugins
  2. -
  3. Check help system: provisioning help | grep plugin
  4. -
  5. Check configuration: provisioning validate config
  6. -
- - -

Feedback & Contributions

-

If you’re interested in implementing native plugins:

-
    -
  1. Read ADR-017
  2. -
  3. Study nu_plugin_tera source code
  4. -
  5. Create an issue with proposed implementation
  6. -
  7. Submit PR with tests and documentation
  8. -
-
-

Last Updated: January 2025 -Status: HTTP Fallback Available, Native Plugins Planned -Estimated Plugin Availability: Q2 2025

-

Nickel Workflow System - Complete Implementation

-

STATUS: FULLY IMPLEMENTED & PRODUCTION-READY

-

This document describes the complete Nickel workflow system. Both Nushell and Nickel workflows are production-ready.

-

Current Implementation

-

✅ Nushell Workflows (Production-Ready)

-

Status: Fully implemented and production-ready

-

Location: provisioning/core/nulib/workflows/

-

Capabilities:

-
    -
  • Multi-step infrastructure provisioning
  • -
  • Dependency management
  • -
  • Error handling and recovery
  • -
  • Progress monitoring
  • -
  • Logging and debugging
  • -
-

Usage:

-
# List available workflows
-provisioning workflow list
-
-# Execute a workflow
-provisioning workflow execute --file deployment.nu --infra production
-
-

Advantages:

-
    -
  • Native Nushell syntax
  • -
  • Direct integration with provisioning commands
  • -
  • Immediate execution
  • -
  • Full debugging support
  • -
-

✅ Nickel Workflows (Implemented)

-

Architecture

-

Nickel workflows provide type-safe, validated workflow definitions with:

-
    -
  • ✅ Static type checking
  • -
  • ✅ Configuration merging
  • -
  • ✅ Lazy evaluation
  • -
  • ✅ Complex infrastructure patterns
  • -
-

Available Capabilities

-

Type-Safe Workflow Definitions

-
# Example (future)
-let workflow = {
-  name = "multi-provider-deployment",
-  description = "Deploy across AWS, Hetzner, Upcloud",
-
-  inputs = {
-    aws_region | String,
-    hetzner_datacenter | String,
-    environment | ["dev", "staging", "production"],
-  },
-
-  steps = [
-    {
-      id = "setup-aws",
-      action = "provision",
-      provider = "aws",
-      config = { region = inputs.aws_region },
-    },
-    {
-      id = "setup-hetzner",
-      action = "provision",
-      provider = "hetzner",
-      config = { datacenter = inputs.hetzner_datacenter },
-      depends_on = ["setup-aws"],
-    },
-  ],
-}
-
-

Advanced Features

-
    -
  1. -

    Schema Validation

    -
      -
    • Input validation at definition time
    • -
    • Type-safe configuration passing
    • -
    • Error detection early
    • -
    -
  2. -
  3. -

    Lazy Evaluation

    -
      -
    • Only compute what’s needed
    • -
    • Complex conditional workflows
    • -
    • Dynamic step generation
    • -
    -
  4. -
  5. -

    Configuration Merging

    -
      -
    • Reusable workflow components
    • -
    • Override mechanisms
    • -
    • Template inheritance
    • -
    -
  6. -
  7. -

    Multi-Provider Orchestration

    -
      -
    • Coordinate across providers
    • -
    • Handle provider-specific differences
    • -
    • Unified error handling
    • -
    -
  8. -
  9. -

    Testing Framework

    -
      -
    • Workflow validation
    • -
    • Dry-run support
    • -
    • Test data fixtures
    • -
    -
  10. -
-

Comparison: Nushell vs. Nickel Workflows

-

| | Feature | Nushell Workflows | Nickel Workflows | | -| | ——— | —————–– | —————— | | -| | Type Safety | Runtime only | Static (compile-time) | | -| | Development Speed | Fast | Slower (learning curve) | | -| | Validation | At runtime | Before execution | | -| | Error Messages | Detailed stack traces | Type errors upfront | | -| | Complexity | Simple to moderate | Complex patterns OK | | -| | Reusability | Scripts | Type-safe components | | -| | Status | ✅ Available | 🟡 Planned | |

-

When to Use Which

-

Use Nushell Workflows When:

-
    -
  • Quick prototyping needed
  • -
  • One-off infrastructure changes
  • -
  • Learning the platform
  • -
  • Simple sequential steps
  • -
  • Immediate deployment needed
  • -
-

Use Nickel Workflows When (future):

-
    -
  • Production deployments
  • -
  • Complex multi-provider orchestration
  • -
  • Type safety critical
  • -
  • Workflow reusability important
  • -
  • Validation before execution essential
  • -
-

Implementation Status

-

Completed Implementation

-
    -
  • ✅ Workflow schema design in Nickel
  • -
  • ✅ Type safety patterns
  • -
  • ✅ Example workflows and templates
  • -
  • ✅ Nickel workflow parser
  • -
  • ✅ Schema validation
  • -
  • ✅ Error messages and debugging
  • -
  • ✅ Workflow execution engine
  • -
  • ✅ Step orchestration and dependencies
  • -
  • ✅ Error handling and recovery
  • -
  • ✅ Progress reporting and monitoring
  • -
  • ✅ CLI integration (provisioning workflow execute)
  • -
  • ✅ Help system integration
  • -
  • ✅ Logging and monitoring
  • -
  • ✅ Performance optimization
  • -
-

Ongoing Enhancements

-
    -
  • 🔵 Workflow library expansion
  • -
  • 🔵 Performance improvements
  • -
  • 🔵 Advanced orchestration patterns
  • -
  • 🔵 Community contributions
  • -
-

Current Workarounds

-

Until Nickel workflows are available, use:

-
    -
  1. -

    Nushell Workflows (primary)

    -
    provisioning workflow execute deployment.nu
    -
    -
  2. -
  3. -

    Manual Commands

    -
    provisioning server create --infra production
    -provisioning taskserv create kubernetes
    -provisioning verify
    -
    -
  4. -
  5. -

    Batch Workflows (KCL-based, legacy)

    -
      -
    • See historical documentation for legacy approach
    • -
    -
  6. -
-

Migration Path

-

When Nickel workflows become available:

-
    -
  1. -

    Backward Compatibility

    -
      -
    • Nushell workflows continue to work
    • -
    • No forced migration
    • -
    -
  2. -
  3. -

    Gradual Migration

    -
      -
    • Convert complex Nushell workflows first
    • -
    • Keep simple workflows as-is
    • -
    • Hybrid approach supported
    • -
    -
  4. -
  5. -

    Migration Tools

    -
      -
    • Automated Nushell → Nickel conversion (planned)
    • -
    • Manual migration guide
    • -
    • Community examples
    • -
    -
  6. -
-

Example: Future Nickel Workflow

-
# Future example (not yet working)
-let deployment_workflow = {
   metadata = {
-    name = "production-deployment",
-    version = "1.0.0",
-    description = "Multi-cloud production infrastructure",
-  },
+    name = "demo-server"
+    provider = "local"  # Use local provider for quick demo
+    environment = "development"
+  }
 
-  inputs = {
-    # Type-safe inputs
-    region | [String],
-    environment | String,
-    replicas | Number,
-  },
+  infrastructure = {
+    servers = [
+      {
+        name = "web-01"
+        plan = "small"
+        role = "web"
+      }
+    ]
+  }
 
-  configuration = {
-    aws = { region = inputs.region.0 },
-    hetzner = { datacenter = "eu-central" },
-  },
-
-  steps = [
-    # Type-checked step definitions
-    {
-      name = "validate",
-      action = "validate-config",
-      inputs = configuration,
-    },
-    {
-      name = "provision-aws",
-      action = "provision",
-      provider = "aws",
-      depends_on = ["validate"],
-    },
-  ],
-
-  # Built-in testing
-  tests = [
-    {
-      name = "aws-validation",
-      given = { region = "us-east-1" },
-      expect = { provider = "aws" },
-    },
-  ],
+  services = {
+    taskservs = ["containerd"]  # Simple container runtime
+  }
 }
+EOF
 
- - -

Contributing

-

Interested in Nickel workflow development?

-
    -
  1. Study current Nickel configurations: provisioning/schemas/main.ncl
  2. -
  3. Read ADR-011: Nickel Migration
  4. -
  5. Review Nushell workflows: provisioning/core/nulib/workflows/
  6. -
  7. Join design discussion for Nickel workflows
  8. -
-
-

Last Updated: January 2025 -Status: PLANNED - Nushell workflows available as interim solution -Estimated Availability: Q2-Q3 2025 -Priority: High (production workflows depend on this)

-

REST API Reference

-

This document provides comprehensive documentation for all REST API endpoints in provisioning.

-

Overview

-

Provisioning exposes two main REST APIs:

-
    -
  • Orchestrator API (Port 8080): Core workflow management and batch operations
  • -
  • Control Center API (Port 9080): Authentication, authorization, and policy management
  • -
-

Base URLs

-
    -
  • Orchestrator: http://localhost:9090
  • -
  • Control Center: http://localhost:9080
  • -
-

Authentication

-

JWT Authentication

-

All API endpoints (except health checks) require JWT authentication via the Authorization header:

-
Authorization: Bearer <jwt_token>
+

Using UpCloud or AWS? Change provider:

+
metadata.provider = "upcloud"  # or "aws"
 
-

Getting Access Token

-
POST /auth/login
-Content-Type: application/json
+

Step 3: Validate Configuration (30 seconds)

+
# Validate Nickel schema
+nickel typecheck infra/demo-server.ncl
 
+# Validate provisioning configuration
+provisioning validate config
+
+# Preview what will be created
+provisioning server create --check --infra demo-server
+
+

Expected output:

+
Infrastructure Plan: demo-server
+Provider: local
+Servers to create: 1
+  - web-01 (small, role: web)
+Task services: containerd
+
+Estimated resources:
+  CPU: 2 cores
+  RAM: 2 GB
+  Disk: 10 GB
+
+

Step 4: Create Infrastructure (2 minutes)

+
# Create server
+provisioning server create --infra demo-server --yes
+
+# Monitor progress
+provisioning server status web-01
+
+

Progress indicators:

+
Creating server: web-01...
+  [████████████████████████] 100% - Server provisioned
+  [████████████████████████] 100% - SSH configured
+  [████████████████████████] 100% - Network ready
+
+Server web-01 created successfully
+IP Address: 10.0.1.10
+Status: running
+
+

Step 5: Install Task Service (1 minute)

+
# Install containerd
+provisioning taskserv create containerd --infra demo-server
+
+# Verify installation
+provisioning taskserv status containerd
+
+

Output:

+
Installing containerd on web-01...
+  [████████████████████████] 100% - Dependencies resolved
+  [████████████████████████] 100% - Containerd installed
+  [████████████████████████] 100% - Service started
+  [████████████████████████] 100% - Health check passed
+
+Containerd v1.7.0 installed successfully
+
+

Step 6: Verify Deployment (30 seconds)

+
# SSH into server
+provisioning server ssh web-01
+
+# Inside server - verify containerd
+sudo systemctl status containerd
+sudo ctr version
+
+# Exit server
+exit
+
+

What You’ve Accomplished

+

In 5 minutes, you’ve:

+
    +
  • Created a workspace for infrastructure management
  • +
  • Defined infrastructure using type-safe Nickel schemas
  • +
  • Validated configuration before deployment
  • +
  • Provisioned a server on your chosen provider
  • +
  • Installed and configured containerd
  • +
  • Verified the deployment
  • +
+

Common Workflows

+

List Resources

+
# List all servers
+provisioning server list
+
+# List task services
+provisioning taskserv list
+
+# Show workspace info
+provisioning workspace info
+
+

Modify Infrastructure

+
# Edit infrastructure schema
+nano infra/demo-server.ncl
+
+# Validate changes
+provisioning validate config --infra demo-server
+
+# Apply changes
+provisioning server update --infra demo-server
+
+

Cleanup

+
# Remove task service
+provisioning taskserv delete containerd --infra demo-server
+
+# Delete server
+provisioning server delete web-01 --yes
+
+# Remove workspace
+cd ..
+rm -rf quickstart-demo
+
+

Next Steps

+

Deploy Kubernetes

+

Ready for something more complex?

+
# infra/kubernetes-cluster.ncl
 {
-  "username": "admin",
-  "password": "password",
-  "mfa_code": "123456"
-}
-
-

Orchestrator API Endpoints

-

Health Check

-

GET /health

-

Check orchestrator health status.

-

Response:

-
{
-  "success": true,
-  "data": "Orchestrator is healthy"
-}
-
-

Task Management

-

GET /tasks

-

List all workflow tasks.

-

Query Parameters:

-
    -
  • status (optional): Filter by task status (Pending, Running, Completed, Failed, Cancelled)
  • -
  • limit (optional): Maximum number of results
  • -
  • offset (optional): Pagination offset
  • -
-

Response:

-
{
-  "success": true,
-  "data": [
-    {
-      "id": "uuid-string",
-      "name": "create_servers",
-      "command": "/usr/local/provisioning servers create",
-      "args": ["--infra", "production", "--wait"],
-      "dependencies": [],
-      "status": "Completed",
-      "created_at": "2025-09-26T10:00:00Z",
-      "started_at": "2025-09-26T10:00:05Z",
-      "completed_at": "2025-09-26T10:05:30Z",
-      "output": "Successfully created 3 servers",
-      "error": null
-    }
-  ]
-}
-
-

GET /tasks/

-

Get specific task status and details.

-

Path Parameters:

-
    -
  • id: Task UUID
  • -
-

Response:

-
{
-  "success": true,
-  "data": {
-    "id": "uuid-string",
-    "name": "create_servers",
-    "command": "/usr/local/provisioning servers create",
-    "args": ["--infra", "production", "--wait"],
-    "dependencies": [],
-    "status": "Running",
-    "created_at": "2025-09-26T10:00:00Z",
-    "started_at": "2025-09-26T10:00:05Z",
-    "completed_at": null,
-    "output": null,
-    "error": null
+  metadata = {
+    name = "k8s-cluster"
+    provider = "upcloud"
+  }
+
+  infrastructure = {
+    servers = [
+      {name = "control-01", plan = "medium", role = "control"}
+      {name = "worker-01", plan = "large", role = "worker"}
+      {name = "worker-02", plan = "large", role = "worker"}
+    ]
+  }
+
+  services = {
+    taskservs = ["kubernetes", "cilium", "rook-ceph"]
   }
 }
 
-

Workflow Submission

-

POST /workflows/servers/create

-

Submit server creation workflow.

-

Request Body:

-
{
-  "infra": "production",
-  "settings": "config.ncl",
-  "check_mode": false,
-  "wait": true
-}
+
provisioning server create --infra kubernetes-cluster --yes
+provisioning taskserv create kubernetes --infra kubernetes-cluster
 
-

Response:

-
{
-  "success": true,
-  "data": "uuid-task-id"
-}
-
-

POST /workflows/taskserv/create

-

Submit task service workflow.

-

Request Body:

-
{
-  "operation": "create",
-  "taskserv": "kubernetes",
-  "infra": "production",
-  "settings": "config.ncl",
-  "check_mode": false,
-  "wait": true
-}
-
-

Response:

-
{
-  "success": true,
-  "data": "uuid-task-id"
-}
-
-

POST /workflows/cluster/create

-

Submit cluster workflow.

-

Request Body:

-
{
-  "operation": "create",
-  "cluster_type": "buildkit",
-  "infra": "production",
-  "settings": "config.ncl",
-  "check_mode": false,
-  "wait": true
-}
-
-

Response:

-
{
-  "success": true,
-  "data": "uuid-task-id"
-}
-
-

Batch Operations

-

POST /batch/execute

-

Execute batch workflow operation.

-

Request Body:

-
{
-  "name": "multi_cloud_deployment",
-  "version": "1.0.0",
-  "storage_backend": "surrealdb",
-  "parallel_limit": 5,
-  "rollback_enabled": true,
-  "operations": [
-    {
-      "id": "upcloud_servers",
-      "type": "server_batch",
-      "provider": "upcloud",
-      "dependencies": [],
-      "server_configs": [
-        {"name": "web-01", "plan": "1xCPU-2 GB", "zone": "de-fra1"},
-        {"name": "web-02", "plan": "1xCPU-2 GB", "zone": "us-nyc1"}
-      ]
-    },
-    {
-      "id": "aws_taskservs",
-      "type": "taskserv_batch",
-      "provider": "aws",
-      "dependencies": ["upcloud_servers"],
-      "taskservs": ["kubernetes", "cilium", "containerd"]
-    }
-  ]
-}
-
-

Response:

-
{
-  "success": true,
-  "data": {
-    "batch_id": "uuid-string",
-    "status": "Running",
-    "operations": [
+

Multi-Cloud Deployment

+

Deploy to multiple providers simultaneously:

+
# infra/multi-cloud.ncl
+{
+  batch_workflow = {
+    operations = [
       {
-        "id": "upcloud_servers",
-        "status": "Pending",
-        "progress": 0.0
-      },
+        id = "aws-cluster"
+        provider = "aws"
+        servers = [{name = "aws-web-01", plan = "t3.medium"}]
+      }
       {
-        "id": "aws_taskservs",
-        "status": "Pending",
-        "progress": 0.0
+        id = "upcloud-cluster"
+        provider = "upcloud"
+        servers = [{name = "upcloud-web-01", plan = "medium"}]
       }
     ]
   }
 }
 
-

GET /batch/operations

-

List all batch operations.

-

Response:

-
{
-  "success": true,
-  "data": [
-    {
-      "batch_id": "uuid-string",
-      "name": "multi_cloud_deployment",
-      "status": "Running",
-      "created_at": "2025-09-26T10:00:00Z",
-      "operations": [...]
-    }
-  ]
-}
+
provisioning batch submit infra/multi-cloud.ncl
 
-

GET /batch/operations/

-

Get batch operation status.

-

Path Parameters:

-
    -
  • id: Batch operation ID
  • -
-

Response:

-
{
-  "success": true,
-  "data": {
-    "batch_id": "uuid-string",
-    "name": "multi_cloud_deployment",
-    "status": "Running",
-    "operations": [
-      {
-        "id": "upcloud_servers",
-        "status": "Completed",
-        "progress": 100.0,
-        "results": {...}
-      }
-    ]
-  }
-}
-
-

POST /batch/operations/{id}/cancel

-

Cancel running batch operation.

-

Path Parameters:

-
    -
  • id: Batch operation ID
  • -
-

Response:

-
{
-  "success": true,
-  "data": "Operation cancelled"
-}
-
-

State Management

-

GET /state/workflows/{id}/progress

-

Get real-time workflow progress.

-

Path Parameters:

-
    -
  • id: Workflow ID
  • -
-

Response:

-
{
-  "success": true,
-  "data": {
-    "workflow_id": "uuid-string",
-    "progress": 75.5,
-    "current_step": "Installing Kubernetes",
-    "total_steps": 8,
-    "completed_steps": 6,
-    "estimated_time_remaining": 180
-  }
-}
-
-

GET /state/workflows/{id}/snapshots

-

Get workflow state snapshots.

-

Path Parameters:

-
    -
  • id: Workflow ID
  • -
-

Response:

-
{
-  "success": true,
-  "data": [
-    {
-      "snapshot_id": "uuid-string",
-      "timestamp": "2025-09-26T10:00:00Z",
-      "state": "running",
-      "details": {...}
-    }
-  ]
-}
-
-

GET /state/system/metrics

-

Get system-wide metrics.

-

Response:

-
{
-  "success": true,
-  "data": {
-    "total_workflows": 150,
-    "active_workflows": 5,
-    "completed_workflows": 140,
-    "failed_workflows": 5,
-    "system_load": {
-      "cpu_usage": 45.2,
-      "memory_usage": 2048,
-      "disk_usage": 75.5
-    }
-  }
-}
-
-

GET /state/system/health

-

Get system health status.

-

Response:

-
{
-  "success": true,
-  "data": {
-    "overall_status": "Healthy",
-    "components": {
-      "storage": "Healthy",
-      "batch_coordinator": "Healthy",
-      "monitoring": "Healthy"
-    },
-    "last_check": "2025-09-26T10:00:00Z"
-  }
-}
-
-

GET /state/statistics

-

Get state manager statistics.

-

Response:

-
{
-  "success": true,
-  "data": {
-    "total_workflows": 150,
-    "active_snapshots": 25,
-    "storage_usage": "245 MB",
-    "average_workflow_duration": 300
-  }
-}
-
-

Rollback and Recovery

-

POST /rollback/checkpoints

-

Create new checkpoint.

-

Request Body:

-
{
-  "name": "before_major_update",
-  "description": "Checkpoint before deploying v2.0.0"
-}
-
-

Response:

-
{
-  "success": true,
-  "data": "checkpoint-uuid"
-}
-
-

GET /rollback/checkpoints

-

List all checkpoints.

-

Response:

-
{
-  "success": true,
-  "data": [
-    {
-      "id": "checkpoint-uuid",
-      "name": "before_major_update",
-      "description": "Checkpoint before deploying v2.0.0",
-      "created_at": "2025-09-26T10:00:00Z",
-      "size": "150 MB"
-    }
-  ]
-}
-
-

GET /rollback/checkpoints/

-

Get specific checkpoint details.

-

Path Parameters:

-
    -
  • id: Checkpoint ID
  • -
-

Response:

-
{
-  "success": true,
-  "data": {
-    "id": "checkpoint-uuid",
-    "name": "before_major_update",
-    "description": "Checkpoint before deploying v2.0.0",
-    "created_at": "2025-09-26T10:00:00Z",
-    "size": "150 MB",
-    "operations_count": 25
-  }
-}
-
-

POST /rollback/execute

-

Execute rollback operation.

-

Request Body:

-
{
-  "checkpoint_id": "checkpoint-uuid"
-}
-
-

Or for partial rollback:

-
{
-  "operation_ids": ["op-1", "op-2", "op-3"]
-}
-
-

Response:

-
{
-  "success": true,
-  "data": {
-    "rollback_id": "rollback-uuid",
-    "success": true,
-    "operations_executed": 25,
-    "operations_failed": 0,
-    "duration": 45.5
-  }
-}
-
-

POST /rollback/restore/

-

Restore system state from checkpoint.

-

Path Parameters:

-
    -
  • id: Checkpoint ID
  • -
-

Response:

-
{
-  "success": true,
-  "data": "State restored from checkpoint checkpoint-uuid"
-}
-
-

GET /rollback/statistics

-

Get rollback system statistics.

-

Response:

-
{
-  "success": true,
-  "data": {
-    "total_checkpoints": 10,
-    "total_rollbacks": 3,
-    "success_rate": 100.0,
-    "average_rollback_time": 30.5
-  }
-}
-
-

Control Center API Endpoints

-

Authentication

-

POST /auth/login

-

Authenticate user and get JWT token.

-

Request Body:

-
{
-  "username": "admin",
-  "password": "secure_password",
-  "mfa_code": "123456"
-}
-
-

Response:

-
{
-  "success": true,
-  "data": {
-    "token": "jwt-token-string",
-    "expires_at": "2025-09-26T18:00:00Z",
-    "user": {
-      "id": "user-uuid",
-      "username": "admin",
-      "email": "admin@example.com",
-      "roles": ["admin", "operator"]
-    }
-  }
-}
-
-

POST /auth/refresh

-

Refresh JWT token.

-

Request Body:

-
{
-  "token": "current-jwt-token"
-}
-
-

Response:

-
{
-  "success": true,
-  "data": {
-    "token": "new-jwt-token",
-    "expires_at": "2025-09-26T18:00:00Z"
-  }
-}
-
-

POST /auth/logout

-

Logout and invalidate token.

-

Response:

-
{
-  "success": true,
-  "data": "Successfully logged out"
-}
-
-

User Management

-

GET /users

-

List all users.

-

Query Parameters:

-
    -
  • role (optional): Filter by role
  • -
  • enabled (optional): Filter by enabled status
  • -
-

Response:

-
{
-  "success": true,
-  "data": [
-    {
-      "id": "user-uuid",
-      "username": "admin",
-      "email": "admin@example.com",
-      "roles": ["admin"],
-      "enabled": true,
-      "created_at": "2025-09-26T10:00:00Z",
-      "last_login": "2025-09-26T12:00:00Z"
-    }
-  ]
-}
-
-

POST /users

-

Create new user.

-

Request Body:

-
{
-  "username": "newuser",
-  "email": "newuser@example.com",
-  "password": "secure_password",
-  "roles": ["operator"],
-  "enabled": true
-}
-
-

Response:

-
{
-  "success": true,
-  "data": {
-    "id": "new-user-uuid",
-    "username": "newuser",
-    "email": "newuser@example.com",
-    "roles": ["operator"],
-    "enabled": true
-  }
-}
-
-

PUT /users/

-

Update existing user.

-

Path Parameters:

-
    -
  • id: User ID
  • -
-

Request Body:

-
{
-  "email": "updated@example.com",
-  "roles": ["admin", "operator"],
-  "enabled": false
-}
-
-

Response:

-
{
-  "success": true,
-  "data": "User updated successfully"
-}
-
-

DELETE /users/

-

Delete user.

-

Path Parameters:

-
    -
  • id: User ID
  • -
-

Response:

-
{
-  "success": true,
-  "data": "User deleted successfully"
-}
-
-

Policy Management

-

GET /policies

-

List all policies.

-

Response:

-
{
-  "success": true,
-  "data": [
-    {
-      "id": "policy-uuid",
-      "name": "admin_access_policy",
-      "version": "1.0.0",
-      "rules": [...],
-      "created_at": "2025-09-26T10:00:00Z",
-      "enabled": true
-    }
-  ]
-}
-
-

POST /policies

-

Create new policy.

-

Request Body:

-
{
-  "name": "new_policy",
-  "version": "1.0.0",
-  "rules": [
-    {
-      "effect": "Allow",
-      "resource": "servers:*",
-      "action": ["create", "read"],
-      "condition": "user.role == 'admin'"
-    }
-  ]
-}
-
-

Response:

-
{
-  "success": true,
-  "data": {
-    "id": "new-policy-uuid",
-    "name": "new_policy",
-    "version": "1.0.0"
-  }
-}
-
-

PUT /policies/

-

Update policy.

-

Path Parameters:

-
    -
  • id: Policy ID
  • -
-

Request Body:

-
{
-  "name": "updated_policy",
-  "rules": [...]
-}
-
-

Response:

-
{
-  "success": true,
-  "data": "Policy updated successfully"
-}
-
-

Audit Logging

-

GET /audit/logs

-

Get audit logs.

-

Query Parameters:

-
    -
  • user_id (optional): Filter by user
  • -
  • action (optional): Filter by action
  • -
  • resource (optional): Filter by resource
  • -
  • from (optional): Start date (ISO 8601)
  • -
  • to (optional): End date (ISO 8601)
  • -
  • limit (optional): Maximum results
  • -
  • offset (optional): Pagination offset
  • -
-

Response:

-
{
-  "success": true,
-  "data": [
-    {
-      "id": "audit-log-uuid",
-      "timestamp": "2025-09-26T10:00:00Z",
-      "user_id": "user-uuid",
-      "action": "server.create",
-      "resource": "servers/web-01",
-      "result": "success",
-      "details": {...}
-    }
-  ]
-}
-
-

Error Responses

-

All endpoints may return error responses in this format:

-
{
-  "success": false,
-  "error": "Detailed error message"
-}
-
-

HTTP Status Codes

-
    -
  • 200 OK: Successful request
  • -
  • 201 Created: Resource created successfully
  • -
  • 400 Bad Request: Invalid request parameters
  • -
  • 401 Unauthorized: Authentication required or invalid
  • -
  • 403 Forbidden: Permission denied
  • -
  • 404 Not Found: Resource not found
  • -
  • 422 Unprocessable Entity: Validation error
  • -
  • 500 Internal Server Error: Server error
  • -
-

Rate Limiting

-

API endpoints are rate-limited:

-
    -
  • Authentication: 5 requests per minute per IP
  • -
  • General APIs: 100 requests per minute per user
  • -
  • Batch operations: 10 requests per minute per user
  • -
-

Rate limit headers are included in responses:

-
X-RateLimit-Limit: 100
-X-RateLimit-Remaining: 95
-X-RateLimit-Reset: 1632150000
-
-

Monitoring Endpoints

-

GET /metrics

-

Prometheus-compatible metrics endpoint.

-

Response:

-
# HELP orchestrator_tasks_total Total number of tasks
-# TYPE orchestrator_tasks_total counter
-orchestrator_tasks_total{status="completed"} 150
-orchestrator_tasks_total{status="failed"} 5
-
-# HELP orchestrator_task_duration_seconds Task execution duration
-# TYPE orchestrator_task_duration_seconds histogram
-orchestrator_task_duration_seconds_bucket{le="10"} 50
-orchestrator_task_duration_seconds_bucket{le="30"} 120
-orchestrator_task_duration_seconds_bucket{le="+Inf"} 155
-
-

WebSocket /ws

-

Real-time event streaming via WebSocket connection.

-

Connection:

-
const ws = new WebSocket('ws://localhost:9090/ws?token=jwt-token');
-
-ws.onmessage = function(event) {
-  const data = JSON.parse(event.data);
-  console.log('Event:', data);
-};
-
-

Event Format:

-
{
-  "event_type": "TaskStatusChanged",
-  "timestamp": "2025-09-26T10:00:00Z",
-  "data": {
-    "task_id": "uuid-string",
-    "status": "completed"
-  },
-  "metadata": {
-    "task_id": "uuid-string",
-    "status": "completed"
-  }
-}
-
-

SDK Examples

-

Python SDK Example

-
import requests
-
-class ProvisioningClient:
-    def __init__(self, base_url, token):
-        self.base_url = base_url
-        self.headers = {
-            'Authorization': f'Bearer {token}',
-            'Content-Type': 'application/json'
-        }
-
-    def create_server_workflow(self, infra, settings, check_mode=False):
-        payload = {
-            'infra': infra,
-            'settings': settings,
-            'check_mode': check_mode,
-            'wait': True
-        }
-        response = requests.post(
-            f'{self.base_url}/workflows/servers/create',
-            json=payload,
-            headers=self.headers
-        )
-        return response.json()
-
-    def get_task_status(self, task_id):
-        response = requests.get(
-            f'{self.base_url}/tasks/{task_id}',
-            headers=self.headers
-        )
-        return response.json()
-
-# Usage
-client = ProvisioningClient('http://localhost:9090', 'your-jwt-token')
-result = client.create_server_workflow('production', 'config.ncl')
-print(f"Task ID: {result['data']}")
-
-

JavaScript/Node.js SDK Example

-
const axios = require('axios');
-
-class ProvisioningClient {
-  constructor(baseUrl, token) {
-    this.client = axios.create({
-      baseURL: baseUrl,
-      headers: {
-        'Authorization': `Bearer ${token}`,
-        'Content-Type': 'application/json'
-      }
-    });
-  }
-
-  async createServerWorkflow(infra, settings, checkMode = false) {
-    const response = await this.client.post('/workflows/servers/create', {
-      infra,
-      settings,
-      check_mode: checkMode,
-      wait: true
-    });
-    return response.data;
-  }
-
-  async getTaskStatus(taskId) {
-    const response = await this.client.get(`/tasks/${taskId}`);
-    return response.data;
-  }
-}
-
-// Usage
-const client = new ProvisioningClient('http://localhost:9090', 'your-jwt-token');
-const result = await client.createServerWorkflow('production', 'config.ncl');
-console.log(`Task ID: ${result.data}`);
-
-

Webhook Integration

-

The system supports webhooks for external integrations:

-

Webhook Configuration

-

Configure webhooks in the system configuration:

-
[webhooks]
-enabled = true
-endpoints = [
-  {
-    url = "https://your-system.com/webhook"
-    events = ["task.completed", "task.failed", "batch.completed"]
-    secret = "webhook-secret"
-  }
-]
-
-

Webhook Payload

-
{
-  "event": "task.completed",
-  "timestamp": "2025-09-26T10:00:00Z",
-  "data": {
-    "task_id": "uuid-string",
-    "status": "completed",
-    "output": "Task completed successfully"
-  },
-  "signature": "sha256=calculated-signature"
-}
-
-

Pagination

-

For endpoints that return lists, use pagination parameters:

-
    -
  • limit: Maximum number of items per page (default: 50, max: 1000)
  • -
  • offset: Number of items to skip
  • -
-

Pagination metadata is included in response headers:

-
X-Total-Count: 1500
-X-Limit: 50
-X-Offset: 100
-Link: </api/endpoint?offset=150&limit=50>; rel="next"
-
-

API Versioning

-

The API uses header-based versioning:

-
Accept: application/vnd.provisioning.v1+json
-
-

Current version: v1

-

Testing

-

Use the included test suite to validate API functionality:

-
# Run API integration tests
-cd src/orchestrator
-cargo test --test api_tests
-
-# Run load tests
-cargo test --test load_tests --release
-
-

WebSocket API Reference

-

This document provides comprehensive documentation for the WebSocket API used for real-time monitoring, event streaming, and live updates in -provisioning.

-

Overview

-

The WebSocket API enables real-time communication between clients and the provisioning orchestrator, providing:

-
    -
  • Live workflow progress updates
  • -
  • System health monitoring
  • -
  • Event streaming
  • -
  • Real-time metrics
  • -
  • Interactive debugging sessions
  • -
-

WebSocket Endpoints

-

Primary WebSocket Endpoint

-

ws://localhost:9090/ws

-

The main WebSocket endpoint for real-time events and monitoring.

-

Connection Parameters:

-
    -
  • token: JWT authentication token (required)
  • -
  • events: Comma-separated list of event types to subscribe to (optional)
  • -
  • batch_size: Maximum number of events per message (default: 10)
  • -
  • compression: Enable message compression (default: false)
  • -
-

Example Connection:

-
const ws = new WebSocket('ws://localhost:9090/ws?token=jwt-token&events=task,batch,system');
-
-

Specialized WebSocket Endpoints

-

ws://localhost:9090/metrics

-

Real-time metrics streaming endpoint.

-

Features:

-
    -
  • Live system metrics
  • -
  • Performance data
  • -
  • Resource utilization
  • -
  • Custom metric streams
  • -
-

ws://localhost:9090/logs

-

Live log streaming endpoint.

-

Features:

-
    -
  • Real-time log tailing
  • -
  • Log level filtering
  • -
  • Component-specific logs
  • -
  • Search and filtering
  • -
-

Authentication

-

JWT Token Authentication

-

All WebSocket connections require authentication via JWT token:

-
// Include token in connection URL
-const ws = new WebSocket('ws://localhost:9090/ws?token=' + jwtToken);
-
-// Or send token after connection
-ws.onopen = function() {
-  ws.send(JSON.stringify({
-    type: 'auth',
-    token: jwtToken
-  }));
-};
-
-

Connection Authentication Flow

-
    -
  1. Initial Connection: Client connects with token parameter
  2. -
  3. Token Validation: Server validates JWT token
  4. -
  5. Authorization: Server checks token permissions
  6. -
  7. Subscription: Client subscribes to event types
  8. -
  9. Event Stream: Server begins streaming events
  10. -
-

Event Types and Schemas

-

Core Event Types

-

Task Status Changed

-

Fired when a workflow task status changes.

-
{
-  "event_type": "TaskStatusChanged",
-  "timestamp": "2025-09-26T10:00:00Z",
-  "data": {
-    "task_id": "uuid-string",
-    "name": "create_servers",
-    "status": "Running",
-    "previous_status": "Pending",
-    "progress": 45.5
-  },
-  "metadata": {
-    "task_id": "uuid-string",
-    "workflow_type": "server_creation",
-    "infra": "production"
-  }
-}
-
-

Batch Operation Update

-

Fired when batch operation status changes.

-
{
-  "event_type": "BatchOperationUpdate",
-  "timestamp": "2025-09-26T10:00:00Z",
-  "data": {
-    "batch_id": "uuid-string",
-    "name": "multi_cloud_deployment",
-    "status": "Running",
-    "progress": 65.0,
-    "operations": [
-      {
-        "id": "upcloud_servers",
-        "status": "Completed",
-        "progress": 100.0
-      },
-      {
-        "id": "aws_taskservs",
-        "status": "Running",
-        "progress": 30.0
-      }
-    ]
-  },
-  "metadata": {
-    "total_operations": 5,
-    "completed_operations": 2,
-    "failed_operations": 0
-  }
-}
-
-

System Health Update

-

Fired when system health status changes.

-
{
-  "event_type": "SystemHealthUpdate",
-  "timestamp": "2025-09-26T10:00:00Z",
-  "data": {
-    "overall_status": "Healthy",
-    "components": {
-      "storage": {
-        "status": "Healthy",
-        "last_check": "2025-09-26T09:59:55Z"
-      },
-      "batch_coordinator": {
-        "status": "Warning",
-        "last_check": "2025-09-26T09:59:55Z",
-        "message": "High memory usage"
-      }
-    },
-    "metrics": {
-      "cpu_usage": 45.2,
-      "memory_usage": 2048,
-      "disk_usage": 75.5,
-      "active_workflows": 5
-    }
-  },
-  "metadata": {
-    "check_interval": 30,
-    "next_check": "2025-09-26T10:00:30Z"
-  }
-}
-
-

Workflow Progress Update

-

Fired when workflow progress changes.

-
{
-  "event_type": "WorkflowProgressUpdate",
-  "timestamp": "2025-09-26T10:00:00Z",
-  "data": {
-    "workflow_id": "uuid-string",
-    "name": "kubernetes_deployment",
-    "progress": 75.0,
-    "current_step": "Installing CNI",
-    "total_steps": 8,
-    "completed_steps": 6,
-    "estimated_time_remaining": 120,
-    "step_details": {
-      "step_name": "Installing CNI",
-      "step_progress": 45.0,
-      "step_message": "Downloading Cilium components"
-    }
-  },
-  "metadata": {
-    "infra": "production",
-    "provider": "upcloud",
-    "started_at": "2025-09-26T09:45:00Z"
-  }
-}
-
-

Log Entry

-

Real-time log streaming.

-
{
-  "event_type": "LogEntry",
-  "timestamp": "2025-09-26T10:00:00Z",
-  "data": {
-    "level": "INFO",
-    "message": "Server web-01 created successfully",
-    "component": "server-manager",
-    "task_id": "uuid-string",
-    "details": {
-      "server_id": "server-uuid",
-      "hostname": "web-01",
-      "ip_address": "10.0.1.100"
-    }
-  },
-  "metadata": {
-    "source": "orchestrator",
-    "thread": "worker-1"
-  }
-}
-
-

Metric Update

-

Real-time metrics streaming.

-
{
-  "event_type": "MetricUpdate",
-  "timestamp": "2025-09-26T10:00:00Z",
-  "data": {
-    "metric_name": "workflow_duration",
-    "metric_type": "histogram",
-    "value": 180.5,
-    "labels": {
-      "workflow_type": "server_creation",
-      "status": "completed",
-      "infra": "production"
-    }
-  },
-  "metadata": {
-    "interval": 15,
-    "aggregation": "average"
-  }
-}
-
-

Custom Event Types

-

Applications can define custom event types:

-
{
-  "event_type": "CustomApplicationEvent",
-  "timestamp": "2025-09-26T10:00:00Z",
-  "data": {
-    // Custom event data
-  },
-  "metadata": {
-    "custom_field": "custom_value"
-  }
-}
-
-

Client-Side JavaScript API

-

Connection Management

-
class ProvisioningWebSocket {
-  constructor(baseUrl, token, options = {}) {
-    this.baseUrl = baseUrl;
-    this.token = token;
-    this.options = {
-      reconnect: true,
-      reconnectInterval: 5000,
-      maxReconnectAttempts: 10,
-      ...options
-    };
-    this.ws = null;
-    this.reconnectAttempts = 0;
-    this.eventHandlers = new Map();
-  }
-
-  connect() {
-    const wsUrl = `${this.baseUrl}/ws?token=${this.token}`;
-    this.ws = new WebSocket(wsUrl);
-
-    this.ws.onopen = (event) => {
-      console.log('WebSocket connected');
-      this.reconnectAttempts = 0;
-      this.emit('connected', event);
-    };
-
-    this.ws.onmessage = (event) => {
-      try {
-        const message = JSON.parse(event.data);
-        this.handleMessage(message);
-      } catch (error) {
-        console.error('Failed to parse WebSocket message:', error);
-      }
-    };
-
-    this.ws.onclose = (event) => {
-      console.log('WebSocket disconnected');
-      this.emit('disconnected', event);
-
-      if (this.options.reconnect && this.reconnectAttempts < this.options.maxReconnectAttempts) {
-        setTimeout(() => {
-          this.reconnectAttempts++;
-          console.log(`Reconnecting... (${this.reconnectAttempts}/${this.options.maxReconnectAttempts})`);
-          this.connect();
-        }, this.options.reconnectInterval);
-      }
-    };
-
-    this.ws.onerror = (error) => {
-      console.error('WebSocket error:', error);
-      this.emit('error', error);
-    };
-  }
-
-  handleMessage(message) {
-    if (message.event_type) {
-      this.emit(message.event_type, message);
-      this.emit('message', message);
-    }
-  }
-
-  on(eventType, handler) {
-    if (!this.eventHandlers.has(eventType)) {
-      this.eventHandlers.set(eventType, []);
-    }
-    this.eventHandlers.get(eventType).push(handler);
-  }
-
-  off(eventType, handler) {
-    const handlers = this.eventHandlers.get(eventType);
-    if (handlers) {
-      const index = handlers.indexOf(handler);
-      if (index > -1) {
-        handlers.splice(index, 1);
-      }
-    }
-  }
-
-  emit(eventType, data) {
-    const handlers = this.eventHandlers.get(eventType);
-    if (handlers) {
-      handlers.forEach(handler => {
-        try {
-          handler(data);
-        } catch (error) {
-          console.error(`Error in event handler for ${eventType}:`, error);
-        }
-      });
-    }
-  }
-
-  send(message) {
-    if (this.ws && this.ws.readyState === WebSocket.OPEN) {
-      this.ws.send(JSON.stringify(message));
-    } else {
-      console.warn('WebSocket not connected, message not sent');
-    }
-  }
-
-  disconnect() {
-    this.options.reconnect = false;
-    if (this.ws) {
-      this.ws.close();
-    }
-  }
-
-  subscribe(eventTypes) {
-    this.send({
-      type: 'subscribe',
-      events: Array.isArray(eventTypes) ? eventTypes : [eventTypes]
-    });
-  }
-
-  unsubscribe(eventTypes) {
-    this.send({
-      type: 'unsubscribe',
-      events: Array.isArray(eventTypes) ? eventTypes : [eventTypes]
-    });
-  }
-}
-
-// Usage example
-const ws = new ProvisioningWebSocket('ws://localhost:9090', 'your-jwt-token');
-
-ws.on('TaskStatusChanged', (event) => {
-  console.log(`Task ${event.data.task_id} status: ${event.data.status}`);
-  updateTaskUI(event.data);
-});
-
-ws.on('WorkflowProgressUpdate', (event) => {
-  console.log(`Workflow progress: ${event.data.progress}%`);
-  updateProgressBar(event.data.progress);
-});
-
-ws.on('SystemHealthUpdate', (event) => {
-  console.log('System health:', event.data.overall_status);
-  updateHealthIndicator(event.data);
-});
-
-ws.connect();
-
-// Subscribe to specific events
-ws.subscribe(['TaskStatusChanged', 'WorkflowProgressUpdate']);
-
-

Real-Time Dashboard Example

-
class ProvisioningDashboard {
-  constructor(wsUrl, token) {
-    this.ws = new ProvisioningWebSocket(wsUrl, token);
-    this.setupEventHandlers();
-    this.connect();
-  }
-
-  setupEventHandlers() {
-    this.ws.on('TaskStatusChanged', this.handleTaskUpdate.bind(this));
-    this.ws.on('BatchOperationUpdate', this.handleBatchUpdate.bind(this));
-    this.ws.on('SystemHealthUpdate', this.handleHealthUpdate.bind(this));
-    this.ws.on('WorkflowProgressUpdate', this.handleProgressUpdate.bind(this));
-    this.ws.on('LogEntry', this.handleLogEntry.bind(this));
-  }
-
-  connect() {
-    this.ws.connect();
-  }
-
-  handleTaskUpdate(event) {
-    const taskCard = document.getElementById(`task-${event.data.task_id}`);
-    if (taskCard) {
-      taskCard.querySelector('.status').textContent = event.data.status;
-      taskCard.querySelector('.status').className = `status ${event.data.status.toLowerCase()}`;
-
-      if (event.data.progress) {
-        const progressBar = taskCard.querySelector('.progress-bar');
-        progressBar.style.width = `${event.data.progress}%`;
-      }
-    }
-  }
-
-  handleBatchUpdate(event) {
-    const batchCard = document.getElementById(`batch-${event.data.batch_id}`);
-    if (batchCard) {
-      batchCard.querySelector('.batch-progress').style.width = `${event.data.progress}%`;
-
-      event.data.operations.forEach(op => {
-        const opElement = batchCard.querySelector(`[data-operation="${op.id}"]`);
-        if (opElement) {
-          opElement.querySelector('.operation-status').textContent = op.status;
-          opElement.querySelector('.operation-progress').style.width = `${op.progress}%`;
-        }
-      });
-    }
-  }
-
-  handleHealthUpdate(event) {
-    const healthIndicator = document.getElementById('health-indicator');
-    healthIndicator.className = `health-indicator ${event.data.overall_status.toLowerCase()}`;
-    healthIndicator.textContent = event.data.overall_status;
-
-    const metricsPanel = document.getElementById('metrics-panel');
-    metricsPanel.innerHTML = `
-      <div class="metric">CPU: ${event.data.metrics.cpu_usage}%</div>
-      <div class="metric">Memory: ${Math.round(event.data.metrics.memory_usage / 1024 / 1024)}MB</div>
-      <div class="metric">Disk: ${event.data.metrics.disk_usage}%</div>
-      <div class="metric">Active Workflows: ${event.data.metrics.active_workflows}</div>
-    `;
-  }
-
-  handleProgressUpdate(event) {
-    const workflowCard = document.getElementById(`workflow-${event.data.workflow_id}`);
-    if (workflowCard) {
-      const progressBar = workflowCard.querySelector('.workflow-progress');
-      const stepInfo = workflowCard.querySelector('.step-info');
-
-      progressBar.style.width = `${event.data.progress}%`;
-      stepInfo.textContent = `${event.data.current_step} (${event.data.completed_steps}/${event.data.total_steps})`;
-
-      if (event.data.estimated_time_remaining) {
-        const timeRemaining = workflowCard.querySelector('.time-remaining');
-        timeRemaining.textContent = `${Math.round(event.data.estimated_time_remaining / 60)} min remaining`;
-      }
-    }
-  }
-
-  handleLogEntry(event) {
-    const logContainer = document.getElementById('log-container');
-    const logEntry = document.createElement('div');
-    logEntry.className = `log-entry log-${event.data.level.toLowerCase()}`;
-    logEntry.innerHTML = `
-      <span class="log-timestamp">${new Date(event.timestamp).toLocaleTimeString()}</span>
-      <span class="log-level">${event.data.level}</span>
-      <span class="log-component">${event.data.component}</span>
-      <span class="log-message">${event.data.message}</span>
-    `;
-
-    logContainer.appendChild(logEntry);
-
-    // Auto-scroll to bottom
-    logContainer.scrollTop = logContainer.scrollHeight;
-
-    // Limit log entries to prevent memory issues
-    const maxLogEntries = 1000;
-    if (logContainer.children.length > maxLogEntries) {
-      logContainer.removeChild(logContainer.firstChild);
-    }
-  }
-}
-
-// Initialize dashboard
-const dashboard = new ProvisioningDashboard('ws://localhost:9090', jwtToken);
-
-

Server-Side Implementation

-

Rust WebSocket Handler

-

The orchestrator implements WebSocket support using Axum and Tokio:

-
use axum::{
-    extract::{ws::WebSocket, ws::WebSocketUpgrade, Query, State},
-    response::Response,
-};
-use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
-use tokio::sync::broadcast;
-
-#[derive(Debug, Deserialize)]
-pub struct WsQuery {
-    token: String,
-    events: Option<String>,
-    batch_size: Option<usize>,
-    compression: Option<bool>,
-}
-
-#[derive(Debug, Clone, Serialize)]
-pub struct WebSocketMessage {
-    pub event_type: String,
-    pub timestamp: chrono::DateTime<chrono::Utc>,
-    pub data: serde_json::Value,
-    pub metadata: HashMap<String, String>,
-}
-
-pub async fn websocket_handler(
-    ws: WebSocketUpgrade,
-    Query(params): Query<WsQuery>,
-    State(state): State<SharedState>,
-) -> Response {
-    // Validate JWT token
-    let claims = match state.auth_service.validate_token(&params.token) {
-        Ok(claims) => claims,
-        Err(_) => return Response::builder()
-            .status(401)
-            .body("Unauthorized".into())
-            .unwrap(),
-    };
-
-    ws.on_upgrade(move |socket| handle_socket(socket, params, claims, state))
-}
-
-async fn handle_socket(
-    socket: WebSocket,
-    params: WsQuery,
-    claims: Claims,
-    state: SharedState,
-) {
-    let (mut sender, mut receiver) = socket.split();
-
-    // Subscribe to event stream
-    let mut event_rx = state.monitoring_system.subscribe_to_events().await;
-
-    // Parse requested event types
-    let requested_events: Vec<String> = params.events
-        .unwrap_or_default()
-        .split(',')
-        .map(|s| s.trim().to_string())
-        .filter(|s| !s.is_empty())
-        .collect();
-
-    // Handle incoming messages from client
-    let sender_task = tokio::spawn(async move {
-        while let Some(msg) = receiver.next().await {
-            if let Ok(msg) = msg {
-                if let Ok(text) = msg.to_text() {
-                    if let Ok(client_msg) = serde_json::from_str::<ClientMessage>(text) {
-                        handle_client_message(client_msg, &state).await;
-                    }
-                }
-            }
-        }
-    });
-
-    // Handle outgoing messages to client
-    let receiver_task = tokio::spawn(async move {
-        let mut batch = Vec::new();
-        let batch_size = params.batch_size.unwrap_or(10);
-
-        while let Ok(event) = event_rx.recv().await {
-            // Filter events based on subscription
-            if !requested_events.is_empty() && !requested_events.contains(&event.event_type) {
-                continue;
-            }
-
-            // Check permissions
-            if !has_event_permission(&claims, &event.event_type) {
-                continue;
-            }
-
-            batch.push(event);
-
-            // Send batch when full or after timeout
-            if batch.len() >= batch_size {
-                send_event_batch(&mut sender, &batch).await;
-                batch.clear();
-            }
-        }
-    });
-
-    // Wait for either task to complete
-    tokio::select! {
-        _ = sender_task => {},
-        _ = receiver_task => {},
-    }
-}
-
-#[derive(Debug, Deserialize)]
-struct ClientMessage {
-    #[serde(rename = "type")]
-    msg_type: String,
-    token: Option<String>,
-    events: Option<Vec<String>>,
-}
-
-async fn handle_client_message(msg: ClientMessage, state: &SharedState) {
-    match msg.msg_type.as_str() {
-        "subscribe" => {
-            // Handle event subscription
-        },
-        "unsubscribe" => {
-            // Handle event unsubscription
-        },
-        "auth" => {
-            // Handle re-authentication
-        },
-        _ => {
-            // Unknown message type
-        }
-    }
-}
-
-async fn send_event_batch(sender: &mut SplitSink<WebSocket, Message>, batch: &[WebSocketMessage]) {
-    let batch_msg = serde_json::json!({
-        "type": "batch",
-        "events": batch
-    });
-
-    if let Ok(msg_text) = serde_json::to_string(&batch_msg) {
-        if let Err(e) = sender.send(Message::Text(msg_text)).await {
-            eprintln!("Failed to send WebSocket message: {}", e);
-        }
-    }
-}
-
-fn has_event_permission(claims: &Claims, event_type: &str) -> bool {
-    // Check if user has permission to receive this event type
-    match event_type {
-        "SystemHealthUpdate" => claims.role.contains(&"admin".to_string()),
-        "LogEntry" => claims.role.contains(&"admin".to_string()) ||
-                     claims.role.contains(&"developer".to_string()),
-        _ => true, // Most events are accessible to all authenticated users
-    }
-}
-

Event Filtering and Subscriptions

-

Client-Side Filtering

-
// Subscribe to specific event types
-ws.subscribe(['TaskStatusChanged', 'WorkflowProgressUpdate']);
-
-// Subscribe with filters
-ws.send({
-  type: 'subscribe',
-  events: ['TaskStatusChanged'],
-  filters: {
-    task_name: 'create_servers',
-    status: ['Running', 'Completed', 'Failed']
-  }
-});
-
-// Advanced filtering
-ws.send({
-  type: 'subscribe',
-  events: ['LogEntry'],
-  filters: {
-    level: ['ERROR', 'WARN'],
-    component: ['server-manager', 'batch-coordinator'],
-    since: '2025-09-26T10:00:00Z'
-  }
-});
-
-

Server-Side Event Filtering

-

Events can be filtered on the server side based on:

-
    -
  • User permissions and roles
  • -
  • Event type subscriptions
  • -
  • Custom filter criteria
  • -
  • Rate limiting
  • -
-

Error Handling and Reconnection

-

Connection Errors

-
ws.on('error', (error) => {
-  console.error('WebSocket error:', error);
-
-  // Handle specific error types
-  if (error.code === 1006) {
-    // Abnormal closure, attempt reconnection
-    setTimeout(() => ws.connect(), 5000);
-  } else if (error.code === 1008) {
-    // Policy violation, check token
-    refreshTokenAndReconnect();
-  }
-});
-
-ws.on('disconnected', (event) => {
-  console.log(`WebSocket disconnected: ${event.code} - ${event.reason}`);
-
-  // Handle different close codes
-  switch (event.code) {
-    case 1000: // Normal closure
-      console.log('Connection closed normally');
-      break;
-    case 1001: // Going away
-      console.log('Server is shutting down');
-      break;
-    case 4001: // Custom: Token expired
-      refreshTokenAndReconnect();
-      break;
-    default:
-      // Attempt reconnection for other errors
-      if (shouldReconnect()) {
-        scheduleReconnection();
-      }
-  }
-});
-
-

Heartbeat and Keep-Alive

-
class ProvisioningWebSocket {
-  constructor(baseUrl, token, options = {}) {
-    // ... existing code ...
-    this.heartbeatInterval = options.heartbeatInterval || 30000;
-    this.heartbeatTimer = null;
-  }
-
-  connect() {
-    // ... existing connection code ...
-
-    this.ws.onopen = (event) => {
-      console.log('WebSocket connected');
-      this.startHeartbeat();
-      this.emit('connected', event);
-    };
-
-    this.ws.onclose = (event) => {
-      this.stopHeartbeat();
-      // ... existing close handling ...
-    };
-  }
-
-  startHeartbeat() {
-    this.heartbeatTimer = setInterval(() => {
-      if (this.ws && this.ws.readyState === WebSocket.OPEN) {
-        this.send({ type: 'ping' });
-      }
-    }, this.heartbeatInterval);
-  }
-
-  stopHeartbeat() {
-    if (this.heartbeatTimer) {
-      clearInterval(this.heartbeatTimer);
-      this.heartbeatTimer = null;
-    }
-  }
-
-  handleMessage(message) {
-    if (message.type === 'pong') {
-      // Heartbeat response received
-      return;
-    }
-
-    // ... existing message handling ...
-  }
-}
-
-

Performance Considerations

-

Message Batching

-

To improve performance, the server can batch multiple events into single WebSocket messages:

-
{
-  "type": "batch",
-  "timestamp": "2025-09-26T10:00:00Z",
-  "events": [
-    {
-      "event_type": "TaskStatusChanged",
-      "data": { ... }
-    },
-    {
-      "event_type": "WorkflowProgressUpdate",
-      "data": { ... }
-    }
-  ]
-}
-
-

Compression

-

Enable message compression for large events:

-
const ws = new WebSocket('ws://localhost:9090/ws?token=jwt&compression=true');
-
-

Rate Limiting

-

The server implements rate limiting to prevent abuse:

-
    -
  • Maximum connections per user: 10
  • -
  • Maximum messages per second: 100
  • -
  • Maximum subscription events: 50
  • -
-

Security Considerations

-

Authentication and Authorization

-
    -
  • All connections require valid JWT tokens
  • -
  • Tokens are validated on connection and periodically renewed
  • -
  • Event access is controlled by user roles and permissions
  • -
-

Message Validation

-
    -
  • All incoming messages are validated against schemas
  • -
  • Malformed messages are rejected
  • -
  • Rate limiting prevents DoS attacks
  • -
-

Data Sanitization

-
    -
  • All event data is sanitized before transmission
  • -
  • Sensitive information is filtered based on user permissions
  • -
  • PII and secrets are never transmitted
  • -
-

This WebSocket API provides a robust, real-time communication channel for monitoring and managing provisioning with comprehensive security and -performance features.

-

Extension Development API

-

This document provides comprehensive guidance for developing extensions for provisioning, including providers, task services, and cluster configurations.

-

Overview

-

Provisioning supports three types of extensions:

-
    -
  1. Providers: Cloud infrastructure providers (AWS, UpCloud, Local, etc.)
  2. -
  3. Task Services: Infrastructure components (Kubernetes, Cilium, Containerd, etc.)
  4. -
  5. Clusters: Complete deployment configurations (BuildKit, CI/CD, etc.)
  6. -
-

All extensions follow a standardized structure and API for seamless integration.

-

Extension Structure

-

Standard Directory Layout

-
extension-name/
-├── manifest.toml              # Extension metadata
-├── schemas/                   # Nickel configuration files
-│   ├── main.ncl               # Main schema
-│   ├── settings.ncl           # Settings schema
-│   ├── version.ncl            # Version configuration
-│   └── contracts.ncl          # Contract definitions
-├── nulib/                     # Nushell library modules
-│   ├── mod.nu                 # Main module
-│   ├── create.nu              # Creation operations
-│   ├── delete.nu              # Deletion operations
-│   └── utils.nu               # Utility functions
-├── templates/                 # Jinja2 templates
-│   ├── config.j2              # Configuration templates
-│   └── scripts/               # Script templates
-├── generate/                  # Code generation scripts
-│   └── generate.nu            # Generation commands
-├── README.md                  # Extension documentation
-└── metadata.toml              # Extension metadata
-
-

Provider Extension API

-

Provider Interface

-

All providers must implement the following interface:

-

Core Operations

-
    -
  • create-server(config: record) -> record
  • -
  • delete-server(server_id: string) -> null
  • -
  • list-servers() -> list<record>
  • -
  • get-server-info(server_id: string) -> record
  • -
  • start-server(server_id: string) -> null
  • -
  • stop-server(server_id: string) -> null
  • -
  • reboot-server(server_id: string) -> null
  • -
-

Pricing and Plans

-
    -
  • get-pricing() -> list<record>
  • -
  • get-plans() -> list<record>
  • -
  • get-zones() -> list<record>
  • -
-

SSH and Access

-
    -
  • get-ssh-access(server_id: string) -> record
  • -
  • configure-firewall(server_id: string, rules: list<record>) -> null
  • -
-

Provider Development Template

-

Nickel Configuration Schema

-

Create schemas/settings.ncl:

-
# Provider settings schema
-{
-  ProviderSettings = {
-    # Authentication configuration
-    auth | {
-      method | "api_key" | "certificate" | "oauth" | "basic",
-      api_key | String = null,
-      api_secret | String = null,
-      username | String = null,
-      password | String = null,
-      certificate_path | String = null,
-      private_key_path | String = null,
-    },
-
-    # API configuration
-    api | {
-      base_url | String,
-      version | String = "v1",
-      timeout | Number = 30,
-      retries | Number = 3,
-    },
-
-    # Default server configuration
-    defaults: {
-        plan?: str
-        zone?: str
-        os?: str
-        ssh_keys?: [str]
-        firewall_rules?: [FirewallRule]
-    }
-
-    # Provider-specific settings
-    features: {
-        load_balancer?: bool = false
-        storage_encryption?: bool = true
-        backup?: bool = true
-        monitoring?: bool = false
-    }
-}
-
-schema FirewallRule {
-    direction: "ingress" | "egress"
-    protocol: "tcp" | "udp" | "icmp"
-    port?: str
-    source?: str
-    destination?: str
-    action: "allow" | "deny"
-}
-
-schema ServerConfig {
-    hostname: str
-    plan: str
-    zone: str
-    os: str = "ubuntu-22.04"
-    ssh_keys: [str] = []
-    tags?: {str: str} = {}
-    firewall_rules?: [FirewallRule] = []
-    storage?: {
-        size?: int
-        type?: str
-        encrypted?: bool = true
-    }
-    network?: {
-        public_ip?: bool = true
-        private_network?: str
-        bandwidth?: int
-    }
-}
-
-

Nushell Implementation

-

Create nulib/mod.nu:

-
use std log
-
-# Provider name and version
-export const PROVIDER_NAME = "my-provider"
-export const PROVIDER_VERSION = "1.0.0"
-
-# Import sub-modules
-use create.nu *
-use delete.nu *
-use utils.nu *
-
-# Provider interface implementation
-export def "provider-info" [] -> record {
-    {
-        name: $PROVIDER_NAME,
-        version: $PROVIDER_VERSION,
-        type: "provider",
-        interface: "API",
-        supported_operations: [
-            "create-server", "delete-server", "list-servers",
-            "get-server-info", "start-server", "stop-server"
-        ],
-        required_auth: ["api_key", "api_secret"],
-        supported_os: ["ubuntu-22.04", "debian-11", "centos-8"],
-        regions: (get-zones).name
-    }
-}
-
-export def "validate-config" [config: record] -> record {
-    mut errors = []
-    mut warnings = []
-
-    # Validate authentication
-    if ($config | get -o "auth.api_key" | is-empty) {
-        $errors = ($errors | append "Missing API key")
-    }
-
-    if ($config | get -o "auth.api_secret" | is-empty) {
-        $errors = ($errors | append "Missing API secret")
-    }
-
-    # Validate API configuration
-    let api_url = ($config | get -o "api.base_url")
-    if ($api_url | is-empty) {
-        $errors = ($errors | append "Missing API base URL")
-    } else {
-        try {
-            http get $"($api_url)/health" | ignore
-        } catch {
-            $warnings = ($warnings | append "API endpoint not reachable")
-        }
-    }
-
-    {
-        valid: ($errors | is-empty),
-        errors: $errors,
-        warnings: $warnings
-    }
-}
-
-export def "test-connection" [config: record] -> record {
-    try {
-        let api_url = ($config | get "api.base_url")
-        let response = (http get $"($api_url)/account" --headers {
-            Authorization: $"Bearer ($config | get 'auth.api_key')"
-        })
-
-        {
-            success: true,
-            account_info: $response,
-            message: "Connection successful"
-        }
-    } catch {|e|
-        {
-            success: false,
-            error: ($e | get msg),
-            message: "Connection failed"
-        }
-    }
-}
-
-

Create nulib/create.nu:

-
use std log
-use utils.nu *
-
-export def "create-server" [
-    config: record       # Server configuration
-    --check              # Check mode only
-    --wait               # Wait for completion
-] -> record {
-    log info $"Creating server: ($config.hostname)"
-
-    if $check {
-        return {
-            action: "create-server",
-            hostname: $config.hostname,
-            check_mode: true,
-            would_create: true,
-            estimated_time: "2-5 minutes"
-        }
-    }
-
-    # Validate configuration
-    let validation = (validate-server-config $config)
-    if not $validation.valid {
-        error make {
-            msg: $"Invalid server configuration: ($validation.errors | str join ', ')"
-        }
-    }
-
-    # Prepare API request
-    let api_config = (get-api-config)
-    let request_body = {
-        hostname: $config.hostname,
-        plan: $config.plan,
-        zone: $config.zone,
-        os: $config.os,
-        ssh_keys: $config.ssh_keys,
-        tags: $config.tags,
-        firewall_rules: $config.firewall_rules
-    }
-
-    try {
-        let response = (http post $"($api_config.base_url)/servers" --headers {
-            Authorization: $"Bearer ($api_config.auth.api_key)"
-            Content-Type: "application/json"
-        } $request_body)
-
-        let server_id = ($response | get id)
-        log info $"Server creation initiated: ($server_id)"
-
-        if $wait {
-            let final_status = (wait-for-server-ready $server_id)
-            {
-                success: true,
-                server_id: $server_id,
-                hostname: $config.hostname,
-                status: $final_status,
-                ip_addresses: (get-server-ips $server_id),
-                ssh_access: (get-ssh-access $server_id)
-            }
-        } else {
-            {
-                success: true,
-                server_id: $server_id,
-                hostname: $config.hostname,
-                status: "creating",
-                message: "Server creation in progress"
-            }
-        }
-    } catch {|e|
-        error make {
-            msg: $"Server creation failed: ($e | get msg)"
-        }
-    }
-}
-
-def validate-server-config [config: record] -> record {
-    mut errors = []
-
-    # Required fields
-    if ($config | get -o hostname | is-empty) {
-        $errors = ($errors | append "Hostname is required")
-    }
-
-    if ($config | get -o plan | is-empty) {
-        $errors = ($errors | append "Plan is required")
-    }
-
-    if ($config | get -o zone | is-empty) {
-        $errors = ($errors | append "Zone is required")
-    }
-
-    # Validate plan exists
-    let available_plans = (get-plans)
-    if not ($config.plan in ($available_plans | get name)) {
-        $errors = ($errors | append $"Invalid plan: ($config.plan)")
-    }
-
-    # Validate zone exists
-    let available_zones = (get-zones)
-    if not ($config.zone in ($available_zones | get name)) {
-        $errors = ($errors | append $"Invalid zone: ($config.zone)")
-    }
-
-    {
-        valid: ($errors | is-empty),
-        errors: $errors
-    }
-}
-
-def wait-for-server-ready [server_id: string] -> string {
-    mut attempts = 0
-    let max_attempts = 60  # 10 minutes
-
-    while $attempts < $max_attempts {
-        let server_info = (get-server-info $server_id)
-        let status = ($server_info | get status)
-
-        match $status {
-            "running" => { return "running" },
-            "error" => { error make { msg: "Server creation failed" } },
-            _ => {
-                log info $"Server status: ($status), waiting..."
-                sleep 10sec
-                $attempts = $attempts + 1
-            }
-        }
-    }
-
-    error make { msg: "Server creation timeout" }
-}
-
-

Provider Registration

-

Add provider metadata in metadata.toml:

-
[extension]
-name = "my-provider"
-type = "provider"
-version = "1.0.0"
-description = "Custom cloud provider integration"
-author = "Your Name <your.email@example.com>"
-license = "MIT"
-
-[compatibility]
-provisioning_version = ">=2.0.0"
-nushell_version = ">=0.107.0"
-nickel_version = ">=1.15.0"
-
-[capabilities]
-server_management = true
-load_balancer = false
-storage_encryption = true
-backup = true
-monitoring = false
-
-[authentication]
-methods = ["api_key", "certificate"]
-required_fields = ["api_key", "api_secret"]
-
-[regions]
-default = "us-east-1"
-available = ["us-east-1", "us-west-2", "eu-west-1"]
-
-[support]
-documentation = "https://docs.example.com/provider"
-issues = "https://github.com/example/provider/issues"
-
-

Task Service Extension API

-

Task Service Interface

-

Task services must implement:

-

Core Operations

-
    -
  • install(config: record) -> record
  • -
  • uninstall(config: record) -> null
  • -
  • configure(config: record) -> null
  • -
  • status() -> record
  • -
  • restart() -> null
  • -
  • upgrade(version: string) -> record
  • -
-

Version Management

-
    -
  • get-current-version() -> string
  • -
  • get-available-versions() -> list<string>
  • -
  • check-updates() -> record
  • -
-

Task Service Development Template

-

Nickel Schema

-

Create schemas/version.ncl:

-
# Task service version configuration
-{
-  taskserv_version = {
-    name | String = "my-service",
-    version | String = "1.0.0",
-
-    # Version source configuration
-    source | {
-      type | String = "github",
-      repository | String,
-      release_pattern | String = "v{version}",
-    },
-
-    # Installation configuration
-    install | {
-      method | String = "binary",
-      binary_name | String,
-      binary_path | String = "/usr/local/bin",
-      config_path | String = "/etc/my-service",
-      data_path | String = "/var/lib/my-service",
-    },
-
-    # Dependencies
-    dependencies | [
-      {
-        name | String,
-        version | String = ">=1.0.0",
-      }
-    ],
-
-    # Service configuration
-    service | {
-      type | String = "systemd",
-      user | String = "my-service",
-      group | String = "my-service",
-      ports | [Number] = [8080, 9090],
-    },
-
-    # Health check configuration
-    health_check | {
-      endpoint | String,
-      interval | Number = 30,
-      timeout | Number = 5,
-      retries | Number = 3,
-    },
-  }
-}
-
-

Nushell Implementation

-

Create nulib/mod.nu:

-
use std log
-use ../../../lib_provisioning *
-
-export const SERVICE_NAME = "my-service"
-export const SERVICE_VERSION = "1.0.0"
-
-export def "taskserv-info" [] -> record {
-    {
-        name: $SERVICE_NAME,
-        version: $SERVICE_VERSION,
-        type: "taskserv",
-        category: "application",
-        description: "Custom application service",
-        dependencies: ["containerd"],
-        ports: [8080, 9090],
-        config_files: ["/etc/my-service/config.yaml"],
-        data_directories: ["/var/lib/my-service"]
-    }
-}
-
-export def "install" [
-    config: record = {}
-    --check              # Check mode only
-    --version: string    # Specific version to install
-] -> record {
-    let install_version = if ($version | is-not-empty) {
-        $version
-    } else {
-        (get-latest-version)
-    }
-
-    log info $"Installing ($SERVICE_NAME) version ($install_version)"
-
-    if $check {
-        return {
-            action: "install",
-            service: $SERVICE_NAME,
-            version: $install_version,
-            check_mode: true,
-            would_install: true,
-            requirements_met: (check-requirements)
-        }
-    }
-
-    # Check system requirements
-    let req_check = (check-requirements)
-    if not $req_check.met {
-        error make {
-            msg: $"Requirements not met: ($req_check.missing | str join ', ')"
-        }
-    }
-
-    # Download and install
-    let binary_path = (download-binary $install_version)
-    install-binary $binary_path
-    create-user-and-directories
-    generate-config $config
-    install-systemd-service
-
-    # Start service
-    systemctl start $SERVICE_NAME
-    systemctl enable $SERVICE_NAME
-
-    # Verify installation
-    let health = (check-health)
-    if not $health.healthy {
-        error make { msg: "Service failed health check after installation" }
-    }
-
-    {
-        success: true,
-        service: $SERVICE_NAME,
-        version: $install_version,
-        status: "running",
-        health: $health
-    }
-}
-
-export def "uninstall" [
-    --force              # Force removal even if running
-    --keep-data         # Keep data directories
-] -> null {
-    log info $"Uninstalling ($SERVICE_NAME)"
-
-    # Stop and disable service
-    try {
-        systemctl stop $SERVICE_NAME
-        systemctl disable $SERVICE_NAME
-    } catch {
-        log warning "Failed to stop systemd service"
-    }
-
-    # Remove binary
-    try {
-        rm -f $"/usr/local/bin/($SERVICE_NAME)"
-    } catch {
-        log warning "Failed to remove binary"
-    }
-
-    # Remove configuration
-    try {
-        rm -rf $"/etc/($SERVICE_NAME)"
-    } catch {
-        log warning "Failed to remove configuration"
-    }
-
-    # Remove data directories (unless keeping)
-    if not $keep_data {
-        try {
-            rm -rf $"/var/lib/($SERVICE_NAME)"
-        } catch {
-            log warning "Failed to remove data directories"
-        }
-    }
-
-    # Remove systemd service file
-    try {
-        rm -f $"/etc/systemd/system/($SERVICE_NAME).service"
-        systemctl daemon-reload
-    } catch {
-        log warning "Failed to remove systemd service"
-    }
-
-    log info $"($SERVICE_NAME) uninstalled successfully"
-}
-
-export def "status" [] -> record {
-    let systemd_status = try {
-        systemctl is-active $SERVICE_NAME | str trim
-    } catch {
-        "unknown"
-    }
-
-    let health = (check-health)
-    let version = (get-current-version)
-
-    {
-        service: $SERVICE_NAME,
-        version: $version,
-        systemd_status: $systemd_status,
-        health: $health,
-        uptime: (get-service-uptime),
-        memory_usage: (get-memory-usage),
-        cpu_usage: (get-cpu-usage)
-    }
-}
-
-def check-requirements [] -> record {
-    mut missing = []
-    mut met = true
-
-    # Check for containerd
-    if not (which containerd | is-not-empty) {
-        $missing = ($missing | append "containerd")
-        $met = false
-    }
-
-    # Check for systemctl
-    if not (which systemctl | is-not-empty) {
-        $missing = ($missing | append "systemctl")
-        $met = false
-    }
-
-    {
-        met: $met,
-        missing: $missing
-    }
-}
-
-def check-health [] -> record {
-    try {
-        let response = (http get "http://localhost:9090/health")
-        {
-            healthy: true,
-            status: ($response | get status),
-            last_check: (date now)
-        }
-    } catch {
-        {
-            healthy: false,
-            error: "Health endpoint not responding",
-            last_check: (date now)
-        }
-    }
-}
-
-

Cluster Extension API

-

Cluster Interface

-

Clusters orchestrate multiple components:

-

Core Operations

-
    -
  • create(config: record) -> record
  • -
  • delete(config: record) -> null
  • -
  • status() -> record
  • -
  • scale(replicas: int) -> record
  • -
  • upgrade(version: string) -> record
  • -
-

Component Management

-
    -
  • list-components() -> list<record>
  • -
  • component-status(name: string) -> record
  • -
  • restart-component(name: string) -> null
  • -
-

Cluster Development Template

-

Nickel Configuration

-

Create schemas/cluster.ncl:

-
# Cluster configuration schema
-{
-  ClusterConfig = {
-    # Cluster metadata
-    name | String,
-    version | String = "1.0.0",
-    description | String = "",
-
-    # Components to deploy
-    components | [Component],
-
-    # Resource requirements
-    resources | {
-      min_nodes | Number = 1,
-      cpu_per_node | String = "2",
-      memory_per_node | String = "4Gi",
-      storage_per_node | String = "20Gi",
-    },
-
-    # Network configuration
-    network | {
-      cluster_cidr | String = "10.244.0.0/16",
-      service_cidr | String = "10.96.0.0/12",
-      dns_domain | String = "cluster.local",
-    },
-
-    # Feature flags
-    features | {
-      monitoring | Bool = true,
-      logging | Bool = true,
-      ingress | Bool = false,
-      storage | Bool = true,
-    },
-  },
-
-  Component = {
-    name | String,
-    type | String | "taskserv" | "application" | "infrastructure",
-    version | String = "",
-    enabled | Bool = true,
-    dependencies | [String] = [],
-    config | {} = {},
-    resources | {
-      cpu | String = "",
-      memory | String = "",
-      storage | String = "",
-      replicas | Number = 1,
-    } = {},
-  },
-
-  # Example cluster configuration
-  buildkit_cluster = {
-    name = "buildkit",
-    version = "1.0.0",
-    description = "Container build cluster with BuildKit and registry",
-    components = [
-      {
-        name = "containerd",
-        type = "taskserv",
-        version = "1.7.0",
-        enabled = true,
-        dependencies = [],
-      },
-      {
-        name = "buildkit",
-        type = "taskserv",
-        version = "0.12.0",
-        enabled = true,
-        dependencies = ["containerd"],
-        config = {
-          worker_count = 4,
-          cache_size = "10Gi",
-          registry_mirrors = ["registry:5000"],
-        },
-      },
-      {
-        name = "registry",
-        type = "application",
-        version = "2.8.0",
-        enabled = true,
-        dependencies = [],
-        config = {
-          storage_driver = "filesystem",
-          storage_path = "/var/lib/registry",
-          auth_enabled = false,
-        },
-        resources = {
-          cpu = "500m",
-          memory = "1Gi",
-          storage = "50Gi",
-          replicas = 1,
-        },
-      },
-    ],
-    resources = {
-      min_nodes = 1,
-      cpu_per_node = "4",
-      memory_per_node = "8Gi",
-      storage_per_node = "100Gi",
-    },
-    features = {
-      monitoring = true,
-      logging = true,
-      ingress = false,
-      storage = true,
-    },
-  },
-}
-
-

Nushell Implementation

-

Create nulib/mod.nu:

-
use std log
-use ../../../lib_provisioning *
-
-export const CLUSTER_NAME = "my-cluster"
-export const CLUSTER_VERSION = "1.0.0"
-
-export def "cluster-info" [] -> record {
-    {
-        name: $CLUSTER_NAME,
-        version: $CLUSTER_VERSION,
-        type: "cluster",
-        category: "build",
-        description: "Custom application cluster",
-        components: (get-cluster-components),
-        required_resources: {
-            min_nodes: 1,
-            cpu_per_node: "2",
-            memory_per_node: "4Gi",
-            storage_per_node: "20Gi"
-        }
-    }
-}
-
-export def "create" [
-    config: record = {}
-    --check              # Check mode only
-    --wait               # Wait for completion
-] -> record {
-    log info $"Creating cluster: ($CLUSTER_NAME)"
-
-    if $check {
-        return {
-            action: "create-cluster",
-            cluster: $CLUSTER_NAME,
-            check_mode: true,
-            would_create: true,
-            components: (get-cluster-components),
-            requirements_check: (check-cluster-requirements)
-        }
-    }
-
-    # Validate cluster requirements
-    let req_check = (check-cluster-requirements)
-    if not $req_check.met {
-        error make {
-            msg: $"Cluster requirements not met: ($req_check.issues | str join ', ')"
-        }
-    }
-
-    # Get component deployment order
-    let components = (get-cluster-components)
-    let deployment_order = (resolve-component-dependencies $components)
-
-    mut deployment_status = []
-
-    # Deploy components in dependency order
-    for component in $deployment_order {
-        log info $"Deploying component: ($component.name)"
-
-        try {
-            let result = match $component.type {
-                "taskserv" => {
-                    taskserv create $component.name --config $component.config --wait
-                },
-                "application" => {
-                    deploy-application $component
-                },
-                _ => {
-                    error make { msg: $"Unknown component type: ($component.type)" }
-                }
-            }
-
-            $deployment_status = ($deployment_status | append {
-                component: $component.name,
-                status: "deployed",
-                result: $result
-            })
-
-        } catch {|e|
-            log error $"Failed to deploy ($component.name): ($e.msg)"
-            $deployment_status = ($deployment_status | append {
-                component: $component.name,
-                status: "failed",
-                error: $e.msg
-            })
-
-            # Rollback on failure
-            rollback-cluster-deployment $deployment_status
-            error make { msg: $"Cluster deployment failed at component: ($component.name)" }
-        }
-    }
-
-    # Configure cluster networking and integrations
-    configure-cluster-networking $config
-    setup-cluster-monitoring $config
-
-    # Wait for all components to be ready
-    if $wait {
-        wait-for-cluster-ready
-    }
-
-    {
-        success: true,
-        cluster: $CLUSTER_NAME,
-        components: $deployment_status,
-        endpoints: (get-cluster-endpoints),
-        status: "running"
-    }
-}
-
-export def "delete" [
-    config: record = {}
-    --force              # Force deletion
-] -> null {
-    log info $"Deleting cluster: ($CLUSTER_NAME)"
-
-    let components = (get-cluster-components)
-    let deletion_order = ($components | reverse)  # Delete in reverse order
-
-    for component in $deletion_order {
-        log info $"Removing component: ($component.name)"
-
-        try {
-            match $component.type {
-                "taskserv" => {
-                    taskserv delete $component.name --force=$force
-                },
-                "application" => {
-                    remove-application $component --force=$force
-                },
-                _ => {
-                    log warning $"Unknown component type: ($component.type)"
-                }
-            }
-        } catch {|e|
-            log error $"Failed to remove ($component.name): ($e.msg)"
-            if not $force {
-                error make { msg: $"Component removal failed: ($component.name)" }
-            }
-        }
-    }
-
-    # Clean up cluster-level resources
-    cleanup-cluster-networking
-    cleanup-cluster-monitoring
-    cleanup-cluster-storage
-
-    log info $"Cluster ($CLUSTER_NAME) deleted successfully"
-}
-
-def get-cluster-components [] -> list<record> {
-    [
-        {
-            name: "containerd",
-            type: "taskserv",
-            version: "1.7.0",
-            dependencies: []
-        },
-        {
-            name: "my-service",
-            type: "taskserv",
-            version: "1.0.0",
-            dependencies: ["containerd"]
-        },
-        {
-            name: "registry",
-            type: "application",
-            version: "2.8.0",
-            dependencies: []
-        }
-    ]
-}
-
-def resolve-component-dependencies [components: list<record>] -> list<record> {
-    # Topological sort of components based on dependencies
-    mut sorted = []
-    mut remaining = $components
-
-    while ($remaining | length) > 0 {
-        let no_deps = ($remaining | where {|comp|
-            ($comp.dependencies | all {|dep|
-                $dep in ($sorted | get name)
-            })
-        })
-
-        if ($no_deps | length) == 0 {
-            error make { msg: "Circular dependency detected in cluster components" }
-        }
-
-        $sorted = ($sorted | append $no_deps)
-        $remaining = ($remaining | where {|comp|
-            not ($comp.name in ($no_deps | get name))
-        })
-    }
-
-    $sorted
-}
-
-

Extension Registration and Discovery

-

Extension Registry

-

Extensions are registered in the system through:

-
    -
  1. Directory Structure: Placed in appropriate directories (providers/, taskservs/, cluster/)
  2. -
  3. Metadata Files: metadata.toml with extension information
  4. -
  5. Schema Files: schemas/ directory with Nickel schema files
  6. -
-

Registration API

-

register-extension(path: string, type: string) -> record

-

Registers a new extension with the system.

-

Parameters:

-
    -
  • path: Path to extension directory
  • -
  • type: Extension type (provider, taskserv, cluster)
  • -
-

unregister-extension(name: string, type: string) -> null

-

Removes extension from the registry.

-

list-registered-extensions(type?: string) -> list<record>

-

Lists all registered extensions, optionally filtered by type.

-

Extension Validation

-

Validation Rules

-
    -
  1. Structure Validation: Required files and directories exist
  2. -
  3. Schema Validation: Nickel schemas are valid
  4. -
  5. Interface Validation: Required functions are implemented
  6. -
  7. Dependency Validation: Dependencies are available
  8. -
  9. Version Validation: Version constraints are met
  10. -
-

validate-extension(path: string, type: string) -> record

-

Validates extension structure and implementation.

-

Testing Extensions

-

Test Framework

-

Extensions should include comprehensive tests:

-

Unit Tests

-

Create tests/unit_tests.nu:

-
use std testing
-
-export def test_provider_config_validation [] {
-    let config = {
-        auth: { api_key: "test-key", api_secret: "test-secret" },
-        api: { base_url: "https://api.test.com" }
-    }
-
-    let result = (validate-config $config)
-    assert ($result.valid == true)
-    assert ($result.errors | is-empty)
-}
-
-export def test_server_creation_check_mode [] {
-    let config = {
-        hostname: "test-server",
-        plan: "1xCPU-1 GB",
-        zone: "test-zone"
-    }
-
-    let result = (create-server $config --check)
-    assert ($result.check_mode == true)
-    assert ($result.would_create == true)
-}
-
-

Integration Tests

-

Create tests/integration_tests.nu:

-
use std testing
-
-export def test_full_server_lifecycle [] {
-    # Test server creation
-    let create_config = {
-        hostname: "integration-test",
-        plan: "1xCPU-1 GB",
-        zone: "test-zone"
-    }
-
-    let server = (create-server $create_config --wait)
-    assert ($server.success == true)
-    let server_id = $server.server_id
-
-    # Test server info retrieval
-    let info = (get-server-info $server_id)
-    assert ($info.hostname == "integration-test")
-    assert ($info.status == "running")
-
-    # Test server deletion
-    delete-server $server_id
-
-    # Verify deletion
-    let final_info = try { get-server-info $server_id } catch { null }
-    assert ($final_info == null)
-}
-
-

Running Tests

-
# Run unit tests
-nu tests/unit_tests.nu
-
-# Run integration tests
-nu tests/integration_tests.nu
-
-# Run all tests
-nu tests/run_all_tests.nu
-
-

Documentation Requirements

-

Extension Documentation

-

Each extension must include:

-
    -
  1. README.md: Overview, installation, and usage
  2. -
  3. API.md: Detailed API documentation
  4. -
  5. EXAMPLES.md: Usage examples and tutorials
  6. -
  7. CHANGELOG.md: Version history and changes
  8. -
-

API Documentation Template

-
# Extension Name API
-
-## Overview
-Brief description of the extension and its purpose.
-
-## Installation
-Steps to install and configure the extension.
-
-## Configuration
-Configuration schema and options.
-
-## API Reference
-Detailed API documentation with examples.
-
-## Examples
-Common usage patterns and examples.
-
-## Troubleshooting
-Common issues and solutions.
-
-

Best Practices

-

Development Guidelines

-
    -
  1. Follow Naming Conventions: Use consistent naming for functions and variables
  2. -
  3. Error Handling: Implement comprehensive error handling and recovery
  4. -
  5. Logging: Use structured logging for debugging and monitoring
  6. -
  7. Configuration Validation: Validate all inputs and configurations
  8. -
  9. Documentation: Document all public APIs and configurations
  10. -
  11. Testing: Include comprehensive unit and integration tests
  12. -
  13. Versioning: Follow semantic versioning principles
  14. -
  15. Security: Implement secure credential handling and API calls
  16. -
-

Performance Considerations

-
    -
  1. Caching: Cache expensive operations and API calls
  2. -
  3. Parallel Processing: Use parallel execution where possible
  4. -
  5. Resource Management: Clean up resources properly
  6. -
  7. Batch Operations: Batch API calls when possible
  8. -
  9. Health Monitoring: Implement health checks and monitoring
  10. -
-

Security Best Practices

-
    -
  1. Credential Management: Store credentials securely
  2. -
  3. Input Validation: Validate and sanitize all inputs
  4. -
  5. Access Control: Implement proper access controls
  6. -
  7. Audit Logging: Log all security-relevant operations
  8. -
  9. Encryption: Encrypt sensitive data in transit and at rest
  10. -
-

This extension development API provides a comprehensive framework for building robust, scalable, and maintainable extensions for provisioning.

-

SDK Documentation

-

This document provides comprehensive documentation for the official SDKs and client libraries available for provisioning.

-

Available SDKs

-

Provisioning provides SDKs in multiple languages to facilitate integration:

-

Official SDKs

-
    -
  • Python SDK (provisioning-client) - Full-featured Python client
  • -
  • JavaScript/TypeScript SDK (@provisioning/client) - Node.js and browser support
  • -
  • Go SDK (go-provisioning-client) - Go client library
  • -
  • Rust SDK (provisioning-rs) - Native Rust integration
  • -
-

Community SDKs

-
    -
  • Java SDK - Community-maintained Java client
  • -
  • C# SDK - .NET client library
  • -
  • PHP SDK - PHP client library
  • -
-

Python SDK

-

Installation

-
# Install from PyPI
-pip install provisioning-client
-
-# Or install development version
-pip install git+https://github.com/provisioning-systems/python-client.git
-
-

Quick Start

-
from provisioning_client import ProvisioningClient
-import asyncio
-
-async def main():
-    # Initialize client
-    client = ProvisioningClient(
-        base_url="http://localhost:9090",
-        auth_url="http://localhost:8081",
-        username="admin",
-        password="your-password"
-    )
-
-    try:
-        # Authenticate
-        token = await client.authenticate()
-        print(f"Authenticated with token: {token[:20]}...")
-
-        # Create a server workflow
-        task_id = client.create_server_workflow(
-            infra="production",
-            settings="prod-settings.ncl",
-            wait=False
-        )
-        print(f"Server workflow created: {task_id}")
-
-        # Wait for completion
-        task = client.wait_for_task_completion(task_id, timeout=600)
-        print(f"Task completed with status: {task.status}")
-
-        if task.status == "Completed":
-            print(f"Output: {task.output}")
-        elif task.status == "Failed":
-            print(f"Error: {task.error}")
-
-    except Exception as e:
-        print(f"Error: {e}")
-
-if __name__ == "__main__":
-    asyncio.run(main())
-
-

Advanced Usage

-

WebSocket Integration

-
async def monitor_workflows():
-    client = ProvisioningClient()
-    await client.authenticate()
-
-    # Set up event handlers
-    async def on_task_update(event):
-        print(f"Task {event['data']['task_id']} status: {event['data']['status']}")
-
-    async def on_progress_update(event):
-        print(f"Progress: {event['data']['progress']}% - {event['data']['current_step']}")
-
-    client.on_event('TaskStatusChanged', on_task_update)
-    client.on_event('WorkflowProgressUpdate', on_progress_update)
-
-    # Connect to WebSocket
-    await client.connect_websocket(['TaskStatusChanged', 'WorkflowProgressUpdate'])
-
-    # Keep connection alive
-    await asyncio.sleep(3600)  # Monitor for 1 hour
-
-

Batch Operations

-
async def execute_batch_deployment():
-    client = ProvisioningClient()
-    await client.authenticate()
-
-    batch_config = {
-        "name": "production_deployment",
-        "version": "1.0.0",
-        "storage_backend": "surrealdb",
-        "parallel_limit": 5,
-        "rollback_enabled": True,
-        "operations": [
-            {
-                "id": "servers",
-                "type": "server_batch",
-                "provider": "upcloud",
-                "dependencies": [],
-                "config": {
-                    "server_configs": [
-                        {"name": "web-01", "plan": "2xCPU-4 GB", "zone": "de-fra1"},
-                        {"name": "web-02", "plan": "2xCPU-4 GB", "zone": "de-fra1"}
-                    ]
-                }
-            },
-            {
-                "id": "kubernetes",
-                "type": "taskserv_batch",
-                "provider": "upcloud",
-                "dependencies": ["servers"],
-                "config": {
-                    "taskservs": ["kubernetes", "cilium", "containerd"]
-                }
-            }
-        ]
-    }
-
-    # Execute batch operation
-    batch_result = await client.execute_batch_operation(batch_config)
-    print(f"Batch operation started: {batch_result['batch_id']}")
-
-    # Monitor progress
-    while True:
-        status = await client.get_batch_status(batch_result['batch_id'])
-        print(f"Batch status: {status['status']} - {status.get('progress', 0)}%")
-
-        if status['status'] in ['Completed', 'Failed', 'Cancelled']:
-            break
-
-        await asyncio.sleep(10)
-
-    print(f"Batch operation finished: {status['status']}")
-
-

Error Handling with Retries

-
from provisioning_client.exceptions import (
-    ProvisioningAPIError,
-    AuthenticationError,
-    ValidationError,
-    RateLimitError
-)
-from tenacity import retry, stop_after_attempt, wait_exponential
-
-class RobustProvisioningClient(ProvisioningClient):
-    @retry(
-        stop=stop_after_attempt(3),
-        wait=wait_exponential(multiplier=1, min=4, max=10)
-    )
-    async def create_server_workflow_with_retry(self, **kwargs):
-        try:
-            return await self.create_server_workflow(**kwargs)
-        except RateLimitError as e:
-            print(f"Rate limited, retrying in {e.retry_after} seconds...")
-            await asyncio.sleep(e.retry_after)
-            raise
-        except AuthenticationError:
-            print("Authentication failed, re-authenticating...")
-            await self.authenticate()
-            raise
-        except ValidationError as e:
-            print(f"Validation error: {e}")
-            # Don't retry validation errors
-            raise
-        except ProvisioningAPIError as e:
-            print(f"API error: {e}")
-            raise
-
-# Usage
-async def robust_workflow():
-    client = RobustProvisioningClient()
-
-    try:
-        task_id = await client.create_server_workflow_with_retry(
-            infra="production",
-            settings="config.ncl"
-        )
-        print(f"Workflow created successfully: {task_id}")
-    except Exception as e:
-        print(f"Failed after retries: {e}")
-
-

API Reference

-

ProvisioningClient Class

-
class ProvisioningClient:
-    def __init__(self,
-                 base_url: str = "http://localhost:9090",
-                 auth_url: str = "http://localhost:8081",
-                 username: str = None,
-                 password: str = None,
-                 token: str = None):
-        """Initialize the provisioning client"""
-
-    async def authenticate(self) -> str:
-        """Authenticate and get JWT token"""
-
-    def create_server_workflow(self,
-                             infra: str,
-                             settings: str = "config.ncl",
-                             check_mode: bool = False,
-                             wait: bool = False) -> str:
-        """Create a server provisioning workflow"""
-
-    def create_taskserv_workflow(self,
-                               operation: str,
-                               taskserv: str,
-                               infra: str,
-                               settings: str = "config.ncl",
-                               check_mode: bool = False,
-                               wait: bool = False) -> str:
-        """Create a task service workflow"""
-
-    def get_task_status(self, task_id: str) -> WorkflowTask:
-        """Get the status of a specific task"""
-
-    def wait_for_task_completion(self,
-                               task_id: str,
-                               timeout: int = 300,
-                               poll_interval: int = 5) -> WorkflowTask:
-        """Wait for a task to complete"""
-
-    async def connect_websocket(self, event_types: List[str] = None):
-        """Connect to WebSocket for real-time updates"""
-
-    def on_event(self, event_type: str, handler: Callable):
-        """Register an event handler"""
-
-

JavaScript/TypeScript SDK

-

Installation

-
# npm
-npm install @provisioning/client
-
-# yarn
-yarn add @provisioning/client
-
-# pnpm
-pnpm add @provisioning/client
-
-

Quick Start

-
import { ProvisioningClient } from '@provisioning/client';
-
-async function main() {
-  const client = new ProvisioningClient({
-    baseUrl: 'http://localhost:9090',
-    authUrl: 'http://localhost:8081',
-    username: 'admin',
-    password: 'your-password'
-  });
-
-  try {
-    // Authenticate
-    await client.authenticate();
-    console.log('Authentication successful');
-
-    // Create server workflow
-    const taskId = await client.createServerWorkflow({
-      infra: 'production',
-      settings: 'prod-settings.ncl'
-    });
-    console.log(`Server workflow created: ${taskId}`);
-
-    // Wait for completion
-    const task = await client.waitForTaskCompletion(taskId);
-    console.log(`Task completed with status: ${task.status}`);
-
-  } catch (error) {
-    console.error('Error:', error.message);
-  }
-}
-
-main();
-
-

React Integration

-
import React, { useState, useEffect } from 'react';
-import { ProvisioningClient } from '@provisioning/client';
-
-interface Task {
-  id: string;
-  name: string;
-  status: string;
-  progress?: number;
-}
-
-const WorkflowDashboard: React.FC = () => {
-  const [client] = useState(() => new ProvisioningClient({
-    baseUrl: process.env.REACT_APP_API_URL,
-    username: process.env.REACT_APP_USERNAME,
-    password: process.env.REACT_APP_PASSWORD
-  }));
-
-  const [tasks, setTasks] = useState<Task[]>([]);
-  const [connected, setConnected] = useState(false);
-
-  useEffect(() => {
-    const initClient = async () => {
-      try {
-        await client.authenticate();
-
-        // Set up WebSocket event handlers
-        client.on('TaskStatusChanged', (event: any) => {
-          setTasks(prev => prev.map(task =>
-            task.id === event.data.task_id
-              ? { ...task, status: event.data.status, progress: event.data.progress }
-              : task
-          ));
-        });
-
-        client.on('websocketConnected', () => {
-          setConnected(true);
-        });
-
-        client.on('websocketDisconnected', () => {
-          setConnected(false);
-        });
-
-        // Connect WebSocket
-        await client.connectWebSocket(['TaskStatusChanged', 'WorkflowProgressUpdate']);
-
-        // Load initial tasks
-        const initialTasks = await client.listTasks();
-        setTasks(initialTasks);
-
-      } catch (error) {
-        console.error('Failed to initialize client:', error);
-      }
-    };
-
-    initClient();
-
-    return () => {
-      client.disconnectWebSocket();
-    };
-  }, [client]);
-
-  const createServerWorkflow = async () => {
-    try {
-      const taskId = await client.createServerWorkflow({
-        infra: 'production',
-        settings: 'config.ncl'
-      });
-
-      // Add to tasks list
-      setTasks(prev => [...prev, {
-        id: taskId,
-        name: 'Server Creation',
-        status: 'Pending'
-      }]);
-
-    } catch (error) {
-      console.error('Failed to create workflow:', error);
-    }
-  };
-
-  return (
-    <div className="workflow-dashboard">
-      <div className="header">
-        <h1>Workflow Dashboard</h1>
-        <div className={`connection-status ${connected ? 'connected' : 'disconnected'}`}>
-          {connected ? '🟢 Connected' : '🔴 Disconnected'}
-        </div>
-      </div>
-
-      <div className="controls">
-        <button onClick={createServerWorkflow}>
-          Create Server Workflow
-        </button>
-      </div>
-
-      <div className="tasks">
-        {tasks.map(task => (
-          <div key={task.id} className="task-card">
-            <h3>{task.name}</h3>
-            <div className="task-status">
-              <span className={`status ${task.status.toLowerCase()}`}>
-                {task.status}
-              </span>
-              {task.progress && (
-                <div className="progress-bar">
-                  <div
-                    className="progress-fill"
-                    style={{ width: `${task.progress}%` }}
-                  />
-                  <span className="progress-text">{task.progress}%</span>
-                </div>
-              )}
-            </div>
-          </div>
-        ))}
-      </div>
-    </div>
-  );
-};
-
-export default WorkflowDashboard;
-
-

Node.js CLI Tool

-
#!/usr/bin/env node
-
-import { Command } from 'commander';
-import { ProvisioningClient } from '@provisioning/client';
-import chalk from 'chalk';
-import ora from 'ora';
-
-const program = new Command();
-
-program
-  .name('provisioning-cli')
-  .description('CLI tool for provisioning')
-  .version('1.0.0');
-
-program
-  .command('create-server')
-  .description('Create a server workflow')
-  .requiredOption('-i, --infra <infra>', 'Infrastructure target')
-  .option('-s, --settings <settings>', 'Settings file', 'config.ncl')
-  .option('-c, --check', 'Check mode only')
-  .option('-w, --wait', 'Wait for completion')
-  .action(async (options) => {
-    const client = new ProvisioningClient({
-      baseUrl: process.env.PROVISIONING_API_URL,
-      username: process.env.PROVISIONING_USERNAME,
-      password: process.env.PROVISIONING_PASSWORD
-    });
-
-    const spinner = ora('Authenticating...').start();
-
-    try {
-      await client.authenticate();
-      spinner.text = 'Creating server workflow...';
-
-      const taskId = await client.createServerWorkflow({
-        infra: options.infra,
-        settings: options.settings,
-        check_mode: options.check,
-        wait: false
-      });
-
-      spinner.succeed(`Server workflow created: ${chalk.green(taskId)}`);
-
-      if (options.wait) {
-        spinner.start('Waiting for completion...');
-
-        // Set up progress updates
-        client.on('TaskStatusChanged', (event: any) => {
-          if (event.data.task_id === taskId) {
-            spinner.text = `Status: ${event.data.status}`;
-          }
-        });
-
-        client.on('WorkflowProgressUpdate', (event: any) => {
-          if (event.data.workflow_id === taskId) {
-            spinner.text = `${event.data.progress}% - ${event.data.current_step}`;
-          }
-        });
-
-        await client.connectWebSocket(['TaskStatusChanged', 'WorkflowProgressUpdate']);
-
-        const task = await client.waitForTaskCompletion(taskId);
-
-        if (task.status === 'Completed') {
-          spinner.succeed(chalk.green('Workflow completed successfully!'));
-          if (task.output) {
-            console.log(chalk.gray('Output:'), task.output);
-          }
-        } else {
-          spinner.fail(chalk.red(`Workflow failed: ${task.error}`));
-          process.exit(1);
-        }
-      }
-
-    } catch (error) {
-      spinner.fail(chalk.red(`Error: ${error.message}`));
-      process.exit(1);
-    }
-  });
-
-program
-  .command('list-tasks')
-  .description('List all tasks')
-  .option('-s, --status <status>', 'Filter by status')
-  .action(async (options) => {
-    const client = new ProvisioningClient();
-
-    try {
-      await client.authenticate();
-      const tasks = await client.listTasks(options.status);
-
-      console.log(chalk.bold('Tasks:'));
-      tasks.forEach(task => {
-        const statusColor = task.status === 'Completed' ? 'green' :
-                          task.status === 'Failed' ? 'red' :
-                          task.status === 'Running' ? 'yellow' : 'gray';
-
-        console.log(`  ${task.id} - ${task.name} [${chalk[statusColor](task.status)}]`);
-      });
-
-    } catch (error) {
-      console.error(chalk.red(`Error: ${error.message}`));
-      process.exit(1);
-    }
-  });
-
-program
-  .command('monitor')
-  .description('Monitor workflows in real-time')
-  .action(async () => {
-    const client = new ProvisioningClient();
-
-    try {
-      await client.authenticate();
-
-      console.log(chalk.bold('🔍 Monitoring workflows...'));
-      console.log(chalk.gray('Press Ctrl+C to stop'));
-
-      client.on('TaskStatusChanged', (event: any) => {
-        const timestamp = new Date().toLocaleTimeString();
-        const statusColor = event.data.status === 'Completed' ? 'green' :
-                          event.data.status === 'Failed' ? 'red' :
-                          event.data.status === 'Running' ? 'yellow' : 'gray';
-
-        console.log(`[${chalk.gray(timestamp)}] Task ${event.data.task_id} → ${chalk[statusColor](event.data.status)}`);
-      });
-
-      client.on('WorkflowProgressUpdate', (event: any) => {
-        const timestamp = new Date().toLocaleTimeString();
-        console.log(`[${chalk.gray(timestamp)}] ${event.data.workflow_id}: ${event.data.progress}% - ${event.data.current_step}`);
-      });
-
-      await client.connectWebSocket(['TaskStatusChanged', 'WorkflowProgressUpdate']);
-
-      // Keep the process running
-      process.on('SIGINT', () => {
-        console.log(chalk.yellow('\nStopping monitor...'));
-        client.disconnectWebSocket();
-        process.exit(0);
-      });
-
-      // Keep alive
-      setInterval(() => {}, 1000);
-
-    } catch (error) {
-      console.error(chalk.red(`Error: ${error.message}`));
-      process.exit(1);
-    }
-  });
-
-program.parse();
-
-

API Reference

-
interface ProvisioningClientOptions {
-  baseUrl?: string;
-  authUrl?: string;
-  username?: string;
-  password?: string;
-  token?: string;
-}
-
-class ProvisioningClient extends EventEmitter {
-  constructor(options: ProvisioningClientOptions);
-
-  async authenticate(): Promise<string>;
-
-  async createServerWorkflow(config: {
-    infra: string;
-    settings?: string;
-    check_mode?: boolean;
-    wait?: boolean;
-  }): Promise<string>;
-
-  async createTaskservWorkflow(config: {
-    operation: string;
-    taskserv: string;
-    infra: string;
-    settings?: string;
-    check_mode?: boolean;
-    wait?: boolean;
-  }): Promise<string>;
-
-  async getTaskStatus(taskId: string): Promise<Task>;
-
-  async listTasks(statusFilter?: string): Promise<Task[]>;
-
-  async waitForTaskCompletion(
-    taskId: string,
-    timeout?: number,
-    pollInterval?: number
-  ): Promise<Task>;
-
-  async connectWebSocket(eventTypes?: string[]): Promise<void>;
-
-  disconnectWebSocket(): void;
-
-  async executeBatchOperation(batchConfig: BatchConfig): Promise<any>;
-
-  async getBatchStatus(batchId: string): Promise<any>;
-}
-
-

Go SDK

-

Installation

-
go get github.com/provisioning-systems/go-client
-
-

Quick Start

-
package main
-
-import (
-    "context"
-    "fmt"
-    "log"
-    "time"
-
-    "github.com/provisioning-systems/go-client"
-)
-
-func main() {
-    // Initialize client
-    client, err := provisioning.NewClient(&provisioning.Config{
-        BaseURL:  "http://localhost:9090",
-        AuthURL:  "http://localhost:8081",
-        Username: "admin",
-        Password: "your-password",
-    })
-    if err != nil {
-        log.Fatalf("Failed to create client: %v", err)
-    }
-
-    ctx := context.Background()
-
-    // Authenticate
-    token, err := client.Authenticate(ctx)
-    if err != nil {
-        log.Fatalf("Authentication failed: %v", err)
-    }
-    fmt.Printf("Authenticated with token: %.20s...\n", token)
-
-    // Create server workflow
-    taskID, err := client.CreateServerWorkflow(ctx, &provisioning.CreateServerRequest{
-        Infra:    "production",
-        Settings: "prod-settings.ncl",
-        Wait:     false,
-    })
-    if err != nil {
-        log.Fatalf("Failed to create workflow: %v", err)
-    }
-    fmt.Printf("Server workflow created: %s\n", taskID)
-
-    // Wait for completion
-    task, err := client.WaitForTaskCompletion(ctx, taskID, 10*time.Minute)
-    if err != nil {
-        log.Fatalf("Failed to wait for completion: %v", err)
-    }
-
-    fmt.Printf("Task completed with status: %s\n", task.Status)
-    if task.Status == "Completed" {
-        fmt.Printf("Output: %s\n", task.Output)
-    } else if task.Status == "Failed" {
-        fmt.Printf("Error: %s\n", task.Error)
-    }
-}
-
-

WebSocket Integration

-
package main
-
-import (
-    "context"
-    "fmt"
-    "log"
-    "os"
-    "os/signal"
-
-    "github.com/provisioning-systems/go-client"
-)
-
-func main() {
-    client, err := provisioning.NewClient(&provisioning.Config{
-        BaseURL:  "http://localhost:9090",
-        Username: "admin",
-        Password: "password",
-    })
-    if err != nil {
-        log.Fatalf("Failed to create client: %v", err)
-    }
-
-    ctx := context.Background()
-
-    // Authenticate
-    _, err = client.Authenticate(ctx)
-    if err != nil {
-        log.Fatalf("Authentication failed: %v", err)
-    }
-
-    // Set up WebSocket connection
-    ws, err := client.ConnectWebSocket(ctx, []string{
-        "TaskStatusChanged",
-        "WorkflowProgressUpdate",
-    })
-    if err != nil {
-        log.Fatalf("Failed to connect WebSocket: %v", err)
-    }
-    defer ws.Close()
-
-    // Handle events
-    go func() {
-        for event := range ws.Events() {
-            switch event.Type {
-            case "TaskStatusChanged":
-                fmt.Printf("Task %s status changed to: %s\n",
-                    event.Data["task_id"], event.Data["status"])
-            case "WorkflowProgressUpdate":
-                fmt.Printf("Workflow progress: %v%% - %s\n",
-                    event.Data["progress"], event.Data["current_step"])
-            }
-        }
-    }()
-
-    // Wait for interrupt
-    c := make(chan os.Signal, 1)
-    signal.Notify(c, os.Interrupt)
-    <-c
-
-    fmt.Println("Shutting down...")
-}
-
-

HTTP Client with Retry Logic

-
package main
-
-import (
-    "context"
-    "fmt"
-    "time"
-
-    "github.com/provisioning-systems/go-client"
-    "github.com/cenkalti/backoff/v4"
-)
-
-type ResilientClient struct {
-    *provisioning.Client
-}
-
-func NewResilientClient(config *provisioning.Config) (*ResilientClient, error) {
-    client, err := provisioning.NewClient(config)
-    if err != nil {
-        return nil, err
-    }
-
-    return &ResilientClient{Client: client}, nil
-}
-
-func (c *ResilientClient) CreateServerWorkflowWithRetry(
-    ctx context.Context,
-    req *provisioning.CreateServerRequest,
-) (string, error) {
-    var taskID string
-
-    operation := func() error {
-        var err error
-        taskID, err = c.CreateServerWorkflow(ctx, req)
-
-        // Don't retry validation errors
-        if provisioning.IsValidationError(err) {
-            return backoff.Permanent(err)
-        }
-
-        return err
-    }
-
-    exponentialBackoff := backoff.NewExponentialBackOff()
-    exponentialBackoff.MaxElapsedTime = 5 * time.Minute
-
-    err := backoff.Retry(operation, exponentialBackoff)
-    if err != nil {
-        return "", fmt.Errorf("failed after retries: %w", err)
-    }
-
-    return taskID, nil
-}
-
-func main() {
-    client, err := NewResilientClient(&provisioning.Config{
-        BaseURL:  "http://localhost:9090",
-        Username: "admin",
-        Password: "password",
-    })
-    if err != nil {
-        log.Fatalf("Failed to create client: %v", err)
-    }
-
-    ctx := context.Background()
-
-    // Authenticate with retry
-    _, err = client.Authenticate(ctx)
-    if err != nil {
-        log.Fatalf("Authentication failed: %v", err)
-    }
-
-    // Create workflow with retry
-    taskID, err := client.CreateServerWorkflowWithRetry(ctx, &provisioning.CreateServerRequest{
-        Infra:    "production",
-        Settings: "config.ncl",
-    })
-    if err != nil {
-        log.Fatalf("Failed to create workflow: %v", err)
-    }
-
-    fmt.Printf("Workflow created successfully: %s\n", taskID)
-}
-
-

Rust SDK

-

Installation

-

Add to your Cargo.toml:

-
[dependencies]
-provisioning-rs = "2.0.0"
-tokio = { version = "1.0", features = ["full"] }
-
-

Quick Start

-
use provisioning_rs::{ProvisioningClient, Config, CreateServerRequest};
-use tokio;
-
-#[tokio::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
-    // Initialize client
-    let config = Config {
-        base_url: "http://localhost:9090".to_string(),
-        auth_url: Some("http://localhost:8081".to_string()),
-        username: Some("admin".to_string()),
-        password: Some("your-password".to_string()),
-        token: None,
-    };
-
-    let mut client = ProvisioningClient::new(config);
-
-    // Authenticate
-    let token = client.authenticate().await?;
-    println!("Authenticated with token: {}...", &token[..20]);
-
-    // Create server workflow
-    let request = CreateServerRequest {
-        infra: "production".to_string(),
-        settings: Some("prod-settings.ncl".to_string()),
-        check_mode: false,
-        wait: false,
-    };
-
-    let task_id = client.create_server_workflow(request).await?;
-    println!("Server workflow created: {}", task_id);
-
-    // Wait for completion
-    let task = client.wait_for_task_completion(&task_id, std::time::Duration::from_secs(600)).await?;
-
-    println!("Task completed with status: {:?}", task.status);
-    match task.status {
-        TaskStatus::Completed => {
-            if let Some(output) = task.output {
-                println!("Output: {}", output);
-            }
-        },
-        TaskStatus::Failed => {
-            if let Some(error) = task.error {
-                println!("Error: {}", error);
-            }
-        },
-        _ => {}
-    }
-
-    Ok(())
-}
-

WebSocket Integration

-
use provisioning_rs::{ProvisioningClient, Config, WebSocketEvent};
-use futures_util::StreamExt;
-use tokio;
-
-#[tokio::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let config = Config {
-        base_url: "http://localhost:9090".to_string(),
-        username: Some("admin".to_string()),
-        password: Some("password".to_string()),
-        ..Default::default()
-    };
-
-    let mut client = ProvisioningClient::new(config);
-
-    // Authenticate
-    client.authenticate().await?;
-
-    // Connect WebSocket
-    let mut ws = client.connect_websocket(vec![
-        "TaskStatusChanged".to_string(),
-        "WorkflowProgressUpdate".to_string(),
-    ]).await?;
-
-    // Handle events
-    tokio::spawn(async move {
-        while let Some(event) = ws.next().await {
-            match event {
-                Ok(WebSocketEvent::TaskStatusChanged { data }) => {
-                    println!("Task {} status changed to: {}", data.task_id, data.status);
-                },
-                Ok(WebSocketEvent::WorkflowProgressUpdate { data }) => {
-                    println!("Workflow progress: {}% - {}", data.progress, data.current_step);
-                },
-                Ok(WebSocketEvent::SystemHealthUpdate { data }) => {
-                    println!("System health: {}", data.overall_status);
-                },
-                Err(e) => {
-                    eprintln!("WebSocket error: {}", e);
-                    break;
-                }
-            }
-        }
-    });
-
-    // Keep the main thread alive
-    tokio::signal::ctrl_c().await?;
-    println!("Shutting down...");
-
-    Ok(())
-}
-

Batch Operations

-
use provisioning_rs::{BatchOperationRequest, BatchOperation};
-
-#[tokio::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let mut client = ProvisioningClient::new(config);
-    client.authenticate().await?;
+

Use Interactive Guides

+

Access built-in guides for comprehensive walkthroughs:

+
# Quick command reference
+provisioning sc
 
-    // Define batch operation
-    let batch_request = BatchOperationRequest {
-        name: "production_deployment".to_string(),
-        version: "1.0.0".to_string(),
-        storage_backend: "surrealdb".to_string(),
-        parallel_limit: 5,
-        rollback_enabled: true,
-        operations: vec![
-            BatchOperation {
-                id: "servers".to_string(),
-                operation_type: "server_batch".to_string(),
-                provider: "upcloud".to_string(),
-                dependencies: vec![],
-                config: serde_json::json!({
-                    "server_configs": [
-                        {"name": "web-01", "plan": "2xCPU-4 GB", "zone": "de-fra1"},
-                        {"name": "web-02", "plan": "2xCPU-4 GB", "zone": "de-fra1"}
-                    ]
-                }),
-            },
-            BatchOperation {
-                id: "kubernetes".to_string(),
-                operation_type: "taskserv_batch".to_string(),
-                provider: "upcloud".to_string(),
-                dependencies: vec!["servers".to_string()],
-                config: serde_json::json!({
-                    "taskservs": ["kubernetes", "cilium", "containerd"]
-                }),
-            },
-        ],
-    };
+# Complete from-scratch guide
+provisioning guide from-scratch
 
-    // Execute batch operation
-    let batch_result = client.execute_batch_operation(batch_request).await?;
-    println!("Batch operation started: {}", batch_result.batch_id);
-
-    // Monitor progress
-    loop {
-        let status = client.get_batch_status(&batch_result.batch_id).await?;
-        println!("Batch status: {} - {}%", status.status, status.progress.unwrap_or(0.0));
-
-        match status.status.as_str() {
-            "Completed" | "Failed" | "Cancelled" => break,
-            _ => tokio::time::sleep(std::time::Duration::from_secs(10)).await,
-        }
-    }
-
-    Ok(())
-}
-

Best Practices

-

Authentication and Security

-
    -
  1. Token Management: Store tokens securely and implement automatic refresh
  2. -
  3. Environment Variables: Use environment variables for credentials
  4. -
  5. HTTPS: Always use HTTPS in production environments
  6. -
  7. Token Expiration: Handle token expiration gracefully
  8. -
-

Error Handling

-
    -
  1. Specific Exceptions: Handle specific error types appropriately
  2. -
  3. Retry Logic: Implement exponential backoff for transient failures
  4. -
  5. Circuit Breakers: Use circuit breakers for resilient integrations
  6. -
  7. Logging: Log errors with appropriate context
  8. -
-

Performance Optimization

-
    -
  1. Connection Pooling: Reuse HTTP connections
  2. -
  3. Async Operations: Use asynchronous operations where possible
  4. -
  5. Batch Operations: Group related operations for efficiency
  6. -
  7. Caching: Cache frequently accessed data appropriately
  8. -
-

WebSocket Connections

-
    -
  1. Reconnection: Implement automatic reconnection with backoff
  2. -
  3. Event Filtering: Subscribe only to needed event types
  4. -
  5. Error Handling: Handle WebSocket errors gracefully
  6. -
  7. Resource Cleanup: Properly close WebSocket connections
  8. -
-

Testing

-
    -
  1. Unit Tests: Test SDK functionality with mocked responses
  2. -
  3. Integration Tests: Test against real API endpoints
  4. -
  5. Error Scenarios: Test error handling paths
  6. -
  7. Load Testing: Validate performance under load
  8. -
-

This comprehensive SDK documentation provides developers with everything needed to integrate with provisioning using their preferred programming -language, complete with examples, best practices, and detailed API references.

-

Integration Examples

-

This document provides comprehensive examples and patterns for integrating with provisioning APIs, including client libraries, SDKs, error handling -strategies, and performance optimization.

-

Overview

-

Provisioning offers multiple integration points:

-
    -
  • REST APIs for workflow management
  • -
  • WebSocket APIs for real-time monitoring
  • -
  • Configuration APIs for system setup
  • -
  • Extension APIs for custom providers and services
  • -
-

Complete Integration Examples

-

Python Integration

- -
import asyncio
-import json
-import logging
-import time
-import requests
-import websockets
-from typing import Dict, List, Optional, Callable
-from dataclasses import dataclass
-from enum import Enum
-
-class TaskStatus(Enum):
-    PENDING = "Pending"
-    RUNNING = "Running"
-    COMPLETED = "Completed"
-    FAILED = "Failed"
-    CANCELLED = "Cancelled"
-
-@dataclass
-class WorkflowTask:
-    id: str
-    name: str
-    status: TaskStatus
-    created_at: str
-    started_at: Optional[str] = None
-    completed_at: Optional[str] = None
-    output: Optional[str] = None
-    error: Optional[str] = None
-    progress: Optional[float] = None
-
-class ProvisioningAPIError(Exception):
-    """Base exception for provisioning API errors"""
-    pass
-
-class AuthenticationError(ProvisioningAPIError):
-    """Authentication failed"""
-    pass
-
-class ValidationError(ProvisioningAPIError):
-    """Request validation failed"""
-    pass
-
-class ProvisioningClient:
-    """
-    Complete Python client for provisioning
-
-    Features:
-    - REST API integration
-    - WebSocket support for real-time updates
-    - Automatic token refresh
-    - Retry logic with exponential backoff
-    - Comprehensive error handling
-    """
-
-    def __init__(self,
-                 base_url: str = "http://localhost:9090",
-                 auth_url: str = "http://localhost:8081",
-                 username: str = None,
-                 password: str = None,
-                 token: str = None):
-        self.base_url = base_url
-        self.auth_url = auth_url
-        self.username = username
-        self.password = password
-        self.token = token
-        self.session = requests.Session()
-        self.websocket = None
-        self.event_handlers = {}
-
-        # Setup logging
-        self.logger = logging.getLogger(__name__)
-
-        # Configure session with retries
-        from requests.adapters import HTTPAdapter
-        from urllib3.util.retry import Retry
-
-        retry_strategy = Retry(
-            total=3,
-            status_forcelist=[429, 500, 502, 503, 504],
-            method_whitelist=["HEAD", "GET", "OPTIONS"],
-            backoff_factor=1
-        )
-
-        adapter = HTTPAdapter(max_retries=retry_strategy)
-        self.session.mount("http://", adapter)
-        self.session.mount("https://", adapter)
-
-    async def authenticate(self) -> str:
-        """Authenticate and get JWT token"""
-        if self.token:
-            return self.token
-
-        if not self.username or not self.password:
-            raise AuthenticationError("Username and password required for authentication")
-
-        auth_data = {
-            "username": self.username,
-            "password": self.password
-        }
-
-        try:
-            response = requests.post(f"{self.auth_url}/auth/login", json=auth_data)
-            response.raise_for_status()
-
-            result = response.json()
-            if not result.get('success'):
-                raise AuthenticationError(result.get('error', 'Authentication failed'))
-
-            self.token = result['data']['token']
-            self.session.headers.update({
-                'Authorization': f'Bearer {self.token}'
-            })
-
-            self.logger.info("Authentication successful")
-            return self.token
-
-        except requests.RequestException as e:
-            raise AuthenticationError(f"Authentication request failed: {e}")
-
-    def _make_request(self, method: str, endpoint: str, **kwargs) -> Dict:
-        """Make authenticated HTTP request with error handling"""
-        if not self.token:
-            raise AuthenticationError("Not authenticated. Call authenticate() first.")
-
-        url = f"{self.base_url}{endpoint}"
-
-        try:
-            response = self.session.request(method, url, **kwargs)
-            response.raise_for_status()
-
-            result = response.json()
-            if not result.get('success'):
-                error_msg = result.get('error', 'Request failed')
-                if response.status_code == 400:
-                    raise ValidationError(error_msg)
-                else:
-                    raise ProvisioningAPIError(error_msg)
-
-            return result['data']
-
-        except requests.RequestException as e:
-            self.logger.error(f"Request failed: {method} {url} - {e}")
-            raise ProvisioningAPIError(f"Request failed: {e}")
-
-    # Workflow Management Methods
-
-    def create_server_workflow(self,
-                             infra: str,
-                             settings: str = "config.ncl",
-                             check_mode: bool = False,
-                             wait: bool = False) -> str:
-        """Create a server provisioning workflow"""
-        data = {
-            "infra": infra,
-            "settings": settings,
-            "check_mode": check_mode,
-            "wait": wait
-        }
-
-        task_id = self._make_request("POST", "/workflows/servers/create", json=data)
-        self.logger.info(f"Server workflow created: {task_id}")
-        return task_id
-
-    def create_taskserv_workflow(self,
-                               operation: str,
-                               taskserv: str,
-                               infra: str,
-                               settings: str = "config.ncl",
-                               check_mode: bool = False,
-                               wait: bool = False) -> str:
-        """Create a task service workflow"""
-        data = {
-            "operation": operation,
-            "taskserv": taskserv,
-            "infra": infra,
-            "settings": settings,
-            "check_mode": check_mode,
-            "wait": wait
-        }
-
-        task_id = self._make_request("POST", "/workflows/taskserv/create", json=data)
-        self.logger.info(f"Taskserv workflow created: {task_id}")
-        return task_id
-
-    def create_cluster_workflow(self,
-                              operation: str,
-                              cluster_type: str,
-                              infra: str,
-                              settings: str = "config.ncl",
-                              check_mode: bool = False,
-                              wait: bool = False) -> str:
-        """Create a cluster workflow"""
-        data = {
-            "operation": operation,
-            "cluster_type": cluster_type,
-            "infra": infra,
-            "settings": settings,
-            "check_mode": check_mode,
-            "wait": wait
-        }
-
-        task_id = self._make_request("POST", "/workflows/cluster/create", json=data)
-        self.logger.info(f"Cluster workflow created: {task_id}")
-        return task_id
-
-    def get_task_status(self, task_id: str) -> WorkflowTask:
-        """Get the status of a specific task"""
-        data = self._make_request("GET", f"/tasks/{task_id}")
-        return WorkflowTask(
-            id=data['id'],
-            name=data['name'],
-            status=TaskStatus(data['status']),
-            created_at=data['created_at'],
-            started_at=data.get('started_at'),
-            completed_at=data.get('completed_at'),
-            output=data.get('output'),
-            error=data.get('error'),
-            progress=data.get('progress')
-        )
-
-    def list_tasks(self, status_filter: Optional[str] = None) -> List[WorkflowTask]:
-        """List all tasks, optionally filtered by status"""
-        params = {}
-        if status_filter:
-            params['status'] = status_filter
-
-        data = self._make_request("GET", "/tasks", params=params)
-        return [
-            WorkflowTask(
-                id=task['id'],
-                name=task['name'],
-                status=TaskStatus(task['status']),
-                created_at=task['created_at'],
-                started_at=task.get('started_at'),
-                completed_at=task.get('completed_at'),
-                output=task.get('output'),
-                error=task.get('error')
-            )
-            for task in data
-        ]
-
-    def wait_for_task_completion(self,
-                               task_id: str,
-                               timeout: int = 300,
-                               poll_interval: int = 5) -> WorkflowTask:
-        """Wait for a task to complete"""
-        start_time = time.time()
-
-        while time.time() - start_time < timeout:
-            task = self.get_task_status(task_id)
-
-            if task.status in [TaskStatus.COMPLETED, TaskStatus.FAILED, TaskStatus.CANCELLED]:
-                self.logger.info(f"Task {task_id} finished with status: {task.status}")
-                return task
-
-            self.logger.debug(f"Task {task_id} status: {task.status}")
-            time.sleep(poll_interval)
-
-        raise TimeoutError(f"Task {task_id} did not complete within {timeout} seconds")
-
-    # Batch Operations
-
-    def execute_batch_operation(self, batch_config: Dict) -> Dict:
-        """Execute a batch operation"""
-        return self._make_request("POST", "/batch/execute", json=batch_config)
-
-    def get_batch_status(self, batch_id: str) -> Dict:
-        """Get batch operation status"""
-        return self._make_request("GET", f"/batch/operations/{batch_id}")
-
-    def cancel_batch_operation(self, batch_id: str) -> str:
-        """Cancel a running batch operation"""
-        return self._make_request("POST", f"/batch/operations/{batch_id}/cancel")
-
-    # System Health and Monitoring
-
-    def get_system_health(self) -> Dict:
-        """Get system health status"""
-        return self._make_request("GET", "/state/system/health")
-
-    def get_system_metrics(self) -> Dict:
-        """Get system metrics"""
-        return self._make_request("GET", "/state/system/metrics")
-
-    # WebSocket Integration
-
-    async def connect_websocket(self, event_types: List[str] = None):
-        """Connect to WebSocket for real-time updates"""
-        if not self.token:
-            await self.authenticate()
-
-        ws_url = f"ws://localhost:9090/ws?token={self.token}"
-        if event_types:
-            ws_url += f"&events={','.join(event_types)}"
-
-        try:
-            self.websocket = await websockets.connect(ws_url)
-            self.logger.info("WebSocket connected")
-
-            # Start listening for messages
-            asyncio.create_task(self._websocket_listener())
-
-        except Exception as e:
-            self.logger.error(f"WebSocket connection failed: {e}")
-            raise
-
-    async def _websocket_listener(self):
-        """Listen for WebSocket messages"""
-        try:
-            async for message in self.websocket:
-                try:
-                    data = json.loads(message)
-                    await self._handle_websocket_message(data)
-                except json.JSONDecodeError:
-                    self.logger.error(f"Invalid JSON received: {message}")
-        except Exception as e:
-            self.logger.error(f"WebSocket listener error: {e}")
-
-    async def _handle_websocket_message(self, data: Dict):
-        """Handle incoming WebSocket messages"""
-        event_type = data.get('event_type')
-        if event_type and event_type in self.event_handlers:
-            for handler in self.event_handlers[event_type]:
-                try:
-                    await handler(data)
-                except Exception as e:
-                    self.logger.error(f"Error in event handler for {event_type}: {e}")
-
-    def on_event(self, event_type: str, handler: Callable):
-        """Register an event handler"""
-        if event_type not in self.event_handlers:
-            self.event_handlers[event_type] = []
-        self.event_handlers[event_type].append(handler)
-
-    async def disconnect_websocket(self):
-        """Disconnect from WebSocket"""
-        if self.websocket:
-            await self.websocket.close()
-            self.websocket = None
-            self.logger.info("WebSocket disconnected")
-
-# Usage Example
-async def main():
-    # Initialize client
-    client = ProvisioningClient(
-        username="admin",
-        password="password"
-    )
-
-    try:
-        # Authenticate
-        await client.authenticate()
-
-        # Create a server workflow
-        task_id = client.create_server_workflow(
-            infra="production",
-            settings="prod-settings.ncl",
-            wait=False
-        )
-        print(f"Server workflow created: {task_id}")
-
-        # Set up WebSocket event handlers
-        async def on_task_update(event):
-            print(f"Task update: {event['data']['task_id']} -> {event['data']['status']}")
-
-        async def on_system_health(event):
-            print(f"System health: {event['data']['overall_status']}")
-
-        client.on_event('TaskStatusChanged', on_task_update)
-        client.on_event('SystemHealthUpdate', on_system_health)
-
-        # Connect to WebSocket
-        await client.connect_websocket(['TaskStatusChanged', 'SystemHealthUpdate'])
-
-        # Wait for task completion
-        final_task = client.wait_for_task_completion(task_id, timeout=600)
-        print(f"Task completed with status: {final_task.status}")
-
-        if final_task.status == TaskStatus.COMPLETED:
-            print(f"Output: {final_task.output}")
-        elif final_task.status == TaskStatus.FAILED:
-            print(f"Error: {final_task.error}")
-
-    except ProvisioningAPIError as e:
-        print(f"API Error: {e}")
-    except Exception as e:
-        print(f"Unexpected error: {e}")
-    finally:
-        await client.disconnect_websocket()
-
-if __name__ == "__main__":
-    asyncio.run(main())
-
-

Node.js/JavaScript Integration

-

Complete JavaScript/TypeScript Client

-
import axios, { AxiosInstance, AxiosResponse } from 'axios';
-import WebSocket from 'ws';
-import { EventEmitter } from 'events';
-
-interface Task {
-  id: string;
-  name: string;
-  status: 'Pending' | 'Running' | 'Completed' | 'Failed' | 'Cancelled';
-  created_at: string;
-  started_at?: string;
-  completed_at?: string;
-  output?: string;
-  error?: string;
-  progress?: number;
-}
-
-interface BatchConfig {
-  name: string;
-  version: string;
-  storage_backend: string;
-  parallel_limit: number;
-  rollback_enabled: boolean;
-  operations: Array<{
-    id: string;
-    type: string;
-    provider: string;
-    dependencies: string[];
-    [key: string]: any;
-  }>;
-}
-
-interface WebSocketEvent {
-  event_type: string;
-  timestamp: string;
-  data: any;
-  metadata: Record<string, any>;
-}
-
-class ProvisioningClient extends EventEmitter {
-  private httpClient: AxiosInstance;
-  private authClient: AxiosInstance;
-  private websocket?: WebSocket;
-  private token?: string;
-  private reconnectAttempts = 0;
-  private maxReconnectAttempts = 10;
-  private reconnectInterval = 5000;
-
-  constructor(
-    private baseUrl = 'http://localhost:9090',
-    private authUrl = 'http://localhost:8081',
-    private username?: string,
-    private password?: string,
-    token?: string
-  ) {
-    super();
-
-    this.token = token;
-
-    // Setup HTTP clients
-    this.httpClient = axios.create({
-      baseURL: baseUrl,
-      timeout: 30000,
-    });
-
-    this.authClient = axios.create({
-      baseURL: authUrl,
-      timeout: 10000,
-    });
-
-    // Setup request interceptors
-    this.setupInterceptors();
-  }
-
-  private setupInterceptors(): void {
-    // Request interceptor to add auth token
-    this.httpClient.interceptors.request.use((config) => {
-      if (this.token) {
-        config.headers.Authorization = `Bearer ${this.token}`;
-      }
-      return config;
-    });
-
-    // Response interceptor for error handling
-    this.httpClient.interceptors.response.use(
-      (response) => response,
-      async (error) => {
-        if (error.response?.status === 401 && this.username && this.password) {
-          // Token expired, try to refresh
-          try {
-            await this.authenticate();
-            // Retry the original request
-            const originalRequest = error.config;
-            originalRequest.headers.Authorization = `Bearer ${this.token}`;
-            return this.httpClient.request(originalRequest);
-          } catch (authError) {
-            this.emit('authError', authError);
-            throw error;
-          }
-        }
-        throw error;
-      }
-    );
-  }
-
-  async authenticate(): Promise<string> {
-    if (this.token) {
-      return this.token;
-    }
-
-    if (!this.username || !this.password) {
-      throw new Error('Username and password required for authentication');
-    }
-
-    try {
-      const response = await this.authClient.post('/auth/login', {
-        username: this.username,
-        password: this.password,
-      });
-
-      const result = response.data;
-      if (!result.success) {
-        throw new Error(result.error || 'Authentication failed');
-      }
-
-      this.token = result.data.token;
-      console.log('Authentication successful');
-      this.emit('authenticated', this.token);
-
-      return this.token;
-    } catch (error) {
-      console.error('Authentication failed:', error);
-      throw new Error(`Authentication failed: ${error.message}`);
-    }
-  }
-
-  private async makeRequest<T>(method: string, endpoint: string, data?: any): Promise<T> {
-    try {
-      const response: AxiosResponse = await this.httpClient.request({
-        method,
-        url: endpoint,
-        data,
-      });
-
-      const result = response.data;
-      if (!result.success) {
-        throw new Error(result.error || 'Request failed');
-      }
-
-      return result.data;
-    } catch (error) {
-      console.error(`Request failed: ${method} ${endpoint}`, error);
-      throw error;
-    }
-  }
-
-  // Workflow Management Methods
-
-  async createServerWorkflow(config: {
-    infra: string;
-    settings?: string;
-    check_mode?: boolean;
-    wait?: boolean;
-  }): Promise<string> {
-    const data = {
-      infra: config.infra,
-      settings: config.settings || 'config.ncl',
-      check_mode: config.check_mode || false,
-      wait: config.wait || false,
-    };
-
-    const taskId = await this.makeRequest<string>('POST', '/workflows/servers/create', data);
-    console.log(`Server workflow created: ${taskId}`);
-    this.emit('workflowCreated', { type: 'server', taskId });
-    return taskId;
-  }
-
-  async createTaskservWorkflow(config: {
-    operation: string;
-    taskserv: string;
-    infra: string;
-    settings?: string;
-    check_mode?: boolean;
-    wait?: boolean;
-  }): Promise<string> {
-    const data = {
-      operation: config.operation,
-      taskserv: config.taskserv,
-      infra: config.infra,
-      settings: config.settings || 'config.ncl',
-      check_mode: config.check_mode || false,
-      wait: config.wait || false,
-    };
-
-    const taskId = await this.makeRequest<string>('POST', '/workflows/taskserv/create', data);
-    console.log(`Taskserv workflow created: ${taskId}`);
-    this.emit('workflowCreated', { type: 'taskserv', taskId });
-    return taskId;
-  }
-
-  async createClusterWorkflow(config: {
-    operation: string;
-    cluster_type: string;
-    infra: string;
-    settings?: string;
-    check_mode?: boolean;
-    wait?: boolean;
-  }): Promise<string> {
-    const data = {
-      operation: config.operation,
-      cluster_type: config.cluster_type,
-      infra: config.infra,
-      settings: config.settings || 'config.ncl',
-      check_mode: config.check_mode || false,
-      wait: config.wait || false,
-    };
-
-    const taskId = await this.makeRequest<string>('POST', '/workflows/cluster/create', data);
-    console.log(`Cluster workflow created: ${taskId}`);
-    this.emit('workflowCreated', { type: 'cluster', taskId });
-    return taskId;
-  }
-
-  async getTaskStatus(taskId: string): Promise<Task> {
-    return this.makeRequest<Task>('GET', `/tasks/${taskId}`);
-  }
-
-  async listTasks(statusFilter?: string): Promise<Task[]> {
-    const params = statusFilter ? `?status=${statusFilter}` : '';
-    return this.makeRequest<Task[]>('GET', `/tasks${params}`);
-  }
-
-  async waitForTaskCompletion(
-    taskId: string,
-    timeout = 300000, // 5 minutes
-    pollInterval = 5000 // 5 seconds
-  ): Promise<Task> {
-    return new Promise((resolve, reject) => {
-      const startTime = Date.now();
-
-      const poll = async () => {
-        try {
-          const task = await this.getTaskStatus(taskId);
-
-          if (['Completed', 'Failed', 'Cancelled'].includes(task.status)) {
-            console.log(`Task ${taskId} finished with status: ${task.status}`);
-            resolve(task);
-            return;
-          }
-
-          if (Date.now() - startTime > timeout) {
-            reject(new Error(`Task ${taskId} did not complete within ${timeout}ms`));
-            return;
-          }
-
-          console.log(`Task ${taskId} status: ${task.status}`);
-          this.emit('taskProgress', task);
-          setTimeout(poll, pollInterval);
-        } catch (error) {
-          reject(error);
-        }
-      };
-
-      poll();
-    });
-  }
-
-  // Batch Operations
-
-  async executeBatchOperation(batchConfig: BatchConfig): Promise<any> {
-    const result = await this.makeRequest('POST', '/batch/execute', batchConfig);
-    console.log(`Batch operation started: ${result.batch_id}`);
-    this.emit('batchStarted', result);
-    return result;
-  }
-
-  async getBatchStatus(batchId: string): Promise<any> {
-    return this.makeRequest('GET', `/batch/operations/${batchId}`);
-  }
-
-  async cancelBatchOperation(batchId: string): Promise<string> {
-    return this.makeRequest('POST', `/batch/operations/${batchId}/cancel`);
-  }
-
-  // System Monitoring
-
-  async getSystemHealth(): Promise<any> {
-    return this.makeRequest('GET', '/state/system/health');
-  }
-
-  async getSystemMetrics(): Promise<any> {
-    return this.makeRequest('GET', '/state/system/metrics');
-  }
-
-  // WebSocket Integration
-
-  async connectWebSocket(eventTypes?: string[]): Promise<void> {
-    if (!this.token) {
-      await this.authenticate();
-    }
-
-    let wsUrl = `ws://localhost:9090/ws?token=${this.token}`;
-    if (eventTypes && eventTypes.length > 0) {
-      wsUrl += `&events=${eventTypes.join(',')}`;
-    }
-
-    return new Promise((resolve, reject) => {
-      this.websocket = new WebSocket(wsUrl);
-
-      this.websocket.on('open', () => {
-        console.log('WebSocket connected');
-        this.reconnectAttempts = 0;
-        this.emit('websocketConnected');
-        resolve();
-      });
-
-      this.websocket.on('message', (data: WebSocket.Data) => {
-        try {
-          const event: WebSocketEvent = JSON.parse(data.toString());
-          this.handleWebSocketMessage(event);
-        } catch (error) {
-          console.error('Failed to parse WebSocket message:', error);
-        }
-      });
-
-      this.websocket.on('close', (code: number, reason: string) => {
-        console.log(`WebSocket disconnected: ${code} - ${reason}`);
-        this.emit('websocketDisconnected', { code, reason });
-
-        if (this.reconnectAttempts < this.maxReconnectAttempts) {
-          setTimeout(() => {
-            this.reconnectAttempts++;
-            console.log(`Reconnecting... (${this.reconnectAttempts}/${this.maxReconnectAttempts})`);
-            this.connectWebSocket(eventTypes);
-          }, this.reconnectInterval);
-        }
-      });
-
-      this.websocket.on('error', (error: Error) => {
-        console.error('WebSocket error:', error);
-        this.emit('websocketError', error);
-        reject(error);
-      });
-    });
-  }
-
-  private handleWebSocketMessage(event: WebSocketEvent): void {
-    console.log(`WebSocket event: ${event.event_type}`);
-
-    // Emit specific event
-    this.emit(event.event_type, event);
-
-    // Emit general event
-    this.emit('websocketMessage', event);
-
-    // Handle specific event types
-    switch (event.event_type) {
-      case 'TaskStatusChanged':
-        this.emit('taskStatusChanged', event.data);
-        break;
-      case 'WorkflowProgressUpdate':
-        this.emit('workflowProgress', event.data);
-        break;
-      case 'SystemHealthUpdate':
-        this.emit('systemHealthUpdate', event.data);
-        break;
-      case 'BatchOperationUpdate':
-        this.emit('batchUpdate', event.data);
-        break;
-    }
-  }
-
-  disconnectWebSocket(): void {
-    if (this.websocket) {
-      this.websocket.close();
-      this.websocket = undefined;
-      console.log('WebSocket disconnected');
-    }
-  }
-
-  // Utility Methods
-
-  async healthCheck(): Promise<boolean> {
-    try {
-      const response = await this.httpClient.get('/health');
-      return response.data.success;
-    } catch (error) {
-      return false;
-    }
-  }
-}
-
-// Usage Example
-async function main() {
-  const client = new ProvisioningClient(
-    'http://localhost:9090',
-    'http://localhost:8081',
-    'admin',
-    'password'
-  );
-
-  try {
-    // Authenticate
-    await client.authenticate();
-
-    // Set up event listeners
-    client.on('taskStatusChanged', (task) => {
-      console.log(`Task ${task.task_id} status changed to: ${task.status}`);
-    });
-
-    client.on('workflowProgress', (progress) => {
-      console.log(`Workflow progress: ${progress.progress}% - ${progress.current_step}`);
-    });
-
-    client.on('systemHealthUpdate', (health) => {
-      console.log(`System health: ${health.overall_status}`);
-    });
-
-    // Connect WebSocket
-    await client.connectWebSocket(['TaskStatusChanged', 'WorkflowProgressUpdate', 'SystemHealthUpdate']);
-
-    // Create workflows
-    const serverTaskId = await client.createServerWorkflow({
-      infra: 'production',
-      settings: 'prod-settings.ncl',
-    });
-
-    const taskservTaskId = await client.createTaskservWorkflow({
-      operation: 'create',
-      taskserv: 'kubernetes',
-      infra: 'production',
-    });
-
-    // Wait for completion
-    const [serverTask, taskservTask] = await Promise.all([
-      client.waitForTaskCompletion(serverTaskId),
-      client.waitForTaskCompletion(taskservTaskId),
-    ]);
-
-    console.log('All workflows completed');
-    console.log(`Server task: ${serverTask.status}`);
-    console.log(`Taskserv task: ${taskservTask.status}`);
-
-    // Create batch operation
-    const batchConfig: BatchConfig = {
-      name: 'test_deployment',
-      version: '1.0.0',
-      storage_backend: 'filesystem',
-      parallel_limit: 3,
-      rollback_enabled: true,
-      operations: [
-        {
-          id: 'servers',
-          type: 'server_batch',
-          provider: 'upcloud',
-          dependencies: [],
-          server_configs: [
-            { name: 'web-01', plan: '1xCPU-2 GB', zone: 'de-fra1' },
-            { name: 'web-02', plan: '1xCPU-2 GB', zone: 'de-fra1' },
-          ],
-        },
-        {
-          id: 'taskservs',
-          type: 'taskserv_batch',
-          provider: 'upcloud',
-          dependencies: ['servers'],
-          taskservs: ['kubernetes', 'cilium'],
-        },
-      ],
-    };
-
-    const batchResult = await client.executeBatchOperation(batchConfig);
-    console.log(`Batch operation started: ${batchResult.batch_id}`);
-
-    // Monitor batch operation
-    const monitorBatch = setInterval(async () => {
-      try {
-        const batchStatus = await client.getBatchStatus(batchResult.batch_id);
-        console.log(`Batch status: ${batchStatus.status} - ${batchStatus.progress}%`);
-
-        if (['Completed', 'Failed', 'Cancelled'].includes(batchStatus.status)) {
-          clearInterval(monitorBatch);
-          console.log(`Batch operation finished: ${batchStatus.status}`);
-        }
-      } catch (error) {
-        console.error('Error checking batch status:', error);
-        clearInterval(monitorBatch);
-      }
-    }, 10000);
-
-  } catch (error) {
-    console.error('Integration example failed:', error);
-  } finally {
-    client.disconnectWebSocket();
-  }
-}
-
-// Run example
-if (require.main === module) {
-  main().catch(console.error);
-}
-
-export { ProvisioningClient, Task, BatchConfig };
-
-

Error Handling Strategies

-

Comprehensive Error Handling

-
class ProvisioningErrorHandler:
-    """Centralized error handling for provisioning operations"""
-
-    def __init__(self, client: ProvisioningClient):
-        self.client = client
-        self.retry_strategies = {
-            'network_error': self._exponential_backoff,
-            'rate_limit': self._rate_limit_backoff,
-            'server_error': self._server_error_strategy,
-            'auth_error': self._auth_error_strategy,
-        }
-
-    async def execute_with_retry(self, operation: Callable, *args, **kwargs):
-        """Execute operation with intelligent retry logic"""
-        max_attempts = 3
-        attempt = 0
-
-        while attempt < max_attempts:
-            try:
-                return await operation(*args, **kwargs)
-            except Exception as e:
-                attempt += 1
-                error_type = self._classify_error(e)
-
-                if attempt >= max_attempts:
-                    self._log_final_failure(operation.__name__, e, attempt)
-                    raise
-
-                retry_strategy = self.retry_strategies.get(error_type, self._default_retry)
-                wait_time = retry_strategy(attempt, e)
-
-                self._log_retry_attempt(operation.__name__, e, attempt, wait_time)
-                await asyncio.sleep(wait_time)
-
-    def _classify_error(self, error: Exception) -> str:
-        """Classify error type for appropriate retry strategy"""
-        if isinstance(error, requests.ConnectionError):
-            return 'network_error'
-        elif isinstance(error, requests.HTTPError):
-            if error.response.status_code == 429:
-                return 'rate_limit'
-            elif 500 <= error.response.status_code < 600:
-                return 'server_error'
-            elif error.response.status_code == 401:
-                return 'auth_error'
-        return 'unknown'
-
-    def _exponential_backoff(self, attempt: int, error: Exception) -> float:
-        """Exponential backoff for network errors"""
-        return min(2 ** attempt + random.uniform(0, 1), 60)
-
-    def _rate_limit_backoff(self, attempt: int, error: Exception) -> float:
-        """Handle rate limiting with appropriate backoff"""
-        retry_after = getattr(error.response, 'headers', {}).get('Retry-After')
-        if retry_after:
-            return float(retry_after)
-        return 60  # Default to 60 seconds
-
-    def _server_error_strategy(self, attempt: int, error: Exception) -> float:
-        """Handle server errors"""
-        return min(10 * attempt, 60)
-
-    def _auth_error_strategy(self, attempt: int, error: Exception) -> float:
-        """Handle authentication errors"""
-        # Re-authenticate before retry
-        asyncio.create_task(self.client.authenticate())
-        return 5
-
-    def _default_retry(self, attempt: int, error: Exception) -> float:
-        """Default retry strategy"""
-        return min(5 * attempt, 30)
-
-# Usage example
-async def robust_workflow_execution():
-    client = ProvisioningClient()
-    handler = ProvisioningErrorHandler(client)
-
-    try:
-        # Execute with automatic retry
-        task_id = await handler.execute_with_retry(
-            client.create_server_workflow,
-            infra="production",
-            settings="config.ncl"
-        )
-
-        # Wait for completion with retry
-        task = await handler.execute_with_retry(
-            client.wait_for_task_completion,
-            task_id,
-            timeout=600
-        )
-
-        return task
-    except Exception as e:
-        # Log detailed error information
-        logger.error(f"Workflow execution failed after all retries: {e}")
-        # Implement fallback strategy
-        return await fallback_workflow_strategy()
-
-

Circuit Breaker Pattern

-
class CircuitBreaker {
-  private failures = 0;
-  private nextAttempt = Date.now();
-  private state: 'CLOSED' | 'OPEN' | 'HALF_OPEN' = 'CLOSED';
-
-  constructor(
-    private threshold = 5,
-    private timeout = 60000, // 1 minute
-    private monitoringPeriod = 10000 // 10 seconds
-  ) {}
-
-  async execute<T>(operation: () => Promise<T>): Promise<T> {
-    if (this.state === 'OPEN') {
-      if (Date.now() < this.nextAttempt) {
-        throw new Error('Circuit breaker is OPEN');
-      }
-      this.state = 'HALF_OPEN';
-    }
-
-    try {
-      const result = await operation();
-      this.onSuccess();
-      return result;
-    } catch (error) {
-      this.onFailure();
-      throw error;
-    }
-  }
-
-  private onSuccess(): void {
-    this.failures = 0;
-    this.state = 'CLOSED';
-  }
-
-  private onFailure(): void {
-    this.failures++;
-    if (this.failures >= this.threshold) {
-      this.state = 'OPEN';
-      this.nextAttempt = Date.now() + this.timeout;
-    }
-  }
-
-  getState(): string {
-    return this.state;
-  }
-
-  getFailures(): number {
-    return this.failures;
-  }
-}
-
-// Usage with ProvisioningClient
-class ResilientProvisioningClient {
-  private circuitBreaker = new CircuitBreaker();
-
-  constructor(private client: ProvisioningClient) {}
-
-  async createServerWorkflow(config: any): Promise<string> {
-    return this.circuitBreaker.execute(async () => {
-      return this.client.createServerWorkflow(config);
-    });
-  }
-
-  async getTaskStatus(taskId: string): Promise<Task> {
-    return this.circuitBreaker.execute(async () => {
-      return this.client.getTaskStatus(taskId);
-    });
-  }
-}
-
-

Performance Optimization

-

Connection Pooling and Caching

-
import asyncio
-import aiohttp
-from cachetools import TTLCache
-import time
-
-class OptimizedProvisioningClient:
-    """High-performance client with connection pooling and caching"""
-
-    def __init__(self, base_url: str, max_connections: int = 100):
-        self.base_url = base_url
-        self.session = None
-        self.cache = TTLCache(maxsize=1000, ttl=300)  # 5-minute cache
-        self.max_connections = max_connections
-
-    async def __aenter__(self):
-        """Async context manager entry"""
-        connector = aiohttp.TCPConnector(
-            limit=self.max_connections,
-            limit_per_host=20,
-            keepalive_timeout=30,
-            enable_cleanup_closed=True
-        )
-
-        timeout = aiohttp.ClientTimeout(total=30, connect=5)
-
-        self.session = aiohttp.ClientSession(
-            connector=connector,
-            timeout=timeout,
-            headers={'User-Agent': 'ProvisioningClient/2.0.0'}
-        )
-
-        return self
-
-    async def __aexit__(self, exc_type, exc_val, exc_tb):
-        """Async context manager exit"""
-        if self.session:
-            await self.session.close()
-
-    async def get_task_status_cached(self, task_id: str) -> dict:
-        """Get task status with caching"""
-        cache_key = f"task_status:{task_id}"
-
-        # Check cache first
-        if cache_key in self.cache:
-            return self.cache[cache_key]
-
-        # Fetch from API
-        result = await self._make_request('GET', f'/tasks/{task_id}')
-
-        # Cache completed tasks for longer
-        if result.get('status') in ['Completed', 'Failed', 'Cancelled']:
-            self.cache[cache_key] = result
-
-        return result
-
-    async def batch_get_task_status(self, task_ids: list) -> dict:
-        """Get multiple task statuses in parallel"""
-        tasks = [self.get_task_status_cached(task_id) for task_id in task_ids]
-        results = await asyncio.gather(*tasks, return_exceptions=True)
-
-        return {
-            task_id: result for task_id, result in zip(task_ids, results)
-            if not isinstance(result, Exception)
-        }
-
-    async def _make_request(self, method: str, endpoint: str, **kwargs):
-        """Optimized HTTP request method"""
-        url = f"{self.base_url}{endpoint}"
-
-        start_time = time.time()
-        async with self.session.request(method, url, **kwargs) as response:
-            request_time = time.time() - start_time
-
-            # Log slow requests
-            if request_time > 5.0:
-                print(f"Slow request: {method} {endpoint} took {request_time:.2f}s")
-
-            response.raise_for_status()
-            result = await response.json()
-
-            if not result.get('success'):
-                raise Exception(result.get('error', 'Request failed'))
-
-            return result['data']
-
-# Usage example
-async def high_performance_workflow():
-    async with OptimizedProvisioningClient('http://localhost:9090') as client:
-        # Create multiple workflows in parallel
-        workflow_tasks = [
-            client.create_server_workflow({'infra': f'server-{i}'})
-            for i in range(10)
-        ]
-
-        task_ids = await asyncio.gather(*workflow_tasks)
-        print(f"Created {len(task_ids)} workflows")
-
-        # Monitor all tasks efficiently
-        while True:
-            # Batch status check
-            statuses = await client.batch_get_task_status(task_ids)
-
-            completed = [
-                task_id for task_id, status in statuses.items()
-                if status.get('status') in ['Completed', 'Failed', 'Cancelled']
-            ]
-
-            print(f"Completed: {len(completed)}/{len(task_ids)}")
-
-            if len(completed) == len(task_ids):
-                break
-
-            await asyncio.sleep(10)
-
-

WebSocket Connection Pooling

-
class WebSocketPool {
-  constructor(maxConnections = 5) {
-    this.maxConnections = maxConnections;
-    this.connections = new Map();
-    this.connectionQueue = [];
-  }
-
-  async getConnection(token, eventTypes = []) {
-    const key = `${token}:${eventTypes.sort().join(',')}`;
-
-    if (this.connections.has(key)) {
-      return this.connections.get(key);
-    }
-
-    if (this.connections.size >= this.maxConnections) {
-      // Wait for available connection
-      await this.waitForAvailableSlot();
-    }
-
-    const connection = await this.createConnection(token, eventTypes);
-    this.connections.set(key, connection);
-
-    return connection;
-  }
-
-  async createConnection(token, eventTypes) {
-    const ws = new WebSocket(`ws://localhost:9090/ws?token=${token}&events=${eventTypes.join(',')}`);
-
-    return new Promise((resolve, reject) => {
-      ws.onopen = () => resolve(ws);
-      ws.onerror = (error) => reject(error);
-
-      ws.onclose = () => {
-        // Remove from pool when closed
-        for (const [key, conn] of this.connections.entries()) {
-          if (conn === ws) {
-            this.connections.delete(key);
-            break;
-          }
-        }
-      };
-    });
-  }
-
-  async waitForAvailableSlot() {
-    return new Promise((resolve) => {
-      this.connectionQueue.push(resolve);
-    });
-  }
-
-  releaseConnection(ws) {
-    if (this.connectionQueue.length > 0) {
-      const waitingResolver = this.connectionQueue.shift();
-      waitingResolver();
-    }
-  }
-}
-
-

SDK Documentation

-

Python SDK

-

The Python SDK provides a comprehensive interface for provisioning:

-

Installation

-
pip install provisioning-client
-
-

Quick Start

-
from provisioning_client import ProvisioningClient
-
-# Initialize client
-client = ProvisioningClient(
-    base_url="http://localhost:9090",
-    username="admin",
-    password="password"
-)
-
-# Create workflow
-task_id = await client.create_server_workflow(
-    infra="production",
-    settings="config.ncl"
-)
-
-# Wait for completion
-task = await client.wait_for_task_completion(task_id)
-print(f"Workflow completed: {task.status}")
-
-

Advanced Usage

-
# Use with async context manager
-async with ProvisioningClient() as client:
-    # Batch operations
-    batch_config = {
-        "name": "deployment",
-        "operations": [...]
-    }
-
-    batch_result = await client.execute_batch_operation(batch_config)
-
-    # Real-time monitoring
-    await client.connect_websocket(['TaskStatusChanged'])
-
-    client.on_event('TaskStatusChanged', handle_task_update)
-
-

JavaScript/TypeScript SDK

-

Installation

-
npm install @provisioning/client
+# Customization patterns
+provisioning guide customize
 
-

Usage

-
import { ProvisioningClient } from '@provisioning/client';
-
-const client = new ProvisioningClient({
-  baseUrl: 'http://localhost:9090',
-  username: 'admin',
-  password: 'password'
-});
-
-// Create workflow
-const taskId = await client.createServerWorkflow({
-  infra: 'production',
-  settings: 'config.ncl'
-});
+

Troubleshooting Quick Issues

+

Server creation fails

+
# Check provider connectivity
+provisioning providers
 
-// Monitor progress
-client.on('workflowProgress', (progress) => {
-  console.log(`Progress: ${progress.progress}%`);
-});
+# Validate credentials
+provisioning validate config
 
-await client.connectWebSocket();
+# Enable debug mode
+provisioning --debug server create --infra demo-server
 
-

Common Integration Patterns

-

Workflow Orchestration Pipeline

-
class WorkflowPipeline:
-    """Orchestrate complex multi-step workflows"""
-
-    def __init__(self, client: ProvisioningClient):
-        self.client = client
-        self.steps = []
-
-    def add_step(self, name: str, operation: Callable, dependencies: list = None):
-        """Add a step to the pipeline"""
-        self.steps.append({
-            'name': name,
-            'operation': operation,
-            'dependencies': dependencies or [],
-            'status': 'pending',
-            'result': None
-        })
-
-    async def execute(self):
-        """Execute the pipeline"""
-        completed_steps = set()
-
-        while len(completed_steps) < len(self.steps):
-            # Find steps ready to execute
-            ready_steps = [
-                step for step in self.steps
-                if (step['status'] == 'pending' and
-                    all(dep in completed_steps for dep in step['dependencies']))
-            ]
-
-            if not ready_steps:
-                raise Exception("Pipeline deadlock detected")
-
-            # Execute ready steps in parallel
-            tasks = []
-            for step in ready_steps:
-                step['status'] = 'running'
-                tasks.append(self._execute_step(step))
-
-            # Wait for completion
-            results = await asyncio.gather(*tasks, return_exceptions=True)
-
-            for step, result in zip(ready_steps, results):
-                if isinstance(result, Exception):
-                    step['status'] = 'failed'
-                    step['error'] = str(result)
-                    raise Exception(f"Step {step['name']} failed: {result}")
-                else:
-                    step['status'] = 'completed'
-                    step['result'] = result
-                    completed_steps.add(step['name'])
-
-    async def _execute_step(self, step):
-        """Execute a single step"""
-        try:
-            return await step['operation']()
-        except Exception as e:
-            print(f"Step {step['name']} failed: {e}")
-            raise
-
-# Usage example
-async def complex_deployment():
-    client = ProvisioningClient()
-    pipeline = WorkflowPipeline(client)
-
-    # Define deployment steps
-    pipeline.add_step('servers', lambda: client.create_server_workflow({
-        'infra': 'production'
-    }))
-
-    pipeline.add_step('kubernetes', lambda: client.create_taskserv_workflow({
-        'operation': 'create',
-        'taskserv': 'kubernetes',
-        'infra': 'production'
-    }), dependencies=['servers'])
+

Task service installation fails

+
# Check server connectivity
+provisioning server ssh web-01
 
-    pipeline.add_step('cilium', lambda: client.create_taskserv_workflow({
-        'operation': 'create',
-        'taskserv': 'cilium',
-        'infra': 'production'
-    }), dependencies=['kubernetes'])
+# Verify dependencies
+provisioning taskserv check-deps containerd
 
-    # Execute pipeline
-    await pipeline.execute()
-    print("Deployment pipeline completed successfully")
+# Retry installation
+provisioning taskserv create containerd --infra demo-server --force
 
-

Event-Driven Architecture

-
class EventDrivenWorkflowManager {
-  constructor(client) {
-    this.client = client;
-    this.workflows = new Map();
-    this.setupEventHandlers();
-  }
-
-  setupEventHandlers() {
-    this.client.on('TaskStatusChanged', this.handleTaskStatusChange.bind(this));
-    this.client.on('WorkflowProgressUpdate', this.handleProgressUpdate.bind(this));
-    this.client.on('SystemHealthUpdate', this.handleHealthUpdate.bind(this));
-  }
-
-  async createWorkflow(config) {
-    const workflowId = generateUUID();
-    const workflow = {
-      id: workflowId,
-      config,
-      tasks: [],
-      status: 'pending',
-      progress: 0,
-      events: []
-    };
-
-    this.workflows.set(workflowId, workflow);
-
-    // Start workflow execution
-    await this.executeWorkflow(workflow);
-
-    return workflowId;
-  }
-
-  async executeWorkflow(workflow) {
-    try {
-      workflow.status = 'running';
-
-      // Create initial tasks based on configuration
-      const taskId = await this.client.createServerWorkflow(workflow.config);
-      workflow.tasks.push({
-        id: taskId,
-        type: 'server_creation',
-        status: 'pending'
-      });
-
-      this.emit('workflowStarted', { workflowId: workflow.id, taskId });
-
-    } catch (error) {
-      workflow.status = 'failed';
-      workflow.error = error.message;
-      this.emit('workflowFailed', { workflowId: workflow.id, error });
-    }
-  }
-
-  handleTaskStatusChange(event) {
-    // Find workflows containing this task
-    for (const [workflowId, workflow] of this.workflows) {
-      const task = workflow.tasks.find(t => t.id === event.data.task_id);
-      if (task) {
-        task.status = event.data.status;
-        this.updateWorkflowProgress(workflow);
-
-        // Trigger next steps based on task completion
-        if (event.data.status === 'Completed') {
-          this.triggerNextSteps(workflow, task);
-        }
-      }
-    }
-  }
-
-  updateWorkflowProgress(workflow) {
-    const completedTasks = workflow.tasks.filter(t =>
-      ['Completed', 'Failed'].includes(t.status)
-    ).length;
-
-    workflow.progress = (completedTasks / workflow.tasks.length) * 100;
-
-    if (completedTasks === workflow.tasks.length) {
-      const failedTasks = workflow.tasks.filter(t => t.status === 'Failed');
-      workflow.status = failedTasks.length > 0 ? 'failed' : 'completed';
-
-      this.emit('workflowCompleted', {
-        workflowId: workflow.id,
-        status: workflow.status
-      });
-    }
-  }
-
-  async triggerNextSteps(workflow, completedTask) {
-    // Define workflow dependencies and next steps
-    const nextSteps = this.getNextSteps(workflow, completedTask);
+

Configuration validation errors

+
# Check Nickel syntax
+nickel typecheck infra/demo-server.ncl
 
-    for (const nextStep of nextSteps) {
-      try {
-        const taskId = await this.executeWorkflowStep(nextStep);
-        workflow.tasks.push({
-          id: taskId,
-          type: nextStep.type,
-          status: 'pending',
-          dependencies: [completedTask.id]
-        });
-      } catch (error) {
-        console.error(`Failed to trigger next step: ${error.message}`);
-      }
-    }
-  }
+# Show detailed validation errors
+provisioning validate config --verbose
 
-  getNextSteps(workflow, completedTask) {
-    // Define workflow logic based on completed task type
-    switch (completedTask.type) {
-      case 'server_creation':
-        return [
-          { type: 'kubernetes_installation', taskserv: 'kubernetes' },
-          { type: 'monitoring_setup', taskserv: 'prometheus' }
-        ];
-      case 'kubernetes_installation':
-        return [
-          { type: 'networking_setup', taskserv: 'cilium' }
-        ];
-      default:
-        return [];
-    }
-  }
-}
+# View configuration
+provisioning config show
 
-

This comprehensive integration documentation provides developers with everything needed to successfully integrate with provisioning, including -complete client implementations, error handling strategies, performance optimizations, and common integration patterns.

-

Provider API Reference

-

API documentation for creating and using infrastructure providers.

-

Overview

-

Providers handle cloud-specific operations and resource provisioning. The provisioning platform supports multiple cloud providers through a unified API.

-

Supported Providers

-
    -
  • UpCloud - European cloud provider
  • -
  • AWS - Amazon Web Services
  • -
  • Local - Local development environment
  • -
-

Provider Interface

-

All providers must implement the following interface:

-

Required Functions

-
# Provider initialization
-export def init [] -> record { ... }
+

Reference

+

Essential Commands

+
# Workspace management
+provisioning workspace init <name>
+provisioning workspace list
+provisioning workspace switch <name>
 
 # Server operations
-export def create-servers [plan: record] -> list { ... }
-export def delete-servers [ids: list] -> bool { ... }
-export def list-servers [] -> table { ... }
-
-# Resource information
-export def get-server-plans [] -> table { ... }
-export def get-regions [] -> list { ... }
-export def get-pricing [plan: string] -> record { ... }
-
-

Provider Configuration

-

Each provider requires configuration in Nickel format:

-
# Example: UpCloud provider configuration
-{
-  provider = {
-    name = "upcloud",
-    type = "cloud",
-    enabled = true,
-    config = {
-      username = "{{env.UPCLOUD_USERNAME}}",
-      password = "{{env.UPCLOUD_PASSWORD}}",
-      default_zone = "de-fra1",
-    },
-  }
-}
-
-

Creating a Custom Provider

-

1. Directory Structure

-
provisioning/extensions/providers/my-provider/
-├── nulib/
-│   └── my_provider.nu          # Provider implementation
-├── schemas/
-│   ├── main.ncl                # Nickel schema
-│   └── defaults.ncl            # Default configuration
-└── README.md                   # Provider documentation
-
-

2. Implementation Template

-
# my_provider.nu
-export def init [] {
-    {
-        name: "my-provider"
-        type: "cloud"
-        ready: true
-    }
-}
-
-export def create-servers [plan: record] {
-    # Implementation here
-    []
-}
-
-export def list-servers [] {
-    # Implementation here
-    []
-}
-
-# ... other required functions
-
-

3. Nickel Schema

-
# main.ncl
-{
-  MyProvider = {
-    # My custom provider schema
-    name | String = "my-provider",
-    type | String | "cloud" | "local" = "cloud",
-    config | MyProviderConfig,
-  },
-
-  MyProviderConfig = {
-    api_key | String,
-    region | String = "us-east-1",
-  },
-}
-
-

Provider Discovery

-

Providers are automatically discovered from:

-
    -
  • provisioning/extensions/providers/*/nu/*.nu
  • -
  • User workspace: workspace/extensions/providers/*/nu/*.nu
  • -
-
# Discover available providers
-provisioning module discover providers
-
-# Load provider
-provisioning module load providers workspace my-provider
-
-

Provider API Examples

-

Create Servers

-
use my_provider.nu *
-
-let plan = {
-    count: 3
-    size: "medium"
-    zone: "us-east-1"
-}
-
-create-servers $plan
-
-

List Servers

-
list-servers | where status == "running" | select hostname ip_address
-
-

Get Pricing

-
get-pricing "small" | to yaml
-
-

Testing Providers

-

Use the test environment system to test providers:

-
# Test provider without real resources
-provisioning test env single my-provider --check
-
-

Provider Development Guide

-

For complete provider development guide, see:

- -

API Stability

-

Provider API follows semantic versioning:

-
    -
  • Major: Breaking changes
  • -
  • Minor: New features, backward compatible
  • -
  • Patch: Bug fixes
  • -
-

Current API version: 2.0.0

-
-

For more examples, see Integration Examples.

-

Nushell API Reference

-

API documentation for Nushell library functions in the provisioning platform.

-

Overview

-

The provisioning platform provides a comprehensive Nushell library with reusable functions for infrastructure automation.

-

Core Modules

-

Configuration Module

-

Location: provisioning/core/nulib/lib_provisioning/config/

-
    -
  • get-config <key> - Retrieve configuration values
  • -
  • validate-config - Validate configuration files
  • -
  • load-config <path> - Load configuration from file
  • -
-

Server Module

-

Location: provisioning/core/nulib/lib_provisioning/servers/

-
    -
  • create-servers <plan> - Create server infrastructure
  • -
  • list-servers - List all provisioned servers
  • -
  • delete-servers <ids> - Remove servers
  • -
-

Task Service Module

-

Location: provisioning/core/nulib/lib_provisioning/taskservs/

-
    -
  • install-taskserv <name> - Install infrastructure service
  • -
  • list-taskservs - List installed services
  • -
  • generate-taskserv-config <name> - Generate service configuration
  • -
-

Workspace Module

-

Location: provisioning/core/nulib/lib_provisioning/workspace/

-
    -
  • init-workspace <name> - Initialize new workspace
  • -
  • get-active-workspace - Get current workspace
  • -
  • switch-workspace <name> - Switch to different workspace
  • -
-

Provider Module

-

Location: provisioning/core/nulib/lib_provisioning/providers/

-
    -
  • discover-providers - Find available providers
  • -
  • load-provider <name> - Load provider module
  • -
  • list-providers - List loaded providers
  • -
-

Diagnostics & Utilities

-

Diagnostics Module

-

Location: provisioning/core/nulib/lib_provisioning/diagnostics/

-
    -
  • system-status - Check system health (13+ checks)
  • -
  • health-check - Deep validation (7 areas)
  • -
  • next-steps - Get progressive guidance
  • -
  • deployment-phase - Check deployment progress
  • -
-

Hints Module

-

Location: provisioning/core/nulib/lib_provisioning/utils/hints.nu

-
    -
  • show-next-step <context> - Display next step suggestion
  • -
  • show-doc-link <topic> - Show documentation link
  • -
  • show-example <command> - Display command example
  • -
-

Usage Example

-
# Load provisioning library
-use provisioning/core/nulib/lib_provisioning *
-
-# Check system status
-system-status | table
-
-# Create servers
-create-servers --plan "3-node-cluster" --check
-
-# Install kubernetes
-install-taskserv kubernetes --check
-
-# Get next steps
-next-steps
-
-

API Conventions

-

All API functions follow these conventions:

-
    -
  • Explicit types: All parameters have type annotations
  • -
  • Early returns: Validate first, fail fast
  • -
  • Pure functions: No side effects (mutations marked with !)
  • -
  • Pipeline-friendly: Output designed for Nu pipelines
  • -
-

Best Practices

-

See Nushell Best Practices for coding guidelines.

-

Source Code

-

Browse the complete source code:

-
    -
  • Core library: provisioning/core/nulib/lib_provisioning/
  • -
  • Module index: provisioning/core/nulib/lib_provisioning/mod.nu
  • -
-
-

For integration examples, see Integration Examples.

-

Path Resolution API

-

This document describes the path resolution system used throughout the provisioning infrastructure for discovering configurations, extensions, and -resolving workspace paths.

-

Overview

-

The path resolution system provides a hierarchical and configurable mechanism for:

-
    -
  • Configuration file discovery and loading
  • -
  • Extension discovery (providers, task services, clusters)
  • -
  • Workspace and project path management
  • -
  • Environment variable interpolation
  • -
  • Cross-platform path handling
  • -
-

Configuration Resolution Hierarchy

-

The system follows a specific hierarchy for loading configuration files:

-
1. System defaults      (config.defaults.toml)
-2. User configuration   (config.user.toml)
-3. Project configuration (config.project.toml)
-4. Infrastructure config (infra/config.toml)
-5. Environment config   (config.{env}.toml)
-6. Runtime overrides    (CLI arguments, ENV vars)
-
-

Configuration Search Paths

-

The system searches for configuration files in these locations:

-
# Default search paths (in order)
-/usr/local/provisioning/config.defaults.toml
-$HOME/.config/provisioning/config.user.toml
-$PWD/config.project.toml
-$PROVISIONING_KLOUD_PATH/config.infra.toml
-$PWD/config.{PROVISIONING_ENV}.toml
-
-

Path Resolution API

-

Core Functions

-

resolve-config-path(pattern: string, search_paths: list<string>) -> string

-

Resolves configuration file paths using the search hierarchy.

-

Parameters:

-
    -
  • pattern: File pattern to search for (for example, “config.*.toml”)
  • -
  • search_paths: Additional paths to search (optional)
  • -
-

Returns:

-
    -
  • Full path to the first matching configuration file
  • -
  • Empty string if no file found
  • -
-

Example:

-
use path-resolution.nu *
-let config_path = (resolve-config-path "config.user.toml" [])
-# Returns: "/home/user/.config/provisioning/config.user.toml"
-
-

resolve-extension-path(type: string, name: string) -> record

-

Discovers extension paths (providers, taskservs, clusters).

-

Parameters:

-
    -
  • type: Extension type (“provider”, “taskserv”, “cluster”)
  • -
  • name: Extension name (for example, “upcloud”, “kubernetes”, “buildkit”)
  • -
-

Returns:

-
{
-    base_path: "/usr/local/provisioning/providers/upcloud",
-    schemas_path: "/usr/local/provisioning/providers/upcloud/schemas",
-    nulib_path: "/usr/local/provisioning/providers/upcloud/nulib",
-    templates_path: "/usr/local/provisioning/providers/upcloud/templates",
-    exists: true
-}
-
-

resolve-workspace-paths() -> record

-

Gets current workspace path configuration.

-

Returns:

-
{
-    base: "/usr/local/provisioning",
-    current_infra: "/workspace/infra/production",
-    kloud_path: "/workspace/kloud",
-    providers: "/usr/local/provisioning/providers",
-    taskservs: "/usr/local/provisioning/taskservs",
-    clusters: "/usr/local/provisioning/cluster",
-    extensions: "/workspace/extensions"
-}
-
-

Path Interpolation

-

The system supports variable interpolation in configuration paths:

-

Supported Variables

-
    -
  • {{paths.base}} - Base provisioning path
  • -
  • {{paths.kloud}} - Current kloud path
  • -
  • {{env.HOME}} - User home directory
  • -
  • {{env.PWD}} - Current working directory
  • -
  • {{now.date}} - Current date (YYYY-MM-DD)
  • -
  • {{now.time}} - Current time (HH:MM:SS)
  • -
  • {{git.branch}} - Current git branch
  • -
  • {{git.commit}} - Current git commit hash
  • -
-

interpolate-path(template: string, context: record) -> string

-

Interpolates variables in path templates.

-

Parameters:

-
    -
  • template: Path template with variables
  • -
  • context: Variable context record
  • -
-

Example:

-
let template = "{{paths.base}}/infra/{{env.USER}}/{{git.branch}}"
-let result = (interpolate-path $template {
-    paths: { base: "/usr/local/provisioning" },
-    env: { USER: "admin" },
-    git: { branch: "main" }
-})
-# Returns: "/usr/local/provisioning/infra/admin/main"
-
-

Extension Discovery API

-

Provider Discovery

-

discover-providers() -> list<record>

-

Discovers all available providers.

-

Returns:

-
[
-    {
-        name: "upcloud",
-        path: "/usr/local/provisioning/providers/upcloud",
-        type: "provider",
-        version: "1.2.0",
-        enabled: true,
-        has_schemas: true,
-        has_nulib: true,
-        has_templates: true
-    },
-    {
-        name: "aws",
-        path: "/usr/local/provisioning/providers/aws",
-        type: "provider",
-        version: "2.1.0",
-        enabled: true,
-        has_schemas: true,
-        has_nulib: true,
-        has_templates: true
-    }
-]
-
-

get-provider-config(name: string) -> record

-

Gets provider-specific configuration and paths.

-

Parameters:

-
    -
  • name: Provider name
  • -
-

Returns:

-
{
-    name: "upcloud",
-    base_path: "/usr/local/provisioning/providers/upcloud",
-    config: {
-        api_url: "https://api.upcloud.com/1.3",
-        auth_method: "basic",
-        interface: "API"
-    },
-    paths: {
-        schemas: "/usr/local/provisioning/providers/upcloud/schemas",
-        nulib: "/usr/local/provisioning/providers/upcloud/nulib",
-        templates: "/usr/local/provisioning/providers/upcloud/templates"
-    },
-    metadata: {
-        version: "1.2.0",
-        description: "UpCloud provider for server provisioning"
-    }
-}
-
-

Task Service Discovery

-

discover-taskservs() -> list<record>

-

Discovers all available task services.

-

Returns:

-
[
-    {
-        name: "kubernetes",
-        path: "/usr/local/provisioning/taskservs/kubernetes",
-        type: "taskserv",
-        category: "orchestration",
-        version: "1.28.0",
-        enabled: true
-    },
-    {
-        name: "cilium",
-        path: "/usr/local/provisioning/taskservs/cilium",
-        type: "taskserv",
-        category: "networking",
-        version: "1.14.0",
-        enabled: true
-    }
-]
-
-

get-taskserv-config(name: string) -> record

-

Gets task service configuration and version information.

-

Parameters:

-
    -
  • name: Task service name
  • -
-

Returns:

-
{
-    name: "kubernetes",
-    path: "/usr/local/provisioning/taskservs/kubernetes",
-    version: {
-        current: "1.28.0",
-        available: "1.28.2",
-        update_available: true,
-        source: "github",
-        release_url: "https://github.com/kubernetes/kubernetes/releases"
-    },
-    config: {
-        category: "orchestration",
-        dependencies: ["containerd"],
-        supports_versions: ["1.26.x", "1.27.x", "1.28.x"]
-    }
-}
-
-

Cluster Discovery

-

discover-clusters() -> list<record>

-

Discovers all available cluster configurations.

-

Returns:

-
[
-    {
-        name: "buildkit",
-        path: "/usr/local/provisioning/cluster/buildkit",
-        type: "cluster",
-        category: "build",
-        components: ["buildkit", "registry", "storage"],
-        enabled: true
-    }
-]
-
-

Environment Management API

-

Environment Detection

-

detect-environment() -> string

-

Automatically detects the current environment based on:

-
    -
  1. PROVISIONING_ENV environment variable
  2. -
  3. Git branch patterns (main → prod, develop → dev, etc.)
  4. -
  5. Directory structure analysis
  6. -
  7. Configuration file presence
  8. -
-

Returns:

-
    -
  • Environment name string (dev, test, prod, etc.)
  • -
-

get-environment-config(env: string) -> record

-

Gets environment-specific configuration.

-

Parameters:

-
    -
  • env: Environment name
  • -
-

Returns:

-
{
-    name: "production",
-    paths: {
-        base: "/opt/provisioning",
-        kloud: "/data/kloud",
-        logs: "/var/log/provisioning"
-    },
-    providers: {
-        default: "upcloud",
-        allowed: ["upcloud", "aws"]
-    },
-    features: {
-        debug: false,
-        telemetry: true,
-        rollback: true
-    }
-}
-
-

Environment Switching

-

switch-environment(env: string, validate: bool = true) -> null

-

Switches to a different environment and updates path resolution.

-

Parameters:

-
    -
  • env: Target environment name
  • -
  • validate: Whether to validate environment configuration
  • -
-

Effects:

-
    -
  • Updates PROVISIONING_ENV environment variable
  • -
  • Reconfigures path resolution for new environment
  • -
  • Validates environment configuration if requested
  • -
-

Workspace Management API

-

Workspace Discovery

-

discover-workspaces() -> list<record>

-

Discovers available workspaces and infrastructure directories.

-

Returns:

-
[
-    {
-        name: "production",
-        path: "/workspace/infra/production",
-        type: "infrastructure",
-        provider: "upcloud",
-        settings: "settings.ncl",
-        valid: true
-    },
-    {
-        name: "development",
-        path: "/workspace/infra/development",
-        type: "infrastructure",
-        provider: "local",
-        settings: "dev-settings.ncl",
-        valid: true
-    }
-]
-
-

set-current-workspace(path: string) -> null

-

Sets the current workspace for path resolution.

-

Parameters:

-
    -
  • path: Workspace directory path
  • -
-

Effects:

-
    -
  • Updates CURRENT_INFRA_PATH environment variable
  • -
  • Reconfigures workspace-relative path resolution
  • -
-

Project Structure Analysis

-

analyze-project-structure(path: string = $PWD) -> record

-

Analyzes project structure and identifies components.

-

Parameters:

-
    -
  • path: Project root path (defaults to current directory)
  • -
-

Returns:

-
{
-    root: "/workspace/project",
-    type: "provisioning_workspace",
-    components: {
-        providers: [
-            { name: "upcloud", path: "providers/upcloud" },
-            { name: "aws", path: "providers/aws" }
-        ],
-        taskservs: [
-            { name: "kubernetes", path: "taskservs/kubernetes" },
-            { name: "cilium", path: "taskservs/cilium" }
-        ],
-        clusters: [
-            { name: "buildkit", path: "cluster/buildkit" }
-        ],
-        infrastructure: [
-            { name: "production", path: "infra/production" },
-            { name: "staging", path: "infra/staging" }
-        ]
-    },
-    config_files: [
-        "config.defaults.toml",
-        "config.user.toml",
-        "config.prod.toml"
-    ]
-}
-
-

Caching and Performance

-

Path Caching

-

The path resolution system includes intelligent caching:

-

cache-paths(duration: duration = 5 min) -> null

-

Enables path caching for the specified duration.

-

Parameters:

-
    -
  • duration: Cache validity duration
  • -
-

invalidate-path-cache() -> null

-

Invalidates the path resolution cache.

-

get-cache-stats() -> record

-

Gets path resolution cache statistics.

-

Returns:

-
{
-    enabled: true,
-    size: 150,
-    hit_rate: 0.85,
-    last_invalidated: "2025-09-26T10:00:00Z"
-}
-
-

Cross-Platform Compatibility

-

Path Normalization

-

normalize-path(path: string) -> string

-

Normalizes paths for cross-platform compatibility.

-

Parameters:

-
    -
  • path: Input path (may contain mixed separators)
  • -
-

Returns:

-
    -
  • Normalized path using platform-appropriate separators
  • -
-

Example:

-
# On Windows
-normalize-path "path/to/file" # Returns: "path\to\file"
-
-# On Unix
-normalize-path "path\to\file" # Returns: "path/to/file"
-
-

join-paths(segments: list<string>) -> string

-

Safely joins path segments using platform separators.

-

Parameters:

-
    -
  • segments: List of path segments
  • -
-

Returns:

-
    -
  • Joined path string
  • -
-

Configuration Validation API

-

Path Validation

-

validate-paths(config: record) -> record

-

Validates all paths in configuration.

-

Parameters:

-
    -
  • config: Configuration record
  • -
-

Returns:

-
{
-    valid: true,
-    errors: [],
-    warnings: [
-        { path: "paths.extensions", message: "Path does not exist" }
-    ],
-    checks_performed: 15
-}
-
-

validate-extension-structure(type: string, path: string) -> record

-

Validates extension directory structure.

-

Parameters:

-
    -
  • type: Extension type (provider, taskserv, cluster)
  • -
  • path: Extension base path
  • -
-

Returns:

-
{
-    valid: true,
-    required_files: [
-        { file: "manifest.toml", exists: true },
-        { file: "schemas/main.ncl", exists: true },
-        { file: "nulib/mod.nu", exists: true }
-    ],
-    optional_files: [
-        { file: "templates/server.j2", exists: false }
-    ]
-}
-
-

Command-Line Interface

-

Path Resolution Commands

-

The path resolution API is exposed via Nushell commands:

-
# Show current path configuration
-provisioning show paths
-
-# Discover available extensions
-provisioning discover providers
-provisioning discover taskservs
-provisioning discover clusters
-
-# Validate path configuration
-provisioning validate paths
-
-# Switch environments
-provisioning env switch prod
-
-# Set workspace
-provisioning workspace set /path/to/infra
-
-

Integration Examples

-

Python Integration

-
import subprocess
-import json
-
-class PathResolver:
-    def __init__(self, provisioning_path="/usr/local/bin/provisioning"):
-        self.cmd = provisioning_path
-
-    def get_paths(self):
-        result = subprocess.run([
-            "nu", "-c", f"use {self.cmd} *; show-config --section=paths --format=json"
-        ], capture_output=True, text=True)
-        return json.loads(result.stdout)
-
-    def discover_providers(self):
-        result = subprocess.run([
-            "nu", "-c", f"use {self.cmd} *; discover providers --format=json"
-        ], capture_output=True, text=True)
-        return json.loads(result.stdout)
-
-# Usage
-resolver = PathResolver()
-paths = resolver.get_paths()
-providers = resolver.discover_providers()
-
-

JavaScript/Node.js Integration

-
const { exec } = require('child_process');
-const util = require('util');
-const execAsync = util.promisify(exec);
-
-class PathResolver {
-  constructor(provisioningPath = '/usr/local/bin/provisioning') {
-    this.cmd = provisioningPath;
-  }
-
-  async getPaths() {
-    const { stdout } = await execAsync(
-      `nu -c "use ${this.cmd} *; show-config --section=paths --format=json"`
-    );
-    return JSON.parse(stdout);
-  }
-
-  async discoverExtensions(type) {
-    const { stdout } = await execAsync(
-      `nu -c "use ${this.cmd} *; discover ${type} --format=json"`
-    );
-    return JSON.parse(stdout);
-  }
-}
-
-// Usage
-const resolver = new PathResolver();
-const paths = await resolver.getPaths();
-const providers = await resolver.discoverExtensions('providers');
-
-

Error Handling

-

Common Error Scenarios

-
    -
  1. -

    Configuration File Not Found

    -
    Error: Configuration file not found in search paths
    -Searched: ["/usr/local/provisioning/config.defaults.toml", ...]
    -
    -
  2. -
  3. -

    Extension Not Found

    -
    Error: Provider 'missing-provider' not found
    -Available providers: ["upcloud", "aws", "local"]
    -
    -
  4. -
  5. -

    Invalid Path Template

    -
    Error: Invalid template variable: {{invalid.var}}
    -Valid variables: ["paths.*", "env.*", "now.*", "git.*"]
    -
    -
  6. -
  7. -

    Environment Not Found

    -
    Error: Environment 'staging' not configured
    -Available environments: ["dev", "test", "prod"]
    -
    -
  8. -
-

Error Recovery

-

The system provides graceful fallbacks:

-
    -
  • Missing configuration files use system defaults
  • -
  • Invalid paths fall back to safe defaults
  • -
  • Extension discovery continues if some paths are inaccessible
  • -
  • Environment detection falls back to ‘local’ if detection fails
  • -
-

Performance Considerations

-

Best Practices

-
    -
  1. Use Path Caching: Enable caching for frequently accessed paths
  2. -
  3. Batch Discovery: Discover all extensions at once rather than individually
  4. -
  5. Lazy Loading: Load extension configurations only when needed
  6. -
  7. Environment Detection: Cache environment detection results
  8. -
-

Monitoring

-

Monitor path resolution performance:

-
# Get resolution statistics
-provisioning debug path-stats
-
-# Monitor cache performance
-provisioning debug cache-stats
-
-# Profile path resolution
-provisioning debug profile-paths
-
-

Security Considerations

-

Path Traversal Protection

-

The system includes protections against path traversal attacks:

-
    -
  • All paths are normalized and validated
  • -
  • Relative paths are resolved within safe boundaries
  • -
  • Symlinks are validated before following
  • -
-

Access Control

-

Path resolution respects file system permissions:

-
    -
  • Configuration files require read access
  • -
  • Extension directories require read/execute access
  • -
  • Workspace directories may require write access for operations
  • -
-

This path resolution API provides a comprehensive and flexible system for managing the complex path requirements of multi-provider, multi-environment -infrastructure provisioning.

-

Infrastructure-Specific Extension Development

-

This guide focuses on creating extensions tailored to specific infrastructure requirements, business needs, and organizational constraints.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Infrastructure Assessment
  4. -
  5. Custom Taskserv Development
  6. -
  7. Provider-Specific Extensions
  8. -
  9. Multi-Environment Management
  10. -
  11. Integration Patterns
  12. -
  13. Real-World Examples
  14. -
-

Overview

-

Infrastructure-specific extensions address unique requirements that generic modules cannot cover:

-
    -
  • Company-specific applications and services
  • -
  • Compliance and security requirements
  • -
  • Legacy system integrations
  • -
  • Custom networking configurations
  • -
  • Specialized monitoring and alerting
  • -
  • Multi-cloud and hybrid deployments
  • -
-

Infrastructure Assessment

-

Identifying Extension Needs

-

Before creating custom extensions, assess your infrastructure requirements:

-

1. Application Inventory

-
# Document existing applications
-cat > infrastructure-assessment.yaml << EOF
-applications:
-  - name: "legacy-billing-system"
-    type: "monolith"
-    runtime: "java-8"
-    database: "oracle-11g"
-    integrations: ["ldap", "file-storage", "email"]
-    compliance: ["pci-dss", "sox"]
-
-  - name: "customer-portal"
-    type: "microservices"
-    runtime: "nodejs-16"
-    database: "postgresql-13"
-    integrations: ["redis", "elasticsearch", "s3"]
-    compliance: ["gdpr", "hipaa"]
-
-infrastructure:
-  - type: "on-premise"
-    location: "datacenter-primary"
-    capabilities: ["kubernetes", "vmware", "storage-array"]
-
-  - type: "cloud"
-    provider: "aws"
-    regions: ["us-east-1", "eu-west-1"]
-    services: ["eks", "rds", "s3", "cloudfront"]
-
-compliance_requirements:
-  - "PCI DSS Level 1"
-  - "SOX compliance"
-  - "GDPR data protection"
-  - "HIPAA safeguards"
-
-network_requirements:
-  - "air-gapped environments"
-  - "private subnet isolation"
-  - "vpn connectivity"
-  - "load balancer integration"
-EOF
-
-

2. Gap Analysis

-
# Analyze what standard modules don't cover
-./provisioning/core/cli/module-loader discover taskservs > available-modules.txt
-
-# Create gap analysis
-cat > gap-analysis.md << EOF
-# Infrastructure Gap Analysis
-
-## Standard Modules Available
-$(cat available-modules.txt)
-
-## Missing Capabilities
-- [ ] Legacy Oracle database integration
-- [ ] Company-specific LDAP authentication
-- [ ] Custom monitoring for legacy systems
-- [ ] Compliance reporting automation
-- [ ] Air-gapped deployment workflows
-- [ ] Multi-datacenter replication
-
-## Custom Extensions Needed
-1. **oracle-db-taskserv**: Oracle database with company settings
-2. **company-ldap-taskserv**: LDAP integration with custom schema
-3. **compliance-monitor-taskserv**: Automated compliance checking
-4. **airgap-deployment-cluster**: Air-gapped deployment patterns
-5. **company-monitoring-taskserv**: Custom monitoring dashboard
-EOF
-
-

Requirements Gathering

-

Business Requirements Template

-
"""
-Business Requirements Schema for Custom Extensions
-Use this template to document requirements before development
-"""
-
-schema BusinessRequirements:
-    """Document business requirements for custom extensions"""
-
-    # Project information
-    project_name: str
-    stakeholders: [str]
-    timeline: str
-    budget_constraints?: str
-
-    # Functional requirements
-    functional_requirements: [FunctionalRequirement]
-
-    # Non-functional requirements
-    performance_requirements: PerformanceRequirements
-    security_requirements: SecurityRequirements
-    compliance_requirements: [str]
-
-    # Integration requirements
-    existing_systems: [ExistingSystem]
-    required_integrations: [Integration]
-
-    # Operational requirements
-    monitoring_requirements: [str]
-    backup_requirements: [str]
-    disaster_recovery_requirements: [str]
-
-schema FunctionalRequirement:
-    id: str
-    description: str
-    priority: "high" | "medium" | "low"
-    acceptance_criteria: [str]
-
-schema PerformanceRequirements:
-    max_response_time: str
-    throughput_requirements: str
-    availability_target: str
-    scalability_requirements: str
-
-schema SecurityRequirements:
-    authentication_method: str
-    authorization_model: str
-    encryption_requirements: [str]
-    audit_requirements: [str]
-    network_security: [str]
-
-schema ExistingSystem:
-    name: str
-    type: str
-    version: str
-    api_available: bool
-    integration_method: str
-
-schema Integration:
-    target_system: str
-    integration_type: "api" | "database" | "file" | "message_queue"
-    data_format: str
-    frequency: str
-    direction: "inbound" | "outbound" | "bidirectional"
-
-

Custom Taskserv Development

-

Company-Specific Application Taskserv

-

Example: Legacy ERP System Integration

-
# Create company-specific taskserv
-mkdir -p extensions/taskservs/company-specific/legacy-erp/nickel
-cd extensions/taskservs/company-specific/legacy-erp/nickel
-
-

Create legacy-erp.ncl:

-
"""
-Legacy ERP System Taskserv
-Handles deployment and management of company's legacy ERP system
-"""
-
-import provisioning.lib as lib
-import provisioning.dependencies as deps
-import provisioning.defaults as defaults
-
-# ERP system configuration
-schema LegacyERPConfig:
-    """Configuration for legacy ERP system"""
-
-    # Application settings
-    erp_version: str = "12.2.0"
-    installation_mode: "standalone" | "cluster" | "ha" = "ha"
-
-    # Database configuration
-    database_type: "oracle" | "sqlserver" = "oracle"
-    database_version: str = "19c"
-    database_size: str = "500Gi"
-    database_backup_retention: int = 30
-
-    # Network configuration
-    erp_port: int = 8080
-    database_port: int = 1521
-    ssl_enabled: bool = True
-    internal_network_only: bool = True
-
-    # Integration settings
-    ldap_server: str
-    file_share_path: str
-    email_server: str
-
-    # Compliance settings
-    audit_logging: bool = True
-    encryption_at_rest: bool = True
-    encryption_in_transit: bool = True
-    data_retention_years: int = 7
-
-    # Resource allocation
-    app_server_resources: ERPResourceConfig
-    database_resources: ERPResourceConfig
-
-    # Backup configuration
-    backup_schedule: str = "0 2 * * *"  # Daily at 2 AM
-    backup_retention_policy: BackupRetentionPolicy
-
-    check:
-        erp_port > 0 and erp_port < 65536, "ERP port must be valid"
-        database_port > 0 and database_port < 65536, "Database port must be valid"
-        data_retention_years > 0, "Data retention must be positive"
-        len(ldap_server) > 0, "LDAP server required"
-
-schema ERPResourceConfig:
-    """Resource configuration for ERP components"""
-    cpu_request: str
-    memory_request: str
-    cpu_limit: str
-    memory_limit: str
-    storage_size: str
-    storage_class: str = "fast-ssd"
-
-schema BackupRetentionPolicy:
-    """Backup retention policy for ERP system"""
-    daily_backups: int = 7
-    weekly_backups: int = 4
-    monthly_backups: int = 12
-    yearly_backups: int = 7
-
-# Environment-specific resource configurations
-erp_resource_profiles = {
-    "development": {
-        app_server_resources = {
-            cpu_request = "1"
-            memory_request = "4Gi"
-            cpu_limit = "2"
-            memory_limit = "8Gi"
-            storage_size = "50Gi"
-            storage_class = "standard"
-        }
-        database_resources = {
-            cpu_request = "2"
-            memory_request = "8Gi"
-            cpu_limit = "4"
-            memory_limit = "16Gi"
-            storage_size = "100Gi"
-            storage_class = "standard"
-        }
-    },
-    "production": {
-        app_server_resources = {
-            cpu_request = "4"
-            memory_request = "16Gi"
-            cpu_limit = "8"
-            memory_limit = "32Gi"
-            storage_size = "200Gi"
-            storage_class = "fast-ssd"
-        }
-        database_resources = {
-            cpu_request = "8"
-            memory_request = "32Gi"
-            cpu_limit = "16"
-            memory_limit = "64Gi"
-            storage_size = "2Ti"
-            storage_class = "fast-ssd"
-        }
-    }
-}
-
-# Taskserv definition
-schema LegacyERPTaskserv(lib.TaskServDef):
-    """Legacy ERP Taskserv Definition"""
-    name: str = "legacy-erp"
-    config: LegacyERPConfig
-    environment: "development" | "staging" | "production"
-
-# Dependencies for legacy ERP
-legacy_erp_dependencies: deps.TaskservDependencies = {
-    name = "legacy-erp"
-
-    # Infrastructure dependencies
-    requires = ["kubernetes", "storage-class"]
-    optional = ["monitoring", "backup-agent", "log-aggregator"]
-    conflicts = ["modern-erp"]
-
-    # Services provided
-    provides = ["erp-api", "erp-ui", "erp-reports", "erp-integration"]
-
-    # Resource requirements
-    resources = {
-        cpu = "8"
-        memory = "32Gi"
-        disk = "2Ti"
-        network = True
-        privileged = True  # Legacy systems often need privileged access
-    }
-
-    # Health checks
-    health_checks = [
-        {
-            command = "curl -k https://localhost:9090/health"
-            interval = 60
-            timeout = 30
-            retries = 3
-        },
-        {
-            command = "sqlplus system/password@localhost:1521/XE <<< 'SELECT 1 FROM DUAL;'"
-            interval = 300
-            timeout = 60
-            retries = 2
-        }
-    ]
-
-    # Installation phases
-    phases = [
-        {
-            name = "pre-install"
-            order = 1
-            parallel = False
-            required = True
-        },
-        {
-            name = "database-setup"
-            order = 2
-            parallel = False
-            required = True
-        },
-        {
-            name = "application-install"
-            order = 3
-            parallel = False
-            required = True
-        },
-        {
-            name = "integration-setup"
-            order = 4
-            parallel = True
-            required = False
-        },
-        {
-            name = "compliance-validation"
-            order = 5
-            parallel = False
-            required = True
-        }
-    ]
-
-    # Compatibility
-    os_support = ["linux"]
-    arch_support = ["amd64"]
-    timeout = 3600  # 1 hour for legacy system deployment
-}
-
-# Default configuration
-legacy_erp_default: LegacyERPTaskserv = {
-    name = "legacy-erp"
-    environment = "production"
-    config = {
-        erp_version = "12.2.0"
-        installation_mode = "ha"
-
-        database_type = "oracle"
-        database_version = "19c"
-        database_size = "1Ti"
-        database_backup_retention = 30
-
-        erp_port = 8080
-        database_port = 1521
-        ssl_enabled = True
-        internal_network_only = True
-
-        # Company-specific settings
-        ldap_server = "ldap.company.com"
-        file_share_path = "/mnt/company-files"
-        email_server = "smtp.company.com"
-
-        # Compliance settings
-        audit_logging = True
-        encryption_at_rest = True
-        encryption_in_transit = True
-        data_retention_years = 7
-
-        # Production resources
-        app_server_resources = erp_resource_profiles.production.app_server_resources
-        database_resources = erp_resource_profiles.production.database_resources
-
-        backup_schedule = "0 2 * * *"
-        backup_retention_policy = {
-            daily_backups = 7
-            weekly_backups = 4
-            monthly_backups = 12
-            yearly_backups = 7
-        }
-    }
-}
-
-# Export for provisioning system
-{
-    config: legacy_erp_default,
-    dependencies: legacy_erp_dependencies,
-    profiles: erp_resource_profiles
-}
-
-

Compliance-Focused Taskserv

-

Create compliance-monitor.ncl:

-
"""
-Compliance Monitoring Taskserv
-Automated compliance checking and reporting for regulated environments
-"""
-
-import provisioning.lib as lib
-import provisioning.dependencies as deps
-
-schema ComplianceMonitorConfig:
-    """Configuration for compliance monitoring system"""
-
-    # Compliance frameworks
-    enabled_frameworks: [ComplianceFramework]
-
-    # Monitoring settings
-    scan_frequency: str = "0 0 * * *"  # Daily
-    real_time_monitoring: bool = True
-
-    # Reporting settings
-    report_frequency: str = "0 0 * * 0"  # Weekly
-    report_recipients: [str]
-    report_format: "pdf" | "html" | "json" = "pdf"
-
-    # Alerting configuration
-    alert_severity_threshold: "low" | "medium" | "high" = "medium"
-    alert_channels: [AlertChannel]
-
-    # Data retention
-    audit_log_retention_days: int = 2555  # 7 years
-    report_retention_days: int = 365
-
-    # Integration settings
-    siem_integration: bool = True
-    siem_endpoint?: str
-
-    check:
-        audit_log_retention_days >= 2555, "Audit logs must be retained for at least 7 years"
-        len(report_recipients) > 0, "At least one report recipient required"
-
-schema ComplianceFramework:
-    """Compliance framework configuration"""
-    name: "pci-dss" | "sox" | "gdpr" | "hipaa" | "iso27001" | "nist"
-    version: str
-    enabled: bool = True
-    custom_controls?: [ComplianceControl]
-
-schema ComplianceControl:
-    """Custom compliance control"""
-    id: str
-    description: str
-    check_command: str
-    severity: "low" | "medium" | "high" | "critical"
-    remediation_guidance: str
-
-schema AlertChannel:
-    """Alert channel configuration"""
-    type: "email" | "slack" | "teams" | "webhook" | "sms"
-    endpoint: str
-    severity_filter: ["low", "medium", "high", "critical"]
-
-# Taskserv definition
-schema ComplianceMonitorTaskserv(lib.TaskServDef):
-    """Compliance Monitor Taskserv Definition"""
-    name: str = "compliance-monitor"
-    config: ComplianceMonitorConfig
-
-# Dependencies
-compliance_monitor_dependencies: deps.TaskservDependencies = {
-    name = "compliance-monitor"
-
-    # Dependencies
-    requires = ["kubernetes"]
-    optional = ["monitoring", "logging", "backup"]
-    provides = ["compliance-reports", "audit-logs", "compliance-api"]
-
-    # Resource requirements
-    resources = {
-        cpu = "500m"
-        memory = "1Gi"
-        disk = "50Gi"
-        network = True
-        privileged = False
-    }
-
-    # Health checks
-    health_checks = [
-        {
-            command = "curl -f http://localhost:9090/health"
-            interval = 30
-            timeout = 10
-            retries = 3
-        },
-        {
-            command = "compliance-check --dry-run"
-            interval = 300
-            timeout = 60
-            retries = 1
-        }
-    ]
-
-    # Compatibility
-    os_support = ["linux"]
-    arch_support = ["amd64", "arm64"]
-}
-
-# Default configuration with common compliance frameworks
-compliance_monitor_default: ComplianceMonitorTaskserv = {
-    name = "compliance-monitor"
-    config = {
-        enabled_frameworks = [
-            {
-                name = "pci-dss"
-                version = "3.2.1"
-                enabled = True
-            },
-            {
-                name = "sox"
-                version = "2002"
-                enabled = True
-            },
-            {
-                name = "gdpr"
-                version = "2018"
-                enabled = True
-            }
-        ]
-
-        scan_frequency = "0 */6 * * *"  # Every 6 hours
-        real_time_monitoring = True
-
-        report_frequency = "0 0 * * 1"  # Weekly on Monday
-        report_recipients = ["compliance@company.com", "security@company.com"]
-        report_format = "pdf"
-
-        alert_severity_threshold = "medium"
-        alert_channels = [
-            {
-                type = "email"
-                endpoint = "security-alerts@company.com"
-                severity_filter = ["medium", "high", "critical"]
-            },
-            {
-                type = "slack"
-                endpoint = "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX"
-                severity_filter = ["high", "critical"]
-            }
-        ]
-
-        audit_log_retention_days = 2555
-        report_retention_days = 365
-
-        siem_integration = True
-        siem_endpoint = "https://siem.company.com/api/events"
-    }
-}
-
-# Export configuration
-{
-    config: compliance_monitor_default,
-    dependencies: compliance_monitor_dependencies
-}
-
-

Provider-Specific Extensions

-

Custom Cloud Provider Integration

-

When working with specialized or private cloud providers:

-
# Create custom provider extension
-mkdir -p extensions/providers/company-private-cloud/nickel
-cd extensions/providers/company-private-cloud/nickel
-
-

Create provision_company-private-cloud.ncl:

-
"""
-Company Private Cloud Provider
-Integration with company's private cloud infrastructure
-"""
-
-import provisioning.defaults as defaults
-import provisioning.server as server
-
-schema CompanyPrivateCloudConfig:
-    """Company private cloud configuration"""
-
-    # API configuration
-    api_endpoint: str = "https://cloud-api.company.com"
-    api_version: str = "v2"
-    auth_token: str
-
-    # Network configuration
-    management_network: str = "10.0.0.0/24"
-    production_network: str = "10.1.0.0/16"
-    dmz_network: str = "10.2.0.0/24"
-
-    # Resource pools
-    compute_cluster: str = "production-cluster"
-    storage_cluster: str = "storage-cluster"
-
-    # Compliance settings
-    encryption_required: bool = True
-    audit_all_operations: bool = True
-
-    # Company-specific settings
-    cost_center: str
-    department: str
-    project_code: str
-
-    check:
-        len(api_endpoint) > 0, "API endpoint required"
-        len(auth_token) > 0, "Authentication token required"
-        len(cost_center) > 0, "Cost center required for billing"
-
-schema CompanyPrivateCloudServer(server.Server):
-    """Server configuration for company private cloud"""
-
-    # Instance configuration
-    instance_class: "standard" | "compute-optimized" | "memory-optimized" | "storage-optimized" = "standard"
-    instance_size: "small" | "medium" | "large" | "xlarge" | "2xlarge" = "medium"
-
-    # Storage configuration
-    root_disk_type: "ssd" | "nvme" | "spinning" = "ssd"
-    root_disk_size: int = 50
-    additional_storage?: [CompanyCloudStorage]
-
-    # Network configuration
-    network_segment: "management" | "production" | "dmz" = "production"
-    security_groups: [str] = ["default"]
-
-    # Compliance settings
-    encrypted_storage: bool = True
-    backup_enabled: bool = True
-    monitoring_enabled: bool = True
-
-    # Company metadata
-    cost_center: str
-    department: str
-    project_code: str
-    environment: "dev" | "test" | "staging" | "prod" = "prod"
-
-    check:
-        root_disk_size >= 20, "Root disk must be at least 20 GB"
-        len(cost_center) > 0, "Cost center required"
-        len(department) > 0, "Department required"
-
-schema CompanyCloudStorage:
-    """Additional storage configuration"""
-    size: int
-    type: "ssd" | "nvme" | "spinning" | "archive" = "ssd"
-    mount_point: str
-    encrypted: bool = True
-    backup_enabled: bool = True
-
-# Instance size configurations
-instance_specs = {
-    "small": {
-        vcpus = 2
-        memory_gb = 4
-        network_performance = "moderate"
-    },
-    "medium": {
-        vcpus = 4
-        memory_gb = 8
-        network_performance = "good"
-    },
-    "large": {
-        vcpus = 8
-        memory_gb = 16
-        network_performance = "high"
-    },
-    "xlarge": {
-        vcpus = 16
-        memory_gb = 32
-        network_performance = "high"
-    },
-    "2xlarge": {
-        vcpus = 32
-        memory_gb = 64
-        network_performance = "very-high"
-    }
-}
-
-# Provider defaults
-company_private_cloud_defaults: defaults.ServerDefaults = {
-    lock = False
-    time_zone = "UTC"
-    running_wait = 20
-    running_timeout = 600  # Private cloud may be slower
-
-    # Company-specific OS image
-    storage_os_find = "name: company-ubuntu-20.04-hardened | arch: x86_64"
-
-    # Network settings
-    network_utility_ipv4 = True
-    network_public_ipv4 = False  # Private cloud, no public IPs
-
-    # Security settings
-    user = "company-admin"
-    user_ssh_port = 22
-    fix_local_hosts = True
-
-    # Company metadata
-    labels = "provider: company-private-cloud, compliance: required"
-}
-
-# Export provider configuration
-{
-    config: CompanyPrivateCloudConfig,
-    server: CompanyPrivateCloudServer,
-    defaults: company_private_cloud_defaults,
-    instance_specs: instance_specs
-}
-
-

Multi-Environment Management

-

Environment-Specific Configuration Management

-

Create environment-specific extensions that handle different deployment patterns:

-
# Create environment management extension
-mkdir -p extensions/clusters/company-environments/nickel
-cd extensions/clusters/company-environments/nickel
-
-

Create company-environments.ncl:

-
"""
-Company Environment Management
-Standardized environment configurations for different deployment stages
-"""
-
-import provisioning.cluster as cluster
-import provisioning.server as server
-
-schema CompanyEnvironment:
-    """Standard company environment configuration"""
-
-    # Environment metadata
-    name: str
-    type: "development" | "testing" | "staging" | "production" | "disaster-recovery"
-    region: str
-    availability_zones: [str]
-
-    # Network configuration
-    vpc_cidr: str
-    subnet_configuration: SubnetConfiguration
-
-    # Security configuration
-    security_profile: SecurityProfile
-
-    # Compliance requirements
-    compliance_level: "basic" | "standard" | "high" | "critical"
-    data_classification: "public" | "internal" | "confidential" | "restricted"
-
-    # Resource constraints
-    resource_limits: ResourceLimits
-
-    # Backup and DR configuration
-    backup_configuration: BackupConfiguration
-    disaster_recovery_configuration?: DRConfiguration
-
-    # Monitoring and alerting
-    monitoring_level: "basic" | "standard" | "enhanced"
-    alert_routing: AlertRouting
-
-schema SubnetConfiguration:
-    """Network subnet configuration"""
-    public_subnets: [str]
-    private_subnets: [str]
-    database_subnets: [str]
-    management_subnets: [str]
-
-schema SecurityProfile:
-    """Security configuration profile"""
-    encryption_at_rest: bool
-    encryption_in_transit: bool
-    network_isolation: bool
-    access_logging: bool
-    vulnerability_scanning: bool
-
-    # Access control
-    multi_factor_auth: bool
-    privileged_access_management: bool
-    network_segmentation: bool
-
-    # Compliance controls
-    audit_logging: bool
-    data_loss_prevention: bool
-    endpoint_protection: bool
-
-schema ResourceLimits:
-    """Resource allocation limits for environment"""
-    max_cpu_cores: int
-    max_memory_gb: int
-    max_storage_tb: int
-    max_instances: int
-
-    # Cost controls
-    max_monthly_cost: int
-    cost_alerts_enabled: bool
-
-schema BackupConfiguration:
-    """Backup configuration for environment"""
-    backup_frequency: str
-    retention_policy: {str: int}
-    cross_region_backup: bool
-    encryption_enabled: bool
-
-schema DRConfiguration:
-    """Disaster recovery configuration"""
-    dr_region: str
-    rto_minutes: int  # Recovery Time Objective
-    rpo_minutes: int  # Recovery Point Objective
-    automated_failover: bool
-
-schema AlertRouting:
-    """Alert routing configuration"""
-    business_hours_contacts: [str]
-    after_hours_contacts: [str]
-    escalation_policy: [EscalationLevel]
-
-schema EscalationLevel:
-    """Alert escalation level"""
-    level: int
-    delay_minutes: int
-    contacts: [str]
-
-# Environment templates
-environment_templates = {
-    "development": {
-        type = "development"
-        compliance_level = "basic"
-        data_classification = "internal"
-        security_profile = {
-            encryption_at_rest = False
-            encryption_in_transit = False
-            network_isolation = False
-            access_logging = True
-            vulnerability_scanning = False
-            multi_factor_auth = False
-            privileged_access_management = False
-            network_segmentation = False
-            audit_logging = False
-            data_loss_prevention = False
-            endpoint_protection = False
-        }
-        resource_limits = {
-            max_cpu_cores = 50
-            max_memory_gb = 200
-            max_storage_tb = 10
-            max_instances = 20
-            max_monthly_cost = 5000
-            cost_alerts_enabled = True
-        }
-        monitoring_level = "basic"
-    },
-
-    "production": {
-        type = "production"
-        compliance_level = "critical"
-        data_classification = "confidential"
-        security_profile = {
-            encryption_at_rest = True
-            encryption_in_transit = True
-            network_isolation = True
-            access_logging = True
-            vulnerability_scanning = True
-            multi_factor_auth = True
-            privileged_access_management = True
-            network_segmentation = True
-            audit_logging = True
-            data_loss_prevention = True
-            endpoint_protection = True
-        }
-        resource_limits = {
-            max_cpu_cores = 1000
-            max_memory_gb = 4000
-            max_storage_tb = 500
-            max_instances = 200
-            max_monthly_cost = 100000
-            cost_alerts_enabled = True
-        }
-        monitoring_level = "enhanced"
-        disaster_recovery_configuration = {
-            dr_region = "us-west-2"
-            rto_minutes = 60
-            rpo_minutes = 15
-            automated_failover = True
-        }
-    }
-}
-
-# Export environment templates
-{
-    templates: environment_templates,
-    schema: CompanyEnvironment
-}
-
-

Integration Patterns

-

Legacy System Integration

-

Create integration patterns for common legacy system scenarios:

-
# Create integration patterns
-mkdir -p extensions/taskservs/integrations/legacy-bridge/nickel
-cd extensions/taskservs/integrations/legacy-bridge/nickel
-
-

Create legacy-bridge.ncl:

-
"""
-Legacy System Integration Bridge
-Provides standardized integration patterns for legacy systems
-"""
-
-import provisioning.lib as lib
-import provisioning.dependencies as deps
-
-schema LegacyBridgeConfig:
-    """Configuration for legacy system integration bridge"""
-
-    # Bridge configuration
-    bridge_name: str
-    integration_type: "api" | "database" | "file" | "message-queue" | "etl"
-
-    # Legacy system details
-    legacy_system: LegacySystemInfo
-
-    # Modern system details
-    modern_system: ModernSystemInfo
-
-    # Data transformation configuration
-    data_transformation: DataTransformationConfig
-
-    # Security configuration
-    security_config: IntegrationSecurityConfig
-
-    # Monitoring and alerting
-    monitoring_config: IntegrationMonitoringConfig
-
-schema LegacySystemInfo:
-    """Legacy system information"""
-    name: str
-    type: "mainframe" | "as400" | "unix" | "windows" | "database" | "file-system"
-    version: str
-
-    # Connection details
-    connection_method: "direct" | "vpn" | "dedicated-line" | "api-gateway"
-    endpoint: str
-    port?: int
-
-    # Authentication
-    auth_method: "password" | "certificate" | "kerberos" | "ldap" | "token"
-    credentials_source: "vault" | "config" | "environment"
-
-    # Data characteristics
-    data_format: "fixed-width" | "csv" | "xml" | "json" | "binary" | "proprietary"
-    character_encoding: str = "utf-8"
-
-    # Operational characteristics
-    availability_hours: str = "24/7"
-    maintenance_windows: [MaintenanceWindow]
-
-schema ModernSystemInfo:
-    """Modern system information"""
-    name: str
-    type: "microservice" | "api" | "database" | "event-stream" | "file-store"
-
-    # Connection details
-    endpoint: str
-    api_version?: str
-
-    # Data format
-    data_format: "json" | "xml" | "avro" | "protobuf"
-
-    # Authentication
-    auth_method: "oauth2" | "jwt" | "api-key" | "mutual-tls"
-
-schema DataTransformationConfig:
-    """Data transformation configuration"""
-    transformation_rules: [TransformationRule]
-    error_handling: ErrorHandlingConfig
-    data_validation: DataValidationConfig
-
-schema TransformationRule:
-    """Individual data transformation rule"""
-    source_field: str
-    target_field: str
-    transformation_type: "direct" | "calculated" | "lookup" | "conditional"
-    transformation_expression?: str
-
-schema ErrorHandlingConfig:
-    """Error handling configuration"""
-    retry_policy: RetryPolicy
-    dead_letter_queue: bool = True
-    error_notification: bool = True
-
-schema RetryPolicy:
-    """Retry policy configuration"""
-    max_attempts: int = 3
-    initial_delay_seconds: int = 5
-    backoff_multiplier: float = 2.0
-    max_delay_seconds: int = 300
-
-schema DataValidationConfig:
-    """Data validation configuration"""
-    schema_validation: bool = True
-    business_rules_validation: bool = True
-    data_quality_checks: [DataQualityCheck]
-
-schema DataQualityCheck:
-    """Data quality check definition"""
-    name: str
-    check_type: "completeness" | "uniqueness" | "validity" | "consistency"
-    threshold: float = 0.95
-    action_on_failure: "warn" | "stop" | "quarantine"
-
-schema IntegrationSecurityConfig:
-    """Security configuration for integration"""
-    encryption_in_transit: bool = True
-    encryption_at_rest: bool = True
-
-    # Access control
-    source_ip_whitelist?: [str]
-    api_rate_limiting: bool = True
-
-    # Audit and compliance
-    audit_all_transactions: bool = True
-    pii_data_handling: PIIHandlingConfig
-
-schema PIIHandlingConfig:
-    """PII data handling configuration"""
-    pii_fields: [str]
-    anonymization_enabled: bool = True
-    retention_policy_days: int = 365
-
-schema IntegrationMonitoringConfig:
-    """Monitoring configuration for integration"""
-    metrics_collection: bool = True
-    performance_monitoring: bool = True
-
-    # SLA monitoring
-    sla_targets: SLATargets
-
-    # Alerting
-    alert_on_failures: bool = True
-    alert_on_performance_degradation: bool = True
-
-schema SLATargets:
-    """SLA targets for integration"""
-    max_latency_ms: int = 5000
-    min_availability_percent: float = 99.9
-    max_error_rate_percent: float = 0.1
-
-schema MaintenanceWindow:
-    """Maintenance window definition"""
-    day_of_week: int  # 0=Sunday, 6=Saturday
-    start_time: str   # HH:MM format
-    duration_hours: int
-
-# Taskserv definition
-schema LegacyBridgeTaskserv(lib.TaskServDef):
-    """Legacy Bridge Taskserv Definition"""
-    name: str = "legacy-bridge"
-    config: LegacyBridgeConfig
-
-# Dependencies
-legacy_bridge_dependencies: deps.TaskservDependencies = {
-    name = "legacy-bridge"
-
-    requires = ["kubernetes"]
-    optional = ["monitoring", "logging", "vault"]
-    provides = ["legacy-integration", "data-bridge"]
-
-    resources = {
-        cpu = "500m"
-        memory = "1Gi"
-        disk = "10Gi"
-        network = True
-        privileged = False
-    }
-
-    health_checks = [
-        {
-            command = "curl -f http://localhost:9090/health"
-            interval = 30
-            timeout = 10
-            retries = 3
-        },
-        {
-            command = "integration-test --quick"
-            interval = 300
-            timeout = 120
-            retries = 1
-        }
-    ]
-
-    os_support = ["linux"]
-    arch_support = ["amd64", "arm64"]
-}
-
-# Export configuration
-{
-    config: LegacyBridgeTaskserv,
-    dependencies: legacy_bridge_dependencies
-}
-
-

Real-World Examples

-

Example 1: Financial Services Company

-
# Financial services specific extensions
-mkdir -p extensions/taskservs/financial-services/{trading-system,risk-engine,compliance-reporter}/nickel
-
-

Example 2: Healthcare Organization

-
# Healthcare specific extensions
-mkdir -p extensions/taskservs/healthcare/{hl7-processor,dicom-storage,hipaa-audit}/nickel
-
-

Example 3: Manufacturing Company

-
# Manufacturing specific extensions
-mkdir -p extensions/taskservs/manufacturing/{iot-gateway,scada-bridge,quality-system}/nickel
-
-

Usage Examples

-

Loading Infrastructure-Specific Extensions

-
# Load company-specific extensions
-cd workspace/infra/production
-module-loader load taskservs . [legacy-erp, compliance-monitor, legacy-bridge]
-module-loader load providers . [company-private-cloud]
-module-loader load clusters . [company-environments]
-
-# Verify loading
-module-loader list taskservs .
-module-loader validate .
-
-

Using in Server Configuration

-
# Import loaded extensions
-import .taskservs.legacy-erp.legacy-erp as erp
-import .taskservs.compliance-monitor.compliance-monitor as compliance
-import .providers.company-private-cloud as private_cloud
-
-# Configure servers with company-specific extensions
-company_servers: [server.Server] = [
-    {
-        hostname = "erp-prod-01"
-        title = "Production ERP Server"
-
-        # Use company private cloud
-        # Provider-specific configuration goes here
-
-        taskservs = [
-            {
-                name = "legacy-erp"
-                profile = "production"
-            },
-            {
-                name = "compliance-monitor"
-                profile = "default"
-            }
-        ]
-    }
-]
-
-

This comprehensive guide covers all aspects of creating infrastructure-specific extensions, from assessment and planning to implementation and deployment.

-

Command Handler Developer Guide

-

Target Audience: Developers working on the provisioning CLI -Last Updated: 2025-09-30 -Related: ADR-006 CLI Refactoring

-

Overview

-

The provisioning CLI uses a modular, domain-driven architecture that separates concerns into focused command handlers. This guide shows you how to -work with this architecture.

-

Key Architecture Principles

-
    -
  1. Separation of Concerns: Routing, flag parsing, and business logic are separated
  2. -
  3. Domain-Driven Design: Commands organized by domain (infrastructure, orchestration, etc.)
  4. -
  5. DRY (Don’t Repeat Yourself): Centralized flag handling eliminates code duplication
  6. -
  7. Single Responsibility: Each module has one clear purpose
  8. -
  9. Open/Closed Principle: Easy to extend, no need to modify core routing
  10. -
-

Architecture Components

-
provisioning/core/nulib/
-├── provisioning (211 lines) - Main entry point
-├── main_provisioning/
-│   ├── flags.nu (139 lines) - Centralized flag handling
-│   ├── dispatcher.nu (264 lines) - Command routing
-│   ├── help_system.nu - Categorized help system
-│   └── commands/ - Domain-focused handlers
-│       ├── infrastructure.nu (117 lines) - Server, taskserv, cluster, infra
-│       ├── orchestration.nu (64 lines) - Workflow, batch, orchestrator
-│       ├── development.nu (72 lines) - Module, layer, version, pack
-│       ├── workspace.nu (56 lines) - Workspace, template
-│       ├── generation.nu (78 lines) - Generate commands
-│       ├── utilities.nu (157 lines) - SSH, SOPS, cache, providers
-│       └── configuration.nu (316 lines) - Env, show, init, validate
-
-

Adding New Commands

-

Step 1: Choose the Right Domain Handler

-

Commands are organized by domain. Choose the appropriate handler:

-
- - - - - - - -
DomainHandlerResponsibility
infrastructureinfrastructure.nuServer/taskserv/cluster/infra lifecycle
orchestrationorchestration.nuWorkflow/batch operations, orchestrator control
developmentdevelopment.nuModule discovery, layers, versions, packaging
workspaceworkspace.nuWorkspace and template management
configurationconfiguration.nuEnvironment, settings, initialization
utilitiesutilities.nuSSH, SOPS, cache, providers, utilities
generationgeneration.nuGenerate commands (server, taskserv, etc.)
-
-

Step 2: Add Command to Handler

-

Example: Adding a new server command server status

-

Edit provisioning/core/nulib/main_provisioning/commands/infrastructure.nu:

-
# Add to the handle_infrastructure_command match statement
-export def handle_infrastructure_command [
-  command: string
-  ops: string
-  flags: record
-] {
-  set_debug_env $flags
-
-  match $command {
-    "server" => { handle_server $ops $flags }
-    "taskserv" | "task" => { handle_taskserv $ops $flags }
-    "cluster" => { handle_cluster $ops $flags }
-    "infra" | "infras" => { handle_infra $ops $flags }
-    _ => {
-      print $"❌ Unknown infrastructure command: ($command)"
-      print ""
-      print "Available infrastructure commands:"
-      print "  server      - Server operations (create, delete, list, ssh, status)"  # Updated
-      print "  taskserv    - Task service management"
-      print "  cluster     - Cluster operations"
-      print "  infra       - Infrastructure management"
-      print ""
-      print "Use 'provisioning help infrastructure' for more details"
-      exit 1
-    }
-  }
-}
-
-# Add the new command handler
-def handle_server [ops: string, flags: record] {
-  let args = build_module_args $flags $ops
-  run_module $args "server" --exec
-}
-
-

That’s it! The command is now available as provisioning server status.

-

Step 3: Add Shortcuts (Optional)

-

If you want shortcuts like provisioning s status:

-

Edit provisioning/core/nulib/main_provisioning/dispatcher.nu:

-
export def get_command_registry []: nothing -> record {
-  {
-    # Infrastructure commands
-    "s" => "infrastructure server"           # Already exists
-    "server" => "infrastructure server"      # Already exists
-
-    # Your new shortcut (if needed)
-    # Example: "srv-status" => "infrastructure server status"
-
-    # ... rest of registry
-  }
-}
-
-

Note: Most shortcuts are already configured. You only need to add new shortcuts if you’re creating completely new command categories.

-

Modifying Existing Handlers

-

Example: Enhancing the taskserv Command

-

Let’s say you want to add better error handling to the taskserv command:

-

Before:

-
def handle_taskserv [ops: string, flags: record] {
-  let args = build_module_args $flags $ops
-  run_module $args "taskserv" --exec
-}
-
-

After:

-
def handle_taskserv [ops: string, flags: record] {
-  # Validate taskserv name if provided
-  let first_arg = ($ops | split row " " | get -o 0)
-  if ($first_arg | is-not-empty) and $first_arg not-in ["create", "delete", "list", "generate", "check-updates", "help"] {
-    # Check if taskserv exists
-    let available_taskservs = (^$env.PROVISIONING_NAME module discover taskservs | from json)
-    if $first_arg not-in $available_taskservs {
-      print $"❌ Unknown taskserv: ($first_arg)"
-      print ""
-      print "Available taskservs:"
-      $available_taskservs | each { |ts| print $"  • ($ts)" }
-      exit 1
-    }
-  }
-
-  let args = build_module_args $flags $ops
-  run_module $args "taskserv" --exec
-}
-
-

Working with Flags

-

Using Centralized Flag Handling

-

The flags.nu module provides centralized flag handling:

-
# Parse all flags into normalized record
-let parsed_flags = (parse_common_flags {
-  version: $version, v: $v, info: $info,
-  debug: $debug, check: $check, yes: $yes,
-  wait: $wait, infra: $infra, # ... etc
-})
-
-# Build argument string for module execution
-let args = build_module_args $parsed_flags $ops
-
-# Set environment variables based on flags
-set_debug_env $parsed_flags
-
-

Available Flag Parsing

-

The parse_common_flags function normalizes these flags:

-
- - - - - - - - - - - - - - - -
Flag Record FieldDescription
show_versionVersion display (--version, -v)
show_infoInfo display (--info, -i)
show_aboutAbout display (--about, -a)
debug_modeDebug mode (--debug, -x)
check_modeCheck mode (--check, -c)
auto_confirmAuto-confirm (--yes, -y)
waitWait for completion (--wait, -w)
keep_storageKeep storage (--keepstorage)
infraInfrastructure name (--infra)
outfileOutput file (--outfile)
output_formatOutput format (--out)
templateTemplate name (--template)
selectSelection (--select)
settingsSettings file (--settings)
new_infraNew infra name (--new)
-
-

Adding New Flags

-

If you need to add a new flag:

-
    -
  1. Update main provisioning file to accept the flag
  2. -
  3. Update flags.nu:parse_common_flags to normalize it
  4. -
  5. Update flags.nu:build_module_args to pass it to modules
  6. -
-

Example: Adding --timeout flag

-
# 1. In provisioning main file (parameter list)
-def main [
-  # ... existing parameters
-  --timeout: int = 300        # Timeout in seconds
-  # ... rest of parameters
-] {
-  # ... existing code
-  let parsed_flags = (parse_common_flags {
-    # ... existing flags
-    timeout: $timeout
-  })
-}
-
-# 2. In flags.nu:parse_common_flags
-export def parse_common_flags [flags: record]: nothing -> record {
-  {
-    # ... existing normalizations
-    timeout: ($flags.timeout? | default 300)
-  }
-}
-
-# 3. In flags.nu:build_module_args
-export def build_module_args [flags: record, extra: string = ""]: nothing -> string {
-  # ... existing code
-  let str_timeout = if ($flags.timeout != 300) { $"--timeout ($flags.timeout) " } else { "" }
-  # ... rest of function
-  $"($extra) ($use_check)($use_yes)($use_wait)($str_timeout)..."
-}
-
-

Adding New Shortcuts

-

Shortcut Naming Conventions

-
    -
  • 1-2 letters: Ultra-short for common commands (s for server, ws for workspace)
  • -
  • 3-4 letters: Abbreviations (orch for orchestrator, tmpl for template)
  • -
  • Aliases: Alternative names (task for taskserv, flow for workflow)
  • -
-

Example: Adding a New Shortcut

-

Edit provisioning/core/nulib/main_provisioning/dispatcher.nu:

-
export def get_command_registry []: nothing -> record {
-  {
-    # ... existing shortcuts
-
-    # Add your new shortcut
-    "db" => "infrastructure database"          # New: db command
-    "database" => "infrastructure database"    # Full name
-
-    # ... rest of registry
-  }
-}
-
-

Important: After adding a shortcut, update the help system in help_system.nu to document it.

-

Testing Your Changes

-

Running the Test Suite

-
# Run comprehensive test suite
-nu tests/test_provisioning_refactor.nu
-
-

Test Coverage

-

The test suite validates:

-
    -
  • ✅ Main help display
  • -
  • ✅ Category help (infrastructure, orchestration, development, workspace)
  • -
  • ✅ Bi-directional help routing
  • -
  • ✅ All command shortcuts
  • -
  • ✅ Category shortcut help
  • -
  • ✅ Command routing to correct handlers
  • -
-

Adding Tests for Your Changes

-

Edit tests/test_provisioning_refactor.nu:

-
# Add your test function
-export def test_my_new_feature [] {
-  print "\n🧪 Testing my new feature..."
-
-  let output = (run_provisioning "my-command" "test")
-  assert_contains $output "Expected Output" "My command works"
-}
-
-# Add to main test runner
-export def main [] {
-  # ... existing tests
-
-  let results = [
-    # ... existing test calls
-    (try { test_my_new_feature; "passed" } catch { "failed" })
-  ]
-
-  # ... rest of main
-}
-
-

Manual Testing

-
# Test command execution
-provisioning/core/cli/provisioning my-command test --check
-
-# Test with debug mode
-provisioning/core/cli/provisioning --debug my-command test
-
-# Test help
-provisioning/core/cli/provisioning my-command help
-provisioning/core/cli/provisioning help my-command  # Bi-directional
-
-

Common Patterns

-

Pattern 1: Simple Command Handler

-

Use Case: Command just needs to execute a module with standard flags

-
def handle_simple_command [ops: string, flags: record] {
-  let args = build_module_args $flags $ops
-  run_module $args "module_name" --exec
-}
-
-

Pattern 2: Command with Validation

-

Use Case: Need to validate input before execution

-
def handle_validated_command [ops: string, flags: record] {
-  # Validate
-  let first_arg = ($ops | split row " " | get -o 0)
-  if ($first_arg | is-empty) {
-    print "❌ Missing required argument"
-    print "Usage: provisioning command <arg>"
-    exit 1
-  }
-
-  # Execute
-  let args = build_module_args $flags $ops
-  run_module $args "module_name" --exec
-}
-
-

Pattern 3: Command with Subcommands

-

Use Case: Command has multiple subcommands (like server create, server delete)

-
def handle_complex_command [ops: string, flags: record] {
-  let subcommand = ($ops | split row " " | get -o 0)
-  let rest_ops = ($ops | split row " " | skip 1 | str join " ")
-
-  match $subcommand {
-    "create" => { handle_create $rest_ops $flags }
-    "delete" => { handle_delete $rest_ops $flags }
-    "list" => { handle_list $rest_ops $flags }
-    _ => {
-      print "❌ Unknown subcommand: $subcommand"
-      print "Available: create, delete, list"
-      exit 1
-    }
-  }
-}
-
-

Pattern 4: Command with Flag-Based Routing

-

Use Case: Command behavior changes based on flags

-
def handle_flag_routed_command [ops: string, flags: record] {
-  if $flags.check_mode {
-    # Dry-run mode
-    print "🔍 Check mode: simulating command..."
-    let args = build_module_args $flags $ops
-    run_module $args "module_name" # No --exec, returns output
-  } else {
-    # Normal execution
-    let args = build_module_args $flags $ops
-    run_module $args "module_name" --exec
-  }
-}
-
-

Best Practices

-

1. Keep Handlers Focused

-

Each handler should do one thing well:

-
    -
  • ✅ Good: handle_server manages all server operations
  • -
  • ❌ Bad: handle_server also manages clusters and taskservs
  • -
-

2. Use Descriptive Error Messages

-
# ❌ Bad
-print "Error"
-
-# ✅ Good
-print "❌ Unknown taskserv: kubernetes-invalid"
-print ""
-print "Available taskservs:"
-print "  • kubernetes"
-print "  • containerd"
-print "  • cilium"
-print ""
-print "Use 'provisioning taskserv list' to see all available taskservs"
-
-

3. Leverage Centralized Functions

-

Don’t repeat code - use centralized functions:

-
# ❌ Bad: Repeating flag handling
-def handle_bad [ops: string, flags: record] {
-  let use_check = if $flags.check_mode { "--check " } else { "" }
-  let use_yes = if $flags.auto_confirm { "--yes " } else { "" }
-  let str_infra = if ($flags.infra | is-not-empty) { $"--infra ($flags.infra) " } else { "" }
-  # ... 10 more lines of flag handling
-  run_module $"($ops) ($use_check)($use_yes)($str_infra)..." "module" --exec
-}
-
-# ✅ Good: Using centralized function
-def handle_good [ops: string, flags: record] {
-  let args = build_module_args $flags $ops
-  run_module $args "module" --exec
-}
-
-

4. Document Your Changes

-

Update relevant documentation:

-
    -
  • ADR-006: If architectural changes
  • -
  • CLAUDE.md: If new commands or shortcuts
  • -
  • help_system.nu: If new categories or commands
  • -
  • This guide: If new patterns or conventions
  • -
-

5. Test Thoroughly

-

Before committing:

-
    -
  • -Run test suite: nu tests/test_provisioning_refactor.nu
  • -
  • -Test manual execution
  • -
  • -Test with --check flag
  • -
  • -Test with --debug flag
  • -
  • -Test help: both provisioning cmd help and provisioning help cmd
  • -
  • -Test shortcuts
  • -
-

Troubleshooting

-

Issue: “Module not found”

-

Cause: Incorrect import path in handler

-

Fix: Use relative imports with .nu extension:

-
# ✅ Correct
-use ../flags.nu *
-use ../../lib_provisioning *
-
-# ❌ Wrong
-use ../main_provisioning/flags *
-use lib_provisioning *
-
-

Issue: “Parse mismatch: expected colon”

-

Cause: Missing type signature format

-

Fix: Use proper Nushell 0.107 type signature:

-
# ✅ Correct
-export def my_function [param: string]: nothing -> string {
-  "result"
-}
-
-# ❌ Wrong
-export def my_function [param: string] -> string {
-  "result"
-}
-
-

Issue: “Command not routing correctly”

-

Cause: Shortcut not in command registry

-

Fix: Add to dispatcher.nu:get_command_registry:

-
"myshortcut" => "domain command"
-
-

Issue: “Flags not being passed”

-

Cause: Not using build_module_args

-

Fix: Use centralized flag builder:

-
let args = build_module_args $flags $ops
-run_module $args "module" --exec
-
-

Quick Reference

-

File Locations

-
provisioning/core/nulib/
-├── provisioning - Main entry, flag definitions
-├── main_provisioning/
-│   ├── flags.nu - Flag parsing (parse_common_flags, build_module_args)
-│   ├── dispatcher.nu - Routing (get_command_registry, dispatch_command)
-│   ├── help_system.nu - Help (provisioning-help, help-*)
-│   └── commands/ - Domain handlers (handle_*_command)
-tests/
-└── test_provisioning_refactor.nu - Test suite
-docs/
-├── architecture/
-│   └── adr-006-provisioning-cli-refactoring.md - Architecture docs
-└── development/
-    └── COMMAND_HANDLER_GUIDE.md - This guide
-
-

Key Functions

-
# In flags.nu
-parse_common_flags [flags: record]: nothing -> record
-build_module_args [flags: record, extra: string = ""]: nothing -> string
-set_debug_env [flags: record]
-get_debug_flag [flags: record]: nothing -> string
-
-# In dispatcher.nu
-get_command_registry []: nothing -> record
-dispatch_command [args: list, flags: record]
-
-# In help_system.nu
-provisioning-help [category?: string]: nothing -> string
-help-infrastructure []: nothing -> string
-help-orchestration []: nothing -> string
-# ... (one for each category)
-
-# In commands/*.nu
-handle_*_command [command: string, ops: string, flags: record]
-# Example: handle_infrastructure_command, handle_workspace_command
-
-

Testing Commands

-
# Run full test suite
-nu tests/test_provisioning_refactor.nu
-
-# Test specific command
-provisioning/core/cli/provisioning my-command test --check
-
-# Test with debug
-provisioning/core/cli/provisioning --debug my-command test
-
-# Test help
-provisioning/core/cli/provisioning help my-command
-provisioning/core/cli/provisioning my-command help  # Bi-directional
+provisioning server create --infra <name>
+provisioning server list
+provisioning server status <hostname>
+provisioning server ssh <hostname>
+provisioning server delete <hostname>
+
+# Task service operations
+provisioning taskserv create <service> --infra <name>
+provisioning taskserv list
+provisioning taskserv status <service>
+provisioning taskserv delete <service>
+
+# Configuration
+provisioning config show
+provisioning validate config
+provisioning env
+
+

Quick Reference

+
# Shortcut for fastest reference
+provisioning sc
 

Further Reading

-

Contributing

-

When contributing command handler changes:

-
    -
  1. Follow existing patterns - Use the patterns in this guide
  2. -
  3. Update documentation - Keep docs in sync with code
  4. -
  5. Add tests - Cover your new functionality
  6. -
  7. Run test suite - Ensure nothing breaks
  8. -
  9. Update CLAUDE.md - Document new commands/shortcuts
  10. -
-

For questions or issues, refer to ADR-006 or ask the team.

-
-

This guide is part of the provisioning project documentation. Last updated: 2025-09-30

-

Development Workflow Guide

-

This document outlines the recommended development workflows, coding practices, testing strategies, and debugging techniques for the provisioning -project.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Development Setup
  4. -
  5. Daily Development Workflow
  6. -
  7. Code Organization
  8. -
  9. Testing Strategies
  10. -
  11. Debugging Techniques
  12. -
  13. Integration Workflows
  14. -
  15. Collaboration Guidelines
  16. -
  17. Quality Assurance
  18. -
  19. Best Practices
  20. -
-

Overview

-

The provisioning project employs a multi-language, multi-component architecture requiring specific development workflows to maintain consistency, -quality, and efficiency.

-

Key Technologies:

+

First Deployment

+

Comprehensive walkthrough deploying production-ready infrastructure with the Provisioning platform.

+

Overview

+

This guide walks through deploying a complete Kubernetes cluster with storage and networking +on a cloud provider. You’ll learn workspace management, Nickel schema structure, provider +configuration, dependency resolution, and validation workflows.

+

Deployment Architecture

+

What we’ll build:

    -
  • Nushell: Primary scripting and automation language
  • -
  • Rust: High-performance system components
  • -
  • KCL: Configuration language and schemas
  • -
  • TOML: Configuration files
  • -
  • Jinja2: Template engine
  • +
  • 3-node Kubernetes cluster (1 control plane, 2 workers)
  • +
  • Cilium CNI for networking
  • +
  • Rook-Ceph for persistent storage
  • +
  • Container runtime (containerd)
  • +
  • Automated dependency resolution
  • +
  • Health monitoring
-

Development Principles:

+

Prerequisites

    -
  • Configuration-Driven: Never hardcode, always configure
  • -
  • Hybrid Architecture: Rust for performance, Nushell for flexibility
  • -
  • Test-First: Comprehensive testing at all levels
  • -
  • Documentation-Driven: Code and APIs are self-documenting
  • +
  • Platform installed
  • +
  • Cloud provider credentials configured (UpCloud or AWS recommended)
  • +
  • 30-60 minutes for complete deployment
-

Development Setup

-

Initial Environment Setup

-

1. Clone and Navigate:

-
# Clone repository
-git clone https://github.com/company/provisioning-system.git
-cd provisioning-system
+

Part 1: Workspace Setup

+

Create Workspace

+
# Initialize production workspace
+provisioning workspace init production-k8s
+cd production-k8s
 
-# Navigate to workspace
-cd workspace/tools
+# Verify structure
+ls -la
 
-

2. Initialize Workspace:

-
# Initialize development workspace
-nu workspace.nu init --user-name $USER --infra-name dev-env
-
-# Check workspace health
-nu workspace.nu health --detailed --fix-issues
+

Workspace contains:

+
production-k8s/
+├── infra/       # Infrastructure Nickel schemas
+├── config/      # Workspace configuration
+├── extensions/  # Custom providers/taskservs
+└── runtime/     # State and logs
 
-

3. Configure Development Environment:

-
# Create user configuration
-cp workspace/config/local-overrides.toml.example workspace/config/$USER.toml
+

Configure Workspace

+
# Edit workspace configuration
+cat > config/provisioning-config.yaml <<'EOF'
+workspace:
+  name: production-k8s
+  environment: production
 
-# Edit configuration for development
-$EDITOR workspace/config/$USER.toml
+defaults:
+  provider: upcloud  # or aws
+  region: de-fra1    # UpCloud Frankfurt
+  ssh_key_path: ~/.ssh/provisioning_production
+
+servers:
+  default_plan: medium
+  auto_backup: true
+
+logging:
+  level: info
+  format: text
+EOF
 
-

4. Set Up Build System:

-
# Navigate to build tools
-cd src/tools
-
-# Check build prerequisites
-make info
-
-# Perform initial build
-make dev-build
-
-

Tool Installation

-

Required Tools:

-
# Install Nushell
-cargo install nu
-
-# Install Nickel
-cargo install nickel
-
-# Install additional tools
-cargo install cross          # Cross-compilation
-cargo install cargo-audit    # Security auditing
-cargo install cargo-watch    # File watching
-
-

Optional Development Tools:

-
# Install development enhancers
-cargo install nu_plugin_tera    # Template plugin
-cargo install sops              # Secrets management
-brew install k9s                # Kubernetes management
-
-

IDE Configuration

-

VS Code Setup (.vscode/settings.json):

-
{
-  "files.associations": {
-    "*.nu": "shellscript",
-    "*.ncl": "nickel",
-    "*.toml": "toml"
-  },
-  "nushell.shellPath": "/usr/local/bin/nu",
-  "rust-analyzer.cargo.features": "all",
-  "editor.formatOnSave": true,
-  "editor.rulers": [100],
-  "files.trimTrailingWhitespace": true
-}
-
-

Recommended Extensions:

-
    -
  • Nushell Language Support
  • -
  • Rust Analyzer
  • -
  • Nickel Language Support
  • -
  • TOML Language Support
  • -
  • Better TOML
  • -
-

Daily Development Workflow

-

Morning Routine

-

1. Sync and Update:

-
# Sync with upstream
-git pull origin main
-
-# Update workspace
-cd workspace/tools
-nu workspace.nu health --fix-issues
-
-# Check for updates
-nu workspace.nu status --detailed
-
-

2. Review Current State:

-
# Check current infrastructure
-provisioning show servers
-provisioning show settings
-
-# Review workspace status
-nu workspace.nu status
-
-

Development Cycle

-

1. Feature Development:

-
# Create feature branch
-git checkout -b feature/new-provider-support
-
-# Start development environment
-cd workspace/tools
-nu workspace.nu init --workspace-type development
-
-# Begin development
-$EDITOR workspace/extensions/providers/new-provider/nulib/provider.nu
-
-

2. Incremental Testing:

-
# Test syntax during development
-nu --check workspace/extensions/providers/new-provider/nulib/provider.nu
-
-# Run unit tests
-nu workspace/extensions/providers/new-provider/tests/unit/basic-test.nu
-
-# Integration testing
-nu workspace.nu tools test-extension providers/new-provider
-
-

3. Build and Validate:

-
# Quick development build
-cd src/tools
-make dev-build
-
-# Validate changes
-make validate-all
-
-# Test distribution
-make test-dist
-
-

Testing During Development

-

Unit Testing:

-
# Add test examples to functions
-def create-server [name: string] -> record {
-    # @test: "test-server" -> {name: "test-server", status: "created"}
-    # Implementation here
-}
-
-

Integration Testing:

-
# Test with real infrastructure
-nu workspace/extensions/providers/new-provider/nulib/provider.nu \
-    create-server test-server --dry-run
-
-# Test with workspace isolation
-PROVISIONING_WORKSPACE_USER=$USER provisioning server create test-server --check
-
-

End-of-Day Routine

-

1. Commit Progress:

-
# Stage changes
-git add .
-
-# Commit with descriptive message
-git commit -m "feat(provider): add new cloud provider support
-
-- Implement basic server creation
-- Add configuration schema
-- Include unit tests
-- Update documentation"
-
-# Push to feature branch
-git push origin feature/new-provider-support
-
-

2. Workspace Maintenance:

-
# Clean up development data
-nu workspace.nu cleanup --type cache --age 1d
-
-# Backup current state
-nu workspace.nu backup --auto-name --components config,extensions
-
-# Check workspace health
-nu workspace.nu health
-
-

Code Organization

-

Nushell Code Structure

-

File Organization:

-
Extension Structure:
-├── nulib/
-│   ├── main.nu              # Main entry point
-│   ├── core/                # Core functionality
-│   │   ├── api.nu           # API interactions
-│   │   ├── config.nu        # Configuration handling
-│   │   └── utils.nu         # Utility functions
-│   ├── commands/            # User commands
-│   │   ├── create.nu        # Create operations
-│   │   ├── delete.nu        # Delete operations
-│   │   └── list.nu          # List operations
-│   └── tests/               # Test files
-│       ├── unit/            # Unit tests
-│       └── integration/     # Integration tests
-└── templates/               # Template files
-    ├── config.j2            # Configuration templates
-    └── manifest.j2          # Manifest templates
-
-

Function Naming Conventions:

-
# Use kebab-case for commands
-def create-server [name: string] -> record { ... }
-def validate-config [config: record] -> bool { ... }
-
-# Use snake_case for internal functions
-def get_api_client [] -> record { ... }
-def parse_config_file [path: string] -> record { ... }
-
-# Use descriptive prefixes
-def check-server-status [server: string] -> string { ... }
-def get-server-info [server: string] -> record { ... }
-def list-available-zones [] -> list<string> { ... }
-
-

Error Handling Pattern:

-
def create-server [
-    name: string
-    --dry-run: bool = false
-] -> record {
-    # 1. Validate inputs
-    if ($name | str length) == 0 {
-        error make {
-            msg: "Server name cannot be empty"
-            label: {
-                text: "empty name provided"
-                span: (metadata $name).span
-            }
-        }
-    }
-
-    # 2. Check prerequisites
-    let config = try {
-        get-provider-config
-    } catch {
-        error make {msg: "Failed to load provider configuration"}
-    }
-
-    # 3. Perform operation
-    if $dry_run {
-        return {action: "create", server: $name, status: "dry-run"}
-    }
-
-    # 4. Return result
-    {server: $name, status: "created", id: (generate-id)}
-}
-
-

Rust Code Structure

-

Project Organization:

-
src/
-├── lib.rs                   # Library root
-├── main.rs                  # Binary entry point
-├── config/                  # Configuration handling
-│   ├── mod.rs
-│   ├── loader.rs            # Config loading
-│   └── validation.rs        # Config validation
-├── api/                     # HTTP API
-│   ├── mod.rs
-│   ├── handlers.rs          # Request handlers
-│   └── middleware.rs        # Middleware components
-└── orchestrator/            # Orchestration logic
-    ├── mod.rs
-    ├── workflow.rs          # Workflow management
-    └── task_queue.rs        # Task queue management
-
-

Error Handling:

-
use anyhow::{Context, Result};
-use thiserror::Error;
-
-#[derive(Error, Debug)]
-pub enum ProvisioningError {
-    #[error("Configuration error: {message}")]
-    Config { message: String },
-
-    #[error("Network error: {source}")]
-    Network {
-        #[from]
-        source: reqwest::Error,
-    },
-
-    #[error("Validation failed: {field}")]
-    Validation { field: String },
-}
-
-pub fn create_server(name: &str) -> Result<ServerInfo> {
-    let config = load_config()
-        .context("Failed to load configuration")?;
-
-    validate_server_name(name)
-        .context("Server name validation failed")?;
-
-    let server = provision_server(name, &config)
-        .context("Failed to provision server")?;
-
-    Ok(server)
-}
-

Nickel Schema Organization

-

Schema Structure:

-
# Base schema definitions
-let ServerConfig = {
-    name | string,
-    plan | string,
-    zone | string,
-    tags | { } | default = {},
-} in
-ServerConfig
-
-# Provider-specific extensions
-let UpCloudServerConfig = {
-    template | string | default = "Ubuntu Server 22.04 LTS (Jammy Jellyfish)",
-    storage | number | default = 25,
-} in
-UpCloudServerConfig
-
-# Composition schemas
-let InfrastructureConfig = {
-    servers | array,
-    networks | array | default = [],
-    load_balancers | array | default = [],
-} in
-InfrastructureConfig
-
-

Testing Strategies

-

Test-Driven Development

-

TDD Workflow:

-
    -
  1. Write Test First: Define expected behavior
  2. -
  3. Run Test (Fail): Confirm test fails as expected
  4. -
  5. Write Code: Implement minimal code to pass
  6. -
  7. Run Test (Pass): Confirm test now passes
  8. -
  9. Refactor: Improve code while keeping tests green
  10. -
-

Nushell Testing

-

Unit Test Pattern:

-
# Function with embedded test
-def validate-server-name [name: string] -> bool {
-    # @test: "valid-name" -> true
-    # @test: "" -> false
-    # @test: "name-with-spaces" -> false
-
-    if ($name | str length) == 0 {
-        return false
-    }
-
-    if ($name | str contains " ") {
-        return false
-    }
-
-    true
-}
-
-# Separate test file
-# tests/unit/server-validation-test.nu
-def test_validate_server_name [] {
-    # Valid cases
-    assert (validate-server-name "valid-name")
-    assert (validate-server-name "server123")
-
-    # Invalid cases
-    assert not (validate-server-name "")
-    assert not (validate-server-name "name with spaces")
-    assert not (validate-server-name "name@with!special")
-
-    print "✅ validate-server-name tests passed"
-}
-
-

Integration Test Pattern:

-
# tests/integration/server-lifecycle-test.nu
-def test_complete_server_lifecycle [] {
-    # Setup
-    let test_server = "test-server-" + (date now | format date "%Y%m%d%H%M%S")
-
-    try {
-        # Test creation
-        let create_result = (create-server $test_server --dry-run)
-        assert ($create_result.status == "dry-run")
-
-        # Test validation
-        let validate_result = (validate-server-config $test_server)
-        assert $validate_result
-
-        print $"✅ Server lifecycle test passed for ($test_server)"
-    } catch { |e|
-        print $"❌ Server lifecycle test failed: ($e.msg)"
-        exit 1
-    }
-}
-
-

Rust Testing

-

Unit Testing:

-
#[cfg(test)]
-mod tests {
-    use super::*;
-    use tokio_test;
-
-    #[test]
-    fn test_validate_server_name() {
-        assert!(validate_server_name("valid-name"));
-        assert!(validate_server_name("server123"));
-
-        assert!(!validate_server_name(""));
-        assert!(!validate_server_name("name with spaces"));
-        assert!(!validate_server_name("name@special"));
-    }
-
-    #[tokio::test]
-    async fn test_server_creation() {
-        let config = test_config();
-        let result = create_server("test-server", &config).await;
-
-        assert!(result.is_ok());
-        let server = result.unwrap();
-        assert_eq!(server.name, "test-server");
-        assert_eq!(server.status, "created");
-    }
-}
-

Integration Testing:

-
#[cfg(test)]
-mod integration_tests {
-    use super::*;
-    use testcontainers::*;
-
-    #[tokio::test]
-    async fn test_full_workflow() {
-        // Setup test environment
-        let docker = clients::Cli::default();
-        let postgres = docker.run(images::postgres::Postgres::default());
-
-        let config = TestConfig {
-            database_url: format!("postgresql://localhost:{}/test",
-                                 postgres.get_host_port_ipv4(5432))
-        };
-
-        // Test complete workflow
-        let workflow = create_workflow(&config).await.unwrap();
-        let result = execute_workflow(workflow).await.unwrap();
-
-        assert_eq!(result.status, WorkflowStatus::Completed);
-    }
-}
-

Nickel Testing

-

Schema Validation Testing:

-
# Test Nickel schemas
-nickel check schemas/
-
-# Validate specific schemas
-nickel typecheck schemas/server.ncl
-
-# Test with examples
-nickel eval schemas/server.ncl
-
-

Test Automation

-

Continuous Testing:

-
# Watch for changes and run tests
-cargo watch -x test -x check
-
-# Watch Nushell files
-find . -name "*.nu" | entr -r nu tests/run-all-tests.nu
-
-# Automated testing in workspace
-nu workspace.nu tools test-all --watch
-
-

Debugging Techniques

-

Debug Configuration

-

Enable Debug Mode:

-
# Environment variables
-export PROVISIONING_DEBUG=true
-export PROVISIONING_LOG_LEVEL=debug
-export RUST_LOG=debug
-export RUST_BACKTRACE=1
-
-# Workspace debug
-export PROVISIONING_WORKSPACE_USER=$USER
-
-

Nushell Debugging

-

Debug Techniques:

-
# Debug prints
-def debug-server-creation [name: string] {
-    print $"🐛 Creating server: ($name)"
-
-    let config = get-provider-config
-    print $"🐛 Config loaded: ($config | to json)"
-
-    let result = try {
-        create-server-api $name $config
-    } catch { |e|
-        print $"🐛 API call failed: ($e.msg)"
-        $e
-    }
-
-    print $"🐛 Result: ($result | to json)"
-    $result
-}
-
-# Conditional debugging
-def create-server [name: string] {
-    if $env.PROVISIONING_DEBUG? == "true" {
-        print $"Debug: Creating server ($name)"
-    }
-
-    # Implementation
-}
-
-# Interactive debugging
-def debug-interactive [] {
-    print "🐛 Entering debug mode..."
-    print "Available commands: $env.PATH"
-    print "Current config: " (get-config | to json)
-
-    # Drop into interactive shell
-    nu --interactive
-}
-
-

Error Investigation:

-
# Comprehensive error handling
-def safe-server-creation [name: string] {
-    try {
-        create-server $name
-    } catch { |e|
-        # Log error details
-        {
-            timestamp: (date now | format date "%Y-%m-%d %H:%M:%S"),
-            operation: "create-server",
-            input: $name,
-            error: $e.msg,
-            debug: $e.debug?,
-            env: {
-                user: $env.USER,
-                workspace: $env.PROVISIONING_WORKSPACE_USER?,
-                debug: $env.PROVISIONING_DEBUG?
-            }
-        } | save --append logs/error-debug.json
-
-        # Re-throw with context
-        error make {
-            msg: $"Server creation failed: ($e.msg)",
-            label: {text: "failed here", span: $e.span?}
-        }
-    }
-}
-
-

Rust Debugging

-

Debug Logging:

-
use tracing::{debug, info, warn, error, instrument};
-
-#[instrument]
-pub async fn create_server(name: &str) -> Result<ServerInfo> {
-    debug!("Starting server creation for: {}", name);
-
-    let config = load_config()
-        .map_err(|e| {
-            error!("Failed to load config: {:?}", e);
-            e
-        })?;
-
-    info!("Configuration loaded successfully");
-    debug!("Config details: {:?}", config);
-
-    let server = provision_server(name, &config).await
-        .map_err(|e| {
-            error!("Provisioning failed for {}: {:?}", name, e);
-            e
-        })?;
-
-    info!("Server {} created successfully", name);
-    Ok(server)
-}
-

Interactive Debugging:

-
// Use debugger breakpoints
-#[cfg(debug_assertions)]
+

Part 2: Infrastructure Definition

+

Define Nickel Schema

+

Create infrastructure definition with type-safe Nickel:

+
# Create Kubernetes cluster schema
+cat > infra/k8s-cluster.ncl <<'EOF'
 {
-    println!("Debug: server creation starting");
-    dbg!(&config);
-    // Add breakpoint here in IDE
-}
-

Log Analysis

-

Log Monitoring:

-
# Follow all logs
-tail -f workspace/runtime/logs/$USER/*.log
+  metadata = {
+    name = "k8s-prod"
+    provider = "upcloud"
+    environment = "production"
+    version = "1.0.0"
+  }
 
-# Filter for errors
-grep -i error workspace/runtime/logs/$USER/*.log
+  infrastructure = {
+    servers = [
+      {
+        name = "k8s-control-01"
+        plan = "medium"      # 4 CPU, 8 GB RAM
+        role = "control"
+        zone = "de-fra1"
+        disk_size_gb = 50
+        backup_enabled = true
+      }
+      {
+        name = "k8s-worker-01"
+        plan = "large"       # 8 CPU, 16 GB RAM
+        role = "worker"
+        zone = "de-fra1"
+        disk_size_gb = 100
+        backup_enabled = true
+      }
+      {
+        name = "k8s-worker-02"
+        plan = "large"
+        role = "worker"
+        zone = "de-fra1"
+        disk_size_gb = 100
+        backup_enabled = true
+      }
+    ]
+  }
 
-# Monitor specific component
-tail -f workspace/runtime/logs/$USER/orchestrator.log | grep -i workflow
+  services = {
+    taskservs = [
+      "containerd"     # Container runtime (dependency)
+      "etcd"           # Key-value store (dependency)
+      "kubernetes"     # Core orchestration
+      "cilium"         # CNI networking
+      "rook-ceph"      # Persistent storage
+    ]
+  }
 
-# Structured log analysis
-jq '.level == "ERROR"' workspace/runtime/logs/$USER/structured.jsonl
-
-

Debug Log Levels:

-
# Different verbosity levels
-PROVISIONING_LOG_LEVEL=trace provisioning server create test
-PROVISIONING_LOG_LEVEL=debug provisioning server create test
-PROVISIONING_LOG_LEVEL=info provisioning server create test
-
-

Integration Workflows

-

Existing System Integration

-

Working with Legacy Components:

-
# Test integration with existing system
-provisioning --version                    # Legacy system
-src/core/nulib/provisioning --version    # New system
+  kubernetes = {
+    version = "1.28.0"
+    pod_cidr = "10.244.0.0/16"
+    service_cidr = "10.96.0.0/12"
+    container_runtime = "containerd"
+    cri_socket = "/run/containerd/containerd.sock"
+  }
 
-# Test workspace integration
-PROVISIONING_WORKSPACE_USER=$USER provisioning server list
+  networking = {
+    cni = "cilium"
+    enable_network_policy = true
+    enable_encryption = true
+  }
 
-# Validate configuration compatibility
-provisioning validate config
-nu workspace.nu config validate
-
-

API Integration Testing

-

REST API Testing:

-
# Test orchestrator API
-curl -X GET http://localhost:9090/health
-curl -X GET http://localhost:9090/tasks
-
-# Test workflow creation
-curl -X POST http://localhost:9090/workflows/servers/create \
-  -H "Content-Type: application/json" \
-  -d '{"name": "test-server", "plan": "2xCPU-4 GB"}'
-
-# Monitor workflow
-curl -X GET http://localhost:9090/workflows/batch/status/workflow-id
-
-

Database Integration

-

SurrealDB Integration:

-
# Test database connectivity
-use core/nulib/lib_provisioning/database/surreal.nu
-let db = (connect-database)
-(test-connection $db)
-
-# Workflow state testing
-let workflow_id = (create-workflow-record "test-workflow")
-let status = (get-workflow-status $workflow_id)
-assert ($status.status == "pending")
-
-

External Tool Integration

-

Container Integration:

-
# Test with Docker
-docker run --rm -v $(pwd):/work provisioning:dev provisioning --version
-
-# Test with Kubernetes
-kubectl apply -f manifests/test-pod.yaml
-kubectl logs test-pod
-
-# Validate in different environments
-make test-dist PLATFORM=docker
-make test-dist PLATFORM=kubernetes
-
-

Collaboration Guidelines

-

Branch Strategy

-

Branch Naming:

-
    -
  • feature/description - New features
  • -
  • fix/description - Bug fixes
  • -
  • docs/description - Documentation updates
  • -
  • refactor/description - Code refactoring
  • -
  • test/description - Test improvements
  • -
-

Workflow:

-
# Start new feature
-git checkout main
-git pull origin main
-git checkout -b feature/new-provider-support
-
-# Regular commits
-git add .
-git commit -m "feat(provider): implement server creation API"
-
-# Push and create PR
-git push origin feature/new-provider-support
-gh pr create --title "Add new provider support" --body "..."
-
-

Code Review Process

-

Review Checklist:

-
    -
  • -Code follows project conventions
  • -
  • -Tests are included and passing
  • -
  • -Documentation is updated
  • -
  • -No hardcoded values
  • -
  • -Error handling is comprehensive
  • -
  • -Performance considerations addressed
  • -
-

Review Commands:

-
# Test PR locally
-gh pr checkout 123
-cd src/tools && make ci-test
-
-# Run specific tests
-nu workspace/extensions/providers/new-provider/tests/run-all.nu
-
-# Check code quality
-cargo clippy -- -D warnings
-nu --check $(find . -name "*.nu")
-
-

Documentation Requirements

-

Code Documentation:

-
# Function documentation
-def create-server [
-    name: string        # Server name (must be unique)
-    plan: string        # Server plan (for example, "2xCPU-4 GB")
-    --dry-run: bool     # Show what would be created without doing it
-] -> record {           # Returns server creation result
-    # Creates a new server with the specified configuration
-    #
-    # Examples:
-    #   create-server "web-01" "2xCPU-4 GB"
-    #   create-server "test" "1xCPU-2 GB" --dry-run
-
-    # Implementation
+  storage = {
+    provider = "rook-ceph"
+    replicas = 3
+    storage_class = "ceph-rbd"
+  }
 }
+EOF
 
-

Communication

-

Progress Updates:

-
    -
  • Daily standup participation
  • -
  • Weekly architecture reviews
  • -
  • PR descriptions with context
  • -
  • Issue tracking with details
  • -
-

Knowledge Sharing:

-
    -
  • Technical blog posts
  • -
  • Architecture decision records
  • -
  • Code review discussions
  • -
  • Team documentation updates
  • -
-

Quality Assurance

-

Code Quality Checks

-

Automated Quality Gates:

-
# Pre-commit hooks
-pre-commit install
+

Validate Schema

+
# Type-check Nickel schema
+nickel typecheck infra/k8s-cluster.ncl
 
-# Manual quality check
-cd src/tools
-make validate-all
-
-# Security audit
-cargo audit
+# Validate against provisioning contracts
+provisioning validate config --infra k8s-cluster
 
-

Quality Metrics:

-
    -
  • Code coverage > 80%
  • -
  • No critical security vulnerabilities
  • -
  • All tests passing
  • -
  • Documentation coverage complete
  • -
  • Performance benchmarks met
  • -
-

Performance Monitoring

-

Performance Testing:

-
# Benchmark builds
-make benchmark
-
-# Performance profiling
-cargo flamegraph --bin provisioning-orchestrator
-
-# Load testing
-ab -n 1000 -c 10 http://localhost:9090/health
+

Expected output:

+
Schema validation: PASSED
+  - Syntax: Valid Nickel
+  - Type safety: All contracts satisfied
+  - Dependencies: Resolved (5 taskservs)
+  - Provider: upcloud (credentials found)
 
-

Resource Monitoring:

-
# Monitor during development
-nu workspace/tools/runtime-manager.nu monitor --duration 5m
-
-# Check resource usage
-du -sh workspace/runtime/
-df -h
+

Part 3: Preview and Validation

+

Preview Infrastructure

+
# Dry-run to see what will be created
+provisioning server create --check --infra k8s-cluster
 
-

Best Practices

-

Configuration Management

-

Never Hardcode:

-
# Bad
-def get-api-url [] { "https://api.upcloud.com" }
+

Output shows:

+
Infrastructure Plan: k8s-prod
+Provider: upcloud
+Region: de-fra1
 
-# Good
-def get-api-url [] {
-    get-config-value "providers.upcloud.api_url" "https://api.upcloud.com"
-}
+Servers to create: 3
+  - k8s-control-01 (medium, 4 CPU, 8 GB RAM, 50 GB disk)
+  - k8s-worker-01 (large, 8 CPU, 16 GB RAM, 100 GB disk)
+  - k8s-worker-02 (large, 8 CPU, 16 GB RAM, 100 GB disk)
+
+Task services: 5 (with dependencies resolved)
+  1. containerd (dependency for kubernetes)
+  2. etcd (dependency for kubernetes)
+  3. kubernetes
+  4. cilium (requires kubernetes)
+  5. rook-ceph (requires kubernetes)
+
+Estimated monthly cost: $xxx.xx
+Estimated deployment time: 15-20 minutes
+
+WARNING: Production deployment - ensure backup enabled
 
-

Error Handling

-

Comprehensive Error Context:

-
def create-server [name: string] {
-    try {
-        validate-server-name $name
-    } catch { |e|
-        error make {
-            msg: $"Invalid server name '($name)': ($e.msg)",
-            label: {text: "server name validation failed", span: $e.span?}
-        }
-    }
-
-    try {
-        provision-server $name
-    } catch { |e|
-        error make {
-            msg: $"Server provisioning failed for '($name)': ($e.msg)",
-            help: "Check provider credentials and quota limits"
-        }
-    }
-}
+

Dependency Graph

+
# Visualize dependency resolution
+provisioning taskserv dependencies kubernetes --graph
 
-

Resource Management

-

Clean Up Resources:

-
def with-temporary-server [name: string, action: closure] {
-    let server = (create-server $name)
+

Shows:

+
kubernetes
+├── containerd (required)
+├── etcd (required)
+└── cni (cilium) (soft dependency)
 
-    try {
-        do $action $server
-    } catch { |e|
-        # Clean up on error
-        delete-server $name
-        $e
-    }
+cilium
+└── kubernetes (required)
 
-    # Clean up on success
-    delete-server $name
-}
+rook-ceph
+└── kubernetes (required)
 
-

Testing Best Practices

-

Test Isolation:

-
def test-with-isolation [test_name: string, test_action: closure] {
-    let test_workspace = $"test-($test_name)-(date now | format date '%Y%m%d%H%M%S')"
-
-    try {
-        # Set up isolated environment
-        $env.PROVISIONING_WORKSPACE_USER = $test_workspace
-        nu workspace.nu init --user-name $test_workspace
-
-        # Run test
-        do $test_action
-
-        print $"✅ Test ($test_name) passed"
-    } catch { |e|
-        print $"❌ Test ($test_name) failed: ($e.msg)"
-        exit 1
-    } finally {
-        # Clean up test environment
-        nu workspace.nu cleanup --user-name $test_workspace --type all --force
-    }
-}
+

Part 4: Server Provisioning

+

Create Servers

+
# Create all servers in parallel
+provisioning server create --infra k8s-cluster --yes
 
-

This development workflow provides a comprehensive framework for efficient, quality-focused development while maintaining the project’s architectural -principles and ensuring smooth collaboration across the team.

-

Integration Guide

-

This document explains how the new project structure integrates with existing systems, API compatibility and versioning, database migration -strategies, deployment considerations, and monitoring and observability.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Existing System Integration
  4. -
  5. API Compatibility and Versioning
  6. -
  7. Database Migration Strategies
  8. -
  9. Deployment Considerations
  10. -
  11. Monitoring and Observability
  12. -
  13. Legacy System Bridge
  14. -
  15. Migration Pathways
  16. -
  17. Troubleshooting Integration Issues
  18. -
-

Overview

-

Provisioning has been designed with integration as a core principle, ensuring seamless compatibility between new development-focused components and -existing production systems while providing clear migration pathways.

-

Integration Principles:

-
    -
  • Backward Compatibility: All existing APIs and interfaces remain functional
  • -
  • Gradual Migration: Systems can be migrated incrementally without disruption
  • -
  • Dual Operation: New and legacy systems operate side-by-side during transition
  • -
  • Zero Downtime: Migrations occur without service interruption
  • -
  • Data Integrity: All data migrations are atomic and reversible
  • -
-

Integration Architecture:

-
Integration Ecosystem
-┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
-│   Legacy Core   │ ←→ │  Bridge Layer   │ ←→ │   New Systems   │
-│                 │    │                 │    │                 │
-│ - ENV config    │    │ - Compatibility │    │ - TOML config   │
-│ - Direct calls  │    │ - Translation   │    │ - Orchestrator  │
-│ - File-based    │    │ - Monitoring    │    │ - Workflows     │
-│ - Simple logging│    │ - Validation    │    │ - REST APIs     │
-└─────────────────┘    └─────────────────┘    └─────────────────┘
+

Progress tracking:

+
Creating 3 servers...
+  k8s-control-01: [████████████████████████] 100%
+  k8s-worker-01:  [████████████████████████] 100%
+  k8s-worker-02:  [████████████████████████] 100%
+
+Servers created: 3/3
+SSH configured: 3/3
+Network ready: 3/3
+
+Servers available:
+  k8s-control-01: 94.237.x.x (running)
+  k8s-worker-01:  94.237.x.x (running)
+  k8s-worker-02:  94.237.x.x (running)
 
-

Existing System Integration

-

Command-Line Interface Integration

-

Seamless CLI Compatibility:

-
# All existing commands continue to work unchanged
-./core/nulib/provisioning server create web-01 2xCPU-4 GB
-./core/nulib/provisioning taskserv install kubernetes
-./core/nulib/provisioning cluster create buildkit
+

Verify Server Access

+
# Test SSH connectivity
+provisioning server ssh k8s-control-01 -- uname -a
 
-# New commands available alongside existing ones
-./src/core/nulib/provisioning server create web-01 2xCPU-4 GB --orchestrated
-nu workspace/tools/workspace.nu health --detailed
+# Check all servers
+provisioning server list
 
-

Path Resolution Integration:

-
# Automatic path resolution between systems
-use workspace/lib/path-resolver.nu
-
-# Resolves to workspace path if available, falls back to core
-let config_path = (path-resolver resolve_path "config" "user" --fallback-to-core)
-
-# Seamless extension discovery
-let provider_path = (path-resolver resolve_extension "providers" "upcloud")
+

Part 5: Service Installation

+

Install Task Services

+
# Install all task services (automatic dependency resolution)
+provisioning taskserv create kubernetes --infra k8s-cluster
 
-

Configuration System Bridge

-

Dual Configuration Support:

-
# Configuration bridge supports both ENV and TOML
-def get-config-value-bridge [key: string, default: string = ""] -> string {
-    # Try new TOML configuration first
-    let toml_value = try {
-        get-config-value $key
-    } catch { null }
+

Installation flow (automatic):

+
Resolving dependencies...
+  containerd → etcd → kubernetes → cilium, rook-ceph
 
-    if $toml_value != null {
-        return $toml_value
-    }
+Installing task services: 5
 
-    # Fall back to ENV variable (legacy support)
-    let env_key = ($key | str replace "." "_" | str upcase | $"PROVISIONING_($in)")
-    let env_value = ($env | get $env_key | default null)
+[1/5] Installing containerd...
+  k8s-control-01: [████████████████████████] 100%
+  k8s-worker-01:  [████████████████████████] 100%
+  k8s-worker-02:  [████████████████████████] 100%
 
-    if $env_value != null {
-        return $env_value
-    }
+[2/5] Installing etcd...
+  k8s-control-01: [████████████████████████] 100%
 
-    # Use default if provided
-    if $default != "" {
-        return $default
-    }
+[3/5] Installing kubernetes...
+  Control plane init: [████████████████████████] 100%
+  Worker join: [████████████████████████] 100%
+  Cluster ready: [████████████████████████] 100%
 
-    # Error with helpful migration message
-    error make {
-        msg: $"Configuration not found: ($key)",
-        help: $"Migrate from ($env_key) environment variable to ($key) in config file"
-    }
-}
+[4/5] Installing cilium...
+  CNI deployment: [████████████████████████] 100%
+  Network policies: [████████████████████████] 100%
+
+[5/5] Installing rook-ceph...
+  Operator: [████████████████████████] 100%
+  Cluster: [████████████████████████] 100%
+  Storage class: [████████████████████████] 100%
+
+All task services installed successfully
 
-

Data Integration

-

Shared Data Access:

-
# Unified data access across old and new systems
-def get-server-info [server_name: string] -> record {
-    # Try new orchestrator data store first
-    let orchestrator_data = try {
-        get-orchestrator-server-data $server_name
-    } catch { null }
+

Verify Kubernetes Cluster

+
# SSH to control plane
+provisioning server ssh k8s-control-01
 
-    if $orchestrator_data != null {
-        return $orchestrator_data
-    }
-
-    # Fall back to legacy file-based storage
-    let legacy_data = try {
-        get-legacy-server-data $server_name
-    } catch { null }
-
-    if $legacy_data != null {
-        return ($legacy_data | migrate-to-new-format)
-    }
-
-    error make {msg: $"Server not found: ($server_name)"}
-}
+# Check cluster status
+kubectl get nodes
+kubectl get pods --all-namespaces
+kubectl get storageclass
 
-

Process Integration

-

Hybrid Process Management:

-
# Orchestrator-aware process management
-def create-server-integrated [
-    name: string,
-    plan: string,
-    --orchestrated: bool = false
-] -> record {
-    if $orchestrated and (check-orchestrator-available) {
-        # Use new orchestrator workflow
-        return (create-server-workflow $name $plan)
-    } else {
-        # Use legacy direct creation
-        return (create-server-direct $name $plan)
-    }
-}
+

Expected output:

+
NAME              STATUS   ROLES    AGE   VERSION
+k8s-control-01    Ready    control-plane  5m   v1.28.0
+k8s-worker-01     Ready    <none>   4m   v1.28.0
+k8s-worker-02     Ready    <none>   4m   v1.28.0
 
-def check-orchestrator-available [] -> bool {
-    try {
-        http get "http://localhost:9090/health" | get status == "ok"
-    } catch {
-        false
-    }
-}
+NAMESPACE     NAME                                READY   STATUS
+kube-system   cilium-xxxxx                        1/1     Running
+kube-system   cilium-operator-xxxxx               1/1     Running
+kube-system   etcd-k8s-control-01                 1/1     Running
+rook-ceph     rook-ceph-operator-xxxxx            1/1     Running
+
+NAME              PROVISIONER
+ceph-rbd          rook-ceph.rbd.csi.ceph.com
 
-

API Compatibility and Versioning

-

REST API Versioning

-

API Version Strategy:

-
    -
  • v1: Legacy compatibility API (existing functionality)
  • -
  • v2: Enhanced API with orchestrator features
  • -
  • v3: Full workflow and batch operation support
  • -
-

Version Header Support:

-
# API calls with version specification
-curl -H "API-Version: v1" http://localhost:9090/servers
-curl -H "API-Version: v2" http://localhost:9090/workflows/servers/create
-curl -H "API-Version: v3" http://localhost:9090/workflows/batch/submit
+

Part 6: Deployment Verification

+

Health Checks

+
# Platform-level health check
+provisioning cluster status k8s-cluster
+
+# Individual service health
+provisioning taskserv status kubernetes
+provisioning taskserv status cilium
+provisioning taskserv status rook-ceph
 
-

API Compatibility Layer

-

Backward Compatible Endpoints:

-
// Rust API compatibility layer
-#[derive(Debug, Serialize, Deserialize)]
-struct ApiRequest {
-    version: Option<String>,
-    #[serde(flatten)]
-    payload: serde_json::Value,
-}
-
-async fn handle_versioned_request(
-    headers: HeaderMap,
-    req: ApiRequest,
-) -> Result<ApiResponse, ApiError> {
-    let api_version = headers
-        .get("API-Version")
-        .and_then(|v| v.to_str().ok())
-        .unwrap_or("v1");
-
-    match api_version {
-        "v1" => handle_v1_request(req.payload).await,
-        "v2" => handle_v2_request(req.payload).await,
-        "v3" => handle_v3_request(req.payload).await,
-        _ => Err(ApiError::UnsupportedVersion(api_version.to_string())),
-    }
-}
-
-// V1 compatibility endpoint
-async fn handle_v1_request(payload: serde_json::Value) -> Result<ApiResponse, ApiError> {
-    // Transform request to legacy format
-    let legacy_request = transform_to_legacy_format(payload)?;
-
-    // Execute using legacy system
-    let result = execute_legacy_operation(legacy_request).await?;
-
-    // Transform response to v1 format
-    Ok(transform_to_v1_response(result))
-}
-

Schema Evolution

-

Backward Compatible Schema Changes:

-
# API schema with version support
-let ServerCreateRequest = {
-    # V1 fields (always supported)
-    name | string,
-    plan | string,
-    zone | string | default = "auto",
-
-    # V2 additions (optional for backward compatibility)
-    orchestrated | bool | default = false,
-    workflow_options | { } | optional,
-
-    # V3 additions
-    batch_options | { } | optional,
-    dependencies | array | default = [],
-
-    # Version constraints
-    api_version | string | default = "v1",
-} in
-ServerCreateRequest
-
-# Conditional validation based on API version
-let WorkflowOptions = {
-    wait_for_completion | bool | default = true,
-    timeout_seconds | number | default = 300,
-    retry_count | number | default = 3,
-} in
-WorkflowOptions
-
-

Client SDK Compatibility

-

Multi-Version Client Support:

-
# Nushell client with version support
-def "client create-server" [
-    name: string,
-    plan: string,
-    --api-version: string = "v1",
-    --orchestrated: bool = false
-] -> record {
-    let endpoint = match $api_version {
-        "v1" => "/servers",
-        "v2" => "/workflows/servers/create",
-        "v3" => "/workflows/batch/submit",
-        _ => (error make {msg: $"Unsupported API version: ($api_version)"})
-    }
-
-    let request_body = match $api_version {
-        "v1" => {name: $name, plan: $plan},
-        "v2" => {name: $name, plan: $plan, orchestrated: $orchestrated},
-        "v3" => {
-            operations: [{
-                id: "create_server",
-                type: "server_create",
-                config: {name: $name, plan: $plan}
-            }]
-        },
-        _ => (error make {msg: $"Unsupported API version: ($api_version)"})
-    }
-
-    http post $"http://localhost:9090($endpoint)" $request_body
-        --headers {
-            "Content-Type": "application/json",
-            "API-Version": $api_version
-        }
-}
-
-

Database Migration Strategies

-

Database Architecture Evolution

-

Migration Strategy:

-
Database Evolution Path
-┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
-│  File-based     │ → │   SQLite        │ → │   SurrealDB     │
-│  Storage        │    │   Migration     │    │   Full Schema   │
-│                 │    │                 │    │                 │
-│ - JSON files    │    │ - Structured    │    │ - Graph DB      │
-│ - Text logs     │    │ - Transactions  │    │ - Real-time     │
-│ - Simple state  │    │ - Backup/restore│    │ - Clustering    │
-└─────────────────┘    └─────────────────┘    └─────────────────┘
-
-

Migration Scripts

-

Automated Database Migration:

-
# Database migration orchestration
-def migrate-database [
-    --from: string = "filesystem",
-    --to: string = "surrealdb",
-    --backup-first: bool = true,
-    --verify: bool = true
-] -> record {
-    if $backup_first {
-        print "Creating backup before migration..."
-        let backup_result = (create-database-backup $from)
-        print $"Backup created: ($backup_result.path)"
-    }
-
-    print $"Migrating from ($from) to ($to)..."
-
-    match [$from, $to] {
-        ["filesystem", "sqlite"] => migrate_filesystem_to_sqlite,
-        ["filesystem", "surrealdb"] => migrate_filesystem_to_surrealdb,
-        ["sqlite", "surrealdb"] => migrate_sqlite_to_surrealdb,
-        _ => (error make {msg: $"Unsupported migration path: ($from) → ($to)"})
-    }
-
-    if $verify {
-        print "Verifying migration integrity..."
-        let verification = (verify-migration $from $to)
-        if not $verification.success {
-            error make {
-                msg: $"Migration verification failed: ($verification.errors)",
-                help: "Restore from backup and retry migration"
-            }
-        }
-    }
-
-    print $"Migration from ($from) to ($to) completed successfully"
-    {from: $from, to: $to, status: "completed", migrated_at: (date now)}
-}
-
-

File System to SurrealDB Migration:

-
def migrate_filesystem_to_surrealdb [] -> record {
-    # Initialize SurrealDB connection
-    let db = (connect-surrealdb)
-
-    # Migrate server data
-    let server_files = (ls data/servers/*.json)
-    let migrated_servers = []
-
-    for server_file in $server_files {
-        let server_data = (open $server_file.name | from json)
-
-        # Transform to new schema
-        let server_record = {
-            id: $server_data.id,
-            name: $server_data.name,
-            plan: $server_data.plan,
-            zone: ($server_data.zone? | default "unknown"),
-            status: $server_data.status,
-            ip_address: $server_data.ip_address?,
-            created_at: $server_data.created_at,
-            updated_at: (date now),
-            metadata: ($server_data.metadata? | default {}),
-            tags: ($server_data.tags? | default [])
-        }
-
-        # Insert into SurrealDB
-        let insert_result = try {
-            query-surrealdb $"CREATE servers:($server_record.id) CONTENT ($server_record | to json)"
-        } catch { |e|
-            print $"Warning: Failed to migrate server ($server_data.name): ($e.msg)"
-        }
-
-        $migrated_servers = ($migrated_servers | append $server_record.id)
-    }
-
-    # Migrate workflow data
-    migrate_workflows_to_surrealdb $db
-
-    # Migrate state data
-    migrate_state_to_surrealdb $db
-
-    {
-        migrated_servers: ($migrated_servers | length),
-        migrated_workflows: (migrate_workflows_to_surrealdb $db).count,
-        status: "completed"
-    }
-}
-
-

Data Integrity Verification

-

Migration Verification:

-
def verify-migration [from: string, to: string] -> record {
-    print "Verifying data integrity..."
-
-    let source_data = (read-source-data $from)
-    let target_data = (read-target-data $to)
-
-    let errors = []
-
-    # Verify record counts
-    if $source_data.servers.count != $target_data.servers.count {
-        $errors = ($errors | append "Server count mismatch")
-    }
-
-    # Verify key records
-    for server in $source_data.servers {
-        let target_server = ($target_data.servers | where id == $server.id | first)
-
-        if ($target_server | is-empty) {
-            $errors = ($errors | append $"Missing server: ($server.id)")
-        } else {
-            # Verify critical fields
-            if $target_server.name != $server.name {
-                $errors = ($errors | append $"Name mismatch for server ($server.id)")
-            }
-
-            if $target_server.status != $server.status {
-                $errors = ($errors | append $"Status mismatch for server ($server.id)")
-            }
-        }
-    }
-
-    {
-        success: ($errors | length) == 0,
-        errors: $errors,
-        verified_at: (date now)
-    }
-}
-
-

Deployment Considerations

-

Deployment Architecture

-

Hybrid Deployment Model:

-
Deployment Architecture
-┌─────────────────────────────────────────────────────────────────┐
-│                    Load Balancer / Reverse Proxy               │
-└─────────────────────┬───────────────────────────────────────────┘
-                      │
-    ┌─────────────────┼─────────────────┐
-    │                 │                 │
-┌───▼────┐      ┌─────▼─────┐      ┌───▼────┐
-│Legacy  │      │Orchestrator│      │New     │
-│System  │ ←→   │Bridge      │  ←→  │Systems │
-│        │      │            │      │        │
-│- CLI   │      │- API Gate  │      │- REST  │
-│- Files │      │- Compat    │      │- DB    │
-│- Logs  │      │- Monitor   │      │- Queue │
-└────────┘      └────────────┘      └────────┘
-
-

Deployment Strategies

-

Blue-Green Deployment:

-
# Blue-Green deployment with integration bridge
-# Phase 1: Deploy new system alongside existing (Green environment)
-cd src/tools
-make all
-make create-installers
-
-# Install new system without disrupting existing
-./packages/installers/install-provisioning-2.0.0.sh \
-    --install-path /opt/provisioning-v2 \
-    --no-replace-existing \
-    --enable-bridge-mode
-
-# Phase 2: Start orchestrator and validate integration
-/opt/provisioning-v2/bin/orchestrator start --bridge-mode --legacy-path /opt/provisioning-v1
-
-# Phase 3: Gradual traffic shift
-# Route 10% traffic to new system
-nginx-traffic-split --new-backend 10%
-
-# Validate metrics and gradually increase
-nginx-traffic-split --new-backend 50%
-nginx-traffic-split --new-backend 90%
-
-# Phase 4: Complete cutover
-nginx-traffic-split --new-backend 100%
-/opt/provisioning-v1/bin/orchestrator stop
-
-

Rolling Update:

-
def rolling-deployment [
-    --target-version: string,
-    --batch-size: int = 3,
-    --health-check-interval: duration = 30sec
-] -> record {
-    let nodes = (get-deployment-nodes)
-    let batches = ($nodes | group_by --chunk-size $batch_size)
-
-    let deployment_results = []
-
-    for batch in $batches {
-        print $"Deploying to batch: ($batch | get name | str join ', ')"
-
-        # Deploy to batch
-        for node in $batch {
-            deploy-to-node $node $target_version
-        }
-
-        # Wait for health checks
-        sleep $health_check_interval
-
-        # Verify batch health
-        let batch_health = ($batch | each { |node| check-node-health $node })
-        let healthy_nodes = ($batch_health | where healthy == true | length)
-
-        if $healthy_nodes != ($batch | length) {
-            # Rollback batch on failure
-            print $"Health check failed, rolling back batch"
-            for node in $batch {
-                rollback-node $node
-            }
-            error make {msg: "Rolling deployment failed at batch"}
-        }
-
-        print $"Batch deployed successfully"
-        $deployment_results = ($deployment_results | append {
-            batch: $batch,
-            status: "success",
-            deployed_at: (date now)
-        })
-    }
-
-    {
-        strategy: "rolling",
-        target_version: $target_version,
-        batches: ($deployment_results | length),
-        status: "completed",
-        completed_at: (date now)
-    }
-}
-
-

Configuration Deployment

-

Environment-Specific Deployment:

-
# Development deployment
-PROVISIONING_ENV=dev ./deploy.sh \
-    --config-source config.dev.toml \
-    --enable-debug \
-    --enable-hot-reload
-
-# Staging deployment
-PROVISIONING_ENV=staging ./deploy.sh \
-    --config-source config.staging.toml \
-    --enable-monitoring \
-    --backup-before-deploy
-
-# Production deployment
-PROVISIONING_ENV=prod ./deploy.sh \
-    --config-source config.prod.toml \
-    --zero-downtime \
-    --enable-all-monitoring \
-    --backup-before-deploy \
-    --health-check-timeout 5m
-
-

Container Integration

-

Docker Deployment with Bridge:

-
# Multi-stage Docker build supporting both systems
-FROM rust:1.70 as builder
-WORKDIR /app
-COPY . .
-RUN cargo build --release
-
-FROM ubuntu:22.04 as runtime
-WORKDIR /app
-
-# Install both legacy and new systems
-COPY --from=builder /app/target/release/orchestrator /app/bin/
-COPY legacy-provisioning/ /app/legacy/
-COPY config/ /app/config/
-
-# Bridge script for dual operation
-COPY bridge-start.sh /app/bin/
-
-ENV PROVISIONING_BRIDGE_MODE=true
-ENV PROVISIONING_LEGACY_PATH=/app/legacy
-ENV PROVISIONING_NEW_PATH=/app/bin
-
-EXPOSE 8080
-CMD ["/app/bin/bridge-start.sh"]
-
-

Kubernetes Integration:

-
# Kubernetes deployment with bridge sidecar
+

Test Application Deployment

+
# Deploy test application on K8s cluster
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: test-pvc
+spec:
+  storageClassName: ceph-rbd
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: provisioning-system
+  name: test-nginx
 spec:
-  replicas: 3
+  replicas: 2
+  selector:
+    matchLabels:
+      app: nginx
   template:
+    metadata:
+      labels:
+        app: nginx
     spec:
       containers:
-      - name: orchestrator
-        image: provisioning-system:2.0.0
-        ports:
-        - containerPort: 8080
-        env:
-        - name: PROVISIONING_BRIDGE_MODE
-          value: "true"
+      - name: nginx
+        image: nginx:latest
         volumeMounts:
-        - name: config
-          mountPath: /app/config
-        - name: legacy-data
-          mountPath: /app/legacy/data
-
-      - name: legacy-bridge
-        image: provisioning-legacy:1.0.0
-        env:
-        - name: BRIDGE_ORCHESTRATOR_URL
-          value: "http://localhost:9090"
-        volumeMounts:
-        - name: legacy-data
-          mountPath: /data
-
+        - name: storage
+          mountPath: /usr/share/nginx/html
       volumes:
-      - name: config
-        configMap:
-          name: provisioning-config
-      - name: legacy-data
+      - name: storage
         persistentVolumeClaim:
-          claimName: provisioning-data
-
-

Monitoring and Observability

-

Integrated Monitoring Architecture

-

Monitoring Stack Integration:

-
Observability Architecture
-┌─────────────────────────────────────────────────────────────────┐
-│                    Monitoring Dashboard                         │
-│  ┌─────────────┐  ┌─────────────┐  ┌─────────────┐           │
-│  │   Grafana   │  │  Jaeger     │  │  AlertMgr   │           │
-│  └─────────────┘  └─────────────┘  └─────────────┘           │
-└─────────────┬───────────────┬───────────────┬─────────────────┘
-              │               │               │
-   ┌──────────▼──────────┐   │   ┌───────────▼───────────┐
-   │     Prometheus      │   │   │      Jaeger           │
-   │   (Metrics)         │   │   │    (Tracing)          │
-   └──────────┬──────────┘   │   └───────────┬───────────┘
-              │               │               │
-┌─────────────▼─────────────┐ │ ┌─────────────▼─────────────┐
-│        Legacy             │ │ │        New System         │
-│      Monitoring           │ │ │       Monitoring          │
-│                           │ │ │                           │
-│ - File-based logs        │ │ │ - Structured logs         │
-│ - Simple metrics         │ │ │ - Prometheus metrics      │
-│ - Basic health checks    │ │ │ - Distributed tracing     │
-└───────────────────────────┘ │ └───────────────────────────┘
-                              │
-                    ┌─────────▼─────────┐
-                    │   Bridge Monitor  │
-                    │                   │
-                    │ - Integration     │
-                    │ - Compatibility   │
-                    │ - Migration       │
-                    └───────────────────┘
-
-

Metrics Integration

-

Unified Metrics Collection:

-
# Metrics bridge for legacy and new systems
-def collect-system-metrics [] -> record {
-    let legacy_metrics = collect-legacy-metrics
-    let new_metrics = collect-new-metrics
-    let bridge_metrics = collect-bridge-metrics
-
-    {
-        timestamp: (date now),
-        legacy: $legacy_metrics,
-        new: $new_metrics,
-        bridge: $bridge_metrics,
-        integration: {
-            compatibility_rate: (calculate-compatibility-rate $bridge_metrics),
-            migration_progress: (calculate-migration-progress),
-            system_health: (assess-overall-health $legacy_metrics $new_metrics)
-        }
-    }
-}
-
-def collect-legacy-metrics [] -> record {
-    let log_files = (ls logs/*.log)
-    let process_stats = (get-process-stats "legacy-provisioning")
-
-    {
-        active_processes: $process_stats.count,
-        log_file_sizes: ($log_files | get size | math sum),
-        last_activity: (get-last-log-timestamp),
-        error_count: (count-log-errors "last 1h"),
-        performance: {
-            avg_response_time: (calculate-avg-response-time),
-            throughput: (calculate-throughput)
-        }
-    }
-}
-
-def collect-new-metrics [] -> record {
-    let orchestrator_stats = try {
-        http get "http://localhost:9090/metrics"
-    } catch {
-        {status: "unavailable"}
-    }
-
-    {
-        orchestrator: $orchestrator_stats,
-        workflow_stats: (get-workflow-metrics),
-        api_stats: (get-api-metrics),
-        database_stats: (get-database-metrics)
-    }
-}
-
-

Logging Integration

-

Unified Logging Strategy:

-
# Structured logging bridge
-def log-integrated [
-    level: string,
-    message: string,
-    --component: string = "bridge",
-    --legacy-compat: bool = true
-] {
-    let log_entry = {
-        timestamp: (date now | format date "%Y-%m-%d %H:%M:%S%.3f"),
-        level: $level,
-        component: $component,
-        message: $message,
-        system: "integrated",
-        correlation_id: (generate-correlation-id)
-    }
-
-    # Write to structured log (new system)
-    $log_entry | to json | save --append logs/integrated.jsonl
-
-    if $legacy_compat {
-        # Write to legacy log format
-        let legacy_entry = $"[($log_entry.timestamp)] [($level)] ($component): ($message)"
-        $legacy_entry | save --append logs/legacy.log
-    }
-
-    # Send to monitoring system
-    send-to-monitoring $log_entry
-}
-
-

Health Check Integration

-

Comprehensive Health Monitoring:

-
def health-check-integrated [] -> record {
-    let health_checks = [
-        {name: "legacy-system", check: (check-legacy-health)},
-        {name: "orchestrator", check: (check-orchestrator-health)},
-        {name: "database", check: (check-database-health)},
-        {name: "bridge-compatibility", check: (check-bridge-health)},
-        {name: "configuration", check: (check-config-health)}
-    ]
-
-    let results = ($health_checks | each { |check|
-        let result = try {
-            do $check.check
-        } catch { |e|
-            {status: "unhealthy", error: $e.msg}
-        }
-
-        {name: $check.name, result: $result}
-    })
-
-    let healthy_count = ($results | where result.status == "healthy" | length)
-    let total_count = ($results | length)
-
-    {
-        overall_status: (if $healthy_count == $total_count { "healthy" } else { "degraded" }),
-        healthy_services: $healthy_count,
-        total_services: $total_count,
-        services: $results,
-        checked_at: (date now)
-    }
-}
-
-

Legacy System Bridge

-

Bridge Architecture

-

Bridge Component Design:

-
# Legacy system bridge module
-export module bridge {
-    # Bridge state management
-    export def init-bridge [] -> record {
-        let bridge_config = get-config-section "bridge"
-
-        {
-            legacy_path: ($bridge_config.legacy_path? | default "/opt/provisioning-v1"),
-            new_path: ($bridge_config.new_path? | default "/opt/provisioning-v2"),
-            mode: ($bridge_config.mode? | default "compatibility"),
-            monitoring_enabled: ($bridge_config.monitoring? | default true),
-            initialized_at: (date now)
-        }
-    }
-
-    # Command translation layer
-    export def translate-command [
-        legacy_command: list<string>
-    ] -> list<string> {
-        match $legacy_command {
-            ["provisioning", "server", "create", $name, $plan, ...$args] => {
-                let new_args = ($args | each { |arg|
-                    match $arg {
-                        "--dry-run" => "--dry-run",
-                        "--wait" => "--wait",
-                        $zone if ($zone | str starts-with "--zone=") => $zone,
-                        _ => $arg
-                    }
-                })
-
-                ["provisioning", "server", "create", $name, $plan] ++ $new_args ++ ["--orchestrated"]
-            },
-            _ => $legacy_command  # Pass through unchanged
-        }
-    }
-
-    # Data format translation
-    export def translate-response [
-        legacy_response: record,
-        target_format: string = "v2"
-    ] -> record {
-        match $target_format {
-            "v2" => {
-                id: ($legacy_response.id? | default (generate-uuid)),
-                name: $legacy_response.name,
-                status: $legacy_response.status,
-                created_at: ($legacy_response.created_at? | default (date now)),
-                metadata: ($legacy_response | reject name status created_at),
-                version: "v2-compat"
-            },
-            _ => $legacy_response
-        }
-    }
-}
-
-

Bridge Operation Modes

-

Compatibility Mode:

-
# Full compatibility with legacy system
-def run-compatibility-mode [] {
-    print "Starting bridge in compatibility mode..."
-
-    # Intercept legacy commands
-    let legacy_commands = monitor-legacy-commands
-
-    for command in $legacy_commands {
-        let translated = (bridge translate-command $command)
-
-        try {
-            let result = (execute-new-system $translated)
-            let legacy_result = (bridge translate-response $result "v1")
-            respond-to-legacy $legacy_result
-        } catch { |e|
-            # Fall back to legacy system on error
-            let fallback_result = (execute-legacy-system $command)
-            respond-to-legacy $fallback_result
-        }
-    }
-}
-
-

Migration Mode:

-
# Gradual migration with traffic splitting
-def run-migration-mode [
-    --new-system-percentage: int = 50
-] {
-    print $"Starting bridge in migration mode (($new_system_percentage)% new system)"
-
-    let commands = monitor-all-commands
-
-    for command in $commands {
-        let route_to_new = ((random integer 1..100) <= $new_system_percentage)
-
-        if $route_to_new {
-            try {
-                execute-new-system $command
-            } catch {
-                # Fall back to legacy on failure
-                execute-legacy-system $command
-            }
-        } else {
-            execute-legacy-system $command
-        }
-    }
-}
-
-

Migration Pathways

-

Migration Phases

-

Phase 1: Parallel Deployment

-
    -
  • Deploy new system alongside existing
  • -
  • Enable bridge for compatibility
  • -
  • Begin data synchronization
  • -
  • Monitor integration health
  • -
-

Phase 2: Gradual Migration

-
    -
  • Route increasing traffic to new system
  • -
  • Migrate data in background
  • -
  • Validate consistency
  • -
  • Address integration issues
  • -
-

Phase 3: Full Migration

-
    -
  • Complete traffic cutover
  • -
  • Decommission legacy system
  • -
  • Clean up bridge components
  • -
  • Finalize data migration
  • -
-

Migration Automation

-

Automated Migration Orchestration:

-
def execute-migration-plan [
-    migration_plan: string,
-    --dry-run: bool = false,
-    --skip-backup: bool = false
-] -> record {
-    let plan = (open $migration_plan | from yaml)
-
-    if not $skip_backup {
-        create-pre-migration-backup
-    }
-
-    let migration_results = []
-
-    for phase in $plan.phases {
-        print $"Executing migration phase: ($phase.name)"
-
-        if $dry_run {
-            print $"[DRY RUN] Would execute phase: ($phase)"
-            continue
-        }
-
-        let phase_result = try {
-            execute-migration-phase $phase
-        } catch { |e|
-            print $"Migration phase failed: ($e.msg)"
-
-            if $phase.rollback_on_failure? | default false {
-                print "Rolling back migration phase..."
-                rollback-migration-phase $phase
-            }
-
-            error make {msg: $"Migration failed at phase ($phase.name): ($e.msg)"}
-        }
-
-        $migration_results = ($migration_results | append $phase_result)
-
-        # Wait between phases if specified
-        if "wait_seconds" in $phase {
-            sleep ($phase.wait_seconds * 1sec)
-        }
-    }
-
-    {
-        migration_plan: $migration_plan,
-        phases_completed: ($migration_results | length),
-        status: "completed",
-        completed_at: (date now),
-        results: $migration_results
-    }
-}
-
-

Migration Validation:

-
def validate-migration-readiness [] -> record {
-    let checks = [
-        {name: "backup-available", check: (check-backup-exists)},
-        {name: "new-system-healthy", check: (check-new-system-health)},
-        {name: "database-accessible", check: (check-database-connectivity)},
-        {name: "configuration-valid", check: (validate-migration-config)},
-        {name: "resources-available", check: (check-system-resources)},
-        {name: "network-connectivity", check: (check-network-health)}
-    ]
-
-    let results = ($checks | each { |check|
-        {
-            name: $check.name,
-            result: (do $check.check),
-            timestamp: (date now)
-        }
-    })
-
-    let failed_checks = ($results | where result.status != "ready")
-
-    {
-        ready_for_migration: ($failed_checks | length) == 0,
-        checks: $results,
-        failed_checks: $failed_checks,
-        validated_at: (date now)
-    }
-}
-
-

Troubleshooting Integration Issues

-

Common Integration Problems

-

API Compatibility Issues

-

Problem: Version mismatch between client and server

-
# Diagnosis
-curl -H "API-Version: v1" http://localhost:9090/health
-curl -H "API-Version: v2" http://localhost:9090/health
-
-# Solution: Check supported versions
-curl http://localhost:9090/api/versions
-
-# Update client API version
-export PROVISIONING_API_VERSION=v2
-
-

Configuration Bridge Issues

-

Problem: Configuration not found in either system

-
# Diagnosis
-def diagnose-config-issue [key: string] -> record {
-    let toml_result = try {
-        get-config-value $key
-    } catch { |e| {status: "failed", error: $e.msg} }
-
-    let env_key = ($key | str replace "." "_" | str upcase | $"PROVISIONING_($in)")
-    let env_result = try {
-        $env | get $env_key
-    } catch { |e| {status: "failed", error: $e.msg} }
-
-    {
-        key: $key,
-        toml_config: $toml_result,
-        env_config: $env_result,
-        migration_needed: ($toml_result.status == "failed" and $env_result.status != "failed")
-    }
-}
-
-# Solution: Migrate configuration
-def migrate-single-config [key: string] {
-    let diagnosis = (diagnose-config-issue $key)
-
-    if $diagnosis.migration_needed {
-        let env_value = $diagnosis.env_config
-        set-config-value $key $env_value
-        print $"Migrated ($key) from environment variable"
-    }
-}
-
-

Database Integration Issues

-

Problem: Data inconsistency between systems

-
# Diagnosis and repair
-def repair-data-consistency [] -> record {
-    let legacy_data = (read-legacy-data)
-    let new_data = (read-new-data)
-
-    let inconsistencies = []
-
-    # Check server records
-    for server in $legacy_data.servers {
-        let new_server = ($new_data.servers | where id == $server.id | first)
-
-        if ($new_server | is-empty) {
-            print $"Missing server in new system: ($server.id)"
-            create-server-record $server
-            $inconsistencies = ($inconsistencies | append {type: "missing", id: $server.id})
-        } else if $new_server != $server {
-            print $"Inconsistent server data: ($server.id)"
-            update-server-record $server
-            $inconsistencies = ($inconsistencies | append {type: "inconsistent", id: $server.id})
-        }
-    }
-
-    {
-        inconsistencies_found: ($inconsistencies | length),
-        repairs_applied: ($inconsistencies | length),
-        repaired_at: (date now)
-    }
-}
-
-

Debug Tools

-

Integration Debug Mode:

-
# Enable comprehensive debugging
-export PROVISIONING_DEBUG=true
-export PROVISIONING_LOG_LEVEL=debug
-export PROVISIONING_BRIDGE_DEBUG=true
-export PROVISIONING_INTEGRATION_TRACE=true
-
-# Run with integration debugging
-provisioning server create test-server 2xCPU-4 GB --debug-integration
-
-

Health Check Debugging:

-
def debug-integration-health [] -> record {
-    print "=== Integration Health Debug ==="
-
-    # Check all integration points
-    let legacy_health = try {
-        check-legacy-system
-    } catch { |e| {status: "error", error: $e.msg} }
-
-    let orchestrator_health = try {
-        http get "http://localhost:9090/health"
-    } catch { |e| {status: "error", error: $e.msg} }
-
-    let bridge_health = try {
-        check-bridge-status
-    } catch { |e| {status: "error", error: $e.msg} }
-
-    let config_health = try {
-        validate-config-integration
-    } catch { |e| {status: "error", error: $e.msg} }
-
-    print $"Legacy System: ($legacy_health.status)"
-    print $"Orchestrator: ($orchestrator_health.status)"
-    print $"Bridge: ($bridge_health.status)"
-    print $"Configuration: ($config_health.status)"
-
-    {
-        legacy: $legacy_health,
-        orchestrator: $orchestrator_health,
-        bridge: $bridge_health,
-        configuration: $config_health,
-        debug_timestamp: (date now)
-    }
-}
-
-

This integration guide provides a comprehensive framework for seamlessly integrating new development components with existing production systems while -maintaining reliability, compatibility, and clear migration pathways.

-

Build System Documentation

-

This document provides comprehensive documentation for the provisioning project’s build system, including the complete Makefile reference with 40+ -targets, build tools, compilation instructions, and troubleshooting.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Quick Start
  4. -
  5. Makefile Reference
  6. -
  7. Build Tools
  8. -
  9. Cross-Platform Compilation
  10. -
  11. Dependency Management
  12. -
  13. Troubleshooting
  14. -
  15. CI/CD Integration
  16. -
-

Overview

-

The build system is a comprehensive, Makefile-based solution that orchestrates:

-
    -
  • Rust compilation: Platform binaries (orchestrator, control-center, etc.)
  • -
  • Nushell bundling: Core libraries and CLI tools
  • -
  • Nickel validation: Configuration schema validation
  • -
  • Distribution generation: Multi-platform packages
  • -
  • Release management: Automated release pipelines
  • -
  • Documentation generation: API and user documentation
  • -
-

Location: /src/tools/ -Main entry point: /src/tools/Makefile

-

Quick Start

-
# Navigate to build system
-cd src/tools
-
-# View all available targets
-make help
-
-# Complete build and package
-make all
-
-# Development build (quick)
-make dev-build
-
-# Build for specific platform
-make linux
-make macos
-make windows
-
-# Clean everything
-make clean
-
-# Check build system status
-make status
-
-

Makefile Reference

-

Build Configuration

-

Variables:

-
# Project metadata
-PROJECT_NAME := provisioning
-VERSION := $(git describe --tags --always --dirty)
-BUILD_TIME := $(date -u +"%Y-%m-%dT%H:%M:%SZ")
-
-# Build configuration
-RUST_TARGET := x86_64-unknown-linux-gnu
-BUILD_MODE := release
-PLATFORMS := linux-amd64,macos-amd64,windows-amd64
-VARIANTS := complete,minimal
-
-# Flags
-VERBOSE := false
-DRY_RUN := false
-PARALLEL := true
-
-

Build Targets

-

Primary Build Targets

-

make all - Complete build, package, and test

-
    -
  • Runs: clean build-all package-all test-dist
  • -
  • Use for: Production releases, complete validation
  • -
-

make build-all - Build all components

-
    -
  • Runs: build-platform build-core validate-nickel
  • -
  • Use for: Complete system compilation
  • -
-

make build-platform - Build platform binaries for all targets

-
make build-platform
-# Equivalent to:
-nu tools/build/compile-platform.nu \
-    --target x86_64-unknown-linux-gnu \
-    --release \
-    --output-dir dist/platform \
-    --verbose=false
-
-

make build-core - Bundle core Nushell libraries

-
make build-core
-# Equivalent to:
-nu tools/build/bundle-core.nu \
-    --output-dir dist/core \
-    --config-dir dist/config \
-    --validate \
-    --exclude-dev
-
-

make validate-nickel - Validate and compile Nickel schemas

-
make validate-nickel
-# Equivalent to:
-nu tools/build/validate-nickel.nu \
-    --output-dir dist/schemas \
-    --format-code \
-    --check-dependencies
-
-

make build-cross - Cross-compile for multiple platforms

-
    -
  • Builds for all platforms in PLATFORMS variable
  • -
  • Parallel execution support
  • -
  • Failure handling for each platform
  • -
-

Package Targets

-

make package-all - Create all distribution packages

-
    -
  • Runs: dist-generate package-binaries package-containers
  • -
-

make dist-generate - Generate complete distributions

-
make dist-generate
-# Advanced usage:
-make dist-generate PLATFORMS=linux-amd64,macos-amd64 VARIANTS=complete
-
-

make package-binaries - Package binaries for distribution

-
    -
  • Creates platform-specific archives
  • -
  • Strips debug symbols
  • -
  • Generates checksums
  • -
-

make package-containers - Build container images

-
    -
  • Multi-platform container builds
  • -
  • Optimized layers and caching
  • -
  • Version tagging
  • -
-

make create-archives - Create distribution archives

-
    -
  • TAR and ZIP formats
  • -
  • Platform-specific and universal archives
  • -
  • Compression and checksums
  • -
-

make create-installers - Create installation packages

-
    -
  • Shell script installers
  • -
  • Platform-specific packages (DEB, RPM, MSI)
  • -
  • Uninstaller creation
  • -
-

Release Targets

-

make release - Create a complete release (requires VERSION)

-
make release VERSION=2.1.0
-
-

Features:

-
    -
  • Automated changelog generation
  • -
  • Git tag creation and push
  • -
  • Artifact upload
  • -
  • Comprehensive validation
  • -
-

make release-draft - Create a draft release

-
    -
  • Create without publishing
  • -
  • Review artifacts before release
  • -
  • Manual approval workflow
  • -
-

make upload-artifacts - Upload release artifacts

-
    -
  • GitHub Releases
  • -
  • Container registries
  • -
  • Package repositories
  • -
  • Verification and validation
  • -
-

make notify-release - Send release notifications

-
    -
  • Slack notifications
  • -
  • Discord announcements
  • -
  • Email notifications
  • -
  • Custom webhook support
  • -
-

make update-registry - Update package manager registries

-
    -
  • Homebrew formula updates
  • -
  • APT repository updates
  • -
  • Custom registry support
  • -
-

Development and Testing Targets

-

make dev-build - Quick development build

-
make dev-build
-# Fast build with minimal validation
-
-

make test-build - Test build system

-
    -
  • Validates build process
  • -
  • Runs with test configuration
  • -
  • Comprehensive logging
  • -
-

make test-dist - Test generated distributions

-
    -
  • Validates distribution integrity
  • -
  • Tests installation process
  • -
  • Platform compatibility checks
  • -
-

make validate-all - Validate all components

-
    -
  • Nickel schema validation
  • -
  • Package validation
  • -
  • Configuration validation
  • -
-

make benchmark - Run build benchmarks

-
    -
  • Times build process
  • -
  • Performance analysis
  • -
  • Resource usage monitoring
  • -
-

Documentation Targets

-

make docs - Generate documentation

-
make docs
-# Generates API docs, user guides, and examples
-
-

make docs-serve - Generate and serve documentation locally

-
    -
  • Starts local HTTP server on port 8000
  • -
  • Live documentation browsing
  • -
  • Development documentation workflow
  • -
-

Utility Targets

-

make clean - Clean all build artifacts

-
make clean
-# Removes all build, distribution, and package directories
-
-

make clean-dist - Clean only distribution artifacts

-
    -
  • Preserves build cache
  • -
  • Removes distribution packages
  • -
  • Faster cleanup option
  • -
-

make install - Install the built system locally

-
    -
  • Requires distribution to be built
  • -
  • Installs to system directories
  • -
  • Creates uninstaller
  • -
-

make uninstall - Uninstall the system

-
    -
  • Removes system installation
  • -
  • Cleans configuration
  • -
  • Removes service files
  • -
-

make status - Show build system status

-
make status
-# Output:
-# Build System Status
-# ===================
-# Project: provisioning
-# Version: v2.1.0-5-g1234567
-# Git Commit: 1234567890abcdef
-# Build Time: 2025-09-25T14:30:22Z
-#
-# Directories:
-#   Source: /Users/user/repo-cnz/src
-#   Tools: /Users/user/repo-cnz/src/tools
-#   Build: /Users/user/repo-cnz/src/target
-#   Distribution: /Users/user/repo-cnz/src/dist
-#   Packages: /Users/user/repo-cnz/src/packages
-
-

make info - Show detailed system information

-
    -
  • OS and architecture details
  • -
  • Tool versions (Nushell, Rust, Docker, Git)
  • -
  • Environment information
  • -
  • Build prerequisites
  • -
-

CI/CD Integration Targets

-

make ci-build - CI build pipeline

-
    -
  • Complete validation build
  • -
  • Suitable for automated CI systems
  • -
  • Comprehensive testing
  • -
-

make ci-test - CI test pipeline

-
    -
  • Validation and testing only
  • -
  • Fast feedback for pull requests
  • -
  • Quality assurance
  • -
-

make ci-release - CI release pipeline

-
    -
  • Build and packaging for releases
  • -
  • Artifact preparation
  • -
  • Release candidate creation
  • -
-

make cd-deploy - CD deployment pipeline

-
    -
  • Complete release and deployment
  • -
  • Artifact upload and distribution
  • -
  • User notifications
  • -
-

Platform-Specific Targets

-

make linux - Build for Linux only

-
make linux
-# Sets PLATFORMS=linux-amd64
-
-

make macos - Build for macOS only

-
make macos
-# Sets PLATFORMS=macos-amd64
-
-

make windows - Build for Windows only

-
make windows
-# Sets PLATFORMS=windows-amd64
-
-

Debugging Targets

-

make debug - Build with debug information

-
make debug
-# Sets BUILD_MODE=debug VERBOSE=true
-
-

make debug-info - Show debug information

-
    -
  • Make variables and environment
  • -
  • Build system diagnostics
  • -
  • Troubleshooting information
  • -
-

Build Tools

-

Core Build Scripts

-

All build tools are implemented as Nushell scripts with comprehensive parameter validation and error handling.

-

/src/tools/build/compile-platform.nu

-

Purpose: Compiles all Rust components for distribution

-

Components Compiled:

-
    -
  • orchestratorprovisioning-orchestrator binary
  • -
  • control-centercontrol-center binary
  • -
  • control-center-ui → Web UI assets
  • -
  • mcp-server-rust → MCP integration binary
  • -
-

Usage:

-
nu compile-platform.nu [options]
-
-Options:
-  --target STRING          Target platform (default: x86_64-unknown-linux-gnu)
-  --release                Build in release mode
-  --features STRING        Comma-separated features to enable
-  --output-dir STRING      Output directory (default: dist/platform)
-  --verbose                Enable verbose logging
-  --clean                  Clean before building
-
-

Example:

-
nu compile-platform.nu \
-    --target x86_64-apple-darwin \
-    --release \
-    --features "surrealdb,telemetry" \
-    --output-dir dist/macos \
-    --verbose
-
-

/src/tools/build/bundle-core.nu

-

Purpose: Bundles Nushell core libraries and CLI for distribution

-

Components Bundled:

-
    -
  • Nushell provisioning CLI wrapper
  • -
  • Core Nushell libraries (lib_provisioning)
  • -
  • Configuration system
  • -
  • Template system
  • -
  • Extensions and plugins
  • -
-

Usage:

-
nu bundle-core.nu [options]
-
-Options:
-  --output-dir STRING      Output directory (default: dist/core)
-  --config-dir STRING      Configuration directory (default: dist/config)
-  --validate               Validate Nushell syntax
-  --compress               Compress bundle with gzip
-  --exclude-dev            Exclude development files (default: true)
-  --verbose                Enable verbose logging
-
-

Validation Features:

-
    -
  • Syntax validation of all Nushell files
  • -
  • Import dependency checking
  • -
  • Function signature validation
  • -
  • Test execution (if tests present)
  • -
-

/src/tools/build/validate-nickel.nu

-

Purpose: Validates and compiles Nickel schemas

-

Validation Process:

-
    -
  1. Syntax validation of all .ncl files
  2. -
  3. Schema dependency checking
  4. -
  5. Type constraint validation
  6. -
  7. Example validation against schemas
  8. -
  9. Documentation generation
  10. -
-

Usage:

-
nu validate-nickel.nu [options]
-
-Options:
-  --output-dir STRING      Output directory (default: dist/schemas)
-  --format-code            Format Nickel code during validation
-  --check-dependencies     Validate schema dependencies
-  --verbose                Enable verbose logging
-
-

/src/tools/build/test-distribution.nu

-

Purpose: Tests generated distributions for correctness

-

Test Types:

-
    -
  • Basic: Installation test, CLI help, version check
  • -
  • Integration: Server creation, configuration validation
  • -
  • Complete: Full workflow testing including cluster operations
  • -
-

Usage:

-
nu test-distribution.nu [options]
-
-Options:
-  --dist-dir STRING        Distribution directory (default: dist)
-  --test-types STRING      Test types: basic,integration,complete
-  --platform STRING        Target platform for testing
-  --cleanup                Remove test files after completion
-  --verbose                Enable verbose logging
-
-

/src/tools/build/clean-build.nu

-

Purpose: Intelligent build artifact cleanup

-

Cleanup Scopes:

-
    -
  • all: Complete cleanup (build, dist, packages, cache)
  • -
  • dist: Distribution artifacts only
  • -
  • cache: Build cache and temporary files
  • -
  • old: Files older than specified age
  • -
-

Usage:

-
nu clean-build.nu [options]
-
-Options:
-  --scope STRING           Cleanup scope: all,dist,cache,old
-  --age DURATION          Age threshold for 'old' scope (default: 7d)
-  --force                  Force cleanup without confirmation
-  --dry-run               Show what would be cleaned without doing it
-  --verbose               Enable verbose logging
-
-

Distribution Tools

-

/src/tools/distribution/generate-distribution.nu

-

Purpose: Main distribution generator orchestrating the complete process

-

Generation Process:

-
    -
  1. Platform binary compilation
  2. -
  3. Core library bundling
  4. -
  5. Nickel schema validation and packaging
  6. -
  7. Configuration system preparation
  8. -
  9. Documentation generation
  10. -
  11. Archive creation and compression
  12. -
  13. Installer generation
  14. -
  15. Validation and testing
  16. -
-

Usage:

-
nu generate-distribution.nu [command] [options]
-
-Commands:
-  <default>                Generate complete distribution
-  quick                    Quick development distribution
-  status                   Show generation status
-
-Options:
-  --version STRING         Version to build (default: auto-detect)
-  --platforms STRING       Comma-separated platforms
-  --variants STRING        Variants: complete,minimal
-  --output-dir STRING      Output directory (default: dist)
-  --compress               Enable compression
-  --generate-docs          Generate documentation
-  --parallel-builds        Enable parallel builds
-  --validate-output        Validate generated output
-  --verbose                Enable verbose logging
-
-

Advanced Examples:

-
# Complete multi-platform release
-nu generate-distribution.nu \
-    --version 2.1.0 \
-    --platforms linux-amd64,macos-amd64,windows-amd64 \
-    --variants complete,minimal \
-    --compress \
-    --generate-docs \
-    --parallel-builds \
-    --validate-output
-
-# Quick development build
-nu generate-distribution.nu quick \
-    --platform linux \
-    --variant minimal
-
-# Status check
-nu generate-distribution.nu status
-
-

/src/tools/distribution/create-installer.nu

-

Purpose: Creates platform-specific installers

-

Installer Types:

-
    -
  • shell: Shell script installer (cross-platform)
  • -
  • package: Platform packages (DEB, RPM, MSI, PKG)
  • -
  • container: Container image with provisioning
  • -
  • source: Source distribution with build instructions
  • -
-

Usage:

-
nu create-installer.nu DISTRIBUTION_DIR [options]
-
-Options:
-  --output-dir STRING      Installer output directory
-  --installer-types STRING Installer types: shell,package,container,source
-  --platforms STRING       Target platforms
-  --include-services       Include systemd/launchd service files
-  --create-uninstaller     Generate uninstaller
-  --validate-installer     Test installer functionality
-  --verbose                Enable verbose logging
-
-

Package Tools

-

/src/tools/package/package-binaries.nu

-

Purpose: Packages compiled binaries for distribution

-

Package Formats:

-
    -
  • archive: TAR.GZ and ZIP archives
  • -
  • standalone: Single binary with embedded resources
  • -
  • installer: Platform-specific installer packages
  • -
-

Features:

-
    -
  • Binary stripping for size reduction
  • -
  • Compression optimization
  • -
  • Checksum generation (SHA256, MD5)
  • -
  • Digital signing (if configured)
  • -
-

/src/tools/package/build-containers.nu

-

Purpose: Builds optimized container images

-

Container Features:

-
    -
  • Multi-stage builds for minimal image size
  • -
  • Security scanning integration
  • -
  • Multi-platform image generation
  • -
  • Layer caching optimization
  • -
  • Runtime environment configuration
  • -
-

Release Tools

-

/src/tools/release/create-release.nu

-

Purpose: Automated release creation and management

-

Release Process:

-
    -
  1. Version validation and tagging
  2. -
  3. Changelog generation from git history
  4. -
  5. Asset building and validation
  6. -
  7. Release creation (GitHub, GitLab, etc.)
  8. -
  9. Asset upload and verification
  10. -
  11. Release announcement preparation
  12. -
-

Usage:

-
nu create-release.nu [options]
-
-Options:
-  --version STRING         Release version (required)
-  --asset-dir STRING       Directory containing release assets
-  --draft                  Create draft release
-  --prerelease             Mark as pre-release
-  --generate-changelog     Auto-generate changelog
-  --push-tag               Push git tag
-  --auto-upload            Upload assets automatically
-  --verbose                Enable verbose logging
-
-

Cross-Platform Compilation

-

Supported Platforms

-

Primary Platforms:

-
    -
  • linux-amd64 (x86_64-unknown-linux-gnu)
  • -
  • macos-amd64 (x86_64-apple-darwin)
  • -
  • windows-amd64 (x86_64-pc-windows-gnu)
  • -
-

Additional Platforms:

-
    -
  • linux-arm64 (aarch64-unknown-linux-gnu)
  • -
  • macos-arm64 (aarch64-apple-darwin)
  • -
  • freebsd-amd64 (x86_64-unknown-freebsd)
  • -
-

Cross-Compilation Setup

-

Install Rust Targets:

-
# Install additional targets
-rustup target add x86_64-apple-darwin
-rustup target add x86_64-pc-windows-gnu
-rustup target add aarch64-unknown-linux-gnu
-rustup target add aarch64-apple-darwin
-
-

Platform-Specific Dependencies:

-

macOS Cross-Compilation:

-
# Install osxcross toolchain
-brew install FiloSottile/musl-cross/musl-cross
-brew install mingw-w64
-
-

Windows Cross-Compilation:

-
# Install Windows dependencies
-brew install mingw-w64
-# or on Linux:
-sudo apt-get install gcc-mingw-w64
-
-

Cross-Compilation Usage

-

Single Platform:

-
# Build for macOS from Linux
-make build-platform RUST_TARGET=x86_64-apple-darwin
-
-# Build for Windows
-make build-platform RUST_TARGET=x86_64-pc-windows-gnu
-
-

Multiple Platforms:

-
# Build for all configured platforms
-make build-cross
-
-# Specify platforms
-make build-cross PLATFORMS=linux-amd64,macos-amd64,windows-amd64
-
-

Platform-Specific Targets:

-
# Quick platform builds
-make linux      # Linux AMD64
-make macos      # macOS AMD64
-make windows    # Windows AMD64
-
-

Dependency Management

-

Build Dependencies

-

Required Tools:

-
    -
  • Nushell 0.107.1+: Core shell and scripting
  • -
  • Rust 1.70+: Platform binary compilation
  • -
  • Cargo: Rust package management
  • -
  • KCL 0.11.2+: Configuration language
  • -
  • Git: Version control and tagging
  • -
-

Optional Tools:

-
    -
  • Docker: Container image building
  • -
  • Cross: Simplified cross-compilation
  • -
  • SOPS: Secrets management
  • -
  • Age: Encryption for secrets
  • -
-

Dependency Validation

-

Check Dependencies:

-
make info
-# Shows versions of all required tools
-
-# Output example:
-# Tool Versions:
-#   Nushell: 0.107.1
-#   Rust: rustc 1.75.0
-#   Docker: Docker version 24.0.6
-#   Git: git version 2.42.0
-
-

Install Missing Dependencies:

-
# Install Nushell
-cargo install nu
-
-# Install Nickel
-cargo install nickel
-
-# Install Cross (for cross-compilation)
-cargo install cross
-
-

Dependency Caching

-

Rust Dependencies:

-
    -
  • Cargo cache: ~/.cargo/registry
  • -
  • Target cache: target/ directory
  • -
  • Cross-compilation cache: ~/.cache/cross
  • -
-

Build Cache Management:

-
# Clean Cargo cache
-cargo clean
-
-# Clean cross-compilation cache
-cross clean
-
-# Clean all caches
-make clean SCOPE=cache
-
-

Troubleshooting

-

Common Build Issues

-

Rust Compilation Errors

-

Error: linker 'cc' not found

-
# Solution: Install build essentials
-sudo apt-get install build-essential  # Linux
-xcode-select --install                 # macOS
-
-

Error: target not found

-
# Solution: Install target
-rustup target add x86_64-unknown-linux-gnu
-
-

Error: Cross-compilation linking errors

-
# Solution: Use cross instead of cargo
-cargo install cross
-make build-platform CROSS=true
-
-

Nushell Script Errors

-

Error: command not found

-
# Solution: Ensure Nushell is in PATH
-which nu
-export PATH="$HOME/.cargo/bin:$PATH"
-
-

Error: Permission denied

-
# Solution: Make scripts executable
-chmod +x src/tools/build/*.nu
-
-

Error: Module not found

-
# Solution: Check working directory
-cd src/tools
-nu build/compile-platform.nu --help
-
-

Nickel Validation Errors

-

Error: nickel command not found

-
# Solution: Install Nickel
-cargo install nickel
-# or
-brew install nickel
-
-

Error: Schema validation failed

-
# Solution: Check Nickel syntax
-nickel fmt schemas/
-nickel check schemas/
-
-

Build Performance Issues

-

Slow Compilation

-

Optimizations:

-
# Enable parallel builds
-make build-all PARALLEL=true
-
-# Use faster linker
-export RUSTFLAGS="-C link-arg=-fuse-ld=lld"
-
-# Increase build jobs
-export CARGO_BUILD_JOBS=8
-
-

Cargo Configuration (~/.cargo/config.toml):

-
[build]
-jobs = 8
-
-[target.x86_64-unknown-linux-gnu]
-linker = "lld"
-
-

Memory Issues

-

Solutions:

-
# Reduce parallel jobs
-export CARGO_BUILD_JOBS=2
-
-# Use debug build for development
-make dev-build BUILD_MODE=debug
-
-# Clean up between builds
-make clean-dist
-
-

Distribution Issues

-

Missing Assets

-

Validation:

-
# Test distribution
-make test-dist
-
-# Detailed validation
-nu src/tools/package/validate-package.nu dist/
-
-

Size Optimization

-

Optimizations:

-
# Strip binaries
-make package-binaries STRIP=true
-
-# Enable compression
-make dist-generate COMPRESS=true
-
-# Use minimal variant
-make dist-generate VARIANTS=minimal
-
-

Debug Mode

-

Enable Debug Logging:

-
# Set environment
-export PROVISIONING_DEBUG=true
-export RUST_LOG=debug
-
-# Run with debug
-make debug
-
-# Verbose make output
-make build-all VERBOSE=true
-
-

Debug Information:

-
# Show debug information
-make debug-info
-
-# Build system status
-make status
-
-# Tool information
-make info
-
-

CI/CD Integration

-

GitHub Actions

-

Example Workflow (.github/workflows/build.yml):

-
name: Build and Test
-on: [push, pull_request]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Setup Nushell
-        uses: hustcer/setup-nu@v3.5
-
-      - name: Setup Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-
-      - name: CI Build
-        run: |
-          cd src/tools
-          make ci-build
-
-      - name: Upload Artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: build-artifacts
-          path: src/dist/
-
-

Release Automation

-

Release Workflow:

-
name: Release
-on:
-  push:
-    tags: ['v*']
-
-jobs:
-  release:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Build Release
-        run: |
-          cd src/tools
-          make ci-release VERSION=${{ github.ref_name }}
-
-      - name: Create Release
-        run: |
-          cd src/tools
-          make release VERSION=${{ github.ref_name }}
-
-

Local CI Testing

-

Test CI Pipeline Locally:

-
# Run CI build pipeline
-make ci-build
-
-# Run CI test pipeline
-make ci-test
-
-# Full CI/CD pipeline
-make ci-release
-
-

This build system provides a comprehensive, maintainable foundation for the provisioning project’s development lifecycle, from local development to -production releases.

-

Distribution Process Documentation

-

This document provides comprehensive documentation for the provisioning project’s distribution process, covering release workflows, package -generation, multi-platform distribution, and rollback procedures.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Distribution Architecture
  4. -
  5. Release Process
  6. -
  7. Package Generation
  8. -
  9. Multi-Platform Distribution
  10. -
  11. Validation and Testing
  12. -
  13. Release Management
  14. -
  15. Rollback Procedures
  16. -
  17. CI/CD Integration
  18. -
  19. Troubleshooting
  20. -
-

Overview

-

The distribution system provides a comprehensive solution for creating, packaging, and distributing provisioning across multiple platforms with -automated release management.

-

Key Features:

-
    -
  • Multi-Platform Support: Linux, macOS, Windows with multiple architectures
  • -
  • Multiple Distribution Variants: Complete and minimal distributions
  • -
  • Automated Release Pipeline: From development to production deployment
  • -
  • Package Management: Binary packages, container images, and installers
  • -
  • Validation Framework: Comprehensive testing and validation
  • -
  • Rollback Capabilities: Safe rollback and recovery procedures
  • -
-

Location: /src/tools/ -Main Tool: /src/tools/Makefile and associated Nushell scripts

-

Distribution Architecture

-

Distribution Components

-
Distribution Ecosystem
-├── Core Components
-│   ├── Platform Binaries      # Rust-compiled binaries
-│   ├── Core Libraries         # Nushell libraries and CLI
-│   ├── Configuration System   # TOML configuration files
-│   └── Documentation         # User and API documentation
-├── Platform Packages
-│   ├── Archives              # TAR.GZ and ZIP files
-│   ├── Installers            # Platform-specific installers
-│   └── Container Images      # Docker/OCI images
-├── Distribution Variants
-│   ├── Complete              # Full-featured distribution
-│   └── Minimal               # Lightweight distribution
-└── Release Artifacts
-    ├── Checksums             # SHA256/MD5 verification
-    ├── Signatures            # Digital signatures
-    └── Metadata              # Release information
-
-

Build Pipeline

-
Build Pipeline Flow
-┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
-│   Source Code   │ -> │   Build Stage   │ -> │  Package Stage  │
-│                 │    │                 │    │                 │
-│ - Rust code     │    │ - compile-      │    │ - create-       │
-│ - Nushell libs  │    │   platform      │    │   archives      │
-│ - Nickel schemas│    │ - bundle-core   │    │ - build-        │
-│ - Config files  │    │ - validate-nickel│   │   containers    │
-└─────────────────┘    └─────────────────┘    └─────────────────┘
-                                |
-                                v
-┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
-│ Release Stage   │ <- │ Validate Stage  │ <- │ Distribute Stage│
-│                 │    │                 │    │                 │
-│ - create-       │    │ - test-dist     │    │ - generate-     │
-│   release       │    │ - validate-     │    │   distribution  │
-│ - upload-       │    │   package       │    │ - create-       │
-│   artifacts     │    │ - integration   │    │   installers    │
-└─────────────────┘    └─────────────────┘    └─────────────────┘
-
-

Distribution Variants

-

Complete Distribution:

-
    -
  • All Rust binaries (orchestrator, control-center, MCP server)
  • -
  • Full Nushell library suite
  • -
  • All providers, taskservs, and clusters
  • -
  • Complete documentation and examples
  • -
  • Development tools and templates
  • -
-

Minimal Distribution:

-
    -
  • Essential binaries only
  • -
  • Core Nushell libraries
  • -
  • Basic provider support
  • -
  • Essential task services
  • -
  • Minimal documentation
  • -
-

Release Process

-

Release Types

-

Release Classifications:

-
    -
  • Major Release (x.0.0): Breaking changes, new major features
  • -
  • Minor Release (x.y.0): New features, backward compatible
  • -
  • Patch Release (x.y.z): Bug fixes, security updates
  • -
  • Pre-Release (x.y.z-alpha/beta/rc): Development/testing releases
  • -
-

Step-by-Step Release Process

-

1. Preparation Phase

-

Pre-Release Checklist:

-
# Update dependencies and security
-cargo update
-cargo audit
-
-# Run comprehensive tests
-make ci-test
-
-# Update documentation
-make docs
-
-# Validate all configurations
-make validate-all
-
-

Version Planning:

-
# Check current version
-git describe --tags --always
-
-# Plan next version
-make status | grep Version
-
-# Validate version bump
-nu src/tools/release/create-release.nu --dry-run --version 2.1.0
-
-

2. Build Phase

-

Complete Build:

-
# Clean build environment
-make clean
-
-# Build all platforms and variants
-make all
-
-# Validate build output
-make test-dist
-
-

Build with Specific Parameters:

-
# Build for specific platforms
-make all PLATFORMS=linux-amd64,macos-amd64 VARIANTS=complete
-
-# Build with custom version
-make all VERSION=2.1.0-rc1
-
-# Parallel build for speed
-make all PARALLEL=true
-
-

3. Package Generation

-

Create Distribution Packages:

-
# Generate complete distributions
-make dist-generate
-
-# Create binary packages
-make package-binaries
-
-# Build container images
-make package-containers
-
-# Create installers
-make create-installers
-
-

Package Validation:

-
# Validate packages
-make test-dist
-
-# Check package contents
-nu src/tools/package/validate-package.nu packages/
-
-# Test installation
-make install
-make uninstall
-
-

4. Release Creation

-

Automated Release:

-
# Create complete release
-make release VERSION=2.1.0
-
-# Create draft release for review
-make release-draft VERSION=2.1.0
-
-# Manual release creation
-nu src/tools/release/create-release.nu \
-    --version 2.1.0 \
-    --generate-changelog \
-    --push-tag \
-    --auto-upload
-
-

Release Options:

-
    -
  • --pre-release: Mark as pre-release
  • -
  • --draft: Create draft release
  • -
  • --generate-changelog: Auto-generate changelog from commits
  • -
  • --push-tag: Push git tag to remote
  • -
  • --auto-upload: Upload assets automatically
  • -
-

5. Distribution and Notification

-

Upload Artifacts:

-
# Upload to GitHub Releases
-make upload-artifacts
-
-# Update package registries
-make update-registry
-
-# Send notifications
-make notify-release
-
-

Registry Updates:

-
# Update Homebrew formula
-nu src/tools/release/update-registry.nu \
-    --registries homebrew \
-    --version 2.1.0 \
-    --auto-commit
-
-# Custom registry updates
-nu src/tools/release/update-registry.nu \
-    --registries custom \
-    --registry-url https://packages.company.com \
-    --credentials-file ~/.registry-creds
-
-

Release Automation

-

Complete Automated Release:

-
# Full release pipeline
-make cd-deploy VERSION=2.1.0
-
-# Equivalent manual steps:
-make clean
-make all VERSION=2.1.0
-make create-archives
-make create-installers
-make release VERSION=2.1.0
-make upload-artifacts
-make update-registry
-make notify-release
-
-

Package Generation

-

Binary Packages

-

Package Types:

-
    -
  • Standalone Archives: TAR.GZ and ZIP with all dependencies
  • -
  • Platform Packages: DEB, RPM, MSI, PKG with system integration
  • -
  • Portable Packages: Single-directory distributions
  • -
  • Source Packages: Source code with build instructions
  • -
-

Create Binary Packages:

-
# Standard binary packages
-make package-binaries
-
-# Custom package creation
-nu src/tools/package/package-binaries.nu \
-    --source-dir dist/platform \
-    --output-dir packages/binaries \
-    --platforms linux-amd64,macos-amd64 \
-    --format archive \
-    --compress \
-    --strip \
-    --checksum
-
-

Package Features:

-
    -
  • Binary Stripping: Removes debug symbols for smaller size
  • -
  • Compression: GZIP, LZMA, and Brotli compression
  • -
  • Checksums: SHA256 and MD5 verification
  • -
  • Signatures: GPG and code signing support
  • -
-

Container Images

-

Container Build Process:

-
# Build container images
-make package-containers
-
-# Advanced container build
-nu src/tools/package/build-containers.nu \
-    --dist-dir dist \
-    --tag-prefix provisioning \
-    --version 2.1.0 \
-    --platforms "linux/amd64,linux/arm64" \
-    --optimize-size \
-    --security-scan \
-    --multi-stage
-
-

Container Features:

-
    -
  • Multi-Stage Builds: Minimal runtime images
  • -
  • Security Scanning: Vulnerability detection
  • -
  • Multi-Platform: AMD64, ARM64 support
  • -
  • Layer Optimization: Efficient layer caching
  • -
  • Runtime Configuration: Environment-based configuration
  • -
-

Container Registry Support:

-
    -
  • Docker Hub
  • -
  • GitHub Container Registry
  • -
  • Amazon ECR
  • -
  • Google Container Registry
  • -
  • Azure Container Registry
  • -
  • Private registries
  • -
-

Installers

-

Installer Types:

-
    -
  • Shell Script Installer: Universal Unix/Linux installer
  • -
  • Package Installers: DEB, RPM, MSI, PKG
  • -
  • Container Installer: Docker/Podman setup
  • -
  • Source Installer: Build-from-source installer
  • -
-

Create Installers:

-
# Generate all installer types
-make create-installers
-
-# Custom installer creation
-nu src/tools/distribution/create-installer.nu \
-    dist/provisioning-2.1.0-linux-amd64-complete \
-    --output-dir packages/installers \
-    --installer-types shell,package \
-    --platforms linux,macos \
-    --include-services \
-    --create-uninstaller \
-    --validate-installer
-
-

Installer Features:

-
    -
  • System Integration: Systemd/Launchd service files
  • -
  • Path Configuration: Automatic PATH updates
  • -
  • User/System Install: Support for both user and system-wide installation
  • -
  • Uninstaller: Clean removal capability
  • -
  • Dependency Management: Automatic dependency resolution
  • -
  • Configuration Setup: Initial configuration creation
  • -
-

Multi-Platform Distribution

-

Supported Platforms

-

Primary Platforms:

-
    -
  • Linux AMD64 (x86_64-unknown-linux-gnu)
  • -
  • Linux ARM64 (aarch64-unknown-linux-gnu)
  • -
  • macOS AMD64 (x86_64-apple-darwin)
  • -
  • macOS ARM64 (aarch64-apple-darwin)
  • -
  • Windows AMD64 (x86_64-pc-windows-gnu)
  • -
  • FreeBSD AMD64 (x86_64-unknown-freebsd)
  • -
-

Platform-Specific Features:

-
    -
  • Linux: SystemD integration, package manager support
  • -
  • macOS: LaunchAgent services, Homebrew packages
  • -
  • Windows: Windows Service support, MSI installers
  • -
  • FreeBSD: RC scripts, pkg packages
  • -
-

Cross-Platform Build

-

Cross-Compilation Setup:

-
# Install cross-compilation targets
-rustup target add aarch64-unknown-linux-gnu
-rustup target add x86_64-apple-darwin
-rustup target add aarch64-apple-darwin
-rustup target add x86_64-pc-windows-gnu
-
-# Install cross-compilation tools
-cargo install cross
-
-

Platform-Specific Builds:

-
# Build for specific platform
-make build-platform RUST_TARGET=aarch64-apple-darwin
-
-# Build for multiple platforms
-make build-cross PLATFORMS=linux-amd64,macos-arm64,windows-amd64
-
-# Platform-specific distributions
-make linux
-make macos
-make windows
-
-

Distribution Matrix

-

Generated Distributions:

-
Distribution Matrix:
-provisioning-{version}-{platform}-{variant}.{format}
-
-Examples:
-- provisioning-2.1.0-linux-amd64-complete.tar.gz
-- provisioning-2.1.0-macos-arm64-minimal.tar.gz
-- provisioning-2.1.0-windows-amd64-complete.zip
-- provisioning-2.1.0-freebsd-amd64-minimal.tar.xz
-
-

Platform Considerations:

-
    -
  • File Permissions: Executable permissions on Unix systems
  • -
  • Path Separators: Platform-specific path handling
  • -
  • Service Integration: Platform-specific service management
  • -
  • Package Formats: TAR.GZ for Unix, ZIP for Windows
  • -
  • Line Endings: CRLF for Windows, LF for Unix
  • -
-

Validation and Testing

-

Distribution Validation

-

Validation Pipeline:

-
# Complete validation
-make test-dist
-
-# Custom validation
-nu src/tools/build/test-distribution.nu \
-    --dist-dir dist \
-    --test-types basic,integration,complete \
-    --platform linux \
-    --cleanup \
-    --verbose
-
-

Validation Types:

-
    -
  • Basic: Installation test, CLI help, version check
  • -
  • Integration: Server creation, configuration validation
  • -
  • Complete: Full workflow testing including cluster operations
  • -
-

Testing Framework

-

Test Categories:

-
    -
  • Unit Tests: Component-specific testing
  • -
  • Integration Tests: Cross-component testing
  • -
  • End-to-End Tests: Complete workflow testing
  • -
  • Performance Tests: Load and performance validation
  • -
  • Security Tests: Security scanning and validation
  • -
-

Test Execution:

-
# Run all tests
-make ci-test
-
-# Specific test types
-nu src/tools/build/test-distribution.nu --test-types basic
-nu src/tools/build/test-distribution.nu --test-types integration
-nu src/tools/build/test-distribution.nu --test-types complete
-
-

Package Validation

-

Package Integrity:

-
# Validate package structure
-nu src/tools/package/validate-package.nu dist/
-
-# Check checksums
-sha256sum -c packages/checksums.sha256
-
-# Verify signatures
-gpg --verify packages/provisioning-2.1.0.tar.gz.sig
-
-

Installation Testing:

-
# Test installation process
-./packages/installers/install-provisioning-2.1.0.sh --dry-run
-
-# Test uninstallation
-./packages/installers/uninstall-provisioning.sh --dry-run
-
-# Container testing
-docker run --rm provisioning:2.1.0 provisioning --version
-
-

Release Management

-

Release Workflow

-

GitHub Release Integration:

-
# Create GitHub release
-nu src/tools/release/create-release.nu \
-    --version 2.1.0 \
-    --asset-dir packages \
-    --generate-changelog \
-    --push-tag \
-    --auto-upload
-
-

Release Features:

-
    -
  • Automated Changelog: Generated from git commit history
  • -
  • Asset Management: Automatic upload of all distribution artifacts
  • -
  • Tag Management: Semantic version tagging
  • -
  • Release Notes: Formatted release notes with change summaries
  • -
-

Versioning Strategy

-

Semantic Versioning:

-
    -
  • MAJOR.MINOR.PATCH format (for example, 2.1.0)
  • -
  • Pre-release suffixes (for example, 2.1.0-alpha.1, 2.1.0-rc.2)
  • -
  • Build metadata (for example, 2.1.0+20250925.abcdef)
  • -
-

Version Detection:

-
# Auto-detect next version
-nu src/tools/release/create-release.nu --release-type minor
-
-# Manual version specification
-nu src/tools/release/create-release.nu --version 2.1.0
-
-# Pre-release versioning
-nu src/tools/release/create-release.nu --version 2.1.0-rc.1 --pre-release
-
-

Artifact Management

-

Artifact Types:

-
    -
  • Source Archives: Complete source code distributions
  • -
  • Binary Archives: Compiled binary distributions
  • -
  • Container Images: OCI-compliant container images
  • -
  • Installers: Platform-specific installation packages
  • -
  • Documentation: Generated documentation packages
  • -
-

Upload and Distribution:

-
# Upload to GitHub Releases
-make upload-artifacts
-
-# Upload to container registries
-docker push provisioning:2.1.0
-
-# Update package repositories
-make update-registry
-
-

Rollback Procedures

-

Rollback Scenarios

-

Common Rollback Triggers:

-
    -
  • Critical bugs discovered post-release
  • -
  • Security vulnerabilities identified
  • -
  • Performance regression
  • -
  • Compatibility issues
  • -
  • Infrastructure failures
  • -
-

Rollback Process

-

Automated Rollback:

-
# Rollback latest release
-nu src/tools/release/rollback-release.nu --version 2.1.0
-
-# Rollback with specific target
-nu src/tools/release/rollback-release.nu \
-    --from-version 2.1.0 \
-    --to-version 2.0.5 \
-    --update-registries \
-    --notify-users
-
-

Manual Rollback Steps:

-
# 1. Identify target version
-git tag -l | grep -v 2.1.0 | tail -5
-
-# 2. Create rollback release
-nu src/tools/release/create-release.nu \
-    --version 2.0.6 \
-    --rollback-from 2.1.0 \
-    --urgent
-
-# 3. Update package managers
-nu src/tools/release/update-registry.nu \
-    --version 2.0.6 \
-    --rollback-notice "Critical fix for 2.1.0 issues"
-
-# 4. Notify users
-nu src/tools/release/notify-users.nu \
-    --channels slack,discord,email \
-    --message-type rollback \
-    --urgent
-
-

Rollback Safety

-

Pre-Rollback Validation:

-
    -
  • Validate target version integrity
  • -
  • Check compatibility matrix
  • -
  • Verify rollback procedure testing
  • -
  • Confirm communication plan
  • -
-

Rollback Testing:

-
# Test rollback in staging
-nu src/tools/release/rollback-release.nu \
-    --version 2.1.0 \
-    --target-version 2.0.5 \
-    --dry-run \
-    --staging-environment
-
-# Validate rollback success
-make test-dist DIST_VERSION=2.0.5
-
-

Emergency Procedures

-

Critical Security Rollback:

-
# Emergency rollback (bypasses normal procedures)
-nu src/tools/release/rollback-release.nu \
-    --version 2.1.0 \
-    --emergency \
-    --security-issue \
-    --immediate-notify
-
-

Infrastructure Failure Recovery:

-
# Failover to backup infrastructure
-nu src/tools/release/rollback-release.nu \
-    --infrastructure-failover \
-    --backup-registry \
-    --mirror-sync
-
-

CI/CD Integration

-

GitHub Actions Integration

-

Build Workflow (.github/workflows/build.yml):

-
name: Build and Distribute
-on:
-  push:
-    branches: [main]
-  pull_request:
-    branches: [main]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        platform: [linux, macos, windows]
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Setup Nushell
-        uses: hustcer/setup-nu@v3.5
-
-      - name: Setup Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-
-      - name: CI Build
-        run: |
-          cd src/tools
-          make ci-build
-
-      - name: Upload Build Artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: build-${{ matrix.platform }}
-          path: src/dist/
-
-

Release Workflow (.github/workflows/release.yml):

-
name: Release
-on:
-  push:
-    tags: ['v*']
-
-jobs:
-  release:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Build Release
-        run: |
-          cd src/tools
-          make ci-release VERSION=${{ github.ref_name }}
-
-      - name: Create Release
-        run: |
-          cd src/tools
-          make release VERSION=${{ github.ref_name }}
-
-      - name: Update Registries
-        run: |
-          cd src/tools
-          make update-registry VERSION=${{ github.ref_name }}
-
-

GitLab CI Integration

-

GitLab CI Configuration (.gitlab-ci.yml):

-
stages:
-  - build
-  - package
-  - test
-  - release
-
-build:
-  stage: build
-  script:
-    - cd src/tools
-    - make ci-build
-  artifacts:
-    paths:
-      - src/dist/
-    expire_in: 1 hour
-
-package:
-  stage: package
-  script:
-    - cd src/tools
-    - make package-all
-  artifacts:
-    paths:
-      - src/packages/
-    expire_in: 1 day
-
-release:
-  stage: release
-  script:
-    - cd src/tools
-    - make cd-deploy VERSION=${CI_COMMIT_TAG}
-  only:
-    - tags
-
-

Jenkins Integration

-

Jenkinsfile:

-
pipeline {
-    agent any
-
-    stages {
-        stage('Build') {
-            steps {
-                dir('src/tools') {
-                    sh 'make ci-build'
-                }
-            }
-        }
-
-        stage('Package') {
-            steps {
-                dir('src/tools') {
-                    sh 'make package-all'
-                }
-            }
-        }
-
-        stage('Release') {
-            when {
-                tag '*'
-            }
-            steps {
-                dir('src/tools') {
-                    sh "make cd-deploy VERSION=${env.TAG_NAME}"
-                }
-            }
-        }
-    }
-}
-
-

Troubleshooting

-

Common Issues

-

Build Failures

-

Rust Compilation Errors:

-
# Solution: Clean and rebuild
-make clean
-cargo clean
-make build-platform
-
-# Check Rust toolchain
-rustup show
-rustup update
-
-

Cross-Compilation Issues:

-
# Solution: Install missing targets
-rustup target list --installed
-rustup target add x86_64-apple-darwin
-
-# Use cross for problematic targets
-cargo install cross
-make build-platform CROSS=true
-
-

Package Generation Issues

-

Missing Dependencies:

-
# Solution: Install build tools
-sudo apt-get install build-essential
-brew install gnu-tar
-
-# Check tool availability
-make info
-
-

Permission Errors:

-
# Solution: Fix permissions
-chmod +x src/tools/build/*.nu
-chmod +x src/tools/distribution/*.nu
-chmod +x src/tools/package/*.nu
-
-

Distribution Validation Failures

-

Package Integrity Issues:

-
# Solution: Regenerate packages
-make clean-dist
-make package-all
-
-# Verify manually
-sha256sum packages/*.tar.gz
-
-

Installation Test Failures:

-
# Solution: Test in clean environment
-docker run --rm -v $(pwd):/work ubuntu:latest /work/packages/installers/install.sh
-
-# Debug installation
-./packages/installers/install.sh --dry-run --verbose
-
-

Release Issues

-

Upload Failures

-

Network Issues:

-
# Solution: Retry with backoff
-nu src/tools/release/upload-artifacts.nu \
-    --retry-count 5 \
-    --backoff-delay 30
-
-# Manual upload
-gh release upload v2.1.0 packages/*.tar.gz
-
-

Authentication Failures:

-
# Solution: Refresh tokens
-gh auth refresh
-docker login ghcr.io
-
-# Check credentials
-gh auth status
-docker system info
-
-

Registry Update Issues

-

Homebrew Formula Issues:

-
# Solution: Manual PR creation
-git clone https://github.com/Homebrew/homebrew-core
-cd homebrew-core
-# Edit formula
-git add Formula/provisioning.rb
-git commit -m "provisioning 2.1.0"
-
-

Debug and Monitoring

-

Debug Mode:

-
# Enable debug logging
-export PROVISIONING_DEBUG=true
-export RUST_LOG=debug
-
-# Run with verbose output
-make all VERBOSE=true
-
-# Debug specific components
-nu src/tools/distribution/generate-distribution.nu \
-    --verbose \
-    --dry-run
-
-

Monitoring Build Progress:

-
# Monitor build logs
-tail -f src/tools/build.log
-
-# Check build status
-make status
-
-# Resource monitoring
-top
-df -h
-
-

This distribution process provides a robust, automated pipeline for creating, validating, and distributing provisioning across multiple platforms -while maintaining high quality and reliability standards.

-

Repository Restructuring - Implementation Guide

-

Status: Ready for Implementation -Estimated Time: 12-16 days -Priority: High -Related: Architecture Analysis

-

Overview

-

This guide provides step-by-step instructions for implementing the repository restructuring and distribution system improvements. Each phase includes -specific commands, validation steps, and rollback procedures.

-
-

Prerequisites

-

Required Tools

-
    -
  • Nushell 0.107.1+
  • -
  • Rust toolchain (for platform builds)
  • -
  • Git
  • -
  • tar/gzip
  • -
  • curl or wget
  • -
- -
    -
  • Just (task runner)
  • -
  • ripgrep (for code searches)
  • -
  • fd (for file finding)
  • -
-

Before Starting

-
    -
  1. Create full backup
  2. -
  3. Notify team members
  4. -
  5. Create implementation branch
  6. -
  7. Set aside dedicated time
  8. -
-
-

Phase 1: Repository Restructuring (Days 1-4)

-

Day 1: Backup and Analysis

-

Step 1.1: Create Complete Backup

-
# Create timestamped backup
-BACKUP_DIR="/Users/Akasha/project-provisioning-backup-$(date +%Y%m%d)"
-cp -r /Users/Akasha/project-provisioning "$BACKUP_DIR"
-
-# Verify backup
-ls -lh "$BACKUP_DIR"
-du -sh "$BACKUP_DIR"
-
-# Create backup manifest
-find "$BACKUP_DIR" -type f > "$BACKUP_DIR/manifest.txt"
-echo "✅ Backup created: $BACKUP_DIR"
-
-

Step 1.2: Analyze Current State

-
cd /Users/Akasha/project-provisioning
-
-# Count workspace directories
-echo "=== Workspace Directories ==="
-fd workspace -t d
-
-# Analyze workspace contents
-echo "=== Active Workspace ==="
-du -sh workspace/
-
-echo "=== Backup Workspaces ==="
-du -sh _workspace/ backup-workspace/ workspace-librecloud/
-
-# Find obsolete directories
-echo "=== Build Artifacts ==="
-du -sh target/ wrks/ NO/
-
-# Save analysis
-{
-    echo "# Current State Analysis - $(date)"
-    echo ""
-    echo "## Workspace Directories"
-    fd workspace -t d
-    echo ""
-    echo "## Directory Sizes"
-    du -sh workspace/ _workspace/ backup-workspace/ workspace-librecloud/ 2>/dev/null
-    echo ""
-    echo "## Build Artifacts"
-    du -sh target/ wrks/ NO/ 2>/dev/null
-} > docs/development/current-state-analysis.txt
-
-echo "✅ Analysis complete: docs/development/current-state-analysis.txt"
-
-

Step 1.3: Identify Dependencies

-
# Find all hardcoded paths
-echo "=== Hardcoded Paths in Nushell Scripts ==="
-rg -t nu "workspace/|_workspace/|backup-workspace/" provisioning/core/nulib/ | tee hardcoded-paths.txt
-
-# Find ENV references (legacy)
-echo "=== ENV References ==="
-rg "PROVISIONING_" provisioning/core/nulib/ | wc -l
-
-# Find workspace references in configs
-echo "=== Config References ==="
-rg "workspace" provisioning/config/
-
-echo "✅ Dependencies mapped"
-
-

Step 1.4: Create Implementation Branch

-
# Create and switch to implementation branch
-git checkout -b feat/repo-restructure
-
-# Commit analysis
-git add docs/development/current-state-analysis.txt
-git commit -m "docs: add current state analysis for restructuring"
-
-echo "✅ Implementation branch created: feat/repo-restructure"
-
-

Validation:

-
    -
  • ✅ Backup exists and is complete
  • -
  • ✅ Analysis document created
  • -
  • ✅ Dependencies mapped
  • -
  • ✅ Implementation branch ready
  • -
-
-

Day 2: Directory Restructuring

-

Step 2.1: Create New Directory Structure

-
cd /Users/Akasha/project-provisioning
-
-# Create distribution directory structure
-mkdir -p distribution/{packages,installers,registry}
-echo "✅ Created distribution/"
-
-# Create workspace structure (keep tracked templates)
-mkdir -p workspace/{infra,config,extensions,runtime}/{.gitkeep}
-mkdir -p workspace/templates/{minimal,kubernetes,multi-cloud}
-echo "✅ Created workspace/"
-
-# Verify
-tree -L 2 distribution/ workspace/
-
-

Step 2.2: Move Build Artifacts

-
# Move Rust build artifacts
-if [ -d "target" ]; then
-    mv target distribution/target
-    echo "✅ Moved target/ to distribution/"
-fi
-
-# Move KCL packages
-if [ -d "provisioning/tools/dist" ]; then
-    mv provisioning/tools/dist/* distribution/packages/ 2>/dev/null || true
-    echo "✅ Moved packages to distribution/"
-fi
-
-# Move any existing packages
-find . -name "*.tar.gz" -o -name "*.zip" | grep -v node_modules | while read pkg; do
-    mv "$pkg" distribution/packages/
-    echo "  Moved: $pkg"
-done
-
-

Step 2.3: Consolidate Workspaces

-
# Identify active workspace
-echo "=== Current Workspace Status ==="
-ls -la workspace/ _workspace/ backup-workspace/ 2>/dev/null
-
-# Interactive workspace consolidation
-read -p "Which workspace is currently active? (workspace/_workspace/backup-workspace): " ACTIVE_WS
-
-if [ "$ACTIVE_WS" != "workspace" ]; then
-    echo "Consolidating $ACTIVE_WS to workspace/"
-
-    # Merge infra configs
-    if [ -d "$ACTIVE_WS/infra" ]; then
-        cp -r "$ACTIVE_WS/infra/"* workspace/infra/
-    fi
-
-    # Merge configs
-    if [ -d "$ACTIVE_WS/config" ]; then
-        cp -r "$ACTIVE_WS/config/"* workspace/config/
-    fi
-
-    # Merge extensions
-    if [ -d "$ACTIVE_WS/extensions" ]; then
-        cp -r "$ACTIVE_WS/extensions/"* workspace/extensions/
-    fi
-
-    echo "✅ Consolidated workspace"
-fi
-
-# Archive old workspace directories
-mkdir -p .archived-workspaces
-for ws in _workspace backup-workspace workspace-librecloud; do
-    if [ -d "$ws" ] && [ "$ws" != "$ACTIVE_WS" ]; then
-        mv "$ws" ".archived-workspaces/$(basename $ws)-$(date +%Y%m%d)"
-        echo "  Archived: $ws"
-    fi
-done
-
-echo "✅ Workspaces consolidated"
-
-

Step 2.4: Remove Obsolete Directories

-
# Remove build artifacts (already moved)
-rm -rf wrks/
-echo "✅ Removed wrks/"
-
-# Remove test/scratch directories
-rm -rf NO/
-echo "✅ Removed NO/"
-
-# Archive presentations (optional)
-if [ -d "presentations" ]; then
-    read -p "Archive presentations directory? (y/N): " ARCHIVE_PRES
-    if [ "$ARCHIVE_PRES" = "y" ]; then
-        tar czf presentations-archive-$(date +%Y%m%d).tar.gz presentations/
-        rm -rf presentations/
-        echo "✅ Archived and removed presentations/"
-    fi
-fi
-
-# Remove empty directories
-find . -type d -empty -delete 2>/dev/null || true
-
-echo "✅ Cleanup complete"
-
-

Step 2.5: Update .gitignore

-
# Backup existing .gitignore
-cp .gitignore .gitignore.backup
-
-# Update .gitignore
-cat >> .gitignore << 'EOF'
-
-# ============================================================================
-# Repository Restructure (2025-10-01)
-# ============================================================================
-
-# Workspace runtime data (user-specific)
-/workspace/infra/
-/workspace/config/
-/workspace/extensions/
-/workspace/runtime/
-
-# Distribution artifacts
-/distribution/packages/
-/distribution/target/
-
-# Build artifacts
-/target/
-/provisioning/platform/target/
-/provisioning/platform/*/target/
-
-# Rust artifacts
-**/*.rs.bk
-Cargo.lock
-
-# Archived directories
-/.archived-workspaces/
-
-# Temporary files
-*.tmp
-*.temp
-/tmp/
-/wrks/
-/NO/
-
-# Logs
-*.log
-/workspace/runtime/logs/
-
-# Cache
-.cache/
-/workspace/runtime/cache/
-
-# IDE
-.vscode/
-.idea/
-*.swp
-*.swo
-*~
-
-# OS
-.DS_Store
-Thumbs.db
-
-# Backup files
-*.backup
-*.bak
-
+          claimName: test-pvc
 EOF
 
-echo "✅ Updated .gitignore"
+# Verify deployment
+kubectl get deployment test-nginx
+kubectl get pods -l app=nginx
+kubectl get pvc test-pvc
 
-

Step 2.6: Commit Restructuring

-
# Stage changes
-git add -A
-
-# Show what's being committed
-git status
-
-# Commit
-git commit -m "refactor: restructure repository for clean distribution
-
-- Consolidate workspace directories to single workspace/
-- Move build artifacts to distribution/
-- Remove obsolete directories (wrks/, NO/)
-- Update .gitignore for new structure
-- Archive old workspace variants
-
-This is part of Phase 1 of the repository restructuring plan.
-
-Related: docs/architecture/repo-dist-analysis.md"
-
-echo "✅ Restructuring committed"
-
-

Validation:

-
    -
  • ✅ Single workspace/ directory exists
  • -
  • ✅ Build artifacts in distribution/
  • -
  • ✅ No wrks/, NO/ directories
  • -
  • .gitignore updated
  • -
  • ✅ Changes committed
  • -
-
-

Day 3: Update Path References

-

Step 3.1: Create Path Update Script

-
# Create migration script
-cat > provisioning/tools/migration/update-paths.nu << 'EOF'
-#!/usr/bin/env nu
-# Path update script for repository restructuring
-
-# Find and replace path references
-export def main [] {
-    print "🔧 Updating path references..."
-
-    let replacements = [
-        ["_workspace/" "workspace/"]
-        ["backup-workspace/" "workspace/"]
-        ["workspace-librecloud/" "workspace/"]
-        ["wrks/" "distribution/"]
-        ["NO/" "distribution/"]
-    ]
-
-    let files = (fd -e nu -e toml -e md . provisioning/)
-
-    mut updated_count = 0
-
-    for file in $files {
-        mut content = (open $file)
-        mut modified = false
-
-        for replacement in $replacements {
-            let old = $replacement.0
-            let new = $replacement.1
-
-            if ($content | str contains $old) {
-                $content = ($content | str replace -a $old $new)
-                $modified = true
-            }
-        }
-
-        if $modified {
-            $content | save -f $file
-            $updated_count = $updated_count + 1
-            print $"  ✓ Updated: ($file)"
-        }
-    }
-
-    print $"✅ Updated ($updated_count) files"
-}
-EOF
-
-chmod +x provisioning/tools/migration/update-paths.nu
-
-

Step 3.2: Run Path Updates

-
# Create backup before updates
-git stash
-git checkout -b feat/path-updates
-
-# Run update script
-nu provisioning/tools/migration/update-paths.nu
-
-# Review changes
-git diff
-
-# Test a sample file
-nu -c "use provisioning/core/nulib/servers/create.nu; print 'OK'"
-
-

Step 3.3: Update CLAUDE.md

-
# Update CLAUDE.md with new paths
-cat > CLAUDE.md.new << 'EOF'
-# CLAUDE.md
-
-[Keep existing content, update paths section...]
-
-## Updated Path Structure (2025-10-01)
-
-### Core System
-- **Main CLI**: `provisioning/core/cli/provisioning`
-- **Libraries**: `provisioning/core/nulib/`
-- **Extensions**: `provisioning/extensions/`
-- **Platform**: `provisioning/platform/`
-
-### User Workspace
-- **Active Workspace**: `workspace/` (gitignored runtime data)
-- **Templates**: `workspace/templates/` (tracked)
-- **Infrastructure**: `workspace/infra/` (user configs, gitignored)
-
-### Build System
-- **Distribution**: `distribution/` (gitignored artifacts)
-- **Packages**: `distribution/packages/`
-- **Installers**: `distribution/installers/`
-
-[Continue with rest of content...]
-EOF
-
-# Review changes
-diff CLAUDE.md CLAUDE.md.new
-
-# Apply if satisfied
-mv CLAUDE.md.new CLAUDE.md
-
-

Step 3.4: Update Documentation

-
# Find all documentation files
-fd -e md . docs/
-
-# Update each doc with new paths
-# This is semi-automated - review each file
-
-# Create list of docs to update
-fd -e md . docs/ > docs-to-update.txt
-
-# Manual review and update
-echo "Review and update each documentation file with new paths"
-echo "Files listed in: docs-to-update.txt"
-
-

Step 3.5: Commit Path Updates

-
git add -A
-git commit -m "refactor: update all path references for new structure
-
-- Update Nushell scripts to use workspace/ instead of variants
-- Update CLAUDE.md with new path structure
-- Update documentation references
-- Add migration script for future path changes
-
-Phase 1.3 of repository restructuring."
-
-echo "✅ Path updates committed"
-
-

Validation:

-
    -
  • ✅ All Nushell scripts reference correct paths
  • -
  • ✅ CLAUDE.md updated
  • -
  • ✅ Documentation updated
  • -
  • ✅ No references to old paths remain
  • -
-
-

Day 4: Validation and Testing

-

Step 4.1: Automated Validation

-
# Create validation script
-cat > provisioning/tools/validation/validate-structure.nu << 'EOF'
-#!/usr/bin/env nu
-# Repository structure validation
-
-export def main [] {
-    print "🔍 Validating repository structure..."
-
-    mut passed = 0
-    mut failed = 0
-
-    # Check required directories exist
-    let required_dirs = [
-        "provisioning/core"
-        "provisioning/extensions"
-        "provisioning/platform"
-        "provisioning/schemas"
-        "workspace"
-        "workspace/templates"
-        "distribution"
-        "docs"
-        "tests"
-    ]
-
-    for dir in $required_dirs {
-        if ($dir | path exists) {
-            print $"  ✓ ($dir)"
-            $passed = $passed + 1
-        } else {
-            print $"  ✗ ($dir) MISSING"
-            $failed = $failed + 1
-        }
-    }
-
-    # Check obsolete directories don't exist
-    let obsolete_dirs = [
-        "_workspace"
-        "backup-workspace"
-        "workspace-librecloud"
-        "wrks"
-        "NO"
-    ]
-
-    for dir in $obsolete_dirs {
-        if not ($dir | path exists) {
-            print $"  ✓ ($dir) removed"
-            $passed = $passed + 1
-        } else {
-            print $"  ✗ ($dir) still exists"
-            $failed = $failed + 1
-        }
-    }
-
-    # Check no old path references
-    let old_paths = ["_workspace/" "backup-workspace/" "wrks/"]
-    for path in $old_paths {
-        let results = (rg -l $path provisioning/ --iglob "!*.md" 2>/dev/null | lines)
-        if ($results | is-empty) {
-            print $"  ✓ No references to ($path)"
-            $passed = $passed + 1
-        } else {
-            print $"  ✗ Found references to ($path):"
-            $results | each { |f| print $"    - ($f)" }
-            $failed = $failed + 1
-        }
-    }
-
-    print ""
-    print $"Results: ($passed) passed, ($failed) failed"
-
-    if $failed > 0 {
-        error make { msg: "Validation failed" }
-    }
-
-    print "✅ Validation passed"
-}
-EOF
-
-chmod +x provisioning/tools/validation/validate-structure.nu
-
-# Run validation
-nu provisioning/tools/validation/validate-structure.nu
-
-

Step 4.2: Functional Testing

-
# Test core commands
-echo "=== Testing Core Commands ==="
-
-# Version
-provisioning/core/cli/provisioning version
-echo "✓ version command"
-
-# Help
-provisioning/core/cli/provisioning help
-echo "✓ help command"
-
-# List
-provisioning/core/cli/provisioning list servers
-echo "✓ list command"
-
-# Environment
-provisioning/core/cli/provisioning env
-echo "✓ env command"
-
-# Validate config
-provisioning/core/cli/provisioning validate config
-echo "✓ validate command"
-
-echo "✅ Functional tests passed"
-
-

Step 4.3: Integration Testing

-
# Test workflow system
-echo "=== Testing Workflow System ==="
-
-# List workflows
-nu -c "use provisioning/core/nulib/workflows/management.nu *; workflow list"
-echo "✓ workflow list"
-
-# Test workspace commands
-echo "=== Testing Workspace Commands ==="
-
-# Workspace info
-provisioning/core/cli/provisioning workspace info
-echo "✓ workspace info"
-
-echo "✅ Integration tests passed"
-
-

Step 4.4: Create Test Report

-
{
-    echo "# Repository Restructuring - Validation Report"
-    echo "Date: $(date)"
-    echo ""
-    echo "## Structure Validation"
-    nu provisioning/tools/validation/validate-structure.nu 2>&1
-    echo ""
-    echo "## Functional Tests"
-    echo "✓ version command"
-    echo "✓ help command"
-    echo "✓ list command"
-    echo "✓ env command"
-    echo "✓ validate command"
-    echo ""
-    echo "## Integration Tests"
-    echo "✓ workflow list"
-    echo "✓ workspace info"
-    echo ""
-    echo "## Conclusion"
-    echo "✅ Phase 1 validation complete"
-} > docs/development/phase1-validation-report.md
-
-echo "✅ Test report created: docs/development/phase1-validation-report.md"
-
-

Step 4.5: Update README

-
# Update main README with new structure
-# This is manual - review and update README.md
-
-echo "📝 Please review and update README.md with new structure"
-echo "   - Update directory structure diagram"
-echo "   - Update installation instructions"
-echo "   - Update quick start guide"
-
-

Step 4.6: Finalize Phase 1

-
# Commit validation and reports
-git add -A
-git commit -m "test: add validation for repository restructuring
-
-- Add structure validation script
-- Add functional tests
-- Add integration tests
-- Create validation report
-- Document Phase 1 completion
-
-Phase 1 complete: Repository restructuring validated."
-
-# Merge to implementation branch
-git checkout feat/repo-restructure
-git merge feat/path-updates
-
-echo "✅ Phase 1 complete and merged"
-
-

Validation:

-
    -
  • ✅ All validation tests pass
  • -
  • ✅ Functional tests pass
  • -
  • ✅ Integration tests pass
  • -
  • ✅ Validation report created
  • -
  • ✅ README updated
  • -
  • ✅ Phase 1 changes merged
  • -
-
-

Phase 2: Build System Implementation (Days 5-8)

-

Day 5: Build System Core

-

Step 5.1: Create Build Tools Directory

-
mkdir -p provisioning/tools/build
-cd provisioning/tools/build
-
-# Create directory structure
-mkdir -p {core,platform,extensions,validation,distribution}
-
-echo "✅ Build tools directory created"
-
-

Step 5.2: Implement Core Build System

-
# Create main build orchestrator
-# See full implementation in repo-dist-analysis.md
-# Copy build-system.nu from the analysis document
-
-# Test build system
-nu build-system.nu status
-
-

Step 5.3: Implement Core Packaging

-
# Create package-core.nu
-# This packages Nushell libraries, KCL schemas, templates
-
-# Test core packaging
-nu build-system.nu build-core --version dev
-
-

Step 5.4: Create Justfile

-
# Create Justfile in project root
-# See full Justfile in repo-dist-analysis.md
-
-# Test Justfile
-just --list
-just status
-
-

Validation:

-
    -
  • ✅ Build system structure exists
  • -
  • ✅ Core build orchestrator works
  • -
  • ✅ Core packaging works
  • -
  • ✅ Justfile functional
  • -
-

Day 6-8: Continue with Platform, Extensions, and Validation

-

[Follow similar pattern for remaining build system components]

-
-

Phase 3: Installation System (Days 9-11)

-

Day 9: Nushell Installer

-

Step 9.1: Create install.nu

-
mkdir -p distribution/installers
-
-# Create install.nu
-# See full implementation in repo-dist-analysis.md
-
-

Step 9.2: Test Installation

-
# Test installation to /tmp
-nu distribution/installers/install.nu --prefix /tmp/provisioning-test
-
-# Verify
-ls -lh /tmp/provisioning-test/
-
-# Test uninstallation
-nu distribution/installers/install.nu uninstall --prefix /tmp/provisioning-test
-
-

Validation:

-
    -
  • ✅ Installer works
  • -
  • ✅ Files installed to correct locations
  • -
  • ✅ Uninstaller works
  • -
  • ✅ No files left after uninstall
  • -
-
-

Rollback Procedures

-

If Phase 1 Fails

-
# Restore from backup
-rm -rf /Users/Akasha/project-provisioning
-cp -r "$BACKUP_DIR" /Users/Akasha/project-provisioning
-
-# Return to main branch
-cd /Users/Akasha/project-provisioning
-git checkout main
-git branch -D feat/repo-restructure
-
-

If Build System Fails

-
# Revert build system commits
-git checkout feat/repo-restructure
-git revert <commit-hash>
-
-

If Installation Fails

-
# Clean up test installation
-rm -rf /tmp/provisioning-test
-sudo rm -rf /usr/local/lib/provisioning
-sudo rm -rf /usr/local/share/provisioning
-
-
-

Checklist

-

Phase 1: Repository Restructuring

-
    -
  • -Day 1: Backup and analysis complete
  • -
  • -Day 2: Directory restructuring complete
  • -
  • -Day 3: Path references updated
  • -
  • -Day 4: Validation passed
  • -
-

Phase 2: Build System

-
    -
  • -Day 5: Core build system implemented
  • -
  • -Day 6: Platform/extensions packaging
  • -
  • -Day 7: Package validation
  • -
  • -Day 8: Build system tested
  • -
-

Phase 3: Installation

-
    -
  • -Day 9: Nushell installer created
  • -
  • -Day 10: Bash installer and CLI
  • -
  • -Day 11: Multi-OS testing
  • -
-

Phase 4: Registry (Optional)

-
    -
  • -Day 12: Registry system
  • -
  • -Day 13: Registry commands
  • -
  • -Day 14: Registry hosting
  • -
-

Phase 5: Documentation

-
    -
  • -Day 15: Documentation updated
  • -
  • -Day 16: Release prepared
  • -
-
-

Notes

-
    -
  • Take breaks between phases - Don’t rush
  • -
  • Test thoroughly - Each phase builds on previous
  • -
  • Commit frequently - Small, atomic commits
  • -
  • Document issues - Track any problems encountered
  • -
  • Ask for review - Get feedback at phase boundaries
  • -
-
-

Support

-

If you encounter issues:

-
    -
  1. Check the validation reports
  2. -
  3. Review the rollback procedures
  4. -
  5. Consult the architecture analysis
  6. -
  7. Create an issue in the tracker
  8. -
-

Project Structure Guide

-

This document provides a comprehensive overview of the provisioning project’s structure after the major reorganization, explaining both the new -development-focused organization and the preserved existing functionality.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. New Structure vs Legacy
  4. -
  5. Core Directories
  6. -
  7. Development Workspace
  8. -
  9. File Naming Conventions
  10. -
  11. Navigation Guide
  12. -
  13. Migration Path
  14. -
-

Overview

-

The provisioning project has been restructured to support a dual-organization approach:

-
    -
  • src/: Development-focused structure with build tools, distribution system, and core components
  • -
  • Legacy directories: Preserved in their original locations for backward compatibility
  • -
  • workspace/: Development workspace with tools and runtime management
  • -
-

This reorganization enables efficient development workflows while maintaining full backward compatibility with existing deployments.

-

New Structure vs Legacy

-

New Development Structure (/src/)

-
src/
-├── config/                      # System configuration
-├── control-center/              # Control center application
-├── control-center-ui/           # Web UI for control center
-├── core/                        # Core system libraries
-├── docs/                        # Documentation (new)
-├── extensions/                  # Extension framework
-├── generators/                  # Code generation tools
-├── schemas/                     # Nickel configuration schemas (migrated from kcl/)
-├── orchestrator/               # Hybrid Rust/Nushell orchestrator
-├── platform/                   # Platform-specific code
-├── provisioning/               # Main provisioning
-├── templates/                   # Template files
-├── tools/                      # Build and development tools
-└── utils/                      # Utility scripts
-
-

Legacy Structure (Preserved)

-
repo-cnz/
-├── cluster/                     # Cluster configurations (preserved)
-├── core/                        # Core system (preserved)
-├── generate/                    # Generation scripts (preserved)
-├── schemas/                     # Nickel schemas (migrated from kcl/)
-├── klab/                       # Development lab (preserved)
-├── nushell-plugins/            # Plugin development (preserved)
-├── providers/                  # Cloud providers (preserved)
-├── taskservs/                  # Task services (preserved)
-└── templates/                  # Template files (preserved)
-
-

Development Workspace (/workspace/)

-
workspace/
-├── config/                     # Development configuration
-├── extensions/                 # Extension development
-├── infra/                      # Development infrastructure
-├── lib/                        # Workspace libraries
-├── runtime/                    # Runtime data
-└── tools/                      # Workspace management tools
-
-

Core Directories

-

/src/core/ - Core Development Libraries

-

Purpose: Development-focused core libraries and entry points

-

Key Files:

-
    -
  • nulib/provisioning - Main CLI entry point (symlinks to legacy location)
  • -
  • nulib/lib_provisioning/ - Core provisioning libraries
  • -
  • nulib/workflows/ - Workflow management (orchestrator integration)
  • -
-

Relationship to Legacy: Preserves original core/ functionality while adding development enhancements

-

/src/tools/ - Build and Development Tools

-

Purpose: Complete build system for the provisioning project

-

Key Components:

-
tools/
-├── build/                      # Build tools
-│   ├── compile-platform.nu     # Platform-specific compilation
-│   ├── bundle-core.nu          # Core library bundling
-│   ├── validate-nickel.nu      # Nickel schema validation
-│   ├── clean-build.nu          # Build cleanup
-│   └── test-distribution.nu    # Distribution testing
-├── distribution/               # Distribution tools
-│   ├── generate-distribution.nu # Main distribution generator
-│   ├── prepare-platform-dist.nu # Platform-specific distribution
-│   ├── prepare-core-dist.nu    # Core distribution
-│   ├── create-installer.nu     # Installer creation
-│   └── generate-docs.nu        # Documentation generation
-├── package/                    # Packaging tools
-│   ├── package-binaries.nu     # Binary packaging
-│   ├── build-containers.nu     # Container image building
-│   ├── create-tarball.nu       # Archive creation
-│   └── validate-package.nu     # Package validation
-├── release/                    # Release management
-│   ├── create-release.nu       # Release creation
-│   ├── upload-artifacts.nu     # Artifact upload
-│   ├── rollback-release.nu     # Release rollback
-│   ├── notify-users.nu         # Release notifications
-│   └── update-registry.nu      # Package registry updates
-└── Makefile                    # Main build system (40+ targets)
-
-

/src/orchestrator/ - Hybrid Orchestrator

-

Purpose: Rust/Nushell hybrid orchestrator for solving deep call stack limitations

-

Key Components:

-
    -
  • src/ - Rust orchestrator implementation
  • -
  • scripts/ - Orchestrator management scripts
  • -
  • data/ - File-based task queue and persistence
  • -
-

Integration: Provides REST API and workflow management while preserving all Nushell business logic

-

/src/provisioning/ - Enhanced Provisioning

-

Purpose: Enhanced version of the main provisioning with additional features

-

Key Features:

-
    -
  • Batch workflow system (v3.1.0)
  • -
  • Provider-agnostic design
  • -
  • Configuration-driven architecture (v2.0.0)
  • -
-

/workspace/ - Development Workspace

-

Purpose: Complete development environment with tools and runtime management

-

Key Components:

-
    -
  • tools/workspace.nu - Unified workspace management interface
  • -
  • lib/path-resolver.nu - Smart path resolution system
  • -
  • config/ - Environment-specific development configurations
  • -
  • extensions/ - Extension development templates and examples
  • -
  • infra/ - Development infrastructure examples
  • -
  • runtime/ - Isolated runtime data per user
  • -
-

Development Workspace

-

Workspace Management

-

The workspace provides a sophisticated development environment:

-

Initialization:

-
cd workspace/tools
-nu workspace.nu init --user-name developer --infra-name my-infra
-
-

Health Monitoring:

-
nu workspace.nu health --detailed --fix-issues
-
-

Path Resolution:

-
use lib/path-resolver.nu
-let config = (path-resolver resolve_config "user" --workspace-user "john")
-
-

Extension Development

-

The workspace provides templates for developing:

-
    -
  • Providers: Custom cloud provider implementations
  • -
  • Task Services: Infrastructure service components
  • -
  • Clusters: Complete deployment solutions
  • -
-

Templates are available in workspace/extensions/{type}/template/

-

Configuration Hierarchy

-

The workspace implements a sophisticated configuration cascade:

-
    -
  1. Workspace user configuration (workspace/config/{user}.toml)
  2. -
  3. Environment-specific defaults (workspace/config/{env}-defaults.toml)
  4. -
  5. Workspace defaults (workspace/config/dev-defaults.toml)
  6. -
  7. Core system defaults (config.defaults.toml)
  8. -
-

File Naming Conventions

-

Nushell Files (.nu)

-
    -
  • Commands: kebab-case - create-server.nu, validate-config.nu
  • -
  • Modules: snake_case - lib_provisioning, path_resolver
  • -
  • Scripts: kebab-case - workspace-health.nu, runtime-manager.nu
  • -
-

Configuration Files

-
    -
  • TOML: kebab-case.toml - config-defaults.toml, user-settings.toml
  • -
  • Environment: {env}-defaults.toml - dev-defaults.toml, prod-defaults.toml
  • -
  • Examples: *.toml.example - local-overrides.toml.example
  • -
-

Nickel Files (.ncl)

-
    -
  • Schemas: kebab-case.ncl - server-config.ncl, workflow-schema.ncl
  • -
  • Configuration: manifest.toml - Package metadata
  • -
  • Structure: Organized in schemas/ directories per extension
  • -
-

Build and Distribution

-
    -
  • Scripts: kebab-case.nu - compile-platform.nu, generate-distribution.nu
  • -
  • Makefiles: Makefile - Standard naming
  • -
  • Archives: {project}-{version}-{platform}-{variant}.{ext}
  • -
- -

Finding Components

-

Core System Entry Points:

-
# Main CLI (development version)
-/src/core/nulib/provisioning
-
-# Legacy CLI (production version)
-/core/nulib/provisioning
-
-# Workspace management
-/workspace/tools/workspace.nu
-
-

Build System:

-
# Main build system
-cd /src/tools && make help
-
-# Quick development build
-make dev-build
-
-# Complete distribution
-make all
-
-

Configuration Files:

-
# System defaults
-/config.defaults.toml
-
-# User configuration (workspace)
-/workspace/config/{user}.toml
-
-# Environment-specific
-/workspace/config/{env}-defaults.toml
-
-

Extension Development:

-
# Provider template
-/workspace/extensions/providers/template/
-
-# Task service template
-/workspace/extensions/taskservs/template/
-
-# Cluster template
-/workspace/extensions/clusters/template/
-
-

Common Workflows

-

1. Development Setup:

-
# Initialize workspace
-cd workspace/tools
-nu workspace.nu init --user-name $USER
-
-# Check health
-nu workspace.nu health --detailed
-
-

2. Building Distribution:

-
# Complete build
-cd src/tools
-make all
-
-# Platform-specific build
-make linux
-make macos
-make windows
-
-

3. Extension Development:

-
# Create new provider
-cp -r workspace/extensions/providers/template workspace/extensions/providers/my-provider
-
-# Test extension
-nu workspace/extensions/providers/my-provider/nulib/provider.nu test
-
-

Legacy Compatibility

-

Existing Commands Still Work:

-
# All existing commands preserved
-./core/nulib/provisioning server create
-./core/nulib/provisioning taskserv install kubernetes
-./core/nulib/provisioning cluster create buildkit
-
-

Configuration Migration:

-
    -
  • ENV variables still supported as fallbacks
  • -
  • New configuration system provides better defaults
  • -
  • Migration tools available in src/tools/migration/
  • -
-

Migration Path

-

For Users

-

No Changes Required:

-
    -
  • All existing commands continue to work
  • -
  • Configuration files remain compatible
  • -
  • Existing infrastructure deployments unaffected
  • -
-

Optional Enhancements:

-
    -
  • Migrate to new configuration system for better defaults
  • -
  • Use workspace for development environments
  • -
  • Leverage new build system for custom distributions
  • -
-

For Developers

-

Development Environment:

-
    -
  1. Initialize development workspace: nu workspace/tools/workspace.nu init
  2. -
  3. Use new build system: cd src/tools && make dev-build
  4. -
  5. Leverage extension templates for custom development
  6. -
-

Build System:

-
    -
  1. Use new Makefile for comprehensive build management
  2. -
  3. Leverage distribution tools for packaging
  4. -
  5. Use release management for version control
  6. -
-

Orchestrator Integration:

-
    -
  1. Start orchestrator for workflow management: cd src/orchestrator && ./scripts/start-orchestrator.nu
  2. -
  3. Use workflow APIs for complex operations
  4. -
  5. Leverage batch operations for efficiency
  6. -
-

Migration Tools

-

Available Migration Scripts:

-
    -
  • src/tools/migration/config-migration.nu - Configuration migration
  • -
  • src/tools/migration/workspace-setup.nu - Workspace initialization
  • -
  • src/tools/migration/path-resolver.nu - Path resolution migration
  • -
-

Validation Tools:

-
    -
  • src/tools/validation/system-health.nu - System health validation
  • -
  • src/tools/validation/compatibility-check.nu - Compatibility verification
  • -
  • src/tools/validation/migration-status.nu - Migration status tracking
  • -
-

Architecture Benefits

-

Development Efficiency

-
    -
  • Build System: Comprehensive 40+ target Makefile system
  • -
  • Workspace Isolation: Per-user development environments
  • -
  • Extension Framework: Template-based extension development
  • -
-

Production Reliability

-
    -
  • Backward Compatibility: All existing functionality preserved
  • -
  • Configuration Migration: Gradual migration from ENV to config-driven
  • -
  • Orchestrator Architecture: Hybrid Rust/Nushell for performance and flexibility
  • -
  • Workflow Management: Batch operations with rollback capabilities
  • -
-

Maintenance Benefits

-
    -
  • Clean Separation: Development tools separate from production code
  • -
  • Organized Structure: Logical grouping of related functionality
  • -
  • Documentation: Comprehensive documentation and examples
  • -
  • Testing Framework: Built-in testing and validation tools
  • -
-

This structure represents a significant evolution in the project’s organization while maintaining complete backward compatibility and providing -powerful new development capabilities.

-

CTRL-C Handling Implementation Notes

-

Overview

-

Implemented graceful CTRL-C handling for sudo password prompts during server creation/generation operations.

-

Problem Statement

-

When fix_local_hosts: true is set, the provisioning tool requires sudo access to -modify /etc/hosts and SSH config. When a user cancels the sudo password prompt (no -password, wrong password, timeout), the system would:

-
    -
  1. Exit with code 1 (sudo failed)
  2. -
  3. Propagate null values up the call stack
  4. -
  5. Show cryptic Nushell errors about pipeline failures
  6. -
  7. Leave the operation in an inconsistent state
  8. -
-

Important Unix Limitation: Pressing CTRL-C at the sudo password prompt sends SIGINT to the entire process group, interrupting Nushell before exit -code handling can occur. This cannot be caught and is expected Unix behavior.

-

Solution Architecture

-

Key Principle: Return Values, Not Exit Codes

-

Instead of using exit 130 which kills the entire process, we use return values -to signal cancellation and let each layer of the call stack handle it gracefully.

-

Three-Layer Approach

-
    -
  1. -

    Detection Layer (ssh.nu helper functions)

    -
      -
    • Detects sudo cancellation via exit code + stderr
    • -
    • Returns false instead of calling exit
    • -
    -
  2. -
  3. -

    Propagation Layer (ssh.nu core functions)

    -
      -
    • on_server_ssh(): Returns false on cancellation
    • -
    • server_ssh(): Uses reduce to propagate failures
    • -
    -
  4. -
  5. -

    Handling Layer (create.nu, generate.nu)

    -
      -
    • Checks return values
    • -
    • Displays user-friendly messages
    • -
    • Returns false to caller
    • -
    -
  6. -
-

Implementation Details

-

1. Helper Functions (ssh.nu:11-32)

-
def check_sudo_cached []: nothing -> bool {
-  let result = (do --ignore-errors { ^sudo -n true } | complete)
-  $result.exit_code == 0
-}
-
-def run_sudo_with_interrupt_check [
-  command: closure
-  operation_name: string
-]: nothing -> bool {
-  let result = (do --ignore-errors { do $command } | complete)
-  if $result.exit_code == 1 and ($result.stderr | str contains "password is required") {
-    print "\n⚠ Operation cancelled - sudo password required but not provided"
-    print "ℹ Run 'sudo -v' first to cache credentials, or run without --fix-local-hosts"
-    return false  # Signal cancellation
-  } else if $result.exit_code != 0 and $result.exit_code != 1 {
-    error make {msg: $"($operation_name) failed: ($result.stderr)"}
-  }
-  true
-}
-
-

Design Decision: Return bool instead of throwing error or calling exit. This allows the caller to decide how to handle cancellation.

-

2. Pre-emptive Warning (ssh.nu:155-160)

-
if $server.fix_local_hosts and not (check_sudo_cached) {
-  print "\n⚠ Sudo access required for --fix-local-hosts"
-  print "ℹ You will be prompted for your password, or press CTRL-C to cancel"
-  print "  Tip: Run 'sudo -v' beforehand to cache credentials\n"
-}
-
-

Design Decision: Warn users upfront so they’re not surprised by the password prompt.

-

3. CTRL-C Detection (ssh.nu:171-199)

-

All sudo commands wrapped with detection:

-
let result = (do --ignore-errors { ^sudo <command> } | complete)
-if $result.exit_code == 1 and ($result.stderr | str contains "password is required") {
-  print "\n⚠ Operation cancelled"
-  return false
-}
-
-

Design Decision: Use do --ignore-errors + complete to capture both exit code and stderr without throwing exceptions.

-

4. State Accumulation Pattern (ssh.nu:122-129)

-

Using Nushell’s reduce instead of mutable variables:

-
let all_succeeded = ($settings.data.servers | reduce -f true { |server, acc|
-  if $text_match == null or $server.hostname == $text_match {
-    let result = (on_server_ssh $settings $server $ip_type $request_from $run)
-    $acc and $result
-  } else {
-    $acc
-  }
-})
-
-

Design Decision: Nushell doesn’t allow mutable variable capture in closures. Use reduce for accumulating boolean state across iterations.

-

5. Caller Handling (create.nu:262-266, generate.nu:269-273)

-
let ssh_result = (on_server_ssh $settings $server "pub" "create" false)
-if not $ssh_result {
-  _print "\n✗ Server creation cancelled"
-  return false
-}
-
-

Design Decision: Check return value and provide context-specific message before returning.

-

Error Flow Diagram

-
User presses CTRL-C during password prompt
-    ↓
-sudo exits with code 1, stderr: "password is required"
-    ↓
-do --ignore-errors captures exit code & stderr
-    ↓
-Detection logic identifies cancellation
-    ↓
-Print user-friendly message
-    ↓
-Return false (not exit!)
-    ↓
-on_server_ssh returns false
-    ↓
-Caller (create.nu/generate.nu) checks return value
-    ↓
-Print "✗ Server creation cancelled"
-    ↓
-Return false to settings.nu
-    ↓
-settings.nu handles false gracefully (no append)
-    ↓
-Clean exit, no cryptic errors
-
-

Nushell Idioms Used

-

1. do --ignore-errors + complete

-

Captures both stdout, stderr, and exit code without throwing:

-
let result = (do --ignore-errors { ^sudo command } | complete)
-# result = { stdout: "...", stderr: "...", exit_code: 1 }
-
-

2. reduce for Accumulation

-

Instead of mutable variables in loops:

-
# ❌ BAD - mutable capture in closure
-mut all_succeeded = true
-$servers | each { |s|
-  $all_succeeded = false  # Error: capture of mutable variable
-}
-
-# ✅ GOOD - reduce with accumulator
-let all_succeeded = ($servers | reduce -f true { |s, acc|
-  $acc and (check_server $s)
-})
-
-

3. Early Returns for Error Handling

-
if not $condition {
-  print "Error message"
-  return false
-}
-# Continue with happy path
-
-

Testing Scenarios

-

Scenario 1: CTRL-C During First Sudo Command

-
provisioning -c server create
-# Password: [CTRL-C]
-
-# Expected Output:
-# ⚠ Operation cancelled - sudo password required but not provided
-# ℹ Run 'sudo -v' first to cache credentials
-# ✗ Server creation cancelled
-
-

Scenario 2: Pre-cached Credentials

-
sudo -v
-provisioning -c server create
-
-# Expected: No password prompt, smooth operation
-
-

Scenario 3: Wrong Password 3 Times

-
provisioning -c server create
-# Password: [wrong]
-# Password: [wrong]
-# Password: [wrong]
-
-# Expected: Same as CTRL-C (treated as cancellation)
+

Network Policy Test

+
# Verify Cilium network policies work
+kubectl exec -it <pod-name> -- curl  [http://test-nginx](http://test-nginx)
 
-

Scenario 4: Multiple Servers, Cancel on Second

-
# If creating multiple servers and CTRL-C on second:
-# - First server completes successfully
-# - Second server shows cancellation message
-# - Operation stops, doesn't proceed to third
-
-

Maintenance Notes

-

Adding New Sudo Commands

-

When adding new sudo commands to the codebase:

-
    -
  1. Wrap with do --ignore-errors + complete
  2. -
  3. Check for exit code 1 + “password is required”
  4. -
  5. Return false on cancellation
  6. -
  7. Let caller handle the false return value
  8. -
-

Example template:

-
let result = (do --ignore-errors { ^sudo new-command } | complete)
-if $result.exit_code == 1 and ($result.stderr | str contains "password is required") {
-  print "\n⚠ Operation cancelled - sudo password required"
-  return false
-}
-
-

Common Pitfalls

-
    -
  1. Don’t use exit: It kills the entire process
  2. -
  3. Don’t use mutable variables in closures: Use reduce instead
  4. -
  5. Don’t ignore return values: Always check and propagate
  6. -
  7. Don’t forget the pre-check warning: Users should know sudo is needed
  8. -
-

Future Improvements

-
    -
  1. Sudo Credential Manager: Optionally use a credential manager (keychain, etc.)
  2. -
  3. Sudo-less Mode: Alternative implementation that doesn’t require root
  4. -
  5. Timeout Handling: Detect when sudo times out waiting for password
  6. -
  7. Multiple Password Attempts: Distinguish between CTRL-C and wrong password
  8. -
-

References

- - -
    -
  • provisioning/core/nulib/servers/ssh.nu - Core implementation
  • -
  • provisioning/core/nulib/servers/create.nu - Calls on_server_ssh
  • -
  • provisioning/core/nulib/servers/generate.nu - Calls on_server_ssh
  • -
  • docs/troubleshooting/CTRL-C_SUDO_HANDLING.md - User-facing docs
  • -
  • docs/quick-reference/SUDO_PASSWORD_HANDLING.md - Quick reference
  • -
-

Changelog

-
    -
  • 2025-01-XX: Initial implementation with return values (v2)
  • -
  • 2025-01-XX: Fixed mutable variable capture with reduce pattern
  • -
  • 2025-01-XX: First attempt with exit 130 (reverted, caused process termination)
  • -
-

Metadata-Driven Authentication System - Implementation Guide

-

Status: ✅ Complete and Production-Ready -Version: 1.0.0 -Last Updated: 2025-12-10

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Architecture
  4. -
  5. Installation
  6. -
  7. Usage Guide
  8. -
  9. Migration Path
  10. -
  11. Developer Guide
  12. -
  13. Testing
  14. -
  15. Troubleshooting
  16. -
-

Overview

-

This guide describes the metadata-driven authentication system implemented over 5 weeks across 14 command handlers and 12 major systems. The system provides:

-
    -
  • Centralized Metadata: All command definitions in Nickel with runtime validation
  • -
  • Automatic Auth Checks: Pre-execution validation before handler logic
  • -
  • Performance Optimization: 40-100x faster through metadata caching
  • -
  • Flexible Deployment: Works with orchestrator, batch workflows, and direct CLI
  • -
-

Architecture

-

System Components

-
┌─────────────────────────────────────────────────────────────┐
-│                     User Command                             │
-└────────────────────────────────┬──────────────────────────────┘
-                                 │
-                    ┌────────────▼─────────────┐
-                    │    CLI Dispatcher       │
-                    │  (main_provisioning)    │
-                    └────────────┬─────────────┘
-                                 │
-                    ┌────────────▼─────────────┐
-                    │  Metadata Loading       │
-                    │  (cached via traits.nu) │
-                    └────────────┬─────────────┘
-                                 │
-                    ┌────────────▼─────────────────────┐
-                    │  Pre-Execution Validation       │
-                    │  - Auth checks                  │
-                    │  - Permission validation        │
-                    │  - Operation type mapping       │
-                    └────────────┬─────────────────────┘
-                                 │
-                    ┌────────────▼─────────────────────┐
-                    │  Command Handler Execution      │
-                    │  - infrastructure.nu            │
-                    │  - orchestration.nu             │
-                    │  - workspace.nu                 │
-                    └────────────┬─────────────────────┘
-                                 │
-                    ┌────────────▼─────────────┐
-                    │   Result/Response        │
-                    └─────────────────────────┘
-
-

Data Flow

-
    -
  1. User Command → CLI Dispatcher
  2. -
  3. Dispatcher → Load cached metadata (or parse Nickel)
  4. -
  5. Validate → Check auth, operation type, permissions
  6. -
  7. Execute → Call appropriate handler
  8. -
  9. Return → Result to user
  10. -
-

Metadata Caching

-
    -
  • Location: ~/.cache/provisioning/command_metadata.json
  • -
  • Format: Serialized JSON (pre-parsed for speed)
  • -
  • TTL: 1 hour (configurable via PROVISIONING_METADATA_TTL)
  • -
  • Invalidation: Automatic on main.ncl modification
  • -
  • Performance: 40-100x faster than Nickel parsing
  • -
-

Installation

-

Prerequisites

-
    -
  • Nushell 0.109.0+
  • -
  • Nickel 1.15.0+
  • -
  • SOPS 3.10.2 (for encrypted configs)
  • -
  • Age 1.2.1 (for encryption)
  • -
-

Installation Steps

-
# 1. Clone or update repository
-git clone https://github.com/your-org/project-provisioning.git
-cd project-provisioning
-
-# 2. Initialize workspace
-./provisioning/core/cli/provisioning workspace init
-
-# 3. Validate system
-./provisioning/core/cli/provisioning validate config
-
-# 4. Run system checks
-./provisioning/core/cli/provisioning health
-
-# 5. Run test suites
-nu tests/test-fase5-e2e.nu
-nu tests/test-security-audit-day20.nu
-nu tests/test-metadata-cache-benchmark.nu
-
-

Usage Guide

-

Basic Commands

-
# Initialize authentication
-provisioning login
-
-# Enroll in MFA
-provisioning mfa totp enroll
-
-# Create infrastructure
-provisioning server create --name web-01 --plan 1xCPU-2 GB
-
-# Deploy with orchestrator
-provisioning workflow submit workflows/deployment.ncl --orchestrated
-
-# Batch operations
-provisioning batch submit workflows/batch-deploy.ncl
-
-# Check without executing
-provisioning server create --name test --check
-
-

Authentication Flow

-
# 1. Login (required for production operations)
-$ provisioning login
-Username: alice@example.com
-Password: ****
-
-# 2. Optional: Setup MFA
-$ provisioning mfa totp enroll
-Scan QR code with authenticator app
-Verify code: 123456
-
-# 3. Use commands (auth checks happen automatically)
-$ provisioning server delete --name old-server --infra production
-Auth check: Check auth for production (delete operation)
-Are you sure? [yes/no] yes
-✓ Server deleted
-
-# 4. All destructive operations require auth
-$ provisioning taskserv delete postgres web-01
-Auth check: Check auth for destructive operation
-✓ Taskserv deleted
-
-

Check Mode (Bypass Auth for Testing)

-
# Dry-run without auth checks
-provisioning server create --name test --check
-
-# Output: Shows what would happen, no auth checks
-Dry-run mode - no changes will be made
-✓ Would create server: test
-✓ Would deploy taskservs: []
-
-

Non-Interactive CI/CD Mode

-
# Automated mode - skip confirmations
-provisioning server create --name web-01 --yes
-
-# Batch operations
-provisioning batch submit workflows/batch.ncl --yes --check
-
-# With environment variable
-PROVISIONING_NON_INTERACTIVE=1 provisioning server create --name web-02 --yes
-
-

Migration Path

-

Phase 1: From Old input to Metadata

-

Old Pattern (Before Fase 5):

-
# Hardcoded auth check
-let response = (input "Delete server? (yes/no): ")
-if $response != "yes" { exit 1 }
-
-# No metadata - auth unknown
-export def delete-server [name: string, --yes] {
-    if not $yes { ... manual confirmation ... }
-    # ... deletion logic ...
-}
-
-

New Pattern (After Fase 5):

-
# Metadata header
-# [command]
-# name = "server delete"
-# group = "infrastructure"
-# tags = ["server", "delete", "destructive"]
-# version = "1.0.0"
-
-# Automatic auth check from metadata
-export def delete-server [name: string, --yes] {
-    # Pre-execution check happens in dispatcher
-    # Auth enforcement via metadata
-    # Operation type: "delete" automatically detected
-    # ... deletion logic ...
-}
-
-

Phase 2: Adding Metadata Headers

-

For each script that was migrated:

-
    -
  1. Add metadata header after shebang:
  2. -
-
#!/usr/bin/env nu
-# [command]
-# name = "server create"
-# group = "infrastructure"
-# tags = ["server", "create", "interactive"]
-# version = "1.0.0"
-
-export def create-server [name: string] {
-    # Logic here
-}
-
-
    -
  1. Register in provisioning/schemas/main.ncl:
  2. -
-
let server_create = {
-    name = "server create",
-    domain = "infrastructure",
-    description = "Create a new server",
-    requirements = {
-        interactive = false,
-        requires_auth = true,
-        auth_type = "jwt",
-        side_effect_type = "create",
-        min_permission = "write",
-    },
-} in
-server_create
-
-
    -
  1. Handler integration (happens in dispatcher):
  2. -
-
# Dispatcher automatically:
-# 1. Loads metadata for "server create"
-# 2. Validates auth based on requirements
-# 3. Checks permission levels
-# 4. Calls handler if validation passes
-
-

Phase 3: Validating Migration

-
# Validate metadata headers
-nu utils/validate-metadata-headers.nu
-
-# Find scripts by tag
-nu utils/search-scripts.nu by-tag destructive
-
-# Find all scripts in group
-nu utils/search-scripts.nu by-group infrastructure
-
-# Find scripts with multiple tags
-nu utils/search-scripts.nu by-tags server delete
+

Part 7: State Management

+

View State

+
# Show current workspace state
+provisioning workspace info
 
-# List all migrated scripts
-nu utils/search-scripts.nu list
-
-

Developer Guide

-

Adding New Commands with Metadata

-

Step 1: Create metadata in main.ncl

-
let new_feature_command = {
-    name = "feature command",
-    domain = "infrastructure",
-    description = "My new feature",
-    requirements = {
-        interactive = false,
-        requires_auth = true,
-        auth_type = "jwt",
-        side_effect_type = "create",
-        min_permission = "write",
-    },
-} in
-new_feature_command
-
-

Step 2: Add metadata header to script

-
#!/usr/bin/env nu
-# [command]
-# name = "feature command"
-# group = "infrastructure"
-# tags = ["feature", "create"]
-# version = "1.0.0"
-
-export def feature-command [param: string] {
-    # Implementation
-}
-
-

Step 3: Implement handler function

-
# Handler registered in dispatcher
-export def handle-feature-command [
-    action: string
-    --flags
-]: nothing -> nothing {
-    # Dispatcher handles:
-    # 1. Metadata validation
-    # 2. Auth checks
-    # 3. Permission validation
-
-    # Your logic here
-}
-
-

Step 4: Test with check mode

-
# Dry-run without auth
-provisioning feature command --check
-
-# Full execution
-provisioning feature command --yes
-
-

Metadata Field Reference

-
- - - - - - - - - -
FieldTypeRequiredDescription
namestringYesCommand canonical name
domainstringYesCommand category (infrastructure, orchestration, etc.)
descriptionstringYesHuman-readable description
requires_authboolYesWhether auth is required
auth_typeenumYes“none”, “jwt”, “mfa”, “cedar”
side_effect_typeenumYes“none”, “create”, “update”, “delete”, “deploy”
min_permissionenumYes“read”, “write”, “admin”, “superadmin”
interactiveboolNoWhether command requires user input
slow_operationboolNoWhether operation takes >60 seconds
-
-

Standard Tags

-

Groups:

-
    -
  • infrastructure - Server, taskserv, cluster operations
  • -
  • orchestration - Workflow, batch operations
  • -
  • workspace - Workspace management
  • -
  • authentication - Auth, MFA, tokens
  • -
  • utilities - Helper commands
  • -
-

Operations:

-
    -
  • create, read, update, delete - CRUD operations
  • -
  • destructive - Irreversible operations
  • -
  • interactive - Requires user input
  • -
-

Performance:

-
    -
  • slow - Operation >60 seconds
  • -
  • optimizable - Candidate for optimization
  • -
-

Performance Optimization Patterns

-

Pattern 1: For Long Operations

-
# Use orchestrator for operations >2 seconds
-if (get-operation-duration "my-operation") > 2000 {
-    submit-to-orchestrator $operation
-    return "Operation submitted in background"
-}
-
-

Pattern 2: For Batch Operations

-
# Use batch workflows for multiple operations
-nu -c "
-use core/nulib/workflows/batch.nu *
-batch submit workflows/batch-deploy.ncl --parallel-limit 5
-"
-
-

Pattern 3: For Metadata Overhead

-
# Cache hit rate optimization
-# Current: 40-100x faster with warm cache
-# Target: >95% cache hit rate
-# Achieved: Metadata stays in cache for 1 hour (TTL)
-
-

Testing

-

Running Tests

-
# End-to-End Integration Tests
-nu tests/test-fase5-e2e.nu
-
-# Security Audit
-nu tests/test-security-audit-day20.nu
-
-# Performance Benchmarks
-nu tests/test-metadata-cache-benchmark.nu
-
-# Run all tests
-for test in tests/test-*.nu { nu $test }
-
-

Test Coverage

-
- - - -
Test SuiteCategoryCoverage
E2E TestsIntegration7 test groups, 40+ checks
Security AuditAuth5 audit categories, 100% pass
BenchmarksPerformance6 benchmark categories
-
-

Expected Results

-

✅ All tests pass -✅ No Nushell syntax violations -✅ Cache hit rate >95% -✅ Auth enforcement 100% -✅ Performance baselines met

-

Troubleshooting

-

Issue: Command not found

-

Solution: Ensure metadata is registered in main.ncl

-
# Check if command is in metadata
-grep "command_name" provisioning/schemas/main.ncl
-
-

Issue: Auth check failing

-

Solution: Verify user has required permission level

-
# Check current user permissions
-provisioning auth whoami
-
-# Check command requirements
-nu -c "
-use core/nulib/lib_provisioning/commands/traits.nu *
-get-command-metadata 'server create'
-"
-
-

Issue: Slow command execution

-

Solution: Check cache status

-
# Force cache reload
-rm ~/.cache/provisioning/command_metadata.json
-
-# Check cache hit rate
-nu tests/test-metadata-cache-benchmark.nu
-
-

Issue: Nushell syntax error

-

Solution: Run compliance check

-
# Validate Nushell compliance
-nu --ide-check 100 <file.nu>
-
-# Check for common issues
-grep "try {" <file.nu>  # Should be empty
-grep "let mut" <file.nu>  # Should be empty
-
-

Performance Characteristics

-

Baseline Metrics

-
- - - - -
OperationColdWarmImprovement
Metadata Load200 ms2-5 ms40-100x
Auth Check<5 ms<5 msSame
Command Dispatch<10 ms<10 msSame
Total Command~210 ms~10 ms21x
-
-

Real-World Impact

-
Scenario: 20 sequential commands
-  Without cache: 20 × 200 ms = 4 seconds
-  With cache:    1 × 200 ms + 19 × 5 ms = 295 ms
-  Speedup:       ~13.5x faster
-
-

Next Steps

-
    -
  1. Deploy: Use installer to deploy to production
  2. -
  3. Monitor: Watch cache hit rates (target >95%)
  4. -
  5. Extend: Add new commands following migration pattern
  6. -
  7. Optimize: Use profiling to identify slow operations
  8. -
  9. Maintain: Run validation scripts regularly
  10. -
-
-

For Support: See docs/troubleshooting-guide.md -For Architecture: See docs/architecture/ -For User Guide: See docs/user/AUTHENTICATION_LAYER_GUIDE.md

-

KMS Simplification Migration Guide

-

Version: 0.2.0 -Date: 2025-10-08 -Status: Active

-

Overview

-

The KMS service has been simplified from supporting 4 backends (Vault, AWS KMS, Age, Cosmian) to supporting only 2 backends:

-
    -
  • Age: Development and local testing
  • -
  • Cosmian KMS: Production deployments
  • -
-

This simplification reduces complexity, removes unnecessary cloud provider dependencies, and provides a clearer separation between development and -production use cases.

-

What Changed

-

Removed

-
    -
  • ❌ HashiCorp Vault backend (src/vault/)
  • -
  • ❌ AWS KMS backend (src/aws/)
  • -
  • ❌ AWS SDK dependencies (aws-sdk-kms, aws-config, aws-credential-types)
  • -
  • ❌ Envelope encryption helpers (AWS-specific)
  • -
  • ❌ Complex multi-backend configuration
  • -
-

Added

-
    -
  • ✅ Age backend for development (src/age/)
  • -
  • ✅ Cosmian KMS backend for production (src/cosmian/)
  • -
  • ✅ Simplified configuration (provisioning/config/kms.toml)
  • -
  • ✅ Clear dev/prod separation
  • -
  • ✅ Better error messages
  • -
-

Modified

-
    -
  • 🔄 KmsBackendConfig enum (now only Age and Cosmian)
  • -
  • 🔄 KmsError enum (removed Vault/AWS-specific errors)
  • -
  • 🔄 Service initialization logic
  • -
  • 🔄 README and documentation
  • -
  • 🔄 Cargo.toml dependencies
  • -
-

Why This Change

-

Problems with Previous Approach

-
    -
  1. Unnecessary Complexity: 4 backends for simple use cases
  2. -
  3. Cloud Lock-in: AWS KMS dependency limited flexibility
  4. -
  5. Operational Overhead: Vault requires server setup even for dev
  6. -
  7. Dependency Bloat: AWS SDK adds significant compile time
  8. -
  9. Unclear Use Cases: When to use which backend?
  10. -
-

Benefits of Simplified Approach

-
    -
  1. Clear Separation: Age = dev, Cosmian = prod
  2. -
  3. Faster Compilation: Removed AWS SDK (saves ~30 s)
  4. -
  5. Offline Development: Age works without network
  6. -
  7. Enterprise Security: Cosmian provides confidential computing
  8. -
  9. Easier Maintenance: 2 backends instead of 4
  10. -
-

Migration Steps

-

For Development Environments

-

If you were using Vault or AWS KMS for development:

-

Step 1: Install Age

-
# macOS
-brew install age
-
-# Ubuntu/Debian
-apt install age
-
-# From source
-go install filippo.io/age/cmd/...@latest
-
-

Step 2: Generate Age Keys

-
mkdir -p ~/.config/provisioning/age
-age-keygen -o ~/.config/provisioning/age/private_key.txt
-age-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisioning/age/public_key.txt
-
-

Step 3: Update Configuration

-

Replace your old Vault/AWS config:

-

Old (Vault):

-
[kms]
-type = "vault"
-address = "http://localhost:8200"
-token = "${VAULT_TOKEN}"
-mount_point = "transit"
-
-

New (Age):

-
[kms]
-environment = "dev"
-
-[kms.age]
-public_key_path = "~/.config/provisioning/age/public_key.txt"
-private_key_path = "~/.config/provisioning/age/private_key.txt"
-
-

Step 4: Re-encrypt Development Secrets

-
# Export old secrets (if using Vault)
-vault kv get -format=json secret/dev > dev-secrets.json
-
-# Encrypt with Age
-cat dev-secrets.json | age -r $(cat ~/.config/provisioning/age/public_key.txt) > dev-secrets.age
-
-# Test decryption
-age -d -i ~/.config/provisioning/age/private_key.txt dev-secrets.age
-
-

For Production Environments

-

If you were using Vault or AWS KMS for production:

-

Step 1: Set Up Cosmian KMS

-

Choose one of these options:

-

Option A: Cosmian Cloud (Managed)

-
# Sign up at https://cosmian.com
-# Get API credentials
-export COSMIAN_KMS_URL=https://kms.cosmian.cloud
-export COSMIAN_API_KEY=your-api-key
-
-

Option B: Self-Hosted Cosmian KMS

-
# Deploy Cosmian KMS server
-# See: https://docs.cosmian.com/kms/deployment/
-
-# Configure endpoint
-export COSMIAN_KMS_URL=https://kms.example.com
-export COSMIAN_API_KEY=your-api-key
-
-

Step 2: Create Master Key in Cosmian

-
# Using Cosmian CLI
-cosmian-kms create-key \
-  --algorithm AES \
-  --key-length 256 \
-  --key-id provisioning-master-key
-
-# Or via API
-curl -X POST $COSMIAN_KMS_URL/api/v1/keys \
-  -H "X-API-Key: $COSMIAN_API_KEY" \
-  -H "Content-Type: application/json" \
-  -d '{
-    "algorithm": "AES",
-    "keyLength": 256,
-    "keyId": "provisioning-master-key"
-  }'
-
-

Step 3: Migrate Production Secrets

-

From Vault to Cosmian:

-
# Export secrets from Vault
-vault kv get -format=json secret/prod > prod-secrets.json
-
-# Import to Cosmian
-# (Use temporary Age encryption for transfer)
-cat prod-secrets.json | \
-  age -r $(cat ~/.config/provisioning/age/public_key.txt) | \
-  base64 > prod-secrets.enc
-
-# On production server with Cosmian
-cat prod-secrets.enc | \
-  base64 -d | \
-  age -d -i ~/.config/provisioning/age/private_key.txt | \
-  # Re-encrypt with Cosmian
-  curl -X POST $COSMIAN_KMS_URL/api/v1/encrypt \
-    -H "X-API-Key: $COSMIAN_API_KEY" \
-    -d @-
-
-

From AWS KMS to Cosmian:

-
# Decrypt with AWS KMS
-aws kms decrypt \
-  --ciphertext-blob fileb://encrypted-data \
-  --output text \
-  --query Plaintext | \
-  base64 -d > plaintext-data
-
-# Encrypt with Cosmian
-curl -X POST $COSMIAN_KMS_URL/api/v1/encrypt \
-  -H "X-API-Key: $COSMIAN_API_KEY" \
-  -H "Content-Type: application/json" \
-  -d "{\"keyId\":\"provisioning-master-key\",\"data\":\"$(base64 plaintext-data)\"}"
-
-

Step 4: Update Production Configuration

-

Old (AWS KMS):

-
[kms]
-type = "aws-kms"
-region = "us-east-1"
-key_id = "arn:aws:kms:us-east-1:123456789012:key/..."
-
-

New (Cosmian):

-
[kms]
-environment = "prod"
-
-[kms.cosmian]
-server_url = "${COSMIAN_KMS_URL}"
-api_key = "${COSMIAN_API_KEY}"
-default_key_id = "provisioning-master-key"
-tls_verify = true
-use_confidential_computing = false  # Enable if using SGX/SEV
-
-

Step 5: Test Production Setup

-
# Set environment
-export PROVISIONING_ENV=prod
-export COSMIAN_KMS_URL=https://kms.example.com
-export COSMIAN_API_KEY=your-api-key
-
-# Start KMS service
-cargo run --bin kms-service
-
-# Test encryption
-curl -X POST http://localhost:8082/api/v1/kms/encrypt \
-  -H "Content-Type: application/json" \
-  -d '{"plaintext":"SGVsbG8=","context":"env=prod"}'
-
-# Test decryption
-curl -X POST http://localhost:8082/api/v1/kms/decrypt \
-  -H "Content-Type: application/json" \
-  -d '{"ciphertext":"...","context":"env=prod"}'
-
-

Configuration Comparison

-

Before (4 Backends)

-
# Development could use any backend
-[kms]
-type = "vault"  # or "aws-kms"
-address = "http://localhost:8200"
-token = "${VAULT_TOKEN}"
-
-# Production used Vault or AWS
-[kms]
-type = "aws-kms"
-region = "us-east-1"
-key_id = "arn:aws:kms:..."
-
-

After (2 Backends)

-
# Clear environment-based selection
-[kms]
-dev_backend = "age"
-prod_backend = "cosmian"
-environment = "${PROVISIONING_ENV:-dev}"
-
-# Age for development
-[kms.age]
-public_key_path = "~/.config/provisioning/age/public_key.txt"
-private_key_path = "~/.config/provisioning/age/private_key.txt"
-
-# Cosmian for production
-[kms.cosmian]
-server_url = "${COSMIAN_KMS_URL}"
-api_key = "${COSMIAN_API_KEY}"
-default_key_id = "provisioning-master-key"
-tls_verify = true
-
-

Breaking Changes

-

API Changes

-

Removed Functions

-
    -
  • generate_data_key() - Now only available with Cosmian backend
  • -
  • envelope_encrypt() - AWS-specific, removed
  • -
  • envelope_decrypt() - AWS-specific, removed
  • -
  • rotate_key() - Now handled server-side by Cosmian
  • -
-

Changed Error Types

-

Before:

-
KmsError::VaultError(String)
-KmsError::AwsKmsError(String)
-

After:

-
KmsError::AgeError(String)
-KmsError::CosmianError(String)
-

Updated Configuration Enum

-

Before:

-
enum KmsBackendConfig {
-    Vault { address, token, mount_point, ... },
-    AwsKms { region, key_id, assume_role },
-}
-

After:

-
enum KmsBackendConfig {
-    Age { public_key_path, private_key_path },
-    Cosmian { server_url, api_key, default_key_id, tls_verify },
-}
-

Code Migration

-

Rust Code

-

Before (AWS KMS):

-
use kms_service::{KmsService, KmsBackendConfig};
-
-let config = KmsBackendConfig::AwsKms {
-    region: "us-east-1".to_string(),
-    key_id: "arn:aws:kms:...".to_string(),
-    assume_role: None,
-};
-
-let kms = KmsService::new(config).await?;
-

After (Cosmian):

-
use kms_service::{KmsService, KmsBackendConfig};
-
-let config = KmsBackendConfig::Cosmian {
-    server_url: env::var("COSMIAN_KMS_URL")?,
-    api_key: env::var("COSMIAN_API_KEY")?,
-    default_key_id: "provisioning-master-key".to_string(),
-    tls_verify: true,
-};
-
-let kms = KmsService::new(config).await?;
-

Nushell Code

-

Before (Vault):

-
# Set Vault environment
-$env.VAULT_ADDR = "http://localhost:8200"
-$env.VAULT_TOKEN = "root"
-
-# Use KMS
-kms encrypt "secret-data"
-
-

After (Age for dev):

-
# Set environment
-$env.PROVISIONING_ENV = "dev"
-
-# Age keys automatically loaded from config
-kms encrypt "secret-data"
-
-

Rollback Plan

-

If you need to rollback to Vault/AWS KMS:

-
# Checkout previous version
-git checkout tags/v0.1.0
-
-# Rebuild with old dependencies
-cd provisioning/platform/kms-service
-cargo clean
-cargo build --release
-
-# Restore old configuration
-cp provisioning/config/kms.toml.backup provisioning/config/kms.toml
-
-

Testing the Migration

-

Development Testing

-
# 1. Generate Age keys
-age-keygen -o /tmp/test_private.txt
-age-keygen -y /tmp/test_private.txt > /tmp/test_public.txt
-
-# 2. Test encryption
-echo "test-data" | age -r $(cat /tmp/test_public.txt) > /tmp/encrypted
-
-# 3. Test decryption
-age -d -i /tmp/test_private.txt /tmp/encrypted
-
-# 4. Start KMS service with test keys
-export PROVISIONING_ENV=dev
-# Update config to point to /tmp keys
-cargo run --bin kms-service
-
-

Production Testing

-
# 1. Set up test Cosmian instance
-export COSMIAN_KMS_URL=https://kms-staging.example.com
-export COSMIAN_API_KEY=test-api-key
-
-# 2. Create test key
-cosmian-kms create-key --key-id test-key --algorithm AES --key-length 256
-
-# 3. Test encryption
-curl -X POST $COSMIAN_KMS_URL/api/v1/encrypt \
-  -H "X-API-Key: $COSMIAN_API_KEY" \
-  -d '{"keyId":"test-key","data":"dGVzdA=="}'
-
-# 4. Start KMS service
-export PROVISIONING_ENV=prod
-cargo run --bin kms-service
-
-

Troubleshooting

-

Age Keys Not Found

-
# Check keys exist
-ls -la ~/.config/provisioning/age/
-
-# Regenerate if missing
-age-keygen -o ~/.config/provisioning/age/private_key.txt
-age-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisioning/age/public_key.txt
-
-

Cosmian Connection Failed

-
# Check network connectivity
-curl -v $COSMIAN_KMS_URL/api/v1/health
-
-# Verify API key
-curl $COSMIAN_KMS_URL/api/v1/version \
-  -H "X-API-Key: $COSMIAN_API_KEY"
-
-# Check TLS certificate
-openssl s_client -connect kms.example.com:443
-
-

Compilation Errors

-
# Clean and rebuild
-cd provisioning/platform/kms-service
-cargo clean
-cargo update
-cargo build --release
-
-

Support

- -

Timeline

-
    -
  • 2025-10-08: Migration guide published
  • -
  • 2025-10-15: Deprecation notices for Vault/AWS
  • -
  • 2025-11-01: Old backends removed from codebase
  • -
  • 2025-11-15: Migration complete, old configs unsupported
  • -
-

FAQs

-

Q: Can I still use Vault if I really need to? -A: No, Vault support has been removed. Use Age for dev or Cosmian for prod.

-

Q: What about AWS KMS for existing deployments? -A: Migrate to Cosmian KMS. The API is similar, and migration tools are provided.

-

Q: Is Age secure enough for production? -A: No. Age is designed for development only. Use Cosmian KMS for production.

-

Q: Does Cosmian support confidential computing? -A: Yes, Cosmian KMS supports SGX and SEV for confidential computing workloads.

-

Q: How much does Cosmian cost? -A: Cosmian offers both cloud and self-hosted options. Contact Cosmian for pricing.

-

Q: Can I use my own KMS backend? -A: Not currently supported. Only Age and Cosmian are available.

-

Checklist

-

Use this checklist to track your migration:

-

Development Migration

-
    -
  • -Install Age (brew install age or equivalent)
  • -
  • -Generate Age keys (age-keygen)
  • -
  • -Update provisioning/config/kms.toml to use Age backend
  • -
  • -Export secrets from Vault/AWS (if applicable)
  • -
  • -Re-encrypt secrets with Age
  • -
  • -Test KMS service startup
  • -
  • -Test encrypt/decrypt operations
  • -
  • -Update CI/CD pipelines (if applicable)
  • -
  • -Update documentation
  • -
-

Production Migration

-
    -
  • -Set up Cosmian KMS server (cloud or self-hosted)
  • -
  • -Create master key in Cosmian
  • -
  • -Export production secrets from Vault/AWS
  • -
  • -Re-encrypt secrets with Cosmian
  • -
  • -Update provisioning/config/kms.toml to use Cosmian backend
  • -
  • -Set environment variables (COSMIAN_KMS_URL, COSMIAN_API_KEY)
  • -
  • -Test KMS service startup in staging
  • -
  • -Test encrypt/decrypt operations in staging
  • -
  • -Load test Cosmian integration
  • -
  • -Update production deployment configs
  • -
  • -Deploy to production
  • -
  • -Verify all secrets accessible
  • -
  • -Decommission old KMS infrastructure
  • -
-

Conclusion

-

The KMS simplification reduces complexity while providing better separation between development and production use cases. Age offers a fast, offline -solution for development, while Cosmian KMS provides enterprise-grade security for production deployments.

-

For questions or issues, please refer to the documentation or open an issue.

-

Provisioning Platform Glossary

-

Last Updated: 2025-10-10 -Version: 1.0.0

-

This glossary defines key terminology used throughout the Provisioning Platform documentation. Terms are listed alphabetically with definitions, usage -context, and cross-references to related documentation.

-
-

A

-

ADR (Architecture Decision Record)

-

Definition: Documentation of significant architectural decisions, including context, decision, and consequences.

-

Where Used:

-
    -
  • Architecture planning and review
  • -
  • Technical decision-making process
  • -
  • System design documentation
  • -
-

Related Concepts: Architecture, Design Patterns, Technical Debt

-

Examples:

-
    -
  • ADR-001: Project Structure
  • -
  • ADR-006: CLI Refactoring
  • -
  • ADR-009: Complete Security System
  • -
-

See Also: Architecture Documentation

-
-

Agent

-

Definition: A specialized component that performs a specific task in the system orchestration (for example, autonomous execution units in the -orchestrator).

-

Where Used:

-
    -
  • Task orchestration
  • -
  • Workflow management
  • -
  • Parallel execution patterns
  • -
-

Related Concepts: Orchestrator, Workflow, Task

-

See Also: Orchestrator Architecture

-
- -

Definition: An internal document link to a specific section within the same or different markdown file using the # symbol.

-

Where Used:

-
    -
  • Cross-referencing documentation sections
  • -
  • Table of contents generation
  • -
  • Navigation within long documents
  • -
-

Related Concepts: Internal Link, Cross-Reference, Documentation

-

Examples:

-
    -
  • [See Installation](#installation) - Same document
  • -
  • [Configuration Guide](config.md#setup) - Different document
  • -
-
-

API Gateway

-

Definition: Platform service that provides unified REST API access to provisioning operations.

-

Where Used:

-
    -
  • External system integration
  • -
  • Web Control Center backend
  • -
  • MCP server communication
  • -
-

Related Concepts: REST API, Platform Service, Orchestrator

-

Location: provisioning/platform/api-gateway/

-

See Also: REST API Documentation

-
-

Auth (Authentication)

-

Definition: The process of verifying user identity using JWT tokens, MFA, and secure session management.

-

Where Used:

-
    -
  • User login flows
  • -
  • API access control
  • -
  • CLI session management
  • -
-

Related Concepts: Authorization, JWT, MFA, Security

-

See Also:

-
    -
  • Authentication Layer Guide
  • -
  • Auth Quick Reference
  • -
-
-

Authorization

-

Definition: The process of determining user permissions using Cedar policy language.

-

Where Used:

-
    -
  • Access control decisions
  • -
  • Resource permission checks
  • -
  • Multi-tenant security
  • -
-

Related Concepts: Auth, Cedar, Policies, RBAC

-

See Also: Cedar Authorization Implementation

-
-

B

-

Batch Operation

-

Definition: A collection of related infrastructure operations executed as a single workflow unit.

-

Where Used:

-
    -
  • Multi-server deployments
  • -
  • Cluster creation
  • -
  • Bulk taskserv installation
  • -
-

Related Concepts: Workflow, Operation, Orchestrator

-

Commands:

-
provisioning batch submit workflow.ncl
-provisioning batch list
-provisioning batch status <id>
-
-

See Also: Batch Workflow System

-
-

Break-Glass

-

Definition: Emergency access mechanism requiring multi-party approval for critical operations.

-

Where Used:

-
    -
  • Emergency system access
  • -
  • Incident response
  • -
  • Security override scenarios
  • -
-

Related Concepts: Security, Compliance, Audit

-

Commands:

-
provisioning break-glass request "reason"
-provisioning break-glass approve <id>
-
-

See Also: Break-Glass Training Guide

-
-

C

-

Cedar

-

Definition: Amazon’s policy language used for fine-grained authorization decisions.

-

Where Used:

-
    -
  • Authorization policies
  • -
  • Access control rules
  • -
  • Resource permissions
  • -
-

Related Concepts: Authorization, Policies, Security

-

See Also: Cedar Authorization Implementation

-
-

Checkpoint

-

Definition: A saved state of a workflow allowing resume from point of failure.

-

Where Used:

-
    -
  • Workflow recovery
  • -
  • Long-running operations
  • -
  • Batch processing
  • -
-

Related Concepts: Workflow, State Management, Recovery

-

See Also: Batch Workflow System

-
-

CLI (Command-Line Interface)

-

Definition: The provisioning command-line tool providing access to all platform operations.

-

Where Used:

-
    -
  • Daily operations
  • -
  • Script automation
  • -
  • CI/CD pipelines
  • -
-

Related Concepts: Command, Shortcut, Module

-

Location: provisioning/core/cli/provisioning

-

Examples:

-
provisioning server create
-provisioning taskserv install kubernetes
-provisioning workspace switch prod
-
-

See Also:

- -
-

Cluster

-

Definition: A complete, pre-configured deployment of multiple servers and taskservs working together.

-

Where Used:

-
    -
  • Kubernetes deployments
  • -
  • Database clusters
  • -
  • Complete infrastructure stacks
  • -
-

Related Concepts: Infrastructure, Server, Taskserv

-

Location: provisioning/extensions/clusters/{name}/

-

Commands:

-
provisioning cluster create <name>
-provisioning cluster list
-provisioning cluster delete <name>
-
-

See Also: Infrastructure Management

-
-

Compliance

-

Definition: System capabilities ensuring adherence to regulatory requirements (GDPR, SOC2, ISO 27001).

-

Where Used:

-
    -
  • Audit logging
  • -
  • Data retention policies
  • -
  • Incident response
  • -
-

Related Concepts: Audit, Security, GDPR

-

See Also: Compliance Implementation Summary

-
-

Config (Configuration)

-

Definition: System settings stored in TOML files with hierarchical loading and variable interpolation.

-

Where Used:

-
    -
  • System initialization
  • -
  • User preferences
  • -
  • Environment-specific settings
  • -
-

Related Concepts: Settings, Environment, Workspace

-

Files:

-
    -
  • provisioning/config/config.defaults.toml - System defaults
  • -
  • workspace/config/local-overrides.toml - User settings
  • -
-

See Also: Configuration Guide

-
-

Control Center

-

Definition: Web-based UI for managing provisioning operations built with Ratatui/Crossterm.

-

Where Used:

-
    -
  • Visual infrastructure management
  • -
  • Real-time monitoring
  • -
  • Guided workflows
  • -
-

Related Concepts: UI, Platform Service, Orchestrator

-

Location: provisioning/platform/control-center/

-

See Also: Platform Services

-
-

CoreDNS

-

Definition: DNS server taskserv providing service discovery and DNS management.

-

Where Used:

-
    -
  • Kubernetes DNS
  • -
  • Service discovery
  • -
  • Internal DNS resolution
  • -
-

Related Concepts: Taskserv, Kubernetes, Networking

-

See Also:

-
    -
  • CoreDNS Guide
  • -
  • CoreDNS Quick Reference
  • -
-
-

Cross-Reference

-

Definition: Links between related documentation sections or concepts.

-

Where Used:

-
    -
  • Documentation navigation
  • -
  • Related topic discovery
  • -
  • Learning path guidance
  • -
-

Related Concepts: Documentation, Navigation, See Also

-

Examples: “See Also” sections at the end of documentation pages

-
-

D

-

Dependency

-

Definition: A requirement that must be satisfied before installing or running a component.

-

Where Used:

-
    -
  • Taskserv installation order
  • -
  • Version compatibility checks
  • -
  • Cluster deployment sequencing
  • -
-

Related Concepts: Version, Taskserv, Workflow

-

Schema: provisioning/schemas/dependencies.ncl

-

See Also: Nickel Dependency Patterns

-
-

Diagnostics

-

Definition: System health checking and troubleshooting assistance.

-

Where Used:

-
    -
  • System status verification
  • -
  • Problem identification
  • -
  • Guided troubleshooting
  • -
-

Related Concepts: Health Check, Monitoring, Troubleshooting

-

Commands:

-
provisioning status
-provisioning diagnostics run
-
-
-

Dynamic Secrets

-

Definition: Temporary credentials generated on-demand with automatic expiration.

-

Where Used:

-
    -
  • AWS STS tokens
  • -
  • SSH temporary keys
  • -
  • Database credentials
  • -
-

Related Concepts: Security, KMS, Secrets Management

-

See Also:

-
    -
  • Dynamic Secrets Implementation
  • -
  • Dynamic Secrets Quick Reference
  • -
-
-

E

-

Environment

-

Definition: A deployment context (dev, test, prod) with specific configuration overrides.

-

Where Used:

-
    -
  • Configuration loading
  • -
  • Resource isolation
  • -
  • Deployment targeting
  • -
-

Related Concepts: Config, Workspace, Infrastructure

-

Config Files: config.{dev,test,prod}.toml

-

Usage:

-
PROVISIONING_ENV=prod provisioning server list
-
-
-

Extension

-

Definition: A pluggable component adding functionality (provider, taskserv, cluster, or workflow).

-

Where Used:

-
    -
  • Custom cloud providers
  • -
  • Third-party taskservs
  • -
  • Custom deployment patterns
  • -
-

Related Concepts: Provider, Taskserv, Cluster, Workflow

-

Location: provisioning/extensions/{type}/{name}/

-

See Also: Extension Development

-
-

F

-

Feature

-

Definition: A major system capability providing key platform functionality.

-

Where Used:

-
    -
  • Architecture documentation
  • -
  • Feature planning
  • -
  • System capabilities
  • -
-

Related Concepts: ADR, Architecture, System

-

Examples:

-
    -
  • Batch Workflow System
  • -
  • Orchestrator Architecture
  • -
  • CLI Architecture
  • -
  • Configuration System
  • -
-

See Also: Architecture Overview

-
-

G

-

GDPR (General Data Protection Regulation)

-

Definition: EU data protection regulation compliance features in the platform.

-

Where Used:

-
    -
  • Data export requests
  • -
  • Right to erasure
  • -
  • Audit compliance
  • -
-

Related Concepts: Compliance, Audit, Security

-

Commands:

-
provisioning compliance gdpr export <user>
-provisioning compliance gdpr delete <user>
-
-

See Also: Compliance Implementation

-
-

Glossary

-

Definition: This document - a comprehensive terminology reference for the platform.

-

Where Used:

-
    -
  • Learning the platform
  • -
  • Understanding documentation
  • -
  • Resolving terminology questions
  • -
-

Related Concepts: Documentation, Reference, Cross-Reference

-
-

Guide

-

Definition: Step-by-step walkthrough documentation for common workflows.

-

Where Used:

-
    -
  • Onboarding new users
  • -
  • Learning workflows
  • -
  • Reference implementation
  • -
-

Related Concepts: Documentation, Workflow, Tutorial

-

Commands:

-
provisioning guide from-scratch
-provisioning guide update
-provisioning guide customize
-
-

See Also: Guides

-
-

H

-

Health Check

-

Definition: Automated verification that a component is running correctly.

-

Where Used:

-
    -
  • Taskserv validation
  • -
  • System monitoring
  • -
  • Dependency verification
  • -
-

Related Concepts: Diagnostics, Monitoring, Status

-

Example:

-
health_check = {
-    endpoint = "http://localhost:6443/healthz"
-    timeout = 30
-    interval = 10
-}
-
-
-

Hybrid Architecture

-

Definition: System design combining Rust orchestrator with Nushell business logic.

-

Where Used:

-
    -
  • Core platform architecture
  • -
  • Performance optimization
  • -
  • Call stack management
  • -
-

Related Concepts: Orchestrator, Architecture, Design

-

See Also:

- -
-

I

-

Infrastructure

-

Definition: A named collection of servers, configurations, and deployments managed as a unit.

-

Where Used:

-
    -
  • Environment isolation
  • -
  • Resource organization
  • -
  • Deployment targeting
  • -
-

Related Concepts: Workspace, Server, Environment

-

Location: workspace/infra/{name}/

-

Commands:

-
provisioning infra list
-provisioning generate infra --new <name>
-
-

See Also: Infrastructure Management

-
-

Integration

-

Definition: Connection between platform components or external systems.

-

Where Used:

-
    -
  • API integration
  • -
  • CI/CD pipelines
  • -
  • External tool connectivity
  • -
-

Related Concepts: API, Extension, Platform

-

See Also:

-
    -
  • Integration Patterns
  • -
  • Integration Examples
  • -
-
- -

Definition: A markdown link to another documentation file or section within the platform docs.

-

Where Used:

-
    -
  • Cross-referencing documentation
  • -
  • Navigation between topics
  • -
  • Related content discovery
  • -
-

Related Concepts: Anchor Link, Cross-Reference, Documentation

-

Examples:

-
    -
  • [See Configuration](configuration.md)
  • -
  • [Architecture Overview](../architecture/README.md)
  • -
-
-

J

-

JWT (JSON Web Token)

-

Definition: Token-based authentication mechanism using RS256 signatures.

-

Where Used:

-
    -
  • User authentication
  • -
  • API authorization
  • -
  • Session management
  • -
-

Related Concepts: Auth, Security, Token

-

See Also: JWT Auth Implementation

-
-

K

-

Nickel (Nickel Configuration Language)

-

Definition: Declarative configuration language with type safety and lazy evaluation for infrastructure definitions.

-

Where Used:

-
    -
  • Infrastructure schemas
  • -
  • Workflow definitions
  • -
  • Configuration validation
  • -
-

Related Concepts: Schema, Configuration, Validation

-

Version: 1.15.0+

-

Location: provisioning/schemas/*.ncl

-

See Also: Nickel Quick Reference

-
-

KMS (Key Management Service)

-

Definition: Encryption key management system supporting multiple backends (RustyVault, Age, AWS, Vault).

-

Where Used:

-
    -
  • Configuration encryption
  • -
  • Secret management
  • -
  • Data protection
  • -
-

Related Concepts: Security, Encryption, Secrets

-

See Also: RustyVault KMS Guide

-
-

Kubernetes

-

Definition: Container orchestration platform available as a taskserv.

-

Where Used:

-
    -
  • Container deployments
  • -
  • Cluster management
  • -
  • Production workloads
  • -
-

Related Concepts: Taskserv, Cluster, Container

-

Commands:

-
provisioning taskserv create kubernetes
-provisioning test quick kubernetes
-
-
-

L

-

Layer

-

Definition: A level in the configuration hierarchy (Core → Workspace → Infrastructure).

-

Where Used:

-
    -
  • Configuration inheritance
  • -
  • Customization patterns
  • -
  • Settings override
  • -
-

Related Concepts: Config, Workspace, Infrastructure

-

See Also: Configuration Guide

-
-

M

-

MCP (Model Context Protocol)

-

Definition: AI-powered server providing intelligent configuration assistance.

-

Where Used:

-
    -
  • Configuration validation
  • -
  • Troubleshooting guidance
  • -
  • Documentation search
  • -
-

Related Concepts: Platform Service, AI, Guidance

-

Location: provisioning/platform/mcp-server/

-

See Also: Platform Services

-
-

MFA (Multi-Factor Authentication)

-

Definition: Additional authentication layer using TOTP or WebAuthn/FIDO2.

-

Where Used:

-
    -
  • Enhanced security
  • -
  • Compliance requirements
  • -
  • Production access
  • -
-

Related Concepts: Auth, Security, TOTP, WebAuthn

-

Commands:

-
provisioning mfa totp enroll
-provisioning mfa webauthn enroll
-provisioning mfa verify <code>
-
-

See Also: MFA Implementation Summary

-
-

Migration

-

Definition: Process of updating existing infrastructure or moving between system versions.

-

Where Used:

-
    -
  • System upgrades
  • -
  • Configuration changes
  • -
  • Infrastructure evolution
  • -
-

Related Concepts: Update, Upgrade, Version

-

See Also: Migration Guide

-
-

Module

-

Definition: A reusable component (provider, taskserv, cluster) loaded into a workspace.

-

Where Used:

-
    -
  • Extension management
  • -
  • Workspace customization
  • -
  • Component distribution
  • -
-

Related Concepts: Extension, Workspace, Package

-

Commands:

-
provisioning module discover provider
-provisioning module load provider <ws> <name>
-provisioning module list taskserv
-
-

See Also: Module System

-
-

N

-

Nushell

-

Definition: Primary shell and scripting language (v0.107.1) used throughout the platform.

-

Where Used:

-
    -
  • CLI implementation
  • -
  • Automation scripts
  • -
  • Business logic
  • -
-

Related Concepts: CLI, Script, Automation

-

Version: 0.107.1

-

See Also: Nushell Guidelines

-
-

O

-

OCI (Open Container Initiative)

-

Definition: Standard format for packaging and distributing extensions.

-

Where Used:

-
    -
  • Extension distribution
  • -
  • Package registry
  • -
  • Version management
  • -
-

Related Concepts: Registry, Package, Distribution

-

See Also: OCI Registry Guide

-
-

Operation

-

Definition: A single infrastructure action (create server, install taskserv, etc.).

-

Where Used:

-
    -
  • Workflow steps
  • -
  • Batch processing
  • -
  • Orchestrator tasks
  • -
-

Related Concepts: Workflow, Task, Action

-
-

Orchestrator

-

Definition: Hybrid Rust/Nushell service coordinating complex infrastructure operations.

-

Where Used:

-
    -
  • Workflow execution
  • -
  • Task coordination
  • -
  • State management
  • -
-

Related Concepts: Hybrid Architecture, Workflow, Platform Service

-

Location: provisioning/platform/orchestrator/

-

Commands:

-
cd provisioning/platform/orchestrator
-./scripts/start-orchestrator.nu --background
-
-

See Also: Orchestrator Architecture

-
-

P

-

PAP (Project Architecture Principles)

-

Definition: Core architectural rules and patterns that must be followed.

-

Where Used:

-
    -
  • Code review
  • -
  • Architecture decisions
  • -
  • Design validation
  • -
-

Related Concepts: Architecture, ADR, Best Practices

-

See Also: Architecture Overview

-
-

Platform Service

-

Definition: A core service providing platform-level functionality (Orchestrator, Control Center, MCP, API Gateway).

-

Where Used:

-
    -
  • System infrastructure
  • -
  • Core capabilities
  • -
  • Service integration
  • -
-

Related Concepts: Service, Architecture, Infrastructure

-

Location: provisioning/platform/{service}/

-
-

Plugin

-

Definition: Native Nushell plugin providing performance-optimized operations.

-

Where Used:

-
    -
  • Auth operations (10-50x faster)
  • -
  • KMS encryption
  • -
  • Orchestrator queries
  • -
-

Related Concepts: Nushell, Performance, Native

-

Commands:

-
provisioning plugin list
-provisioning plugin install
-
-

See Also: Nushell Plugins Guide

-
-

Provider

-

Definition: Cloud platform integration (AWS, UpCloud, local) handling infrastructure provisioning.

-

Where Used:

-
    -
  • Server creation
  • -
  • Resource management
  • -
  • Cloud operations
  • -
-

Related Concepts: Extension, Infrastructure, Cloud

-

Location: provisioning/extensions/providers/{name}/

-

Examples: aws, upcloud, local

-

Commands:

-
provisioning module discover provider
-provisioning providers list
-
-

See Also: Quick Provider Guide

-
-

Q

-

Quick Reference

-

Definition: Condensed command and configuration reference for rapid lookup.

-

Where Used:

-
    -
  • Daily operations
  • -
  • Quick reminders
  • -
  • Command syntax
  • -
-

Related Concepts: Guide, Documentation, Cheatsheet

-

Commands:

-
provisioning sc  # Fastest
-provisioning guide quickstart
-
-

See Also: Quickstart Cheatsheet

-
-

R

-

RBAC (Role-Based Access Control)

-

Definition: Permission system with 5 roles (admin, operator, developer, viewer, auditor).

-

Where Used:

-
    -
  • User permissions
  • -
  • Access control
  • -
  • Security policies
  • -
-

Related Concepts: Authorization, Cedar, Security

-

Roles: Admin, Operator, Developer, Viewer, Auditor

-
-

Registry

-

Definition: OCI-compliant repository for storing and distributing extensions.

-

Where Used:

-
    -
  • Extension publishing
  • -
  • Version management
  • -
  • Package distribution
  • -
-

Related Concepts: OCI, Package, Distribution

-

See Also: OCI Registry Guide

-
-

REST API

-

Definition: HTTP endpoints exposing platform operations to external systems.

-

Where Used:

-
    -
  • External integration
  • -
  • Web UI backend
  • -
  • Programmatic access
  • -
-

Related Concepts: API, Integration, HTTP

-

Endpoint: http://localhost:9090

-

See Also: REST API Documentation

-
-

Rollback

-

Definition: Reverting a failed workflow or operation to previous stable state.

-

Where Used:

-
    -
  • Failure recovery
  • -
  • Deployment safety
  • -
  • State restoration
  • -
-

Related Concepts: Workflow, Checkpoint, Recovery

-

Commands:

-
provisioning batch rollback <workflow-id>
-
-
-

RustyVault

-

Definition: Rust-based secrets management backend for KMS.

-

Where Used:

-
    -
  • Key storage
  • -
  • Secret encryption
  • -
  • Configuration protection
  • -
-

Related Concepts: KMS, Security, Encryption

-

See Also: RustyVault KMS Guide

-
-

S

-

Schema

-

Definition: Nickel type definition specifying structure and validation rules.

-

Where Used:

-
    -
  • Configuration validation
  • -
  • Type safety
  • -
  • Documentation
  • -
-

Related Concepts: Nickel, Validation, Type

-

Example:

-
let ServerConfig = {
-    hostname | string,
-    cores | number,
-    memory | number,
-} in
-ServerConfig
-
-

See Also: Nickel Development

-
-

Secrets Management

-

Definition: System for secure storage and retrieval of sensitive data.

-

Where Used:

-
    -
  • Password storage
  • -
  • API keys
  • -
  • Certificates
  • -
-

Related Concepts: KMS, Security, Encryption

-

See Also: Dynamic Secrets Implementation

-
-

Security System

-

Definition: Comprehensive enterprise-grade security with 12 components (Auth, Cedar, MFA, KMS, Secrets, Compliance, etc.).

-

Where Used:

-
    -
  • User authentication
  • -
  • Access control
  • -
  • Data protection
  • -
-

Related Concepts: Auth, Authorization, MFA, KMS, Audit

-

See Also: Security System Implementation

-
-

Server

-

Definition: Virtual machine or physical host managed by the platform.

-

Where Used:

-
    -
  • Infrastructure provisioning
  • -
  • Compute resources
  • -
  • Deployment targets
  • -
-

Related Concepts: Infrastructure, Provider, Taskserv

-

Commands:

-
provisioning server create
+# List all resources
 provisioning server list
-provisioning server ssh <hostname>
-
-

See Also: Infrastructure Management

-
-

Service

-

Definition: A running application or daemon (interchangeable with Taskserv in many contexts).

-

Where Used:

-
    -
  • Service management
  • -
  • Application deployment
  • -
  • System administration
  • -
-

Related Concepts: Taskserv, Daemon, Application

-

See Also: Service Management Guide

-
-

Shortcut

-

Definition: Abbreviated command alias for faster CLI operations.

-

Where Used:

-
    -
  • Daily operations
  • -
  • Quick commands
  • -
  • Productivity enhancement
  • -
-

Related Concepts: CLI, Command, Alias

-

Examples:

-
    -
  • provisioning s createprovisioning server create
  • -
  • provisioning ws listprovisioning workspace list
  • -
  • provisioning sc → Quick reference
  • -
-

See Also: CLI Reference

-
-

SOPS (Secrets OPerationS)

-

Definition: Encryption tool for managing secrets in version control.

-

Where Used:

-
    -
  • Configuration encryption
  • -
  • Secret management
  • -
  • Secure storage
  • -
-

Related Concepts: Encryption, Security, Age

-

Version: 3.10.2

-

Commands:

-
provisioning sops edit <file>
-
-
-

SSH (Secure Shell)

-

Definition: Encrypted remote access protocol with temporal key support.

-

Where Used:

-
    -
  • Server administration
  • -
  • Remote commands
  • -
  • Secure file transfer
  • -
-

Related Concepts: Security, Server, Remote Access

-

Commands:

-
provisioning server ssh <hostname>
-provisioning ssh connect <server>
-
-

See Also: SSH Temporal Keys User Guide

-
-

State Management

-

Definition: Tracking and persisting workflow execution state.

-

Where Used:

-
    -
  • Workflow recovery
  • -
  • Progress tracking
  • -
  • Failure handling
  • -
-

Related Concepts: Workflow, Checkpoint, Orchestrator

-
-

T

-

Task

-

Definition: A unit of work submitted to the orchestrator for execution.

-

Where Used:

-
    -
  • Workflow execution
  • -
  • Job processing
  • -
  • Operation tracking
  • -
-

Related Concepts: Operation, Workflow, Orchestrator

-
-

Taskserv

-

Definition: An installable infrastructure service (Kubernetes, PostgreSQL, Redis, etc.).

-

Where Used:

-
    -
  • Service installation
  • -
  • Application deployment
  • -
  • Infrastructure components
  • -
-

Related Concepts: Service, Extension, Package

-

Location: provisioning/extensions/taskservs/{category}/{name}/

-

Commands:

-
provisioning taskserv create <name>
-provisioning taskserv list
-provisioning test quick <taskserv>
-
-

See Also: Taskserv Developer Guide

-
-

Template

-

Definition: Parameterized configuration file supporting variable substitution.

-

Where Used:

-
    -
  • Configuration generation
  • -
  • Infrastructure customization
  • -
  • Deployment automation
  • -
-

Related Concepts: Config, Generation, Customization

-

Location: provisioning/templates/

-
-

Test Environment

-

Definition: Containerized isolated environment for testing taskservs and clusters.

-

Where Used:

-
    -
  • Development testing
  • -
  • CI/CD integration
  • -
  • Pre-deployment validation
  • -
-

Related Concepts: Container, Testing, Validation

-

Commands:

-
provisioning test quick <taskserv>
-provisioning test env single <taskserv>
-provisioning test env cluster <cluster>
-
-

See Also: Test Environment Guide

-
-

Topology

-

Definition: Multi-node cluster configuration template (Kubernetes HA, etcd cluster, etc.).

-

Where Used:

-
    -
  • Cluster testing
  • -
  • Multi-node deployments
  • -
  • Production simulation
  • -
-

Related Concepts: Test Environment, Cluster, Configuration

-

Examples: kubernetes_3node, etcd_cluster, kubernetes_single

-
-

TOTP (Time-based One-Time Password)

-

Definition: MFA method generating time-sensitive codes.

-

Where Used:

-
    -
  • Two-factor authentication
  • -
  • MFA enrollment
  • -
  • Security enhancement
  • -
-

Related Concepts: MFA, Security, Auth

-

Commands:

-
provisioning mfa totp enroll
-provisioning mfa totp verify <code>
-
-
-

Troubleshooting

-

Definition: System problem diagnosis and resolution guidance.

-

Where Used:

-
    -
  • Problem solving
  • -
  • Error resolution
  • -
  • System debugging
  • -
-

Related Concepts: Diagnostics, Guide, Support

-

See Also: Troubleshooting Guide

-
-

U

-

UI (User Interface)

-

Definition: Visual interface for platform operations (Control Center, Web UI).

-

Where Used:

-
    -
  • Visual management
  • -
  • Guided workflows
  • -
  • Monitoring dashboards
  • -
-

Related Concepts: Control Center, Platform Service, GUI

-
-

Update

-

Definition: Process of upgrading infrastructure components to newer versions.

-

Where Used:

-
    -
  • Version management
  • -
  • Security patches
  • -
  • Feature updates
  • -
-

Related Concepts: Version, Migration, Upgrade

-

Commands:

-
provisioning version check
-provisioning version apply
-
-

See Also: Update Infrastructure Guide

-
-

V

-

Validation

-

Definition: Verification that configuration or infrastructure meets requirements.

-

Where Used:

-
    -
  • Configuration checks
  • -
  • Schema validation
  • -
  • Pre-deployment verification
  • -
-

Related Concepts: Schema, Nickel, Check

-

Commands:

-
provisioning validate config
-provisioning validate infrastructure
-
-

See Also: Config Validation

-
-

Version

-

Definition: Semantic version identifier for components and compatibility.

-

Where Used:

-
    -
  • Component versioning
  • -
  • Compatibility checking
  • -
  • Update management
  • -
-

Related Concepts: Update, Dependency, Compatibility

-

Commands:

-
provisioning version
-provisioning version check
-provisioning taskserv check-updates
-
-
-

W

-

WebAuthn

-

Definition: FIDO2-based passwordless authentication standard.

-

Where Used:

-
    -
  • Hardware key authentication
  • -
  • Passwordless login
  • -
  • Enhanced MFA
  • -
-

Related Concepts: MFA, Security, FIDO2

-

Commands:

-
provisioning mfa webauthn enroll
-provisioning mfa webauthn verify
-
-
-

Workflow

-

Definition: A sequence of related operations with dependency management and state tracking.

-

Where Used:

-
    -
  • Complex deployments
  • -
  • Multi-step operations
  • -
  • Automated processes
  • -
-

Related Concepts: Batch Operation, Orchestrator, Task

-

Commands:

-
provisioning workflow list
-provisioning workflow status <id>
-provisioning workflow monitor <id>
-
-

See Also: Batch Workflow System

-
-

Workspace

-

Definition: An isolated environment containing infrastructure definitions and configuration.

-

Where Used:

-
    -
  • Project isolation
  • -
  • Environment separation
  • -
  • Team workspaces
  • -
-

Related Concepts: Infrastructure, Config, Environment

-

Location: workspace/{name}/

-

Commands:

-
provisioning workspace list
-provisioning workspace switch <name>
-provisioning workspace create <name>
-
-

See Also: Workspace Switching Guide

-
-

X-Z

-

YAML

-

Definition: Data serialization format used for Kubernetes manifests and configuration.

-

Where Used:

-
    -
  • Kubernetes deployments
  • -
  • Configuration files
  • -
  • Data interchange
  • -
-

Related Concepts: Config, Kubernetes, Data Format

-
-

Symbol and Acronym Index

-
- - - - - - - - - - - - - - - - - - -
Symbol/AcronymFull TermCategory
ADRArchitecture Decision RecordArchitecture
APIApplication Programming InterfaceIntegration
CLICommand-Line InterfaceUser Interface
GDPRGeneral Data Protection RegulationCompliance
JWTJSON Web TokenSecurity
NickelNickel Configuration LanguageConfiguration
KMSKey Management ServiceSecurity
MCPModel Context ProtocolPlatform
MFAMulti-Factor AuthenticationSecurity
OCIOpen Container InitiativePackaging
PAPProject Architecture PrinciplesArchitecture
RBACRole-Based Access ControlSecurity
RESTRepresentational State TransferAPI
SOC2Service Organization Control 2Compliance
SOPSSecrets OPerationSSecurity
SSHSecure ShellRemote Access
TOTPTime-based One-Time PasswordSecurity
UIUser InterfaceUser Interface
-
-
-

Cross-Reference Map

-

By Topic Area

-

Infrastructure:

-
    -
  • Infrastructure, Server, Cluster, Provider, Taskserv, Module
  • -
-

Security:

-
    -
  • Auth, Authorization, JWT, MFA, TOTP, WebAuthn, Cedar, KMS, Secrets Management, RBAC, Break-Glass
  • -
-

Configuration:

-
    -
  • Config, Nickel, Schema, Validation, Environment, Layer, Workspace
  • -
-

Workflow & Operations:

-
    -
  • Workflow, Batch Operation, Operation, Task, Orchestrator, Checkpoint, Rollback
  • -
-

Platform Services:

-
    -
  • Orchestrator, Control Center, MCP, API Gateway, Platform Service
  • -
-

Documentation:

-
    -
  • Glossary, Guide, ADR, Cross-Reference, Internal Link, Anchor Link
  • -
-

Development:

-
    -
  • Extension, Plugin, Template, Module, Integration
  • -
-

Testing:

-
    -
  • Test Environment, Topology, Validation, Health Check
  • -
-

Compliance:

-
    -
  • Compliance, GDPR, Audit, Security System
  • -
-

By User Journey

-

New User:

-
    -
  1. Glossary (this document)
  2. -
  3. Guide
  4. -
  5. Quick Reference
  6. -
  7. Workspace
  8. -
  9. Infrastructure
  10. -
  11. Server
  12. -
  13. Taskserv
  14. -
-

Developer:

-
    -
  1. Extension
  2. -
  3. Provider
  4. -
  5. Taskserv
  6. -
  7. Nickel
  8. -
  9. Schema
  10. -
  11. Template
  12. -
  13. Plugin
  14. -
-

Operations:

-
    -
  1. Workflow
  2. -
  3. Orchestrator
  4. -
  5. Monitoring
  6. -
  7. Troubleshooting
  8. -
  9. Security
  10. -
  11. Compliance
  12. -
-
-

Terminology Guidelines

-

Writing Style

-

Consistency: Use the same term throughout documentation (for example, “Taskserv” not “task service” or “task-serv”)

-

Capitalization:

-
    -
  • Proper nouns and acronyms: CAPITALIZE (Nickel, JWT, MFA)
  • -
  • Generic terms: lowercase (server, cluster, workflow)
  • -
  • Platform-specific terms: Title Case (Taskserv, Workspace, Orchestrator)
  • -
-

Pluralization:

-
    -
  • Taskservs (not taskservices)
  • -
  • Workspaces (standard plural)
  • -
  • Topologies (not topologys)
  • -
-

Avoiding Confusion

-
- - - - -
Don’t SaySay InsteadReason
“Task service”“Taskserv”Standard platform term
“Configuration file”“Config” or “Settings”Context-dependent
“Worker”“Agent” or “Task”Clarify context
“Kubernetes service”“K8s taskserv” or “K8s Service resource”Disambiguate
-
-
-

Contributing to the Glossary

-

Adding New Terms

-
    -
  1. -

    Alphabetical placement in appropriate section

    -
  2. -
  3. -

    Include all standard sections:

    -
      -
    • Definition
    • -
    • Where Used
    • -
    • Related Concepts
    • -
    • Examples (if applicable)
    • -
    • Commands (if applicable)
    • -
    • See Also (links to docs)
    • -
    -
  4. -
  5. -

    Cross-reference in related terms

    -
  6. -
  7. -

    Update Symbol and Acronym Index if applicable

    -
  8. -
  9. -

    Update Cross-Reference Map

    -
  10. -
-

Updating Existing Terms

-
    -
  1. Verify changes don’t break cross-references
  2. -
  3. Update “Last Updated” date at top
  4. -
  5. Increment version if major changes
  6. -
  7. Review related terms for consistency
  8. -
-
-

Version History

-
- -
VersionDateChanges
1.0.02025-10-10Initial comprehensive glossary
-
-
-

Maintained By: Documentation Team -Review Cycle: Quarterly or when major features are added -Feedback: Please report missing or unclear terms via issues

-

MCP Server - Model Context Protocol

-

A Rust-native Model Context Protocol (MCP) server for infrastructure automation and AI-assisted DevOps operations.

-
-

Source: provisioning/platform/mcp-server/ -Status: Proof of Concept Complete

-
-

Overview

-

Replaces the Python implementation with significant performance improvements while maintaining philosophical consistency with the Rust ecosystem approach.

-

Performance Results

-
🚀 Rust MCP Server Performance Analysis
-==================================================
-
-📋 Server Parsing Performance:
-  • Sub-millisecond latency across all operations
-  • 0μs average for configuration access
-
-🤖 AI Status Performance:
-  • AI Status: 0μs avg (10000 iterations)
-
-💾 Memory Footprint:
-  • ServerConfig size: 80 bytes
-  • Config size: 272 bytes
-
-✅ Performance Summary:
-  • Server parsing: Sub-millisecond latency
-  • Configuration access: Microsecond latency
-  • Memory efficient: Small struct footprint
-  • Zero-copy string operations where possible
-
-

Architecture

-
src/
-├── simple_main.rs      # Lightweight MCP server entry point
-├── main.rs             # Full MCP server (with SDK integration)
-├── lib.rs              # Library interface
-├── config.rs           # Configuration management
-├── provisioning.rs     # Core provisioning engine
-├── tools.rs            # AI-powered parsing tools
-├── errors.rs           # Error handling
-└── performance_test.rs # Performance benchmarking
-
-

Key Features

-
    -
  1. AI-Powered Server Parsing: Natural language to infrastructure config
  2. -
  3. Multi-Provider Support: AWS, UpCloud, Local
  4. -
  5. Configuration Management: TOML-based with environment overrides
  6. -
  7. Error Handling: Comprehensive error types with recovery hints
  8. -
  9. Performance Monitoring: Built-in benchmarking capabilities
  10. -
-

Rust vs Python Comparison

-
- - - - - -
MetricPython MCP ServerRust MCP ServerImprovement
Startup Time~500 ms~50 ms10x faster
Memory Usage~50 MB~5 MB10x less
Parsing Latency~1 ms~0.001 ms1000x faster
Binary SizePython + deps~15 MB staticPortable
Type SafetyRuntime errorsCompile-timeZero runtime errors
-
-

Usage

-
# Build and run
-cargo run --bin provisioning-mcp-server --release
-
-# Run with custom config
-PROVISIONING_PATH=/path/to/provisioning cargo run --bin provisioning-mcp-server -- --debug
-
-# Run tests
-cargo test
-
-# Run benchmarks
-cargo run --bin provisioning-mcp-server --release
-
-

Configuration

-

Set via environment variables:

-
export PROVISIONING_PATH=/path/to/provisioning
-export PROVISIONING_AI_PROVIDER=openai
-export OPENAI_API_KEY=your-key
-export PROVISIONING_DEBUG=true
-
-

Integration Benefits

-
    -
  1. Philosophical Consistency: Rust throughout the stack
  2. -
  3. Performance: Sub-millisecond response times
  4. -
  5. Memory Safety: No segfaults, no memory leaks
  6. -
  7. Concurrency: Native async/await support
  8. -
  9. Distribution: Single static binary
  10. -
  11. Cross-compilation: ARM64/x86_64 support
  12. -
-

Next Steps

-
    -
  1. Full MCP SDK integration (schema definitions)
  2. -
  3. WebSocket/TCP transport layer
  4. -
  5. Plugin system for extensibility
  6. -
  7. Metrics collection and monitoring
  8. -
  9. Documentation and examples
  10. -
- - -

TypeDialog Platform Configuration Guide

-

Version: 2.0.0 -Last Updated: 2026-01-05 -Status: Production Ready -Target Audience: DevOps Engineers, Infrastructure Administrators

-

Services Covered: 8 platform services (orchestrator, control-center, mcp-server, vault-service, extension-registry, rag, ai-service, -provisioning-daemon)

-

Interactive configuration for cloud-native infrastructure platform services using TypeDialog forms and Nickel.

-

Overview

-

TypeDialog is an interactive form system that generates Nickel configurations for platform services. Instead of manually editing TOML or KCL -files, you answer questions in an interactive form, and TypeDialog generates validated Nickel configuration.

-

Benefits:

-
    -
  • ✅ No manual TOML editing required
  • -
  • ✅ Interactive guidance for each setting
  • -
  • ✅ Automatic validation of inputs
  • -
  • ✅ Type-safe configuration (Nickel contracts)
  • -
  • ✅ Generated configurations ready for deployment
  • -
-

Quick Start

-

1. Configure a Platform Service (5 minutes)

-
# Launch interactive form for orchestrator
-provisioning config platform orchestrator
-
-# Or use TypeDialog directly
-typedialog form .typedialog/provisioning/platform/orchestrator/form.toml
-
-

This opens an interactive form with sections for:

-
    -
  • Workspace configuration
  • -
  • Server settings (host, port, workers)
  • -
  • Storage backend (filesystem or SurrealDB)
  • -
  • Task queue and batch settings
  • -
  • Monitoring and health checks
  • -
  • Rollback and recovery
  • -
  • Logging configuration
  • -
  • Extensions and integrations
  • -
  • Advanced settings
  • -
-

2. Review Generated Configuration

-

After completing the form, TypeDialog generates config.ncl:

-
# View what was generated
-cat workspace_librecloud/config/config.ncl
-
-

3. Validate Configuration

-
# Check Nickel syntax is valid
-nickel typecheck workspace_librecloud/config/config.ncl
-
-# Export to TOML for services
-provisioning config export
-
-

4. Services Use Generated Config

-

Platform services automatically load the exported TOML:

-
# Orchestrator reads config/generated/platform/orchestrator.toml
-provisioning start orchestrator
-
-# Check it's using the right config
-cat workspace_librecloud/config/generated/platform/orchestrator.toml
-
-

Interactive Configuration Workflow

- -

Best for: Most users, no Nickel knowledge needed

-

Workflow:

-
    -
  1. Launch form for a service: provisioning config platform orchestrator
  2. -
  3. Answer questions in interactive prompts about workspace, server, storage, queue
  4. -
  5. Review what was generated: cat workspace_librecloud/config/config.ncl
  6. -
  7. Update running services: provisioning config export && provisioning restart orchestrator
  8. -
-

Advanced Approach: Manual Nickel Editing

-

Best for: Users comfortable with Nickel, want full control

-

Workflow:

-
    -
  1. Create file: touch workspace_librecloud/config/config.ncl
  2. -
  3. Edit directly: vim workspace_librecloud/config/config.ncl
  4. -
  5. Validate syntax: nickel typecheck workspace_librecloud/config/config.ncl
  6. -
  7. Export and deploy: provisioning config export && provisioning restart orchestrator
  8. -
-

Configuration Structure

-

Single File, Three Sections

-

All configuration lives in one Nickel file with three sections:

-
# workspace_librecloud/config/config.ncl
-{
-  # SECTION 1: Workspace metadata
-  workspace = {
-    name = "librecloud",
-    path = "/Users/Akasha/project-provisioning/workspace_librecloud",
-    description = "Production workspace"
-  },
-
-  # SECTION 2: Cloud providers
-  providers = {
-    upcloud = {
-      enabled = true,
-      api_user = "{{env.UPCLOUD_USER}}",
-      api_password = "{{kms.decrypt('upcloud_pass')}}"
-    },
-    aws = { enabled = false },
-    local = { enabled = true }
-  },
-
-  # SECTION 3: Platform services
-  platform = {
-    orchestrator = {
-      enabled = true,
-      server = { host = "127.0.0.1", port = 9090 },
-      storage = { type = "filesystem" }
-    },
-    kms = {
-      enabled = true,
-      backend = "rustyvault",
-      url = "http://localhost:8200"
-    }
-  }
-}
-
-

Available Configuration Sections

-
- - - - - - - - - - - - - - - - - - -
SectionPurposeUsed By
workspaceWorkspace metadata and pathsConfig loader, providers
providers.upcloudUpCloud provider settingsUpCloud provisioning
providers.awsAWS provider settingsAWS provisioning
providers.localLocal VM provider settingsLocal VM provisioning
Core Platform Services
platform.orchestratorOrchestrator service configOrchestrator REST API
platform.control_centerControl center service configControl center REST API
platform.mcp_serverMCP server service configModel Context Protocol integration
platform.installerInstaller service configInfrastructure provisioning
Security & Secrets
platform.vault_serviceVault service configSecrets management and encryption
Extensions & Registry
platform.extension_registryExtension registry configExtension distribution via Gitea/OCI
AI & Intelligence
platform.ragRAG system configRetrieval-Augmented Generation
platform.ai_serviceAI service configAI model integration and DAG workflows
Operations & Daemon
platform.provisioning_daemonProvisioning daemon configBackground provisioning operations
-
-

Service-Specific Configuration

-

Orchestrator Service

-

Purpose: Coordinate infrastructure operations, manage workflows, handle batch operations

-

Key Settings:

-
    -
  • server: HTTP server configuration (host, port, workers)
  • -
  • storage: Task queue storage (filesystem or SurrealDB)
  • -
  • queue: Task processing (concurrency, retries, timeouts)
  • -
  • batch: Batch operation settings (parallelism, timeouts)
  • -
  • monitoring: Health checks and metrics collection
  • -
  • rollback: Checkpoint and recovery strategy
  • -
  • logging: Log level and format
  • -
-

Example:

-
platform = {
-  orchestrator = {
-    enabled = true,
-    server = {
-      host = "127.0.0.1",
-      port = 9090,
-      workers = 4,
-      keep_alive = 75,
-      max_connections = 1000
-    },
-    storage = {
-      type = "filesystem",
-      backend_path = "{{workspace.path}}/.orchestrator/data/queue.rkvs"
-    },
-    queue = {
-      max_concurrent_tasks = 5,
-      retry_attempts = 3,
-      retry_delay_seconds = 5,
-      task_timeout_minutes = 60
-    }
-  }
-}
-
-

KMS Service

-

Purpose: Cryptographic key management, secret encryption/decryption

-

Key Settings:

-
    -
  • backend: KMS backend (rustyvault, age, aws, vault, cosmian)
  • -
  • url: Backend URL or connection string
  • -
  • credentials: Authentication if required
  • -
-

Example:

-
platform = {
-  kms = {
-    enabled = true,
-    backend = "rustyvault",
-    url = "http://localhost:8200"
-  }
-}
-
-

Control Center Service

-

Purpose: Centralized monitoring and control interface

-

Key Settings:

-
    -
  • server: HTTP server configuration
  • -
  • database: Backend database connection
  • -
  • jwt: JWT authentication settings
  • -
  • security: CORS and security policies
  • -
-

Example:

-
platform = {
-  control_center = {
-    enabled = true,
-    server = {
-      host = "127.0.0.1",
-      port = 8080
-    }
-  }
-}
-
-

Deployment Modes

-

All platform services support four deployment modes, each with different resource allocation and feature sets:

-
- - - - -
ModeResourcesUse CaseStorageTLS
soloMinimal (2 workers)Development, testingEmbedded/filesystemNo
multiuserModerate (4 workers)Team environmentsShared databasesOptional
cicdHigh throughput (8+ workers)CI/CD pipelinesEphemeral/memoryNo
enterpriseHigh availability (16+ workers)ProductionClustered/distributedYes
-
-

Mode-based Configuration Loading:

-
# Load a specific mode's configuration
-export VAULT_MODE=enterprise
-export REGISTRY_MODE=multiuser
-export RAG_MODE=cicd
-
-# Services automatically resolve to correct TOML files:
-# Generated from: provisioning/schemas/platform/
-# - vault-service.enterprise.toml (generated from vault-service.ncl)
-# - extension-registry.multiuser.toml (generated from extension-registry.ncl)
-# - rag.cicd.toml (generated from rag.ncl)
-
-

New Platform Services (Phase 13-19)

-

Vault Service

-

Purpose: Secrets management, encryption, and cryptographic key storage

-

Key Settings:

-
    -
  • server: HTTP server configuration (host, port, workers)
  • -
  • storage: Backend storage (filesystem, memory, surrealdb, etcd, postgresql)
  • -
  • vault: Vault mounting and key management
  • -
  • ha: High availability clustering
  • -
  • security: TLS, certificate validation
  • -
  • logging: Log level and audit trails
  • -
-

Mode Characteristics:

-
    -
  • solo: Filesystem storage, no TLS, embedded mode
  • -
  • multiuser: SurrealDB backend, shared storage, TLS optional
  • -
  • cicd: In-memory ephemeral storage, no persistence
  • -
  • enterprise: Etcd HA, TLS required, audit logging enabled
  • -
-

Environment Variable Overrides:

-
VAULT_CONFIG=/path/to/vault.toml              # Explicit config path
-VAULT_MODE=enterprise                          # Mode-specific config
-VAULT_SERVER_URL=http://localhost:8200        # Server URL
-VAULT_STORAGE_BACKEND=etcd                    # Storage backend
-VAULT_AUTH_TOKEN=s.xxxxxxxx                   # Authentication token
-VAULT_TLS_VERIFY=true                         # TLS verification
-
-

Example Configuration:

-
platform = {
-  vault_service = {
-    enabled = true,
-    server = {
-      host = "0.0.0.0",
-      port = 8200,
-      workers = 8
-    },
-    storage = {
-      backend = "surrealdb",
-      url = "http://surrealdb:8000",
-      namespace = "vault",
-      database = "secrets"
-    },
-    vault = {
-      mount_point = "transit",
-      key_name = "provisioning-master"
-    },
-    ha = {
-      enabled = true
-    }
-  }
-}
-
-

Extension Registry Service

-

Purpose: Extension distribution and management via Gitea and OCI registries

-

Key Settings:

-
    -
  • server: HTTP server configuration (host, port, workers)
  • -
  • gitea: Gitea integration for extension source repository
  • -
  • oci: OCI registry for artifact distribution
  • -
  • cache: Metadata and list caching
  • -
  • auth: Registry authentication
  • -
-

Mode Characteristics:

-
    -
  • solo: Gitea only, minimal cache, CORS disabled
  • -
  • multiuser: Gitea + OCI, both enabled, CORS enabled
  • -
  • cicd: OCI only (high-throughput mode), ephemeral cache
  • -
  • enterprise: Both Gitea + OCI, TLS verification, large cache
  • -
-

Environment Variable Overrides:

-
REGISTRY_CONFIG=/path/to/registry.toml       # Explicit config path
-REGISTRY_MODE=multiuser                       # Mode-specific config
-REGISTRY_SERVER_HOST=0.0.0.0                 # Server host
-REGISTRY_SERVER_PORT=8081                    # Server port
-REGISTRY_SERVER_WORKERS=4                    # Worker count
-REGISTRY_GITEA_URL=http://gitea:3000         # Gitea URL
-REGISTRY_GITEA_ORG=provisioning              # Gitea organization
-REGISTRY_OCI_REGISTRY=registry.local:5000     # OCI registry
-REGISTRY_OCI_NAMESPACE=provisioning          # OCI namespace
-
-

Example Configuration:

-
platform = {
-  extension_registry = {
-    enabled = true,
-    server = {
-      host = "0.0.0.0",
-      port = 8081,
-      workers = 4
-    },
-    gitea = {
-      enabled = true,
-      url = "http://gitea:3000",
-      org = "provisioning"
-    },
-    oci = {
-      enabled = true,
-      registry = "registry.local:5000",
-      namespace = "provisioning"
-    },
-    cache = {
-      capacity = 1000,
-      ttl = 300
-    }
-  }
-}
-
-

RAG (Retrieval-Augmented Generation) Service

-

Purpose: Document retrieval, semantic search, and AI-augmented responses

-

Key Settings:

-
    -
  • embeddings: Embedding model provider (openai, local, anthropic)
  • -
  • vector_db: Vector database backend (memory, surrealdb, qdrant, milvus)
  • -
  • llm: Language model provider (anthropic, openai, ollama)
  • -
  • retrieval: Search strategy and parameters
  • -
  • ingestion: Document processing and indexing
  • -
-

Mode Characteristics:

-
    -
  • solo: Local embeddings, in-memory vector DB, Ollama LLM
  • -
  • multiuser: OpenAI embeddings, SurrealDB vector DB, Anthropic LLM
  • -
  • cicd: RAG completely disabled (not applicable for ephemeral pipelines)
  • -
  • enterprise: Large embeddings (3072-dim), distributed vector DB, Claude Opus
  • -
-

Environment Variable Overrides:

-
RAG_CONFIG=/path/to/rag.toml                 # Explicit config path
-RAG_MODE=multiuser                            # Mode-specific config
-RAG_ENABLED=true                              # Enable/disable RAG
-RAG_EMBEDDINGS_PROVIDER=openai               # Embedding provider
-RAG_EMBEDDINGS_API_KEY=sk-xxx                # Embedding API key
-RAG_VECTOR_DB_URL=http://surrealdb:8000     # Vector DB URL
-RAG_LLM_PROVIDER=anthropic                   # LLM provider
-RAG_LLM_API_KEY=sk-ant-xxx                  # LLM API key
-RAG_VECTOR_DB_TYPE=surrealdb                # Vector DB type
-
-

Example Configuration:

-
platform = {
-  rag = {
-    enabled = true,
-    embeddings = {
-      provider = "openai",
-      model = "text-embedding-3-small",
-      api_key = "{{env.OPENAI_API_KEY}}"
-    },
-    vector_db = {
-      db_type = "surrealdb",
-      url = "http://surrealdb:8000",
-      namespace = "rag_prod"
-    },
-    llm = {
-      provider = "anthropic",
-      model = "claude-opus-4-5-20251101",
-      api_key = "{{env.ANTHROPIC_API_KEY}}"
-    },
-    retrieval = {
-      top_k = 10,
-      similarity_threshold = 0.75
-    }
-  }
-}
-
-

AI Service

-

Purpose: AI model integration with RAG and MCP support for multi-step workflows

-

Key Settings:

-
    -
  • server: HTTP server configuration
  • -
  • rag: RAG system integration
  • -
  • mcp: Model Context Protocol integration
  • -
  • dag: Directed acyclic graph task orchestration
  • -
-

Mode Characteristics:

-
    -
  • solo: RAG enabled, no MCP, minimal concurrency (3 tasks)
  • -
  • multiuser: Both RAG and MCP enabled, moderate concurrency (10 tasks)
  • -
  • cicd: RAG disabled, MCP enabled, high concurrency (20 tasks)
  • -
  • enterprise: Both enabled, max concurrency (50 tasks), full monitoring
  • -
-

Environment Variable Overrides:

-
AI_SERVICE_CONFIG=/path/to/ai.toml           # Explicit config path
-AI_SERVICE_MODE=enterprise                    # Mode-specific config
-AI_SERVICE_SERVER_PORT=8082                  # Server port
-AI_SERVICE_SERVER_WORKERS=16                 # Worker count
-AI_SERVICE_RAG_ENABLED=true                  # Enable RAG integration
-AI_SERVICE_MCP_ENABLED=true                  # Enable MCP integration
-AI_SERVICE_DAG_MAX_CONCURRENT_TASKS=50       # Max concurrent tasks
-
-

Example Configuration:

-
platform = {
-  ai_service = {
-    enabled = true,
-    server = {
-      host = "0.0.0.0",
-      port = 8082,
-      workers = 8
-    },
-    rag = {
-      enabled = true,
-      rag_service_url = "http://rag:8083",
-      timeout = 60000
-    },
-    mcp = {
-      enabled = true,
-      mcp_service_url = "http://mcp-server:8084",
-      timeout = 60000
-    },
-    dag = {
-      max_concurrent_tasks = 20,
-      task_timeout = 600000,
-      retry_attempts = 5
-    }
-  }
-}
-
-

Provisioning Daemon

-

Purpose: Background service for provisioning operations, workspace management, and health monitoring

-

Key Settings:

-
    -
  • daemon: Daemon control (poll interval, max workers)
  • -
  • logging: Log level and output configuration
  • -
  • actions: Automated actions (cleanup, updates, sync)
  • -
  • workers: Worker pool configuration
  • -
  • health: Health check settings
  • -
-

Mode Characteristics:

-
    -
  • solo: Minimal polling, no auto-cleanup, debug logging
  • -
  • multiuser: Standard polling, workspace sync enabled, info logging
  • -
  • cicd: Frequent polling, ephemeral cleanup, warning logging
  • -
  • enterprise: Standard polling, full automation, all features enabled
  • -
-

Environment Variable Overrides:

-
DAEMON_CONFIG=/path/to/daemon.toml           # Explicit config path
-DAEMON_MODE=enterprise                        # Mode-specific config
-DAEMON_POLL_INTERVAL=30                      # Polling interval (seconds)
-DAEMON_MAX_WORKERS=16                        # Maximum worker threads
-DAEMON_LOGGING_LEVEL=info                    # Log level (debug/info/warn/error)
-DAEMON_AUTO_CLEANUP=true                     # Enable auto cleanup
-DAEMON_AUTO_UPDATE=true                      # Enable auto updates
-
-

Example Configuration:

-
platform = {
-  provisioning_daemon = {
-    enabled = true,
-    daemon = {
-      poll_interval = 30,
-      max_workers = 8
-    },
-    logging = {
-      level = "info",
-      file = "/var/log/provisioning/daemon.log"
-    },
-    actions = {
-      auto_cleanup = true,
-      auto_update = false,
-      workspace_sync = true
-    }
-  }
-}
-
-

Using TypeDialog Forms

-

Form Navigation

-
    -
  1. Interactive Prompts: Answer questions one at a time
  2. -
  3. Validation: Inputs are validated as you type
  4. -
  5. Defaults: Each field shows a sensible default
  6. -
  7. Skip Optional: Press Enter to use default or skip optional fields
  8. -
  9. Review: Preview generated Nickel before saving
  10. -
-

Field Types

-
- - - - - -
TypeExampleNotes
text“127.0.0.1”Free-form text input
confirmtrue/falseYes/no answer
select“filesystem”Choose from list
custom(u16)9090Number input
custom(u32)1000Larger number
-
-

Special Values

-

Environment Variables:

-
api_user = "{{env.UPCLOUD_USER}}"
-api_password = "{{env.UPCLOUD_PASSWORD}}"
-
-

Workspace Paths:

-
data_dir = "{{workspace.path}}/.orchestrator/data"
-logs_dir = "{{workspace.path}}/.orchestrator/logs"
-
-

KMS Decryption:

-
api_password = "{{kms.decrypt('upcloud_pass')}}"
-
-

Validation & Export

-

Validating Configuration

-
# Check Nickel syntax
-nickel typecheck workspace_librecloud/config/config.ncl
-
-# Detailed validation with error messages
-nickel typecheck workspace_librecloud/config/config.ncl 2>&1
-
-# Schema validation happens during export
-provisioning config export
-
-

Exporting to Service Formats

-
# One-time export
-provisioning config export
-
-# Export creates (pre-configured TOML for all services):
-workspace_librecloud/config/generated/
-├── workspace.toml          # Workspace metadata
-├── providers/
-│   ├── upcloud.toml        # UpCloud provider
-│   └── local.toml          # Local provider
-└── platform/
-    ├── orchestrator.toml   # Orchestrator service
-    ├── control_center.toml # Control center service
-    ├── mcp_server.toml     # MCP server service
-    ├── installer.toml      # Installer service
-    ├── kms.toml            # KMS service
-    ├── vault_service.toml  # Vault service (new)
-    ├── extension_registry.toml  # Extension registry (new)
-    ├── rag.toml            # RAG service (new)
-    ├── ai_service.toml     # AI service (new)
-    └── provisioning_daemon.toml # Daemon service (new)
-
-# Public Nickel Schemas (20 total for 5 new services):
-provisioning/schemas/platform/
-├── schemas/
-│   ├── vault-service.ncl
-│   ├── extension-registry.ncl
-│   ├── rag.ncl
-│   ├── ai-service.ncl
-│   └── provisioning-daemon.ncl
-├── defaults/
-│   ├── vault-service-defaults.ncl
-│   ├── extension-registry-defaults.ncl
-│   ├── rag-defaults.ncl
-│   ├── ai-service-defaults.ncl
-│   ├── provisioning-daemon-defaults.ncl
-│   └── deployment/
-│       ├── solo-defaults.ncl
-│       ├── multiuser-defaults.ncl
-│       ├── cicd-defaults.ncl
-│       └── enterprise-defaults.ncl
-├── validators/
-├── templates/
-├── constraints/
-└── values/
-
-

Using Pre-Generated Configurations:

-

All 5 new services come with pre-built TOML configs for each deployment mode:

-
# View available schemas for vault service
-ls -la provisioning/schemas/platform/schemas/vault-service.ncl
-ls -la provisioning/schemas/platform/defaults/vault-service-defaults.ncl
-
-# Load enterprise mode
-export VAULT_MODE=enterprise
-cargo run -p vault-service
-
-# Or load multiuser mode
-export REGISTRY_MODE=multiuser
-cargo run -p extension-registry
-
-# All 5 services support mode-based loading
-export RAG_MODE=cicd
-export AI_SERVICE_MODE=enterprise
-export DAEMON_MODE=multiuser
-
-

Updating Configuration

-

Change a Setting

-
    -
  1. Edit source config: vim workspace_librecloud/config/config.ncl
  2. -
  3. Validate changes: nickel typecheck workspace_librecloud/config/config.ncl
  4. -
  5. Re-export to TOML: provisioning config export
  6. -
  7. Restart affected service (if needed): provisioning restart orchestrator
  8. -
-

Using TypeDialog to Update

-

If you prefer interactive updating:

-
# Re-run TypeDialog form (overwrites config.ncl)
-provisioning config platform orchestrator
-
-# Or edit via TypeDialog with existing values
-typedialog form .typedialog/provisioning/platform/orchestrator/form.toml
-
-

Troubleshooting

-

Form Won’t Load

-

Problem: Failed to parse config file

-

Solution: Check form.toml syntax and verify required fields are present (name, description, locales_path, templates_path)

-
head -10 .typedialog/provisioning/platform/orchestrator/form.toml
-
-

Validation Fails

-

Problem: Nickel configuration validation failed

-

Solution: Check for syntax errors and correct field names

-
nickel typecheck workspace_librecloud/config/config.ncl 2>&1 | less
-
-

Common issues: Missing closing braces, incorrect field names, wrong data types

-

Export Creates Empty Files

-

Problem: Generated TOML files are empty

-

Solution: Verify config.ncl exports to JSON and check all required sections exist

-
nickel export --format json workspace_librecloud/config/config.ncl | head -20
-
-

Services Don’t Use New Config

-

Problem: Changes don’t take effect

-

Solution:

-
    -
  1. Verify export succeeded: ls -lah workspace_librecloud/config/generated/platform/
  2. -
  3. Check service path: provisioning start orchestrator --check
  4. -
  5. Restart service: provisioning restart orchestrator
  6. -
-

Configuration Examples

-

Development Setup

-
{
-  workspace = {
-    name = "dev",
-    path = "/Users/dev/workspace",
-    description = "Development workspace"
-  },
-
-  providers = {
-    local = {
-      enabled = true,
-      base_path = "/opt/vms"
-    },
-    upcloud = { enabled = false },
-    aws = { enabled = false }
-  },
-
-  platform = {
-    orchestrator = {
-      enabled = true,
-      server = { host = "127.0.0.1", port = 9090 },
-      storage = { type = "filesystem" },
-      logging = { level = "debug", format = "json" }
-    },
-    kms = {
-      enabled = true,
-      backend = "age"
-    }
-  }
-}
-
-

Production Setup

-
{
-  workspace = {
-    name = "prod",
-    path = "/opt/provisioning/prod",
-    description = "Production workspace"
-  },
-
-  providers = {
-    upcloud = {
-      enabled = true,
-      api_user = "{{env.UPCLOUD_USER}}",
-      api_password = "{{kms.decrypt('upcloud_prod')}}",
-      default_zone = "de-fra1"
-    },
-    aws = { enabled = false },
-    local = { enabled = false }
-  },
-
-  platform = {
-    orchestrator = {
-      enabled = true,
-      server = { host = "0.0.0.0", port = 9090, workers = 8 },
-      storage = {
-        type = "surrealdb-server",
-        url = "ws://surreal.internal:8000"
-      },
-      monitoring = {
-        enabled = true,
-        metrics_interval_seconds = 30
-      },
-      logging = { level = "info", format = "json" }
-    },
-    kms = {
-      enabled = true,
-      backend = "vault",
-      url = "https://vault.internal:8200"
-    }
-  }
-}
-
-

Multi-Provider Setup

-
{
-  workspace = {
-    name = "multi",
-    path = "/opt/multi",
-    description = "Multi-cloud workspace"
-  },
-
-  providers = {
-    upcloud = {
-      enabled = true,
-      api_user = "{{env.UPCLOUD_USER}}",
-      default_zone = "de-fra1",
-      zones = ["de-fra1", "us-nyc1", "nl-ams1"]
-    },
-    aws = {
-      enabled = true,
-      access_key = "{{env.AWS_ACCESS_KEY_ID}}"
-    },
-    local = {
-      enabled = true,
-      base_path = "/opt/local-vms"
-    }
-  },
-
-  platform = {
-    orchestrator = {
-      enabled = true,
-      multi_workspace = false,
-      storage = { type = "filesystem" }
-    },
-    kms = {
-      enabled = true,
-      backend = "rustyvault"
-    }
-  }
-}
-
-

Best Practices

-

1. Use TypeDialog for Initial Setup

-

Start with TypeDialog forms for the best experience:

-
provisioning config platform orchestrator
-
-

2. Never Edit Generated Files

-

Only edit the source .ncl file, not the generated TOML files.

-

Correct: vim workspace_librecloud/config/config.ncl

-

Wrong: vim workspace_librecloud/config/generated/platform/orchestrator.toml

-

3. Validate Before Deploy

-

Always validate before deploying changes:

-
nickel typecheck workspace_librecloud/config/config.ncl
-provisioning config export
-
-

4. Use Environment Variables for Secrets

-

Never hardcode credentials in config. Reference environment variables or KMS:

-

Wrong: api_password = "my-password"

-

Correct: api_password = "{{env.UPCLOUD_PASSWORD}}"

-

Better: api_password = "{{kms.decrypt('upcloud_key')}}"

-

5. Document Changes

-

Add comments explaining custom settings in the Nickel file.

- -

Core Resources

-
    -
  • Configuration System: See CLAUDE.md#configuration-file-format-selection
  • -
  • Migration Guide: See provisioning/config/README.md#migration-strategy
  • -
  • Schema Reference: See provisioning/schemas/
  • -
  • Nickel Language: See ADR-011 in docs/architecture/adr/
  • -
-

Platform Services

-
    -
  • Platform Services Overview: See provisioning/platform/*/README.md
  • -
  • Core Services (Phases 8-12): orchestrator, control-center, mcp-server
  • -
  • New Services (Phases 13-19): -
      -
    • vault-service: Secrets management and encryption
    • -
    • extension-registry: Extension distribution via Gitea/OCI
    • -
    • rag: Retrieval-Augmented Generation system
    • -
    • ai-service: AI model integration with DAG workflows
    • -
    • provisioning-daemon: Background provisioning operations
    • -
    -
  • -
-

Note: Installer is a distribution tool (provisioning/tools/distribution/create-installer.nu), not a platform service configurable via TypeDialog.

-

Public Definition Locations

-
    -
  • TypeDialog Forms (Interactive UI): provisioning/.typedialog/platform/forms/
  • -
  • Nickel Schemas (Type Definitions): provisioning/schemas/platform/schemas/
  • -
  • Default Values (Base Configuration): provisioning/schemas/platform/defaults/
  • -
  • Validators (Business Logic): provisioning/schemas/platform/validators/
  • -
  • Deployment Modes (Presets): provisioning/schemas/platform/defaults/deployment/
  • -
  • Rust Integration: provisioning/platform/crates/*/src/config.rs
  • -
-

Getting Help

-

Validation Errors

-

Get detailed error messages and check available fields:

-
nickel typecheck workspace_librecloud/config/config.ncl 2>&1 | less
-grep "prompt =" .typedialog/provisioning/platform/orchestrator/form.toml
-
-

Configuration Questions

-
# Show all available config commands
-provisioning config --help
-
-# Show help for specific service
-provisioning config platform --help
-
-# List providers and services
-provisioning config providers list
-provisioning config services list
-
-

Test Configuration

-
# Validate without deploying
-nickel typecheck workspace_librecloud/config/config.ncl
-
-# Export to see generated config
-provisioning config export
-
-# Check generated files
-ls -la workspace_librecloud/config/generated/
-
-

Extension Development Guide

-

This document provides comprehensive guidance on creating providers, task services, and clusters for provisioning, including templates, testing -frameworks, publishing, and best practices.

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Extension Types
  4. -
  5. Provider Development
  6. -
  7. Task Service Development
  8. -
  9. Cluster Development
  10. -
  11. Testing and Validation
  12. -
  13. Publishing and Distribution
  14. -
  15. Best Practices
  16. -
  17. Troubleshooting
  18. -
-

Overview

-

Provisioning supports three types of extensions that enable customization and expansion of functionality:

-
    -
  • Providers: Cloud provider implementations for resource management
  • -
  • Task Services: Infrastructure service components (databases, monitoring, etc.)
  • -
  • Clusters: Complete deployment solutions combining multiple services
  • -
-

Key Features:

-
    -
  • Template-Based Development: Comprehensive templates for all extension types
  • -
  • Workspace Integration: Extensions developed in isolated workspace environments
  • -
  • Configuration-Driven: KCL schemas for type-safe configuration
  • -
  • Version Management: GitHub integration for version tracking
  • -
  • Testing Framework: Comprehensive testing and validation tools
  • -
  • Hot Reloading: Development-time hot reloading support
  • -
-

Location: workspace/extensions/

-

Extension Types

-

Extension Architecture

-
Extension Ecosystem
-├── Providers                    # Cloud resource management
-│   ├── AWS                     # Amazon Web Services
-│   ├── UpCloud                 # UpCloud platform
-│   ├── Local                   # Local development
-│   └── Custom                  # User-defined providers
-├── Task Services               # Infrastructure components
-│   ├── Kubernetes             # Container orchestration
-│   ├── Database Services      # PostgreSQL, MongoDB, etc.
-│   ├── Monitoring            # Prometheus, Grafana, etc.
-│   ├── Networking            # Cilium, CoreDNS, etc.
-│   └── Custom Services       # User-defined services
-└── Clusters                   # Complete solutions
-    ├── Web Stack             # Web application deployment
-    ├── CI/CD Pipeline        # Continuous integration/deployment
-    ├── Data Platform         # Data processing and analytics
-    └── Custom Clusters       # User-defined clusters
-
-

Extension Discovery

-

Discovery Order:

-
    -
  1. workspace/extensions/{type}/{user}/{name} - User-specific extensions
  2. -
  3. workspace/extensions/{type}/{name} - Workspace shared extensions
  4. -
  5. workspace/extensions/{type}/template - Templates
  6. -
  7. Core system paths (fallback)
  8. -
-

Path Resolution:

-
# Automatic extension discovery
-use workspace/lib/path-resolver.nu
-
-# Find provider extension
-let provider_path = (path-resolver resolve_extension "providers" "my-aws-provider")
-
-# List all available task services
-let taskservs = (path-resolver list_extensions "taskservs" --include-core)
-
-# Resolve cluster definition
-let cluster_path = (path-resolver resolve_extension "clusters" "web-stack")
-
-

Provider Development

-

Provider Architecture

-

Providers implement cloud resource management through a standardized interface that supports multiple cloud platforms while maintaining consistent -APIs.

-

Core Responsibilities:

-
    -
  • Authentication: Secure API authentication and credential management
  • -
  • Resource Management: Server creation, deletion, and lifecycle management
  • -
  • Configuration: Provider-specific settings and validation
  • -
  • Error Handling: Comprehensive error handling and recovery
  • -
  • Rate Limiting: API rate limiting and retry logic
  • -
-

Creating a New Provider

-

1. Initialize from Template:

-
# Copy provider template
-cp -r workspace/extensions/providers/template workspace/extensions/providers/my-cloud
-
-# Navigate to new provider
-cd workspace/extensions/providers/my-cloud
-
-

2. Update Configuration:

-
# Initialize provider metadata
-nu init-provider.nu \
-    --name "my-cloud" \
-    --display-name "MyCloud Provider" \
-    --author "$USER" \
-    --description "MyCloud platform integration"
-
-

Provider Structure

-
my-cloud/
-├── README.md                    # Provider documentation
-├── schemas/                     # Nickel configuration schemas
-│   ├── settings.ncl            # Provider settings schema
-│   ├── servers.ncl             # Server configuration schema
-│   ├── networks.ncl            # Network configuration schema
-│   └── manifest.toml           # Nickel module dependencies
-├── nulib/                      # Nushell implementation
-│   ├── provider.nu             # Main provider interface
-│   ├── servers/                # Server management
-│   │   ├── create.nu           # Server creation logic
-│   │   ├── delete.nu           # Server deletion logic
-│   │   ├── list.nu             # Server listing
-│   │   ├── status.nu           # Server status checking
-│   │   └── utils.nu            # Server utilities
-│   ├── auth/                   # Authentication
-│   │   ├── client.nu           # API client setup
-│   │   ├── tokens.nu           # Token management
-│   │   └── validation.nu       # Credential validation
-│   └── utils/                  # Provider utilities
-│       ├── api.nu              # API interaction helpers
-│       ├── config.nu           # Configuration helpers
-│       └── validation.nu       # Input validation
-├── templates/                  # Jinja2 templates
-│   ├── server-config.j2        # Server configuration
-│   ├── cloud-init.j2           # Cloud initialization
-│   └── network-config.j2       # Network configuration
-├── generate/                   # Code generation
-│   ├── server-configs.nu       # Generate server configurations
-│   └── infrastructure.nu      # Generate infrastructure
-└── tests/                      # Testing framework
-    ├── unit/                   # Unit tests
-    │   ├── test-auth.nu        # Authentication tests
-    │   ├── test-servers.nu     # Server management tests
-    │   └── test-validation.nu  # Validation tests
-    ├── integration/            # Integration tests
-    │   ├── test-lifecycle.nu   # Complete lifecycle tests
-    │   └── test-api.nu         # API integration tests
-    └── mock/                   # Mock data and services
-        ├── api-responses.json  # Mock API responses
-        └── test-configs.toml   # Test configurations
-
-

Provider Implementation

-

Main Provider Interface (nulib/provider.nu):

-
#!/usr/bin/env nu
-# MyCloud Provider Implementation
-
-# Provider metadata
-export const PROVIDER_NAME = "my-cloud"
-export const PROVIDER_VERSION = "1.0.0"
-export const API_VERSION = "v1"
-
-# Main provider initialization
-export def "provider init" [
-    --config-path: string = ""     # Path to provider configuration
-    --validate: bool = true        # Validate configuration on init
-] -> record {
-    let config = if $config_path == "" {
-        load_provider_config
-    } else {
-        open $config_path | from toml
-    }
-
-    if $validate {
-        validate_provider_config $config
-    }
-
-    # Initialize API client
-    let client = (setup_api_client $config)
-
-    # Return provider instance
-    {
-        name: $PROVIDER_NAME,
-        version: $PROVIDER_VERSION,
-        config: $config,
-        client: $client,
-        initialized: true
-    }
-}
-
-# Server management interface
-export def "provider create-server" [
-    name: string                   # Server name
-    plan: string                   # Server plan/size
-    --zone: string = "auto"        # Deployment zone
-    --template: string = "ubuntu22" # OS template
-    --dry-run: bool = false        # Show what would be created
-] -> record {
-    let provider = (provider init)
-
-    # Validate inputs
-    if ($name | str length) == 0 {
-        error make {msg: "Server name cannot be empty"}
-    }
-
-    if not (is_valid_plan $plan) {
-        error make {msg: $"Invalid server plan: ($plan)"}
-    }
-
-    # Build server configuration
-    let server_config = {
-        name: $name,
-        plan: $plan,
-        zone: (resolve_zone $zone),
-        template: $template,
-        provider: $PROVIDER_NAME
-    }
-
-    if $dry_run {
-        return {action: "create", config: $server_config, status: "dry-run"}
-    }
-
-    # Create server via API
-    let result = try {
-        create_server_api $server_config $provider.client
-    } catch { |e|
-        error make {
-            msg: $"Server creation failed: ($e.msg)",
-            help: "Check provider credentials and quota limits"
-        }
-    }
-
-    {
-        server: $name,
-        status: "created",
-        id: $result.id,
-        ip_address: $result.ip_address,
-        created_at: (date now)
-    }
-}
-
-export def "provider delete-server" [
-    name: string                   # Server name or ID
-    --force: bool = false          # Force deletion without confirmation
-] -> record {
-    let provider = (provider init)
-
-    # Find server
-    let server = try {
-        find_server $name $provider.client
-    } catch {
-        error make {msg: $"Server not found: ($name)"}
-    }
-
-    if not $force {
-        let confirm = (input $"Delete server '($name)' (y/N)? ")
-        if $confirm != "y" and $confirm != "yes" {
-            return {action: "delete", server: $name, status: "cancelled"}
-        }
-    }
-
-    # Delete server
-    let result = try {
-        delete_server_api $server.id $provider.client
-    } catch { |e|
-        error make {msg: $"Server deletion failed: ($e.msg)"}
-    }
-
-    {
-        server: $name,
-        status: "deleted",
-        deleted_at: (date now)
-    }
-}
-
-export def "provider list-servers" [
-    --zone: string = ""            # Filter by zone
-    --status: string = ""          # Filter by status
-    --format: string = "table"     # Output format: table, json, yaml
-] -> list<record> {
-    let provider = (provider init)
-
-    let servers = try {
-        list_servers_api $provider.client
-    } catch { |e|
-        error make {msg: $"Failed to list servers: ($e.msg)"}
-    }
-
-    # Apply filters
-    let filtered = $servers
-        | if $zone != "" { filter {|s| $s.zone == $zone} } else { $in }
-        | if $status != "" { filter {|s| $s.status == $status} } else { $in }
-
-    match $format {
-        "json" => ($filtered | to json),
-        "yaml" => ($filtered | to yaml),
-        _ => $filtered
-    }
-}
-
-# Provider testing interface
-export def "provider test" [
-    --test-type: string = "basic"  # Test type: basic, full, integration
-] -> record {
-    match $test_type {
-        "basic" => test_basic_functionality,
-        "full" => test_full_functionality,
-        "integration" => test_integration,
-        _ => (error make {msg: $"Unknown test type: ($test_type)"})
-    }
-}
-
-

Authentication Module (nulib/auth/client.nu):

-
# API client setup and authentication
-
-export def setup_api_client [config: record] -> record {
-    # Validate credentials
-    if not ("api_key" in $config) {
-        error make {msg: "API key not found in configuration"}
-    }
-
-    if not ("api_secret" in $config) {
-        error make {msg: "API secret not found in configuration"}
-    }
-
-    # Setup HTTP client with authentication
-    let client = {
-        base_url: ($config.api_url? | default "https://api.my-cloud.com"),
-        api_key: $config.api_key,
-        api_secret: $config.api_secret,
-        timeout: ($config.timeout? | default 30),
-        retries: ($config.retries? | default 3)
-    }
-
-    # Test authentication
-    try {
-        test_auth_api $client
-    } catch { |e|
-        error make {
-            msg: $"Authentication failed: ($e.msg)",
-            help: "Check your API credentials and network connectivity"
-        }
-    }
-
-    $client
-}
-
-def test_auth_api [client: record] -> bool {
-    let response = http get $"($client.base_url)/auth/test" --headers {
-        "Authorization": $"Bearer ($client.api_key)",
-        "Content-Type": "application/json"
-    }
-
-    $response.status == "success"
-}
-
-

Nickel Configuration Schema (schemas/settings.ncl):

-
# MyCloud Provider Configuration Schema
-
-let MyCloudConfig = {
-    # MyCloud provider configuration
-    api_url | string | default = "https://api.my-cloud.com",
-    api_key | string,
-    api_secret | string,
-    timeout | number | default = 30,
-    retries | number | default = 3,
-
-    # Rate limiting
-    rate_limit | {
-        requests_per_minute | number | default = 60,
-        burst_size | number | default = 10,
-    } | default = {},
-
-    # Default settings
-    defaults | {
-        zone | string | default = "us-east-1",
-        template | string | default = "ubuntu-22.04",
-        network | string | default = "default",
-    } | default = {},
-} in
-MyCloudConfig
-
-let MyCloudServerConfig = {
-    # MyCloud server configuration
-    name | string,
-    plan | string,
-    zone | string | optional,
-    template | string | default = "ubuntu-22.04",
-    storage | number | default = 25,
-    tags | { } | default = {},
-
-    # Network configuration
-    network | {
-        vpc_id | string | optional,
-        subnet_id | string | optional,
-        public_ip | bool | default = true,
-        firewall_rules | array | default = [],
-    } | optional,
-} in
-MyCloudServerConfig
-
-let FirewallRule = {
-    # Firewall rule configuration
-    port | (number | string),
-    protocol | string | default = "tcp",
-    source | string | default = "0.0.0.0/0",
-    description | string | optional,
-} in
-FirewallRule
-
-

Provider Testing

-

Unit Testing (tests/unit/test-servers.nu):

-
# Unit tests for server management
-
-use ../../../nulib/provider.nu
-
-def test_server_creation [] {
-    # Test valid server creation
-    let result = (provider create-server "test-server" "small" --dry-run)
-
-    assert ($result.action == "create")
-    assert ($result.config.name == "test-server")
-    assert ($result.config.plan == "small")
-    assert ($result.status == "dry-run")
-
-    print "✅ Server creation test passed"
-}
-
-def test_invalid_server_name [] {
-    # Test invalid server name
-    try {
-        provider create-server "" "small" --dry-run
-        assert false "Should have failed with empty name"
-    } catch { |e|
-        assert ($e.msg | str contains "Server name cannot be empty")
-    }
-
-    print "✅ Invalid server name test passed"
-}
-
-def test_invalid_plan [] {
-    # Test invalid server plan
-    try {
-        provider create-server "test" "invalid-plan" --dry-run
-        assert false "Should have failed with invalid plan"
-    } catch { |e|
-        assert ($e.msg | str contains "Invalid server plan")
-    }
-
-    print "✅ Invalid plan test passed"
-}
-
-def main [] {
-    print "Running server management unit tests..."
-    test_server_creation
-    test_invalid_server_name
-    test_invalid_plan
-    print "✅ All server management tests passed"
-}
-
-

Integration Testing (tests/integration/test-lifecycle.nu):

-
# Integration tests for complete server lifecycle
-
-use ../../../nulib/provider.nu
-
-def test_complete_lifecycle [] {
-    let test_server = $"test-server-(date now | format date '%Y%m%d%H%M%S')"
-
-    try {
-        # Test server creation (dry run)
-        let create_result = (provider create-server $test_server "small" --dry-run)
-        assert ($create_result.status == "dry-run")
-
-        # Test server listing
-        let servers = (provider list-servers --format json)
-        assert ($servers | length) >= 0
-
-        # Test provider info
-        let provider_info = (provider init)
-        assert ($provider_info.name == "my-cloud")
-        assert $provider_info.initialized
-
-        print $"✅ Complete lifecycle test passed for ($test_server)"
-    } catch { |e|
-        print $"❌ Integration test failed: ($e.msg)"
-        exit 1
-    }
-}
-
-def main [] {
-    print "Running provider integration tests..."
-    test_complete_lifecycle
-    print "✅ All integration tests passed"
-}
-
-

Task Service Development

-

Task Service Architecture

-

Task services are infrastructure components that can be deployed and managed across different environments. They provide standardized interfaces for -installation, configuration, and lifecycle management.

-

Core Responsibilities:

-
    -
  • Installation: Service deployment and setup
  • -
  • Configuration: Dynamic configuration management
  • -
  • Health Checking: Service status monitoring
  • -
  • Version Management: Automatic version updates from GitHub
  • -
  • Integration: Integration with other services and clusters
  • -
-

Creating a New Task Service

-

1. Initialize from Template:

-
# Copy task service template
-cp -r workspace/extensions/taskservs/template workspace/extensions/taskservs/my-service
-
-# Navigate to new service
-cd workspace/extensions/taskservs/my-service
-
-

2. Initialize Service:

-
# Initialize service metadata
-nu init-service.nu \
-    --name "my-service" \
-    --display-name "My Custom Service" \
-    --type "database" \
-    --github-repo "myorg/my-service"
-
-

Task Service Structure

-
my-service/
-├── README.md                    # Service documentation
-├── schemas/                     # Nickel schemas
-│   ├── version.ncl             # Version and GitHub integration
-│   ├── config.ncl              # Service configuration schema
-│   └── manifest.toml           # Module dependencies
-├── nushell/                    # Nushell implementation
-│   ├── taskserv.nu             # Main service interface
-│   ├── install.nu              # Installation logic
-│   ├── uninstall.nu            # Removal logic
-│   ├── config.nu               # Configuration management
-│   ├── status.nu               # Status and health checking
-│   ├── versions.nu             # Version management
-│   └── utils.nu                # Service utilities
-├── templates/                  # Jinja2 templates
-│   ├── deployment.yaml.j2      # Kubernetes deployment
-│   ├── service.yaml.j2         # Kubernetes service
-│   ├── configmap.yaml.j2       # Configuration
-│   ├── install.sh.j2           # Installation script
-│   └── systemd.service.j2      # Systemd service
-├── manifests/                  # Static manifests
-│   ├── rbac.yaml               # RBAC definitions
-│   ├── pvc.yaml                # Persistent volume claims
-│   └── ingress.yaml            # Ingress configuration
-├── generate/                   # Code generation
-│   ├── manifests.nu            # Generate Kubernetes manifests
-│   ├── configs.nu              # Generate configurations
-│   └── docs.nu                 # Generate documentation
-└── tests/                      # Testing framework
-    ├── unit/                   # Unit tests
-    ├── integration/            # Integration tests
-    └── fixtures/               # Test fixtures and data
-
-

Task Service Implementation

-

Main Service Interface (nushell/taskserv.nu):

-
#!/usr/bin/env nu
-# My Custom Service Task Service Implementation
-
-export const SERVICE_NAME = "my-service"
-export const SERVICE_TYPE = "database"
-export const SERVICE_VERSION = "1.0.0"
-
-# Service installation
-export def "taskserv install" [
-    target: string                 # Target server or cluster
-    --config: string = ""          # Custom configuration file
-    --dry-run: bool = false        # Show what would be installed
-    --wait: bool = true            # Wait for installation to complete
-] -> record {
-    # Load service configuration
-    let service_config = if $config != "" {
-        open $config | from toml
-    } else {
-        load_default_config
-    }
-
-    # Validate target environment
-    let target_info = validate_target $target
-    if not $target_info.valid {
-        error make {msg: $"Invalid target: ($target_info.reason)"}
-    }
-
-    if $dry_run {
-        let install_plan = generate_install_plan $target $service_config
-        return {
-            action: "install",
-            service: $SERVICE_NAME,
-            target: $target,
-            plan: $install_plan,
-            status: "dry-run"
-        }
-    }
-
-    # Perform installation
-    print $"Installing ($SERVICE_NAME) on ($target)..."
-
-    let install_result = try {
-        install_service $target $service_config $wait
-    } catch { |e|
-        error make {
-            msg: $"Installation failed: ($e.msg)",
-            help: "Check target connectivity and permissions"
-        }
-    }
-
-    {
-        service: $SERVICE_NAME,
-        target: $target,
-        status: "installed",
-        version: $install_result.version,
-        endpoint: $install_result.endpoint?,
-        installed_at: (date now)
-    }
-}
-
-# Service removal
-export def "taskserv uninstall" [
-    target: string                 # Target server or cluster
-    --force: bool = false          # Force removal without confirmation
-    --cleanup-data: bool = false   # Remove persistent data
-] -> record {
-    let target_info = validate_target $target
-    if not $target_info.valid {
-        error make {msg: $"Invalid target: ($target_info.reason)"}
-    }
-
-    # Check if service is installed
-    let status = get_service_status $target
-    if $status.status != "installed" {
-        error make {msg: $"Service ($SERVICE_NAME) is not installed on ($target)"}
-    }
-
-    if not $force {
-        let confirm = (input $"Remove ($SERVICE_NAME) from ($target)? (y/N) ")
-        if $confirm != "y" and $confirm != "yes" {
-            return {action: "uninstall", service: $SERVICE_NAME, status: "cancelled"}
-        }
-    }
-
-    print $"Removing ($SERVICE_NAME) from ($target)..."
-
-    let removal_result = try {
-        uninstall_service $target $cleanup_data
-    } catch { |e|
-        error make {msg: $"Removal failed: ($e.msg)"}
-    }
-
-    {
-        service: $SERVICE_NAME,
-        target: $target,
-        status: "uninstalled",
-        data_removed: $cleanup_data,
-        uninstalled_at: (date now)
-    }
-}
-
-# Service status checking
-export def "taskserv status" [
-    target: string                 # Target server or cluster
-    --detailed: bool = false       # Show detailed status information
-] -> record {
-    let target_info = validate_target $target
-    if not $target_info.valid {
-        error make {msg: $"Invalid target: ($target_info.reason)"}
-    }
-
-    let status = get_service_status $target
-
-    if $detailed {
-        let health = check_service_health $target
-        let metrics = get_service_metrics $target
-
-        $status | merge {
-            health: $health,
-            metrics: $metrics,
-            checked_at: (date now)
-        }
-    } else {
-        $status
-    }
-}
-
-# Version management
-export def "taskserv check-updates" [
-    --target: string = ""          # Check updates for specific target
-] -> record {
-    let current_version = get_current_version
-    let latest_version = get_latest_version_from_github
-
-    let update_available = $latest_version != $current_version
-
-    {
-        service: $SERVICE_NAME,
-        current_version: $current_version,
-        latest_version: $latest_version,
-        update_available: $update_available,
-        target: $target,
-        checked_at: (date now)
-    }
-}
-
-export def "taskserv update" [
-    target: string                 # Target to update
-    --version: string = "latest"   # Specific version to update to
-    --dry-run: bool = false        # Show what would be updated
-] -> record {
-    let current_status = (taskserv status $target)
-    if $current_status.status != "installed" {
-        error make {msg: $"Service not installed on ($target)"}
-    }
-
-    let target_version = if $version == "latest" {
-        get_latest_version_from_github
-    } else {
-        $version
-    }
-
-    if $dry_run {
-        return {
-            action: "update",
-            service: $SERVICE_NAME,
-            target: $target,
-            from_version: $current_status.version,
-            to_version: $target_version,
-            status: "dry-run"
-        }
-    }
-
-    print $"Updating ($SERVICE_NAME) on ($target) to version ($target_version)..."
-
-    let update_result = try {
-        update_service $target $target_version
-    } catch { |e|
-        error make {msg: $"Update failed: ($e.msg)"}
-    }
-
-    {
-        service: $SERVICE_NAME,
-        target: $target,
-        status: "updated",
-        from_version: $current_status.version,
-        to_version: $target_version,
-        updated_at: (date now)
-    }
-}
-
-# Service testing
-export def "taskserv test" [
-    target: string = "local"       # Target for testing
-    --test-type: string = "basic"  # Test type: basic, integration, full
-] -> record {
-    match $test_type {
-        "basic" => test_basic_functionality $target,
-        "integration" => test_integration $target,
-        "full" => test_full_functionality $target,
-        _ => (error make {msg: $"Unknown test type: ($test_type)"})
-    }
-}
-
-

Version Configuration (schemas/version.ncl):

-
# Version management with GitHub integration
-
-let version_config = {
-    service_name = "my-service",
-
-    # GitHub repository for version checking
-    github = {
-        owner = "myorg",
-        repo = "my-service",
-
-        # Release configuration
-        release = {
-            tag_prefix = "v",
-            prerelease = false,
-            draft = false,
-        },
-
-        # Asset patterns for different platforms
-        assets = {
-            linux_amd64 = "my-service-{version}-linux-amd64.tar.gz",
-            darwin_amd64 = "my-service-{version}-darwin-amd64.tar.gz",
-            windows_amd64 = "my-service-{version}-windows-amd64.zip",
-        },
-    },
-
-    # Version constraints and compatibility
-    compatibility = {
-        min_kubernetes_version = "1.20.0",
-        max_kubernetes_version = "1.28.*",
-
-        # Dependencies
-        requires = {
-            "cert-manager" = ">=1.8.0",
-            "ingress-nginx" = ">=1.0.0",
-        },
-
-        # Conflicts
-        conflicts = {
-            "old-my-service" = "*",
-        },
-    },
-
-    # Installation configuration
-    installation = {
-        default_namespace = "my-service",
-        create_namespace = true,
-
-        # Resource requirements
-        resources = {
-            requests = {
-                cpu = "100m",
-                memory = "128Mi",
-            },
-            limits = {
-                cpu = "500m",
-                memory = "512Mi",
-            },
-        },
-
-        # Persistence
-        persistence = {
-            enabled = true,
-            storage_class = "default",
-            size = "10Gi",
-        },
-    },
-
-    # Health check configuration
-    health_check = {
-        initial_delay_seconds = 30,
-        period_seconds = 10,
-        timeout_seconds = 5,
-        failure_threshold = 3,
-
-        # Health endpoints
-        endpoints = {
-            liveness = "/health/live",
-            readiness = "/health/ready",
-        },
-    },
-} in
-version_config
-
-

Cluster Development

-

Cluster Architecture

-

Clusters represent complete deployment solutions that combine multiple task services, providers, and configurations to create functional environments.

-

Core Responsibilities:

-
    -
  • Service Orchestration: Coordinate multiple task service deployments
  • -
  • Dependency Management: Handle service dependencies and startup order
  • -
  • Configuration Management: Manage cross-service configuration
  • -
  • Health Monitoring: Monitor overall cluster health
  • -
  • Scaling: Handle cluster scaling operations
  • -
-

Creating a New Cluster

-

1. Initialize from Template:

-
# Copy cluster template
-cp -r workspace/extensions/clusters/template workspace/extensions/clusters/my-stack
-
-# Navigate to new cluster
-cd workspace/extensions/clusters/my-stack
-
-

2. Initialize Cluster:

-
# Initialize cluster metadata
-nu init-cluster.nu \
-    --name "my-stack" \
-    --display-name "My Application Stack" \
-    --type "web-application"
-
-

Cluster Implementation

-

Main Cluster Interface (nushell/cluster.nu):

-
#!/usr/bin/env nu
-# My Application Stack Cluster Implementation
-
-export const CLUSTER_NAME = "my-stack"
-export const CLUSTER_TYPE = "web-application"
-export const CLUSTER_VERSION = "1.0.0"
-
-# Cluster creation
-export def "cluster create" [
-    target: string                 # Target infrastructure
-    --config: string = ""          # Custom configuration file
-    --dry-run: bool = false        # Show what would be created
-    --wait: bool = true            # Wait for cluster to be ready
-] -> record {
-    let cluster_config = if $config != "" {
-        open $config | from toml
-    } else {
-        load_default_cluster_config
-    }
-
-    if $dry_run {
-        let deployment_plan = generate_deployment_plan $target $cluster_config
-        return {
-            action: "create",
-            cluster: $CLUSTER_NAME,
-            target: $target,
-            plan: $deployment_plan,
-            status: "dry-run"
-        }
-    }
-
-    print $"Creating cluster ($CLUSTER_NAME) on ($target)..."
-
-    # Deploy services in dependency order
-    let services = get_service_deployment_order $cluster_config.services
-    let deployment_results = []
-
-    for service in $services {
-        print $"Deploying service: ($service.name)"
-
-        let result = try {
-            deploy_service $service $target $wait
-        } catch { |e|
-            # Rollback on failure
-            rollback_cluster $target $deployment_results
-            error make {msg: $"Service deployment failed: ($e.msg)"}
-        }
-
-        $deployment_results = ($deployment_results | append $result)
-    }
-
-    # Configure inter-service communication
-    configure_service_mesh $target $deployment_results
-
-    {
-        cluster: $CLUSTER_NAME,
-        target: $target,
-        status: "created",
-        services: $deployment_results,
-        created_at: (date now)
-    }
-}
-
-# Cluster deletion
-export def "cluster delete" [
-    target: string                 # Target infrastructure
-    --force: bool = false          # Force deletion without confirmation
-    --cleanup-data: bool = false   # Remove persistent data
-] -> record {
-    let cluster_status = get_cluster_status $target
-    if $cluster_status.status != "running" {
-        error make {msg: $"Cluster ($CLUSTER_NAME) is not running on ($target)"}
-    }
-
-    if not $force {
-        let confirm = (input $"Delete cluster ($CLUSTER_NAME) from ($target)? (y/N) ")
-        if $confirm != "y" and $confirm != "yes" {
-            return {action: "delete", cluster: $CLUSTER_NAME, status: "cancelled"}
-        }
-    }
-
-    print $"Deleting cluster ($CLUSTER_NAME) from ($target)..."
-
-    # Delete services in reverse dependency order
-    let services = get_service_deletion_order $cluster_status.services
-    let deletion_results = []
-
-    for service in $services {
-        print $"Removing service: ($service.name)"
-
-        let result = try {
-            remove_service $service $target $cleanup_data
-        } catch { |e|
-            print $"Warning: Failed to remove service ($service.name): ($e.msg)"
-        }
-
-        $deletion_results = ($deletion_results | append $result)
-    }
-
-    {
-        cluster: $CLUSTER_NAME,
-        target: $target,
-        status: "deleted",
-        services_removed: $deletion_results,
-        data_removed: $cleanup_data,
-        deleted_at: (date now)
-    }
-}
-
-

Testing and Validation

-

Testing Framework

-

Test Types:

-
    -
  • Unit Tests: Individual function and module testing
  • -
  • Integration Tests: Cross-component interaction testing
  • -
  • End-to-End Tests: Complete workflow testing
  • -
  • Performance Tests: Load and performance validation
  • -
  • Security Tests: Security and vulnerability testing
  • -
-

Extension Testing Commands

-

Workspace Testing Tools:

-
# Validate extension syntax and structure
-nu workspace.nu tools validate-extension providers/my-cloud
-
-# Run extension unit tests
-nu workspace.nu tools test-extension taskservs/my-service --test-type unit
-
-# Integration testing with real infrastructure
-nu workspace.nu tools test-extension clusters/my-stack --test-type integration --target test-env
-
-# Performance testing
-nu workspace.nu tools test-extension providers/my-cloud --test-type performance --duration 5m
-
-

Automated Testing

-

Test Runner (tests/run-tests.nu):

-
#!/usr/bin/env nu
-# Automated test runner for extensions
-
-def main [
-    extension_type: string         # Extension type: providers, taskservs, clusters
-    extension_name: string         # Extension name
-    --test-types: string = "all"   # Test types to run: unit, integration, e2e, all
-    --target: string = "local"     # Test target environment
-    --verbose: bool = false        # Verbose test output
-    --parallel: bool = true        # Run tests in parallel
-] -> record {
-    let extension_path = $"workspace/extensions/($extension_type)/($extension_name)"
-
-    if not ($extension_path | path exists) {
-        error make {msg: $"Extension not found: ($extension_path)"}
-    }
-
-    let test_types = if $test_types == "all" {
-        ["unit", "integration", "e2e"]
-    } else {
-        $test_types | split row ","
-    }
-
-    print $"Running tests for ($extension_type)/($extension_name)..."
-
-    let test_results = []
-
-    for test_type in $test_types {
-        print $"Running ($test_type) tests..."
-
-        let result = try {
-            run_test_suite $extension_path $test_type $target $verbose
-        } catch { |e|
-            {
-                test_type: $test_type,
-                status: "failed",
-                error: $e.msg,
-                duration: 0
-            }
-        }
-
-        $test_results = ($test_results | append $result)
-    }
-
-    let total_tests = ($test_results | length)
-    let passed_tests = ($test_results | where status == "passed" | length)
-    let failed_tests = ($test_results | where status == "failed" | length)
-
-    {
-        extension: $"($extension_type)/($extension_name)",
-        test_results: $test_results,
-        summary: {
-            total: $total_tests,
-            passed: $passed_tests,
-            failed: $failed_tests,
-            success_rate: ($passed_tests / $total_tests * 100)
-        },
-        completed_at: (date now)
-    }
-}
-
-

Publishing and Distribution

-

Extension Publishing

-

Publishing Process:

-
    -
  1. Validation: Comprehensive testing and validation
  2. -
  3. Documentation: Complete documentation and examples
  4. -
  5. Packaging: Create distribution packages
  6. -
  7. Registry: Publish to extension registry
  8. -
  9. Versioning: Semantic version tagging
  10. -
-

Publishing Commands

-
# Validate extension for publishing
-nu workspace.nu tools validate-for-publish providers/my-cloud
-
-# Create distribution package
-nu workspace.nu tools package-extension providers/my-cloud --version 1.0.0
-
-# Publish to registry
-nu workspace.nu tools publish-extension providers/my-cloud --registry official
-
-# Tag version
-nu workspace.nu tools tag-extension providers/my-cloud --version 1.0.0 --push
-
-

Extension Registry

-

Registry Structure:

-
Extension Registry
-├── providers/
-│   ├── aws/              # Official AWS provider
-│   ├── upcloud/          # Official UpCloud provider
-│   └── community/        # Community providers
-├── taskservs/
-│   ├── kubernetes/       # Official Kubernetes service
-│   ├── databases/        # Database services
-│   └── monitoring/       # Monitoring services
-└── clusters/
-    ├── web-stacks/       # Web application stacks
-    ├── data-platforms/   # Data processing platforms
-    └── ci-cd/            # CI/CD pipelines
-
-

Best Practices

-

Code Quality

-

Function Design:

-
# Good: Single responsibility, clear parameters, comprehensive error handling
-export def "provider create-server" [
-    name: string                   # Server name (must be unique in region)
-    plan: string                   # Server plan (see list-plans for options)
-    --zone: string = "auto"        # Deployment zone (auto-selects optimal zone)
-    --dry-run: bool = false        # Preview changes without creating resources
-] -> record {                      # Returns creation result with server details
-    # Validate inputs first
-    if ($name | str length) == 0 {
-        error make {
-            msg: "Server name cannot be empty"
-            help: "Provide a unique name for the server"
-        }
-    }
-
-    # Implementation with comprehensive error handling
-    # ...
-}
-
-# Bad: Unclear parameters, no error handling
-def create [n, p] {
-    # Missing validation and error handling
-    api_call $n $p
-}
-
-

Configuration Management:

-
# Good: Configuration-driven with validation
-def get_api_endpoint [provider: string] -> string {
-    let config = get-config-value $"providers.($provider).api_url"
-
-    if ($config | is-empty) {
-        error make {
-            msg: $"API URL not configured for provider ($provider)",
-            help: $"Add 'api_url' to providers.($provider) configuration"
-        }
-    }
-
-    $config
-}
-
-# Bad: Hardcoded values
-def get_api_endpoint [] {
-    "https://api.provider.com"  # Never hardcode!
-}
-
-

Error Handling

-

Comprehensive Error Context:

-
def create_server_with_context [name: string, config: record] -> record {
-    try {
-        # Validate configuration
-        validate_server_config $config
-    } catch { |e|
-        error make {
-            msg: $"Invalid server configuration: ($e.msg)",
-            label: {text: "configuration error", span: $e.span?},
-            help: "Check configuration syntax and required fields"
-        }
-    }
-
-    try {
-        # Create server via API
-        let result = api_create_server $name $config
-        return $result
-    } catch { |e|
-        match $e.msg {
-            $msg if ($msg | str contains "quota") => {
-                error make {
-                    msg: $"Server creation failed: quota limit exceeded",
-                    help: "Contact support to increase quota or delete unused servers"
-                }
-            },
-            $msg if ($msg | str contains "auth") => {
-                error make {
-                    msg: "Server creation failed: authentication error",
-                    help: "Check API credentials and permissions"
-                }
-            },
-            _ => {
-                error make {
-                    msg: $"Server creation failed: ($e.msg)",
-                    help: "Check network connectivity and try again"
-                }
-            }
-        }
-    }
-}
-
-

Testing Practices

-

Test Organization:

-
# Organize tests by functionality
-# tests/unit/server-creation-test.nu
-
-def test_valid_server_creation [] {
-    # Test valid cases with various inputs
-    let valid_configs = [
-        {name: "test-1", plan: "small"},
-        {name: "test-2", plan: "medium"},
-        {name: "test-3", plan: "large"}
-    ]
-
-    for config in $valid_configs {
-        let result = create_server $config.name $config.plan --dry-run
-        assert ($result.status == "dry-run")
-        assert ($result.config.name == $config.name)
-    }
-}
-
-def test_invalid_inputs [] {
-    # Test error conditions
-    let invalid_cases = [
-        {name: "", plan: "small", error: "empty name"},
-        {name: "test", plan: "invalid", error: "invalid plan"},
-        {name: "test with spaces", plan: "small", error: "invalid characters"}
-    ]
-
-    for case in $invalid_cases {
-        try {
-            create_server $case.name $case.plan --dry-run
-            assert false $"Should have failed: ($case.error)"
-        } catch { |e|
-            # Verify specific error message
-            assert ($e.msg | str contains $case.error)
-        }
-    }
-}
-
-

Documentation Standards

-

Function Documentation:

-
# Comprehensive function documentation
-def "provider create-server" [
-    name: string                   # Server name - must be unique within the provider
-    plan: string                   # Server size plan (run 'provider list-plans' for options)
-    --zone: string = "auto"        # Target zone - 'auto' selects optimal zone based on load
-    --template: string = "ubuntu22" # OS template - see 'provider list-templates' for options
-    --storage: int = 25             # Storage size in GB (minimum 10, maximum 2048)
-    --dry-run: bool = false        # Preview mode - shows what would be created without creating
-] -> record {                      # Returns server creation details including ID and IP
-    """
-    Creates a new server instance with the specified configuration.
-
-    This function provisions a new server using the provider's API, configures
-    basic security settings, and returns the server details upon successful creation.
-
-    Examples:
-      # Create a small server with default settings
-      provider create-server "web-01" "small"
-
-      # Create with specific zone and storage
-      provider create-server "db-01" "large" --zone "us-west-2" --storage 100
-
-      # Preview what would be created
-      provider create-server "test" "medium" --dry-run
-
-    Error conditions:
-      - Invalid server name (empty, invalid characters)
-      - Invalid plan (not in supported plans list)
-      - Insufficient quota or permissions
-      - Network connectivity issues
-
-    Returns:
-      Record with keys: server, status, id, ip_address, created_at
-    """
-
-    # Implementation...
-}
-
-

Troubleshooting

-

Common Development Issues

-

Extension Not Found

-

Error: Extension 'my-provider' not found

-
# Solution: Check extension location and structure
-ls -la workspace/extensions/providers/my-provider
-nu workspace/lib/path-resolver.nu resolve_extension "providers" "my-provider"
-
-# Validate extension structure
-nu workspace.nu tools validate-extension providers/my-provider
-
-

Configuration Errors

-

Error: Invalid Nickel configuration

-
# Solution: Validate Nickel syntax
-nickel check workspace/extensions/providers/my-provider/schemas/
-
-# Format Nickel files
-nickel fmt workspace/extensions/providers/my-provider/schemas/
-
-# Test with example data
-nickel eval workspace/extensions/providers/my-provider/schemas/settings.ncl
-
-

API Integration Issues

-

Error: Authentication failed

-
# Solution: Test credentials and connectivity
-curl -H "Authorization: Bearer $API_KEY" https://api.provider.com/auth/test
-
-# Debug API calls
-export PROVISIONING_DEBUG=true
-export PROVISIONING_LOG_LEVEL=debug
-nu workspace/extensions/providers/my-provider/nulib/provider.nu test --test-type basic
-
-

Debug Mode

-

Enable Extension Debugging:

-
# Set debug environment
-export PROVISIONING_DEBUG=true
-export PROVISIONING_LOG_LEVEL=debug
-export PROVISIONING_WORKSPACE_USER=$USER
-
-# Run extension with debug
-nu workspace/extensions/providers/my-provider/nulib/provider.nu create-server test-server small --dry-run
-
-

Performance Optimization

-

Extension Performance:

-
# Profile extension performance
-time nu workspace/extensions/providers/my-provider/nulib/provider.nu list-servers
-
-# Monitor resource usage
-nu workspace/tools/runtime-manager.nu monitor --duration 1m --interval 5s
-
-# Optimize API calls (use caching)
-export PROVISIONING_CACHE_ENABLED=true
-export PROVISIONING_CACHE_TTL=300  # 5 minutes
-
-

This extension development guide provides a comprehensive framework for creating high-quality, maintainable extensions that integrate seamlessly with -provisioning’s architecture and workflows.

-

Extension Development Guide

-

This guide will help you create custom providers, task services, and cluster configurations to extend provisioning for your specific needs.

-

What You’ll Learn

-
    -
  • Extension architecture and concepts
  • -
  • Creating custom cloud providers
  • -
  • Developing task services
  • -
  • Building cluster configurations
  • -
  • Publishing and sharing extensions
  • -
  • Best practices and patterns
  • -
  • Testing and validation
  • -
-

Extension Architecture

-

Extension Types

-
- - - - -
Extension TypePurposeExamples
ProvidersCloud platform integrationsCustom cloud, on-premises
Task ServicesSoftware componentsCustom databases, monitoring
ClustersService orchestrationApplication stacks, platforms
TemplatesReusable configurationsStandard deployments
-
-

Extension Structure

-
my-extension/
-├── schemas/                # Nickel schemas and models
-│   ├── contracts.ncl      # Type contracts
-│   ├── providers/         # Provider definitions
-│   ├── taskservs/         # Task service definitions
-│   └── clusters/          # Cluster definitions
-├── nulib/                 # Nushell implementation
-│   ├── providers/         # Provider logic
-│   ├── taskservs/         # Task service logic
-│   └── utils/             # Utility functions
-├── templates/             # Configuration templates
-├── tests/                 # Test files
-├── docs/                  # Documentation
-├── extension.toml         # Extension metadata
-└── README.md              # Extension documentation
-
-

Extension Metadata

-

extension.toml:

-
[extension]
-name = "my-custom-provider"
-version = "1.0.0"
-description = "Custom cloud provider integration"
-author = "Your Name <you@example.com>"
-license = "MIT"
-
-[compatibility]
-provisioning_version = ">=1.0.0"
-nickel_version = ">=1.15.0"
-
-[provides]
-providers = ["custom-cloud"]
-taskservs = ["custom-database"]
-clusters = ["custom-stack"]
-
-[dependencies]
-extensions = []
-system_packages = ["curl", "jq"]
-
-[configuration]
-required_env = ["CUSTOM_CLOUD_API_KEY"]
-optional_env = ["CUSTOM_CLOUD_REGION"]
-
-

Creating Custom Providers

-

Provider Architecture

-

A provider handles:

-
    -
  • Authentication with cloud APIs
  • -
  • Resource lifecycle management (create, read, update, delete)
  • -
  • Provider-specific configurations
  • -
  • Cost estimation and billing integration
  • -
-

Step 1: Define Provider Schema

-

schemas/providers/custom_cloud.ncl:

-
# Custom cloud provider schema
-{
-  CustomCloudConfig = {
-    # Configuration for Custom Cloud provider
-    # Authentication
-    api_key | String,
-    api_secret | String = "",
-    region | String = "us-west-1",
-
-    # Provider-specific settings
-    project_id | String = "",
-    organization | String = "",
-
-    # API configuration
-    api_url | String = "https://api.custom-cloud.com/v1",
-    timeout | Number = 30,
-
-    # Cost configuration
-    billing_account | String = "",
-    cost_center | String = "",
-  },
-
-  CustomCloudServer = {
-    # Server configuration for Custom Cloud
-    # Instance configuration
-    machine_type | String,
-    zone | String,
-    disk_size | Number = 20,
-    disk_type | String = "ssd",
-
-    # Network configuration
-    vpc | String = "",
-    subnet | String = "",
-    external_ip | Bool = true,
-
-    # Custom Cloud specific
-    preemptible | Bool = false,
-    labels | {String: String} = {},
-  },
-
-  # Provider capabilities
-  provider_capabilities = {
-    name = "custom-cloud",
-    supports_auto_scaling = true,
-    supports_load_balancing = true,
-    supports_managed_databases = true,
-    regions = [
-      "us-west-1", "us-west-2", "us-east-1", "eu-west-1"
-    ],
-    machine_types = [
-      "micro", "small", "medium", "large", "xlarge"
-    ],
-  },
-}
-
-

Step 2: Implement Provider Logic

-

nulib/providers/custom_cloud.nu:

-
# Custom Cloud provider implementation
-
-# Provider initialization
-export def custom_cloud_init [] {
-    # Validate environment variables
-    if ($env.CUSTOM_CLOUD_API_KEY | is-empty) {
-        error make {
-            msg: "CUSTOM_CLOUD_API_KEY environment variable is required"
-        }
-    }
-
-    # Set up provider context
-    $env.CUSTOM_CLOUD_INITIALIZED = true
-}
-
-# Create server instance
-export def custom_cloud_create_server [
-    server_config: record
-    --check: bool = false    # Dry run mode
-] -> record {
-    custom_cloud_init
-
-    print $"Creating server: ($server_config.name)"
-
-    if $check {
-        return {
-            action: "create"
-            resource: "server"
-            name: $server_config.name
-            status: "planned"
-            estimated_cost: (calculate_server_cost $server_config)
-        }
-    }
-
-    # Make API call to create server
-    let api_response = (custom_cloud_api_call "POST" "instances" $server_config)
-
-    if ($api_response.status | str contains "error") {
-        error make {
-            msg: $"Failed to create server: ($api_response.message)"
-        }
-    }
-
-    # Wait for server to be ready
-    let server_id = $api_response.instance_id
-    custom_cloud_wait_for_server $server_id "running"
-
-    return {
-        id: $server_id
-        name: $server_config.name
-        status: "running"
-        ip_address: $api_response.ip_address
-        created_at: (date now | format date "%Y-%m-%d %H:%M:%S")
-    }
-}
-
-# Delete server instance
-export def custom_cloud_delete_server [
-    server_name: string
-    --keep_storage: bool = false
-] -> record {
-    custom_cloud_init
-
-    let server = (custom_cloud_get_server $server_name)
-
-    if ($server | is-empty) {
-        error make {
-            msg: $"Server not found: ($server_name)"
-        }
-    }
-
-    print $"Deleting server: ($server_name)"
-
-    # Delete the instance
-    let delete_response = (custom_cloud_api_call "DELETE" $"instances/($server.id)" {
-        keep_storage: $keep_storage
-    })
-
-    return {
-        action: "delete"
-        resource: "server"
-        name: $server_name
-        status: "deleted"
-    }
-}
-
-# List servers
-export def custom_cloud_list_servers [] -> list<record> {
-    custom_cloud_init
-
-    let response = (custom_cloud_api_call "GET" "instances" {})
-
-    return ($response.instances | each {|instance|
-        {
-            id: $instance.id
-            name: $instance.name
-            status: $instance.status
-            machine_type: $instance.machine_type
-            zone: $instance.zone
-            ip_address: $instance.ip_address
-            created_at: $instance.created_at
-        }
-    })
-}
-
-# Get server details
-export def custom_cloud_get_server [server_name: string] -> record {
-    let servers = (custom_cloud_list_servers)
-    return ($servers | where name == $server_name | first)
-}
-
-# Calculate estimated costs
-export def calculate_server_cost [server_config: record] -> float {
-    # Cost calculation logic based on machine type
-    let base_costs = {
-        micro: 0.01
-        small: 0.05
-        medium: 0.10
-        large: 0.20
-        xlarge: 0.40
-    }
-
-    let machine_cost = ($base_costs | get $server_config.machine_type)
-    let storage_cost = ($server_config.disk_size | default 20) * 0.001
-
-    return ($machine_cost + $storage_cost)
-}
-
-# Make API call to Custom Cloud
-def custom_cloud_api_call [
-    method: string
-    endpoint: string
-    data: record
-] -> record {
-    let api_url = ($env.CUSTOM_CLOUD_API_URL | default "https://api.custom-cloud.com/v1")
-    let api_key = $env.CUSTOM_CLOUD_API_KEY
-
-    let headers = {
-        "Authorization": $"Bearer ($api_key)"
-        "Content-Type": "application/json"
-    }
-
-    let url = $"($api_url)/($endpoint)"
-
-    match $method {
-        "GET" => {
-            http get $url --headers $headers
-        }
-        "POST" => {
-            http post $url --headers $headers ($data | to json)
-        }
-        "PUT" => {
-            http put $url --headers $headers ($data | to json)
-        }
-        "DELETE" => {
-            http delete $url --headers $headers
-        }
-        _ => {
-            error make {
-                msg: $"Unsupported HTTP method: ($method)"
-            }
-        }
-    }
-}
-
-# Wait for server to reach desired state
-def custom_cloud_wait_for_server [
-    server_id: string
-    target_status: string
-    --timeout: int = 300
-] {
-    let start_time = (date now)
-
-    loop {
-        let response = (custom_cloud_api_call "GET" $"instances/($server_id)" {})
-        let current_status = $response.status
-
-        if $current_status == $target_status {
-            print $"Server ($server_id) reached status: ($target_status)"
-            break
-        }
-
-        let elapsed = ((date now) - $start_time) / 1000000000  # Convert to seconds
-        if $elapsed > $timeout {
-            error make {
-                msg: $"Timeout waiting for server ($server_id) to reach ($target_status)"
-            }
-        }
-
-        sleep 10sec
-        print $"Waiting for server status: ($current_status) -> ($target_status)"
-    }
-}
-
-

Step 3: Provider Registration

-

nulib/providers/mod.nu:

-
# Provider module exports
-export use custom_cloud.nu *
-
-# Provider registry
-export def get_provider_info [] -> record {
-    {
-        name: "custom-cloud"
-        version: "1.0.0"
-        capabilities: {
-            servers: true
-            load_balancers: true
-            databases: false
-            storage: true
-        }
-        regions: ["us-west-1", "us-west-2", "us-east-1", "eu-west-1"]
-        auth_methods: ["api_key", "oauth"]
-    }
-}
-
-

Creating Custom Task Services

-

Task Service Architecture

-

Task services handle:

-
    -
  • Software installation and configuration
  • -
  • Service lifecycle management
  • -
  • Health checking and monitoring
  • -
  • Version management and updates
  • -
-

Step 1: Define Service Schema

-

schemas/taskservs/custom_database.ncl:

-
# Custom database task service
-{
-  CustomDatabaseConfig = {
-    # Configuration for Custom Database service
-    # Database configuration
-    version | String = "14.0",
-    port | Number = 5432,
-    max_connections | Number = 100,
-    memory_limit | String = "512 MB",
-
-    # Data configuration
-    data_directory | String = "/var/lib/customdb",
-    log_directory | String = "/var/log/customdb",
-
-    # Replication
-    replication | {
-      enabled | Bool = false,
-      mode | String = "async",
-      replicas | Number = 1,
-    } = {},
-
-    # Backup configuration
-    backup | {
-      enabled | Bool = true,
-      schedule | String = "0 2 * * *",
-      retention_days | Number = 7,
-      storage_location | String = "local",
-    } = {},
-
-    # Security
-    ssl | {
-      enabled | Bool = true,
-      cert_file | String = "/etc/ssl/certs/customdb.crt",
-      key_file | String = "/etc/ssl/private/customdb.key",
-    } = {},
-
-    # Monitoring
-    monitoring | {
-      enabled | Bool = true,
-      metrics_port | Number = 9187,
-      log_level | String = "info",
-    } = {},
-  },
-
-  # Service metadata
-  service_metadata = {
-    name = "custom-database",
-    description = "Custom Database Server",
-    version = "14.0",
-    category = "database",
-    dependencies = ["systemd"],
-    supported_os = ["ubuntu", "debian", "centos", "rhel"],
-    ports = [5432, 9187],
-    data_directories = ["/var/lib/customdb"],
-  },
-}
-
-

Step 2: Implement Service Logic

-

nulib/taskservs/custom_database.nu:

-
# Custom Database task service implementation
-
-# Install custom database
-export def install_custom_database [
-    config: record
-    --check: bool = false
-] -> record {
-    print "Installing Custom Database..."
-
-    if $check {
-        return {
-            action: "install"
-            service: "custom-database"
-            version: ($config.version | default "14.0")
-            status: "planned"
-            changes: [
-                "Install Custom Database packages"
-                "Configure database server"
-                "Start database service"
-                "Set up monitoring"
-            ]
-        }
-    }
-
-    # Check prerequisites
-    validate_prerequisites $config
-
-    # Install packages
-    install_packages $config
-
-    # Configure service
-    configure_service $config
-
-    # Initialize database
-    initialize_database $config
-
-    # Set up monitoring
-    if ($config.monitoring?.enabled | default true) {
-        setup_monitoring $config
-    }
-
-    # Set up backups
-    if ($config.backup?.enabled | default true) {
-        setup_backups $config
-    }
-
-    # Start service
-    start_service
-
-    # Verify installation
-    let status = (verify_installation $config)
-
-    return {
-        action: "install"
-        service: "custom-database"
-        version: ($config.version | default "14.0")
-        status: $status.status
-        endpoint: $"localhost:($config.port | default 5432)"
-        data_directory: ($config.data_directory | default "/var/lib/customdb")
-    }
-}
-
-# Configure custom database
-export def configure_custom_database [
-    config: record
-] {
-    print "Configuring Custom Database..."
-
-    # Generate configuration file
-    let db_config = generate_config $config
-    $db_config | save "/etc/customdb/customdb.conf"
-
-    # Set up SSL if enabled
-    if ($config.ssl?.enabled | default true) {
-        setup_ssl $config
-    }
-
-    # Configure replication if enabled
-    if ($config.replication?.enabled | default false) {
-        setup_replication $config
-    }
-
-    # Restart service to apply configuration
-    restart_service
-}
-
-# Start service
-export def start_custom_database [] {
-    print "Starting Custom Database service..."
-    ^systemctl start customdb
-    ^systemctl enable customdb
-}
-
-# Stop service
-export def stop_custom_database [] {
-    print "Stopping Custom Database service..."
-    ^systemctl stop customdb
-}
-
-# Check service status
-export def status_custom_database [] -> record {
-    let systemd_status = (^systemctl is-active customdb | str trim)
-    let port_check = (check_port 5432)
-    let version = (get_database_version)
-
-    return {
-        service: "custom-database"
-        status: $systemd_status
-        port_accessible: $port_check
-        version: $version
-        uptime: (get_service_uptime)
-        connections: (get_active_connections)
-    }
-}
-
-# Health check
-export def health_custom_database [] -> record {
-    let status = (status_custom_database)
-    let health_checks = [
-        {
-            name: "Service Running"
-            status: ($status.status == "active")
-            message: $"Systemd status: ($status.status)"
-        }
-        {
-            name: "Port Accessible"
-            status: $status.port_accessible
-            message: "Database port 5432 is accessible"
-        }
-        {
-            name: "Database Responsive"
-            status: (test_database_connection)
-            message: "Database responds to queries"
-        }
-    ]
-
-    let healthy = ($health_checks | all {|check| $check.status})
-
-    return {
-        service: "custom-database"
-        healthy: $healthy
-        checks: $health_checks
-        last_check: (date now | format date "%Y-%m-%d %H:%M:%S")
-    }
-}
-
-# Update service
-export def update_custom_database [
-    target_version: string
-] -> record {
-    print $"Updating Custom Database to version ($target_version)..."
-
-    # Create backup before update
-    backup_database "pre-update"
-
-    # Stop service
-    stop_custom_database
-
-    # Update packages
-    update_packages $target_version
-
-    # Migrate database if needed
-    migrate_database $target_version
-
-    # Start service
-    start_custom_database
-
-    # Verify update
-    let new_version = (get_database_version)
-
-    return {
-        action: "update"
-        service: "custom-database"
-        old_version: (get_previous_version)
-        new_version: $new_version
-        status: "completed"
-    }
-}
-
-# Remove service
-export def remove_custom_database [
-    --keep_data: bool = false
-] -> record {
-    print "Removing Custom Database..."
-
-    # Stop service
-    stop_custom_database
-
-    # Remove packages
-    ^apt remove --purge -y customdb-server customdb-client
-
-    # Remove configuration
-    rm -rf "/etc/customdb"
-
-    # Remove data (optional)
-    if not $keep_data {
-        print "Removing database data..."
-        rm -rf "/var/lib/customdb"
-        rm -rf "/var/log/customdb"
-    }
-
-    return {
-        action: "remove"
-        service: "custom-database"
-        data_preserved: $keep_data
-        status: "completed"
-    }
-}
-
-# Helper functions
-
-def validate_prerequisites [config: record] {
-    # Check operating system
-    let os_info = (^lsb_release -is | str trim | str downcase)
-    let supported_os = ["ubuntu", "debian"]
-
-    if not ($os_info in $supported_os) {
-        error make {
-            msg: $"Unsupported OS: ($os_info). Supported: ($supported_os | str join ', ')"
-        }
-    }
-
-    # Check system resources
-    let memory_mb = (^free -m | lines | get 1 | split row ' ' | get 1 | into int)
-    if $memory_mb < 512 {
-        error make {
-            msg: $"Insufficient memory: ($memory_mb)MB. Minimum 512 MB required."
-        }
-    }
-}
-
-def install_packages [config: record] {
-    let version = ($config.version | default "14.0")
-
-    # Update package list
-    ^apt update
-
-    # Install packages
-    ^apt install -y $"customdb-server-($version)" $"customdb-client-($version)"
-}
-
-def configure_service [config: record] {
-    let config_content = generate_config $config
-    $config_content | save "/etc/customdb/customdb.conf"
-
-    # Set permissions
-    ^chown -R customdb:customdb "/etc/customdb"
-    ^chmod 600 "/etc/customdb/customdb.conf"
-}
-
-def generate_config [config: record] -> string {
-    let port = ($config.port | default 5432)
-    let max_connections = ($config.max_connections | default 100)
-    let memory_limit = ($config.memory_limit | default "512 MB")
-
-    return $"
-# Custom Database Configuration
-port = ($port)
-max_connections = ($max_connections)
-shared_buffers = ($memory_limit)
-data_directory = '($config.data_directory | default "/var/lib/customdb")'
-log_directory = '($config.log_directory | default "/var/log/customdb")'
-
-# Logging
-log_level = '($config.monitoring?.log_level | default "info")'
-
-# SSL Configuration
-ssl = ($config.ssl?.enabled | default true)
-ssl_cert_file = '($config.ssl?.cert_file | default "/etc/ssl/certs/customdb.crt")'
-ssl_key_file = '($config.ssl?.key_file | default "/etc/ssl/private/customdb.key")'
-"
-}
-
-def initialize_database [config: record] {
-    print "Initializing database..."
-
-    # Create data directory
-    let data_dir = ($config.data_directory | default "/var/lib/customdb")
-    mkdir $data_dir
-    ^chown -R customdb:customdb $data_dir
-
-    # Initialize database
-    ^su - customdb -c $"customdb-initdb -D ($data_dir)"
-}
-
-def setup_monitoring [config: record] {
-    if ($config.monitoring?.enabled | default true) {
-        print "Setting up monitoring..."
-
-        # Install monitoring exporter
-        ^apt install -y customdb-exporter
-
-        # Configure exporter
-        let exporter_config = $"
-port: ($config.monitoring?.metrics_port | default 9187)
-database_url: postgresql://localhost:($config.port | default 5432)/postgres
-"
-        $exporter_config | save "/etc/customdb-exporter/config.yaml"
-
-        # Start exporter
-        ^systemctl enable customdb-exporter
-        ^systemctl start customdb-exporter
-    }
-}
-
-def setup_backups [config: record] {
-    if ($config.backup?.enabled | default true) {
-        print "Setting up backups..."
-
-        let schedule = ($config.backup?.schedule | default "0 2 * * *")
-        let retention = ($config.backup?.retention_days | default 7)
-
-        # Create backup script
-        let backup_script = $"#!/bin/bash
-customdb-dump --all-databases > /var/backups/customdb-$(date +%Y%m%d_%H%M%S).sql
-find /var/backups -name 'customdb-*.sql' -mtime +($retention) -delete
-"
-
-        $backup_script | save "/usr/local/bin/customdb-backup.sh"
-        ^chmod +x "/usr/local/bin/customdb-backup.sh"
-
-        # Add to crontab
-        $"($schedule) /usr/local/bin/customdb-backup.sh" | ^crontab -u customdb -
-    }
-}
-
-def test_database_connection [] -> bool {
-    let result = (^customdb-cli -h localhost -c "SELECT 1;" | complete)
-    return ($result.exit_code == 0)
-}
-
-def get_database_version [] -> string {
-    let result = (^customdb-cli -h localhost -c "SELECT version();" | complete)
-    if ($result.exit_code == 0) {
-        return ($result.stdout | lines | first | parse "Custom Database {version}" | get version.0)
-    } else {
-        return "unknown"
-    }
-}
-
-def check_port [port: int] -> bool {
-    let result = (^nc -z localhost $port | complete)
-    return ($result.exit_code == 0)
-}
-
-

Creating Custom Clusters

-

Cluster Architecture

-

Clusters orchestrate multiple services to work together as a cohesive application stack.

-

Step 1: Define Cluster Schema

-

schemas/clusters/custom_web_stack.ncl:

-
# Custom web application stack
-{
-  CustomWebStackConfig = {
-    # Configuration for Custom Web Application Stack
-    # Application configuration
-    app_name | String,
-    app_version | String = "latest",
-    environment | String = "production",
-
-    # Web tier configuration
-    web_tier | {
-      replicas | Number = 3,
-      instance_type | String = "t3.medium",
-      load_balancer | {
-        enabled | Bool = true,
-        ssl | Bool = true,
-        health_check_path | String = "/health",
-      } = {},
-    },
-
-    # Application tier configuration
-    app_tier | {
-      replicas | Number = 5,
-      instance_type | String = "t3.large",
-      auto_scaling | {
-        enabled | Bool = true,
-        min_replicas | Number = 2,
-        max_replicas | Number = 10,
-        cpu_threshold | Number = 70,
-      } = {},
-    },
-
-    # Database tier configuration
-    database_tier | {
-      type | String = "postgresql",
-      instance_type | String = "t3.xlarge",
-      high_availability | Bool = true,
-      backup_enabled | Bool = true,
-    } = {},
-
-    # Monitoring configuration
-    monitoring | {
-      enabled | Bool = true,
-      metrics_retention | String = "30d",
-      alerting | Bool = true,
-    } = {},
-
-    # Networking
-    network | {
-      vpc_cidr | String = "10.0.0.0/16",
-      public_subnets | [String] = ["10.0.1.0/24", "10.0.2.0/24"],
-      private_subnets | [String] = ["10.0.10.0/24", "10.0.20.0/24"],
-      database_subnets | [String] = ["10.0.100.0/24", "10.0.200.0/24"],
-    } = {},
-  },
-
-  # Cluster blueprint
-  cluster_blueprint = {
-    name = "custom-web-stack",
-    description = "Custom web application stack with load balancer, app servers, and database",
-    version = "1.0.0",
-    components = [
-      {
-        name = "load-balancer",
-        type = "taskserv",
-        service = "haproxy",
-        tier = "web",
-      },
-      {
-        name = "web-servers",
-        type = "server",
-        tier = "web",
-        scaling = "horizontal",
-      },
-      {
-        name = "app-servers",
-        type = "server",
-        tier = "app",
-        scaling = "horizontal",
-      },
-      {
-        name = "database",
-        type = "taskserv",
-        service = "postgresql",
-        tier = "database",
-      },
-      {
-        name = "monitoring",
-        type = "taskserv",
-        service = "prometheus",
-        tier = "monitoring",
-      },
-    ],
-  },
-}
-
-

Step 2: Implement Cluster Logic

-

nulib/clusters/custom_web_stack.nu:

-
# Custom Web Stack cluster implementation
-
-# Deploy web stack cluster
-export def deploy_custom_web_stack [
-    config: record
-    --check: bool = false
-] -> record {
-    print $"Deploying Custom Web Stack: ($config.app_name)"
-
-    if $check {
-        return {
-            action: "deploy"
-            cluster: "custom-web-stack"
-            app_name: $config.app_name
-            status: "planned"
-            components: [
-                "Network infrastructure"
-                "Load balancer"
-                "Web servers"
-                "Application servers"
-                "Database"
-                "Monitoring"
-            ]
-            estimated_cost: (calculate_cluster_cost $config)
-        }
-    }
-
-    # Deploy in order
-    let network = (deploy_network $config)
-    let database = (deploy_database $config)
-    let app_servers = (deploy_app_tier $config)
-    let web_servers = (deploy_web_tier $config)
-    let load_balancer = (deploy_load_balancer $config)
-    let monitoring = (deploy_monitoring $config)
-
-    # Configure service discovery
-    configure_service_discovery $config
-
-    # Set up health checks
-    setup_health_checks $config
-
-    return {
-        action: "deploy"
-        cluster: "custom-web-stack"
-        app_name: $config.app_name
-        status: "deployed"
-        components: {
-            network: $network
-            database: $database
-            app_servers: $app_servers
-            web_servers: $web_servers
-            load_balancer: $load_balancer
-            monitoring: $monitoring
-        }
-        endpoints: {
-            web: $load_balancer.public_ip
-            monitoring: $monitoring.grafana_url
-        }
-    }
-}
-
-# Scale cluster
-export def scale_custom_web_stack [
-    app_name: string
-    tier: string
-    replicas: int
-] -> record {
-    print $"Scaling ($tier) tier to ($replicas) replicas for ($app_name)"
-
-    match $tier {
-        "web" => {
-            scale_web_tier $app_name $replicas
-        }
-        "app" => {
-            scale_app_tier $app_name $replicas
-        }
-        _ => {
-            error make {
-                msg: $"Invalid tier: ($tier). Valid options: web, app"
-            }
-        }
-    }
-
-    return {
-        action: "scale"
-        cluster: "custom-web-stack"
-        app_name: $app_name
-        tier: $tier
-        new_replicas: $replicas
-        status: "completed"
-    }
-}
-
-# Update cluster
-export def update_custom_web_stack [
-    app_name: string
-    config: record
-] -> record {
-    print $"Updating Custom Web Stack: ($app_name)"
-
-    # Rolling update strategy
-    update_app_tier $app_name $config
-    update_web_tier $app_name $config
-    update_load_balancer $app_name $config
-
-    return {
-        action: "update"
-        cluster: "custom-web-stack"
-        app_name: $app_name
-        status: "completed"
-    }
-}
-
-# Delete cluster
-export def delete_custom_web_stack [
-    app_name: string
-    --keep_data: bool = false
-] -> record {
-    print $"Deleting Custom Web Stack: ($app_name)"
-
-    # Delete in reverse order
-    delete_load_balancer $app_name
-    delete_web_tier $app_name
-    delete_app_tier $app_name
-
-    if not $keep_data {
-        delete_database $app_name
-    }
-
-    delete_monitoring $app_name
-    delete_network $app_name
-
-    return {
-        action: "delete"
-        cluster: "custom-web-stack"
-        app_name: $app_name
-        data_preserved: $keep_data
-        status: "completed"
-    }
-}
-
-# Cluster status
-export def status_custom_web_stack [
-    app_name: string
-] -> record {
-    let web_status = (get_web_tier_status $app_name)
-    let app_status = (get_app_tier_status $app_name)
-    let db_status = (get_database_status $app_name)
-    let lb_status = (get_load_balancer_status $app_name)
-    let monitoring_status = (get_monitoring_status $app_name)
-
-    let overall_healthy = (
-        $web_status.healthy and
-        $app_status.healthy and
-        $db_status.healthy and
-        $lb_status.healthy and
-        $monitoring_status.healthy
-    )
-
-    return {
-        cluster: "custom-web-stack"
-        app_name: $app_name
-        healthy: $overall_healthy
-        components: {
-            web_tier: $web_status
-            app_tier: $app_status
-            database: $db_status
-            load_balancer: $lb_status
-            monitoring: $monitoring_status
-        }
-        last_check: (date now | format date "%Y-%m-%d %H:%M:%S")
-    }
-}
-
-# Helper functions for deployment
-
-def deploy_network [config: record] -> record {
-    print "Deploying network infrastructure..."
-
-    # Create VPC
-    let vpc_config = {
-        cidr: ($config.network.vpc_cidr | default "10.0.0.0/16")
-        name: $"($config.app_name)-vpc"
-    }
-
-    # Create subnets
-    let subnets = [
-        {name: "public-1", cidr: ($config.network.public_subnets | get 0)}
-        {name: "public-2", cidr: ($config.network.public_subnets | get 1)}
-        {name: "private-1", cidr: ($config.network.private_subnets | get 0)}
-        {name: "private-2", cidr: ($config.network.private_subnets | get 1)}
-        {name: "database-1", cidr: ($config.network.database_subnets | get 0)}
-        {name: "database-2", cidr: ($config.network.database_subnets | get 1)}
-    ]
-
-    return {
-        vpc: $vpc_config
-        subnets: $subnets
-        status: "deployed"
-    }
-}
-
-def deploy_database [config: record] -> record {
-    print "Deploying database tier..."
-
-    let db_config = {
-        name: $"($config.app_name)-db"
-        type: ($config.database_tier.type | default "postgresql")
-        instance_type: ($config.database_tier.instance_type | default "t3.xlarge")
-        high_availability: ($config.database_tier.high_availability | default true)
-        backup_enabled: ($config.database_tier.backup_enabled | default true)
-    }
-
-    # Deploy database servers
-    if $db_config.high_availability {
-        deploy_ha_database $db_config
-    } else {
-        deploy_single_database $db_config
-    }
-
-    return {
-        name: $db_config.name
-        type: $db_config.type
-        high_availability: $db_config.high_availability
-        status: "deployed"
-        endpoint: $"($config.app_name)-db.local:5432"
-    }
-}
-
-def deploy_app_tier [config: record] -> record {
-    print "Deploying application tier..."
-
-    let replicas = ($config.app_tier.replicas | default 5)
-
-    # Deploy app servers
-    mut servers = []
-    for i in 1..$replicas {
-        let server_config = {
-            name: $"($config.app_name)-app-($i | fill --width 2 --char '0')"
-            instance_type: ($config.app_tier.instance_type | default "t3.large")
-            subnet: "private"
-        }
-
-        let server = (deploy_app_server $server_config)
-        $servers = ($servers | append $server)
-    }
-
-    return {
-        tier: "application"
-        servers: $servers
-        replicas: $replicas
-        status: "deployed"
-    }
-}
-
-def calculate_cluster_cost [config: record] -> float {
-    let web_cost = ($config.web_tier.replicas | default 3) * 0.10
-    let app_cost = ($config.app_tier.replicas | default 5) * 0.20
-    let db_cost = if ($config.database_tier.high_availability | default true) { 0.80 } else { 0.40 }
-    let lb_cost = 0.05
-
-    return ($web_cost + $app_cost + $db_cost + $lb_cost)
-}
-
-

Extension Testing

-

Test Structure

-
tests/
-├── unit/                   # Unit tests
-│   ├── provider_test.nu   # Provider unit tests
-│   ├── taskserv_test.nu   # Task service unit tests
-│   └── cluster_test.nu    # Cluster unit tests
-├── integration/            # Integration tests
-│   ├── provider_integration_test.nu
-│   ├── taskserv_integration_test.nu
-│   └── cluster_integration_test.nu
-├── e2e/                   # End-to-end tests
-│   └── full_stack_test.nu
-└── fixtures/              # Test data
-    ├── configs/
-    └── mocks/
-
-

Example Unit Test

-

tests/unit/provider_test.nu:

-
# Unit tests for custom cloud provider
-
-use std testing
-
-export def test_provider_validation [] {
-    # Test valid configuration
-    let valid_config = {
-        api_key: "test-key"
-        region: "us-west-1"
-        project_id: "test-project"
-    }
-
-    let result = (validate_custom_cloud_config $valid_config)
-    assert equal $result.valid true
-
-    # Test invalid configuration
-    let invalid_config = {
-        region: "us-west-1"
-        # Missing api_key
-    }
-
-    let result2 = (validate_custom_cloud_config $invalid_config)
-    assert equal $result2.valid false
-    assert str contains $result2.error "api_key"
-}
-
-export def test_cost_calculation [] {
-    let server_config = {
-        machine_type: "medium"
-        disk_size: 50
-    }
-
-    let cost = (calculate_server_cost $server_config)
-    assert equal $cost 0.15  # 0.10 (medium) + 0.05 (50 GB storage)
-}
-
-export def test_api_call_formatting [] {
-    let config = {
-        name: "test-server"
-        machine_type: "small"
-        zone: "us-west-1a"
-    }
-
-    let api_payload = (format_create_server_request $config)
-
-    assert str contains ($api_payload | to json) "test-server"
-    assert equal $api_payload.machine_type "small"
-    assert equal $api_payload.zone "us-west-1a"
-}
-
-

Integration Test

-

tests/integration/provider_integration_test.nu:

-
# Integration tests for custom cloud provider
-
-use std testing
-
-export def test_server_lifecycle [] {
-    # Set up test environment
-    $env.CUSTOM_CLOUD_API_KEY = "test-api-key"
-    $env.CUSTOM_CLOUD_API_URL = "https://api.test.custom-cloud.com/v1"
-
-    let server_config = {
-        name: "test-integration-server"
-        machine_type: "micro"
-        zone: "us-west-1a"
-    }
-
-    # Test server creation
-    let create_result = (custom_cloud_create_server $server_config --check true)
-    assert equal $create_result.status "planned"
-
-    # Note: Actual creation would require valid API credentials
-    # In integration tests, you might use a test/sandbox environment
-}
-
-export def test_server_listing [] {
-    # Mock API response for testing
-    with-env [CUSTOM_CLOUD_API_KEY "test-key"] {
-        # This would test against a real API in integration environment
-        let servers = (custom_cloud_list_servers)
-        assert ($servers | is-not-empty)
-    }
-}
-
-

Publishing Extensions

-

Extension Package Structure

-
my-extension-package/
-├── extension.toml         # Extension metadata
-├── README.md             # Documentation
-├── LICENSE               # License file
-├── CHANGELOG.md          # Version history
-├── examples/             # Usage examples
-├── src/                  # Source code
-│   ├── kcl/
-│   ├── nulib/
-│   └── templates/
-└── tests/               # Test files
-
-

Publishing Configuration

-

extension.toml:

-
[extension]
-name = "my-custom-provider"
-version = "1.0.0"
-description = "Custom cloud provider integration"
-author = "Your Name <you@example.com>"
-license = "MIT"
-homepage = "https://github.com/username/my-custom-provider"
-repository = "https://github.com/username/my-custom-provider"
-keywords = ["cloud", "provider", "infrastructure"]
-categories = ["providers"]
-
-[compatibility]
-provisioning_version = ">=1.0.0"
-nickel_version = ">=1.15.0"
-
-[provides]
-providers = ["custom-cloud"]
-taskservs = []
-clusters = []
-
-[dependencies]
-system_packages = ["curl", "jq"]
-extensions = []
-
-[build]
-include = ["src/**", "examples/**", "README.md", "LICENSE"]
-exclude = ["tests/**", ".git/**", "*.tmp"]
-
-

Publishing Process

-
# 1. Validate extension
-provisioning extension validate .
-
-# 2. Run tests
-provisioning extension test .
-
-# 3. Build package
-provisioning extension build .
-
-# 4. Publish to registry
-provisioning extension publish ./dist/my-custom-provider-1.0.0.tar.gz
-
-

Best Practices

-

1. Code Organization

-
# Follow standard structure
-extension/
-├── schemas/      # Nickel schemas and models
-├── nulib/        # Nushell implementation
-├── templates/    # Configuration templates
-├── tests/        # Comprehensive tests
-└── docs/         # Documentation
-
-

2. Error Handling

-
# Always provide meaningful error messages
-if ($api_response | get -o status | default "" | str contains "error") {
-    error make {
-        msg: $"API Error: ($api_response.message)"
-        label: {
-            text: "Custom Cloud API failure"
-            span: (metadata $api_response | get span)
-        }
-        help: "Check your API key and network connectivity"
-    }
-}
-
-

3. Configuration Validation

-
# Use Nickel's validation features with contracts
-{
-  CustomConfig = {
-    # Configuration with validation
-    name | String | doc "Name must not be empty",
-    size | Number | doc "Size must be positive and at most 1000",
-  },
-
-  # Validation rules
-  validate_config = fun config =>
-    let valid_name = (std.string.length config.name) > 0 in
-    let valid_size = config.size > 0 && config.size <= 1000 in
-    if valid_name && valid_size then
-      config
-    else
-      (std.fail "Configuration validation failed"),
-}
-
-

4. Testing

-
    -
  • Write comprehensive unit tests
  • -
  • Include integration tests
  • -
  • Test error conditions
  • -
  • Use fixtures for consistent test data
  • -
  • Mock external dependencies
  • -
-

5. Documentation

-
    -
  • Include README with examples
  • -
  • Document all configuration options
  • -
  • Provide troubleshooting guide
  • -
  • Include architecture diagrams
  • -
  • Write API documentation
  • -
-

Next Steps

-

Now that you understand extension development:

-
    -
  1. Study existing extensions in the providers/ and taskservs/ directories
  2. -
  3. Practice with simple extensions before building complex ones
  4. -
  5. Join the community to share and collaborate on extensions
  6. -
  7. Contribute to the core system by improving extension APIs
  8. -
  9. Build a library of reusable templates and patterns
  10. -
-

You’re now equipped to extend provisioning for any custom requirements!

-

Extension Registry Service

-

A high-performance Rust microservice that provides a unified REST API for extension discovery, versioning, and download from multiple Git-based -sources and OCI registries.

-
-

Source: provisioning/platform/crates/extension-registry/

-
-

Features

-
    -
  • Multi-Backend Source Support: Fetch extensions from Gitea, Forgejo, and GitHub releases
  • -
  • Multi-Registry Distribution Support: Distribute extensions to Zot, Harbor, Docker Hub, GHCR, Quay, and other OCI-compliant registries
  • -
  • Unified REST API: Single API for all extension operations across all backends
  • -
  • Smart Caching: LRU cache with TTL to reduce backend API calls
  • -
  • Prometheus Metrics: Built-in metrics for monitoring
  • -
  • Health Monitoring: Parallel health checks for all backends with aggregated status
  • -
  • Aggregation & Fallback: Intelligent request routing with aggregation and fallback strategies
  • -
  • Type-Safe: Strong typing for extension metadata
  • -
  • Async/Await: High-performance async operations with Tokio
  • -
  • Backward Compatible: Old single-instance configs auto-migrate to new multi-instance format
  • -
-

Architecture

-

Dual-Trait System

-

The extension registry uses a trait-based architecture separating source and distribution backends:

-
┌────────────────────────────────────────────────────────────────────┐
-│                    Extension Registry API                           │
-│                          (axum)                                     │
-├────────────────────────────────────────────────────────────────────┤
-│                                                                      │
-│  ┌─ SourceClients ────────────┐  ┌─ DistributionClients ────────┐  │
-│  │                             │  │                              │  │
-│  │ • Gitea (Git releases)      │  │ • OCI Registries             │  │
-│  │ • Forgejo (Git releases)    │  │   - Zot                      │  │
-│  │ • GitHub (Releases API)     │  │   - Harbor                   │  │
-│  │                             │  │   - Docker Hub               │  │
-│  │ Strategy: Aggregation +     │  │   - GHCR / Quay              │  │
-│  │ Fallback across all sources │  │   - Any OCI-compliant        │  │
-│  │                             │  │                              │  │
-│  └─────────────────────────────┘  └──────────────────────────────┘  │
-│                                                                      │
-│  ┌─ LRU Cache ───────────────────────────────────────────────────┐  │
-│  │ • Metadata cache (with TTL)                                   │  │
-│  │ • List cache (with TTL)                                       │  │
-│  │ • Version cache (version strings only)                        │  │
-│  └───────────────────────────────────────────────────────────────┘  │
-│                                                                      │
-└────────────────────────────────────────────────────────────────────┘
-
-

Request Strategies

- -
    -
  1. Parallel Execution: Spawn concurrent tasks for all source and distribution clients
  2. -
  3. Merge Results: Combine results from all backends
  4. -
  5. Deduplication: Remove duplicates, preferring more recent versions
  6. -
  7. Pagination: Apply limit/offset to merged results
  8. -
  9. Caching: Store merged results with composite cache key
  10. -
-

Fallback Strategy (get_extension, download_extension)

-
    -
  1. Sequential Retry: Try source clients first (in configured order)
  2. -
  3. Distribution Fallback: If all sources fail, try distribution clients
  4. -
  5. Return First Success: Return result from first successful client
  6. -
  7. Caching: Cache successful result with backend-specific key
  8. -
-

Installation

-
cd provisioning/platform/extension-registry
-cargo build --release
-
-

Configuration

-

Single-Instance Configuration (Legacy - Auto-Migrated)

-

Old format is automatically migrated to new multi-instance format:

-
[server]
-host = "0.0.0.0"
-port = 8082
-
-# Single Gitea instance (auto-migrated to sources.gitea[0])
-[gitea]
-url = "https://gitea.example.com"
-organization = "provisioning-extensions"
-token_path = "/path/to/gitea-token.txt"
-
-# Single OCI registry (auto-migrated to distributions.oci[0])
-[oci]
-registry = "registry.example.com"
-namespace = "provisioning"
-auth_token_path = "/path/to/oci-token.txt"
-
-[cache]
-capacity = 1000
-ttl_seconds = 300
-
- -

New format supporting multiple backends of each type:

-
[server]
-host = "0.0.0.0"
-port = 8082
-workers = 4
-enable_cors = false
-enable_compression = true
-
-# Multiple Gitea sources
-[sources.gitea]
-
-[[sources.gitea]]
-id = "internal-gitea"
-url = "https://gitea.internal.example.com"
-organization = "provisioning"
-token_path = "/etc/secrets/gitea-internal-token.txt"
-timeout_seconds = 30
-verify_ssl = true
-
-[[sources.gitea]]
-id = "public-gitea"
-url = "https://gitea.public.example.com"
-organization = "extensions"
-token_path = "/etc/secrets/gitea-public-token.txt"
-timeout_seconds = 30
-verify_ssl = true
-
-# Forgejo sources (API compatible with Gitea)
-[sources.forgejo]
-
-[[sources.forgejo]]
-id = "community-forgejo"
-url = "https://forgejo.community.example.com"
-organization = "provisioning"
-token_path = "/etc/secrets/forgejo-token.txt"
-timeout_seconds = 30
-verify_ssl = true
-
-# GitHub sources
-[sources.github]
-
-[[sources.github]]
-id = "org-github"
-organization = "my-organization"
-token_path = "/etc/secrets/github-token.txt"
-timeout_seconds = 30
-verify_ssl = true
-
-# Multiple OCI distribution registries
-[distributions.oci]
-
-[[distributions.oci]]
-id = "internal-zot"
-registry = "zot.internal.example.com"
-namespace = "extensions"
-timeout_seconds = 30
-verify_ssl = true
-
-[[distributions.oci]]
-id = "public-harbor"
-registry = "harbor.public.example.com"
-namespace = "extensions"
-auth_token_path = "/etc/secrets/harbor-token.txt"
-timeout_seconds = 30
-verify_ssl = true
-
-[[distributions.oci]]
-id = "docker-hub"
-registry = "docker.io"
-namespace = "myorg"
-auth_token_path = "/etc/secrets/docker-hub-token.txt"
-timeout_seconds = 30
-verify_ssl = true
-
-# Cache configuration
-[cache]
-capacity = 1000
-ttl_seconds = 300
-enable_metadata_cache = true
-enable_list_cache = true
-
-

Configuration Notes

-
    -
  • Backend Identifiers: Use id field to uniquely identify each backend instance (auto-generated if omitted)
  • -
  • Gitea/Forgejo Compatible: Both use same config format; organization field is required for Git repos
  • -
  • GitHub Configuration: Uses organization as owner; token_path points to GitHub Personal Access Token
  • -
  • OCI Registries: Support any OCI-compliant registry (Zot, Harbor, Docker Hub, GHCR, Quay, etc.)
  • -
  • Optional Fields: id, verify_ssl, timeout_seconds have sensible defaults
  • -
  • Token Files: Should contain only the token with no extra whitespace; permissions should be 0600
  • -
-

Environment Variable Overrides

-

Legacy environment variable support (for backward compatibility):

-
REGISTRY_SERVER_HOST=127.0.0.1
-REGISTRY_SERVER_PORT=8083
-REGISTRY_SERVER_WORKERS=8
-REGISTRY_GITEA_URL=https://gitea.example.com
-REGISTRY_GITEA_ORG=extensions
-REGISTRY_GITEA_TOKEN_PATH=/path/to/token
-REGISTRY_OCI_REGISTRY=registry.example.com
-REGISTRY_OCI_NAMESPACE=extensions
-REGISTRY_CACHE_CAPACITY=2000
-REGISTRY_CACHE_TTL=600
-
-

API Endpoints

-

Extension Operations

-

List Extensions

-
GET /api/v1/extensions?type=provider&limit=10
-
-

Get Extension

-
GET /api/v1/extensions/{type}/{name}
-
-

List Versions

-
GET /api/v1/extensions/{type}/{name}/versions
-
-

Download Extension

-
GET /api/v1/extensions/{type}/{name}/{version}
-
-

Search Extensions

-
GET /api/v1/extensions/search?q=kubernetes&type=taskserv
-
-

System Endpoints

-

Health Check

-
GET /api/v1/health
-
-

Response (with multi-backend aggregation):

-
{
-  "status": "healthy|degraded|unhealthy",
-  "version": "0.1.0",
-  "uptime": 3600,
-  "backends": {
-    "gitea": {
-      "enabled": true,
-      "healthy": true,
-      "error": null
-    },
-    "oci": {
-      "enabled": true,
-      "healthy": true,
-      "error": null
-    }
-  }
-}
-
-

Status Values:

-
    -
  • healthy: All configured backends are healthy
  • -
  • degraded: At least one backend is healthy, but some are failing
  • -
  • unhealthy: No backends are responding
  • -
-

Metrics

-
GET /api/v1/metrics
-
-

Cache Statistics

-
GET /api/v1/cache/stats
-
-

Response:

-
{
-  "metadata_hits": 1024,
-  "metadata_misses": 256,
-  "list_hits": 512,
-  "list_misses": 128,
-  "version_hits": 2048,
-  "version_misses": 512,
-  "size": 4096
-}
-
-

Extension Naming Conventions

-

Gitea Repositories

-
    -
  • Providers: {name}_prov (for example, aws_prov)
  • -
  • Task Services: {name}_taskserv (for example, kubernetes_taskserv)
  • -
  • Clusters: {name}_cluster (for example, buildkit_cluster)
  • -
-

OCI Artifacts

-
    -
  • Providers: {namespace}/{name}-provider
  • -
  • Task Services: {namespace}/{name}-taskserv
  • -
  • Clusters: {namespace}/{name}-cluster
  • -
-

Deployment

-

Docker

-
docker build -t extension-registry:latest .
-docker run -d -p 8082:8082 -v $(pwd)/config.toml:/app/config.toml:ro extension-registry:latest
-
-

Kubernetes

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: extension-registry
-spec:
-  replicas: 3
-  template:
-    spec:
-      containers:
-      - name: extension-registry
-        image: extension-registry:latest
-        ports:
-        - containerPort: 8082
-
-

Migration Guide: Single to Multi-Instance

-

Automatic Migration

-

Old single-instance configs are automatically detected and migrated to the new multi-instance format during startup:

-
    -
  1. Detection: Registry checks if old-style fields (gitea, oci) contain values
  2. -
  3. Migration: Single instances are moved to new Vec-based format (sources.gitea[0], distributions.oci[0])
  4. -
  5. Logging: Migration event is logged for audit purposes
  6. -
  7. Transparency: No user action required; old configs continue to work
  8. -
-

Before Migration

-
[gitea]
-url = "https://gitea.example.com"
-organization = "extensions"
-token_path = "/path/to/token"
-
-[oci]
-registry = "registry.example.com"
-namespace = "extensions"
-
-

After Migration (Automatic)

-
[sources.gitea]
-[[sources.gitea]]
-url = "https://gitea.example.com"
-organization = "extensions"
-token_path = "/path/to/token"
-
-[distributions.oci]
-[[distributions.oci]]
-registry = "registry.example.com"
-namespace = "extensions"
-
-

Gradual Upgrade Path

-

To adopt the new format manually:

-
    -
  1. Backup current config - Keep old format as reference
  2. -
  3. Adopt new format - Replace old fields with new structure
  4. -
  5. Test - Verify all backends are reachable and extensions are discovered
  6. -
  7. Add new backends - Use new format to add Forgejo, GitHub, or additional OCI registries
  8. -
  9. Remove old fields - Delete deprecated gitea and oci top-level sections
  10. -
-

Benefits of Upgrading

-
    -
  • Multiple Sources: Support Gitea, Forgejo, and GitHub simultaneously
  • -
  • Multiple Registries: Distribute to multiple OCI registries
  • -
  • Better Resilience: If one backend fails, others continue to work
  • -
  • Flexible Configuration: Each backend can have different credentials and timeouts
  • -
  • Future-Proof: New backends can be added without config restructuring
  • -
- - -

Quick Developer Guide: Adding New Providers

-

This guide shows how to quickly add a new provider to the provider-agnostic infrastructure system.

-

Prerequisites

- -

5-Minute Provider Addition

-

Step 1: Create Provider Directory

-
mkdir -p provisioning/extensions/providers/{provider_name}
-mkdir -p provisioning/extensions/providers/{provider_name}/nulib/{provider_name}
-
-

Step 2: Copy Template and Customize

-
# Copy the local provider as a template
-cp provisioning/extensions/providers/local/provider.nu \
-   provisioning/extensions/providers/{provider_name}/provider.nu
-
-

Step 3: Update Provider Metadata

-

Edit provisioning/extensions/providers/{provider_name}/provider.nu:

-
export def get-provider-metadata []: nothing -> record {
-    {
-        name: "your_provider_name"
-        version: "1.0.0"
-        description: "Your Provider Description"
-        capabilities: {
-            server_management: true
-            network_management: true     # Set based on provider features
-            auto_scaling: false          # Set based on provider features
-            multi_region: true           # Set based on provider features
-            serverless: false            # Set based on provider features
-            # ... customize other capabilities
-        }
-    }
-}
-
-

Step 4: Implement Core Functions

-

The provider interface requires these essential functions:

-
# Required: Server operations
-export def query_servers [find?: string, cols?: string]: nothing -> list {
-    # Call your provider's server listing API
-    your_provider_query_servers $find $cols
-}
-
-export def create_server [settings: record, server: record, check: bool, wait: bool]: nothing -> bool {
-    # Call your provider's server creation API
-    your_provider_create_server $settings $server $check $wait
-}
-
-export def server_exists [server: record, error_exit: bool]: nothing -> bool {
-    # Check if server exists in your provider
-    your_provider_server_exists $server $error_exit
-}
-
-export def get_ip [settings: record, server: record, ip_type: string, error_exit: bool]: nothing -> string {
-    # Get server IP from your provider
-    your_provider_get_ip $settings $server $ip_type $error_exit
-}
-
-# Required: Infrastructure operations
-export def delete_server [settings: record, server: record, keep_storage: bool, error_exit: bool]: nothing -> bool {
-    your_provider_delete_server $settings $server $keep_storage $error_exit
-}
-
-export def server_state [server: record, new_state: string, error_exit: bool, wait: bool, settings: record]: nothing -> bool {
-    your_provider_server_state $server $new_state $error_exit $wait $settings
-}
-
-

Step 5: Create Provider-Specific Functions

-

Create provisioning/extensions/providers/{provider_name}/nulib/{provider_name}/servers.nu:

-
# Example: DigitalOcean provider functions
-export def digitalocean_query_servers [find?: string, cols?: string]: nothing -> list {
-    # Use DigitalOcean API to list droplets
-    let droplets = (http get "https://api.digitalocean.com/v2/droplets"
-        --headers { Authorization: $"Bearer ($env.DO_TOKEN)" })
-
-    $droplets.droplets | select name status memory disk region.name networks.v4
-}
-
-export def digitalocean_create_server [settings: record, server: record, check: bool, wait: bool]: nothing -> bool {
-    # Use DigitalOcean API to create droplet
-    let payload = {
-        name: $server.hostname
-        region: $server.zone
-        size: $server.plan
-        image: ($server.image? | default "ubuntu-20-04-x64")
-    }
-
-    if $check {
-        print $"Would create DigitalOcean droplet: ($payload)"
-        return true
-    }
-
-    let result = (http post "https://api.digitalocean.com/v2/droplets"
-        --headers { Authorization: $"Bearer ($env.DO_TOKEN)" }
-        --content-type application/json
-        $payload)
-
-    $result.droplet.id != null
-}
-
-

Step 6: Test Your Provider

-
# Test provider discovery
-nu -c "use provisioning/core/nulib/lib_provisioning/providers/registry.nu *; init-provider-registry; list-providers"
-
-# Test provider loading
-nu -c "use provisioning/core/nulib/lib_provisioning/providers/loader.nu *; load-provider 'your_provider_name'"
-
-# Test provider functions
-nu -c "use provisioning/extensions/providers/your_provider_name/provider.nu *; query_servers"
-
-

Step 7: Add Provider to Infrastructure

-

Add to your Nickel configuration:

-
# workspace/infra/example/servers.ncl
-let servers = [
-    {
-        hostname = "test-server",
-        provider = "your_provider_name",
-        zone = "your-region-1",
-        plan = "your-instance-type",
-    }
-] in
-servers
-
-

Provider Templates

-

Cloud Provider Template

-

For cloud providers (AWS, GCP, Azure, etc.):

-
# Use HTTP calls to cloud APIs
-export def cloud_query_servers [find?: string, cols?: string]: nothing -> list {
-    let auth_header = { Authorization: $"Bearer ($env.PROVIDER_TOKEN)" }
-    let servers = (http get $"($env.PROVIDER_API_URL)/servers" --headers $auth_header)
-
-    $servers | select name status region instance_type public_ip
-}
-
-

Container Platform Template

-

For container platforms (Docker, Podman, etc.):

-
# Use CLI commands for container platforms
-export def container_query_servers [find?: string, cols?: string]: nothing -> list {
-    let containers = (docker ps --format json | from json)
-
-    $containers | select Names State Status Image
-}
-
-

Bare Metal Provider Template

-

For bare metal or existing servers:

-
# Use SSH or local commands
-export def baremetal_query_servers [find?: string, cols?: string]: nothing -> list {
-    # Read from inventory file or ping servers
-    let inventory = (open inventory.yaml | from yaml)
-
-    $inventory.servers | select hostname ip_address status
-}
-
-

Best Practices

-

1. Error Handling

-
export def provider_operation []: nothing -> any {
-    try {
-        # Your provider operation
-        provider_api_call
-    } catch {|err|
-        log-error $"Provider operation failed: ($err.msg)" "provider"
-        if $error_exit { exit 1 }
-        null
-    }
-}
-
-

2. Authentication

-
# Check for required environment variables
-def check_auth []: nothing -> bool {
-    if ($env | get -o PROVIDER_TOKEN) == null {
-        log-error "PROVIDER_TOKEN environment variable required" "auth"
-        return false
-    }
-    true
-}
-
-

3. Rate Limiting

-
# Add delays for API rate limits
-def api_call_with_retry [url: string]: nothing -> any {
-    mut attempts = 0
-    mut max_attempts = 3
-
-    while $attempts < $max_attempts {
-        try {
-            return (http get $url)
-        } catch {
-            $attempts += 1
-            sleep 1sec
-        }
-    }
-
-    error make { msg: "API call failed after retries" }
-}
-
-

4. Provider Capabilities

-

Set capabilities accurately:

-
capabilities: {
-    server_management: true          # Can create/delete servers
-    network_management: true         # Can manage networks/VPCs
-    storage_management: true         # Can manage block storage
-    load_balancer: false            # No load balancer support
-    dns_management: false           # No DNS support
-    auto_scaling: true              # Supports auto-scaling
-    spot_instances: false           # No spot instance support
-    multi_region: true              # Supports multiple regions
-    containers: false               # No container support
-    serverless: false               # No serverless support
-    encryption_at_rest: true        # Supports encryption
-    compliance_certifications: ["SOC2"]  # Available certifications
-}
-
-

Testing Checklist

-
    -
  • -Provider discovered by registry
  • -
  • -Provider loads without errors
  • -
  • -All required interface functions implemented
  • -
  • -Provider metadata correct
  • -
  • -Authentication working
  • -
  • -Can query existing resources
  • -
  • -Can create new resources (in test mode)
  • -
  • -Error handling working
  • -
  • -Compatible with existing infrastructure configs
  • -
-

Common Issues

-

Provider Not Found

-
# Check provider directory structure
-ls -la provisioning/extensions/providers/your_provider_name/
-
-# Ensure provider.nu exists and has get-provider-metadata function
-grep "get-provider-metadata" provisioning/extensions/providers/your_provider_name/provider.nu
-
-

Interface Validation Failed

-
# Check which functions are missing
-nu -c "use provisioning/core/nulib/lib_provisioning/providers/interface.nu *; validate-provider-interface 'your_provider_name'"
-
-

Authentication Errors

-
# Check environment variables
-env | grep PROVIDER
-
-# Test API access manually
-curl -H "Authorization: Bearer $PROVIDER_TOKEN" https://api.provider.com/test
-
-

Next Steps

-
    -
  1. Documentation: Add provider-specific documentation to docs/providers/
  2. -
  3. Examples: Create example infrastructure using your provider
  4. -
  5. Testing: Add integration tests for your provider
  6. -
  7. Optimization: Implement caching and performance optimizations
  8. -
  9. Features: Add provider-specific advanced features
  10. -
-

Getting Help

-
    -
  • Check existing providers for implementation patterns
  • -
  • Review the Provider Interface Documentation
  • -
  • Test with the provider test suite: ./provisioning/tools/test-provider-agnostic.nu
  • -
  • Run migration checks: ./provisioning/tools/migrate-to-provider-agnostic.nu status
  • -
-

Provider-Agnostic Architecture Documentation

-

Overview

-

The new provider-agnostic architecture eliminates hardcoded provider dependencies and enables true multi-provider infrastructure deployments. This -addresses two critical limitations of the previous middleware:

-
    -
  1. Hardcoded provider dependencies - No longer requires importing specific provider modules
  2. -
  3. Single-provider limitation - Now supports mixing multiple providers in the same deployment (for example, AWS compute + Cloudflare DNS + UpCloud -backup)
  4. -
-

Architecture Components

-

1. Provider Interface (interface.nu)

-

Defines the contract that all providers must implement:

-
# Standard interface functions
-- query_servers
-- server_info
-- server_exists
-- create_server
-- delete_server
-- server_state
-- get_ip
-# ... and 20+ other functions
-
-

Key Features:

-
    -
  • Type-safe function signatures
  • -
  • Comprehensive validation
  • -
  • Provider capability flags
  • -
  • Interface versioning
  • -
-

2. Provider Registry (registry.nu)

-

Manages provider discovery and registration:

-
# Initialize registry
-init-provider-registry
-
-# List available providers
-list-providers --available-only
-
-# Check provider availability
-is-provider-available "aws"
-
-

Features:

-
    -
  • Automatic provider discovery
  • -
  • Core and extension provider support
  • -
  • Caching for performance
  • -
  • Provider capability tracking
  • -
-

3. Provider Loader (loader.nu)

-

Handles dynamic provider loading and validation:

-
# Load provider dynamically
-load-provider "aws"
-
-# Get provider with auto-loading
-get-provider "upcloud"
-
-# Call provider function
-call-provider-function "aws" "query_servers" $find $cols
-
-

Features:

-
    -
  • Lazy loading (load only when needed)
  • -
  • Interface compliance validation
  • -
  • Error handling and recovery
  • -
  • Provider health checking
  • -
-

4. Provider Adapters

-

Each provider implements a standard adapter:

-
provisioning/extensions/providers/
-├── aws/provider.nu        # AWS adapter
-├── upcloud/provider.nu    # UpCloud adapter
-├── local/provider.nu      # Local adapter
-└── {custom}/provider.nu   # Custom providers
-
-

Adapter Structure:

-
# AWS Provider Adapter
-export def query_servers [find?: string, cols?: string] {
-    aws_query_servers $find $cols
-}
-
-export def create_server [settings: record, server: record, check: bool, wait: bool] {
-    # AWS-specific implementation
-}
-
-

5. Provider-Agnostic Middleware (middleware_provider_agnostic.nu)

-

The new middleware that uses dynamic dispatch:

-
# No hardcoded imports!
-export def mw_query_servers [settings: record, find?: string, cols?: string] {
-    $settings.data.servers | each { |server|
-        # Dynamic provider loading and dispatch
-        dispatch_provider_function $server.provider "query_servers" $find $cols
-    }
-}
-
-

Multi-Provider Support

-

Example: Mixed Provider Infrastructure

-
let servers = [
-    {
-        hostname = "compute-01",
-        provider = "aws",
-        # AWS-specific config
-    },
-    {
-        hostname = "backup-01",
-        provider = "upcloud",
-        # UpCloud-specific config
-    },
-    {
-        hostname = "api.example.com",
-        provider = "cloudflare",
-        # DNS-specific config
-    },
-] in
-servers
-
-

Multi-Provider Deployment

-
# Deploy across multiple providers automatically
-mw_deploy_multi_provider_infra $settings $deployment_plan
-
-# Get deployment strategy recommendations
-mw_suggest_deployment_strategy {
-    regions: ["us-east-1", "eu-west-1"]
-    high_availability: true
-    cost_optimization: true
-}
-
-

Provider Capabilities

-

Providers declare their capabilities:

-
capabilities: {
-    server_management: true
-    network_management: true
-    auto_scaling: true        # AWS: yes, Local: no
-    multi_region: true        # AWS: yes, Local: no
-    serverless: true          # AWS: yes, UpCloud: no
-    compliance_certifications: ["SOC2", "HIPAA"]
-}
-
-

Migration Guide

-

From Old Middleware

-

Before (hardcoded):

-
# middleware.nu
-use ../aws/nulib/aws/servers.nu *
-use ../upcloud/nulib/upcloud/servers.nu *
-
-match $server.provider {
-    "aws" => { aws_query_servers $find $cols }
-    "upcloud" => { upcloud_query_servers $find $cols }
-}
-
-

After (provider-agnostic):

-
# middleware_provider_agnostic.nu
-# No hardcoded imports!
-
-# Dynamic dispatch
-dispatch_provider_function $server.provider "query_servers" $find $cols
-
-

Migration Steps

-
    -
  1. -

    Replace middleware file:

    -
    cp provisioning/extensions/providers/prov_lib/middleware.nu \
    -   provisioning/extensions/providers/prov_lib/middleware_legacy.backup
    -
    -cp provisioning/extensions/providers/prov_lib/middleware_provider_agnostic.nu \
    -   provisioning/extensions/providers/prov_lib/middleware.nu
    -
    -
  2. -
  3. -

    Test with existing infrastructure:

    -
    ./provisioning/tools/test-provider-agnostic.nu run-all-tests
    -
    -
  4. -
  5. -

    Update any custom code that directly imported provider modules

    -
  6. -
-

Adding New Providers

-

1. Create Provider Adapter

-

Create provisioning/extensions/providers/{name}/provider.nu:

-
# Digital Ocean Provider Example
-export def get-provider-metadata [] {
-    {
-        name: "digitalocean"
-        version: "1.0.0"
-        capabilities: {
-            server_management: true
-            # ... other capabilities
-        }
-    }
-}
-
-# Implement required interface functions
-export def query_servers [find?: string, cols?: string] {
-    # DigitalOcean-specific implementation
-}
-
-export def create_server [settings: record, server: record, check: bool, wait: bool] {
-    # DigitalOcean-specific implementation
-}
-
-# ... implement all required functions
-
-

2. Provider Discovery

-

The registry will automatically discover the new provider on next initialization.

-

3. Test New Provider

-
# Check if discovered
-is-provider-available "digitalocean"
-
-# Load and test
-load-provider "digitalocean"
-check-provider-health "digitalocean"
-
-

Best Practices

-

Provider Development

-
    -
  1. Implement full interface - All functions must be implemented
  2. -
  3. Handle errors gracefully - Return appropriate error values
  4. -
  5. Follow naming conventions - Use consistent function naming
  6. -
  7. Document capabilities - Accurately declare what your provider supports
  8. -
  9. Test thoroughly - Validate against the interface specification
  10. -
-

Multi-Provider Deployments

-
    -
  1. Use capability-based selection - Choose providers based on required features
  2. -
  3. Handle provider failures - Design for provider unavailability
  4. -
  5. Optimize for cost/performance - Mix providers strategically
  6. -
  7. Monitor cross-provider dependencies - Understand inter-provider communication
  8. -
-

Profile-Based Security

-
# Environment profiles can restrict providers
-PROVISIONING_PROFILE=production  # Only allows certified providers
-PROVISIONING_PROFILE=development # Allows all providers including local
-
-

Troubleshooting

-

Common Issues

-
    -
  1. -

    Provider not found

    -
      -
    • Check provider is in correct directory
    • -
    • Verify provider.nu exists and implements interface
    • -
    • Run init-provider-registry to refresh
    • -
    -
  2. -
  3. -

    Interface validation failed

    -
      -
    • Use validate-provider-interface to check compliance
    • -
    • Ensure all required functions are implemented
    • -
    • Check function signatures match interface
    • -
    -
  4. -
  5. -

    Provider loading errors

    -
      -
    • Check Nushell module syntax
    • -
    • Verify import paths are correct
    • -
    • Use check-provider-health for diagnostics
    • -
    -
  6. -
-

Debug Commands

-
# Registry diagnostics
-get-provider-stats
-list-providers --verbose
-
-# Provider diagnostics
-check-provider-health "aws"
-check-all-providers-health
-
-# Loader diagnostics
-get-loader-stats
-
-

Performance Benefits

-
    -
  1. Lazy Loading - Providers loaded only when needed
  2. -
  3. Caching - Provider registry cached to disk
  4. -
  5. Reduced Memory - No hardcoded imports reducing memory usage
  6. -
  7. Parallel Operations - Multi-provider operations can run in parallel
  8. -
-

Future Enhancements

-
    -
  1. Provider Plugins - Support for external provider plugins
  2. -
  3. Provider Versioning - Multiple versions of same provider
  4. -
  5. Provider Composition - Compose providers for complex scenarios
  6. -
  7. Provider Marketplace - Community provider sharing
  8. -
-

API Reference

-

See the interface specification for complete function documentation:

-
get-provider-interface-docs | table
-
-

This returns the complete API with signatures and descriptions for all provider interface functions.

-

Cloud Provider Development Guide

-

Version: 2.0 -Status: Production Ready -Based On: Hetzner, UpCloud, AWS (3 completed providers)

-
-

Overview: 4-Task Completion Framework

-

A cloud provider is production-ready when it completes all 4 tasks:

-
- - - - -
TaskRequirementsReference
1. Nushell Compliance0 deprecated patterns, full implementationsprovisioning/extensions/providers/hetzner/
2. Test Infrastructure51 tests (14 unit + 37 integration, mock-based)provisioning/extensions/providers/upcloud/tests/
3. Runtime Templates3+ Jinja2/Bash templates for core resourcesprovisioning/extensions/providers/aws/templates/
4. Nickel ValidationSchemas pass nickel typecheckprovisioning/extensions/providers/hetzner/nickel/
-
-

Execution Sequence

-
Tarea 4 (5 min) ──────┐
-Tarea 1 (main) ───┐   ├──> Tarea 2 (tests)
-Tarea 3 (parallel)┘   │
-                      └──> Production Ready ✅
-
-
-

Nushell 0.109.0+ Core Rules

-

These rules are mandatory for all provider Nushell code:

-

Rule 1: Module System & Imports

-
use mod.nu
-use api.nu
-use servers.nu
-
-

Rule 2: Function Signatures

-
def function_name [param: type, optional: type = default] { }
-
-

Rule 3: Return Early, Fail Fast

-
def operation [resource: record] {
-    if ($resource | get -o id | is-empty) {
-        error make {msg: "Resource ID required"}
-    }
-}
-
-

Rule 4: Modern Error Handling (CRITICAL)

-

❌ FORBIDDEN - Deprecated try-catch:

-
try {
-    ^external_command
-} catch {|err|
-    print $"Error: ($err.msg)"
-}
-
-

✅ REQUIRED - Modern do/complete pattern:

-
let result = (do { ^external_command } | complete)
-
-if $result.exit_code != 0 {
-    error make {msg: $"Command failed: ($result.stderr)"}
-}
-
-$result.stdout
-
-

Rule 5: Atomic Operations

-

All operations must fully succeed or fully fail. No partial state changes.

-

Rule 12: Structured Error Returns

-
error make {
-    msg: "Human-readable message",
-    label: {text: "Error context", span: (metadata error).span}
-}
-
-

Critical Violations (INSTANT FAIL)

-

FORBIDDEN:

-
    -
  • try { } catch { } blocks
  • -
  • let mut variable = value (mutable state)
  • -
  • error make {msg: "Not implemented"} (stubs)
  • -
  • Empty function bodies returning ok
  • -
  • Deprecated error patterns
  • -
-
-

Nickel IaC: Three-File Pattern

-

All Nickel schemas follow this pattern:

-

contracts.ncl: Type Definitions

-
{
-  Server = {
-    id | String,
-    name | String,
-    instance_type | String,
-    zone | String,
-  },
-
-  Volume = {
-    id | String,
-    name | String,
-    size | Number,
-    type | String,
-  }
-}
-
-

defaults.ncl: Default Values

-
{
-  Server = {
-    instance_type = "t3.micro",
-    zone = "us-east-1a",
-  },
-
-  Volume = {
-    size = 20,
-    type = "gp3",
-  }
-}
-
-

main.ncl: Public API

-
let contracts = import "contracts.ncl" in
-let defaults = import "defaults.ncl" in
-
-{
-  make_server = fun config => defaults.Server & config,
-  make_volume = fun config => defaults.Volume & config,
-}
-
-

version.ncl: Version Tracking

-
{
-  provider_version = "1.0.0",
-  cli_tools = {
-    hcloud = "1.47.0+",
-  },
-  nickel_version = "1.7.0+",
-}
-
-

Validation:

-
nickel typecheck nickel/contracts.ncl
-nickel typecheck nickel/defaults.ncl
-nickel typecheck nickel/main.ncl
-nickel typecheck nickel/version.ncl
-nickel export nickel/main.ncl
-
-
-

Tarea 1: Nushell Compliance

-

Identify Violations

-
cd provisioning/extensions/providers/{PROVIDER}
-
-grep -r "try {" nulib/ --include="*.nu" | wc -l
-grep -r "let mut " nulib/ --include="*.nu" | wc -l
-grep -r "not implemented" nulib/ --include="*.nu" | wc -l
-
-

All three commands should return 0.

-

Fix Mutable Loops: Accumulation Pattern

-
def retry_with_backoff [
-    closure: closure,
-    max_attempts: int
-]: nothing -> any {
-    let result = (
-        0..$max_attempts | reduce --fold {
-            success: false,
-            value: null,
-            delay: 100 ms
-        } {|attempt, acc|
-            if $acc.success {
-                $acc
-            } else {
-                let op_result = (do { $closure | call } | complete)
-
-                if $op_result.exit_code == 0 {
-                    {success: true, value: $op_result.stdout, delay: $acc.delay}
-                } else if $attempt >= ($max_attempts - 1) {
-                    $acc
-                } else {
-                    sleep $acc.delay
-                    {success: false, value: null, delay: ($acc.delay * 2)}
-                }
-            }
-        }
-    )
-
-    if $result.success {
-        $result.value
-    } else {
-        error make {msg: $"Failed after ($max_attempts) attempts"}
-    }
-}
-
-

Fix Mutable Loops: Recursive Pattern

-
def _wait_for_state [
-    resource_id: string,
-    target_state: string,
-    timeout_sec: int,
-    elapsed: int = 0,
-    interval: int = 2
-]: nothing -> bool {
-    let current = (^aws ec2 describe-volumes \
-        --volume-ids $resource_id \
-        --query "Volumes[0].State" \
-        --output text)
-
-    if ($current | str contains $target_state) {
-        true
-    } else if $elapsed > $timeout_sec {
-        false
-    } else {
-        sleep ($"($interval)sec" | into duration)
-        _wait_for_state $resource_id $target_state $timeout_sec ($elapsed + $interval) $interval
-    }
-}
-
-

Fix Error Handling

-
def create_server [config: record] {
-    if ($config | get -o name | is-empty) {
-        error make {msg: "Server name required"}
-    }
-
-    let api_result = (do {
-        ^hcloud server create \
-            --name $config.name \
-            --type $config.instance_type \
-            --format json
-    } | complete)
-
-    if $api_result.exit_code != 0 {
-        error make {msg: $"Server creation failed: ($api_result.stderr)"}
-    }
-
-    let response = ($api_result.stdout | from json)
-    {
-        id: $response.server.id,
-        name: $response.server.name,
-        status: "created"
-    }
-}
-
-

Validation

-
cd provisioning/extensions/providers/{PROVIDER}
-
-for file in nulib/*/\*.nu; do
-    nu --ide-check 100 "$file" 2>&1 | grep -i error && exit 1
-done
-
-nu -c "use nulib/{provider}/mod.nu; print 'OK'"
-
-echo "✅ Nushell compliance complete"
-
-
-

Tarea 2: Test Infrastructure

-

Directory Structure

-
tests/
-├── mocks/
-│   └── mock_api_responses.json
-├── unit/
-│   └── test_utils.nu
-├── integration/
-│   ├── test_api_client.nu
-│   ├── test_server_lifecycle.nu
-│   └── test_pricing_cache.nu
-└── run_{provider}_tests.nu
-
-

Mock API Responses

-
{
-  "list_servers": {
-    "servers": [
-      {
-        "id": "srv-123",
-        "name": "test-server",
-        "status": "running"
-      }
-    ]
-  },
-  "error_401": {
-    "error": {"message": "Unauthorized", "code": 401}
-  },
-  "error_429": {
-    "error": {"message": "Rate limited", "code": 429}
-  }
-}
-
-

Unit Tests: 14 Tests

-
def test-result [name: string, result: bool] {
-    if $result {
-        print $"✓ ($name)"
-    } else {
-        print $"✗ ($name)"
-    }
-    $result
-}
-
-def test-validate-instance-id [] {
-    let valid = "i-1234567890abcdef0"
-    let invalid = "invalid-id"
-
-    let test1 = (test-result "Instance ID valid" ($valid | str contains "i-"))
-    let test2 = (test-result "Instance ID invalid" (($invalid | str contains "i-") == false))
-
-    $test1 and $test2
-}
-
-def test-validate-ipv4 [] {
-    let valid = "10.0.1.100"
-    let parts = ($valid | split row ".")
-    test-result "IPv4 four octets" (($parts | length) == 4)
-}
-
-def test-validate-instance-type [] {
-    let valid_types = ["t3.micro" "t3.small" "m5.large"]
-    let invalid = "invalid_type"
-
-    let test1 = (test-result "Instance type valid" (($valid_types | contains ["t3.micro"])))
-    let test2 = (test-result "Instance type invalid" (($valid_types | contains [$invalid]) == false))
-
-    $test1 and $test2
-}
-
-def test-validate-zone [] {
-    let valid_zones = ["us-east-1a" "us-east-1b" "eu-west-1a"]
-    let invalid = "invalid-zone"
-
-    let test1 = (test-result "Zone valid" (($valid_zones | contains ["us-east-1a"])))
-    let test2 = (test-result "Zone invalid" (($valid_zones | contains [$invalid]) == false))
-
-    $test1 and $test2
-}
-
-def test-validate-volume-id [] {
-    let valid = "vol-12345678"
-    let invalid = "invalid-vol"
-
-    let test1 = (test-result "Volume ID valid" ($valid | str contains "vol-"))
-    let test2 = (test-result "Volume ID invalid" (($invalid | str contains "vol-") == false))
-
-    $test1 and $test2
-}
-
-def test-validate-volume-state [] {
-    let valid_states = ["available" "in-use" "creating"]
-    let invalid = "pending"
-
-    let test1 = (test-result "Volume state valid" (($valid_states | contains ["available"])))
-    let test2 = (test-result "Volume state invalid" (($valid_states | contains [$invalid]) == false))
-
-    $test1 and $test2
-}
-
-def test-validate-cidr [] {
-    let valid = "10.0.0.0/16"
-    let invalid = "10.0.0.1"
-
-    let test1 = (test-result "CIDR valid" ($valid | str contains "/"))
-    let test2 = (test-result "CIDR invalid" (($invalid | str contains "/") == false))
-
-    $test1 and $test2
-}
-
-def test-validate-volume-type [] {
-    let valid_types = ["gp2" "gp3" "io1" "io2"]
-    let invalid = "invalid-type"
-
-    let test1 = (test-result "Volume type valid" (($valid_types | contains ["gp3"])))
-    let test2 = (test-result "Volume type invalid" (($valid_types | contains [$invalid]) == false))
-
-    $test1 and $test2
-}
-
-def test-validate-timestamp [] {
-    let valid = "2025-01-07T10:00:00.000Z"
-    let invalid = "not-a-timestamp"
-
-    let test1 = (test-result "Timestamp valid" ($valid | str contains "T" and $valid | str contains "Z"))
-    let test2 = (test-result "Timestamp invalid" (($invalid | str contains "T") == false))
-
-    $test1 and $test2
-}
-
-def test-validate-server-state [] {
-    let valid_states = ["running" "stopped" "pending"]
-    let invalid = "hibernating"
-
-    let test1 = (test-result "Server state valid" (($valid_states | contains ["running"])))
-    let test2 = (test-result "Server state invalid" (($valid_states | contains [$invalid]) == false))
-
-    $test1 and $test2
-}
-
-def test-validate-security-group [] {
-    let valid = "sg-12345678"
-    let invalid = "invalid-sg"
-
-    let test1 = (test-result "Security group valid" ($valid | str contains "sg-"))
-    let test2 = (test-result "Security group invalid" (($invalid | str contains "sg-") == false))
-
-    $test1 and $test2
-}
-
-def test-validate-memory [] {
-    let valid_mems = ["512 MB" "1 GB" "2 GB" "4 GB"]
-    let invalid = "0 GB"
-
-    let test1 = (test-result "Memory valid" (($valid_mems | contains ["1 GB"])))
-    let test2 = (test-result "Memory invalid" (($valid_mems | contains [$invalid]) == false))
-
-    $test1 and $test2
-}
-
-def test-validate-vcpu [] {
-    let valid_cpus = [1, 2, 4, 8, 16]
-    let invalid = 0
-
-    let test1 = (test-result "vCPU valid" (($valid_cpus | contains [1])))
-    let test2 = (test-result "vCPU invalid" (($valid_cpus | contains [$invalid]) == false))
-
-    $test1 and $test2
-}
-
-def main [] {
-    print "=== Unit Tests ==="
-    print ""
-
-    let results = [
-        (test-validate-instance-id),
-        (test-validate-ipv4),
-        (test-validate-instance-type),
-        (test-validate-zone),
-        (test-validate-volume-id),
-        (test-validate-volume-state),
-        (test-validate-cidr),
-        (test-validate-volume-type),
-        (test-validate-timestamp),
-        (test-validate-server-state),
-        (test-validate-security-group),
-        (test-validate-memory),
-        (test-validate-vcpu)
-    ]
-
-    let passed = ($results | where {|it| $it == true} | length)
-    let failed = ($results | where {|it| $it == false} | length)
-
-    print ""
-    print $"Results: ($passed) passed, ($failed) failed"
-
-    {
-        passed: $passed,
-        failed: $failed,
-        total: ($passed + $failed)
-    }
-}
-
-main
-
-

Integration Tests: 37 Tests across 3 Modules

-

Module 1: test_api_client.nu (13 tests)

-
    -
  • Response structure validation
  • -
  • Error handling for 401, 404, 429
  • -
  • Resource listing operations
  • -
  • Pricing data validation
  • -
-

Module 2: test_server_lifecycle.nu (12 tests)

-
    -
  • Server creation, listing, state
  • -
  • Instance type and zone info
  • -
  • Storage and security attachment
  • -
  • Server state transitions
  • -
-

Module 3: test_pricing_cache.nu (12 tests)

-
    -
  • Pricing data structure validation
  • -
  • On-demand vs reserved pricing
  • -
  • Cost calculations
  • -
  • Volume pricing operations
  • -
-

Test Orchestrator

-
def main [] {
-    print "=== Provider Test Suite ==="
-
-    let unit_result = (nu tests/unit/test_utils.nu)
-    let api_result = (nu tests/integration/test_api_client.nu)
-    let lifecycle_result = (nu tests/integration/test_server_lifecycle.nu)
-    let pricing_result = (nu tests/integration/test_pricing_cache.nu)
-
-    let total_passed = (
-        $unit_result.passed +
-        $api_result.passed +
-        $lifecycle_result.passed +
-        $pricing_result.passed
-    )
-
-    let total_failed = (
-        $unit_result.failed +
-        $api_result.failed +
-        $lifecycle_result.failed +
-        $pricing_result.failed
-    )
-
-    print $"Results: ($total_passed) passed, ($total_failed) failed"
-
-    {
-        passed: $total_passed,
-        failed: $total_failed,
-        success: ($total_failed == 0)
-    }
-}
-
-let result = (main)
-exit (if $result.success {0} else {1})
-
-

Validation

-
cd provisioning/extensions/providers/{PROVIDER}
-nu tests/run_{provider}_tests.nu
-
-

Expected: 51 tests passing, exit code 0

-
-

Tarea 3: Runtime Templates

-

Directory Structure

-
templates/
-├── {provider}_servers.j2
-├── {provider}_networks.j2
-└── {provider}_volumes.j2
-
-

Template Example

-
#!/bin/bash
-# {{ provider_name }} Server Provisioning
-set -e
-{% if debug %}set -x{% endif %}
-
-{%- for server in servers %}
-  {%- if server.name %}
-
-echo "Creating server: {{ server.name }}"
-
-{%- if server.instance_type %}
-INSTANCE_TYPE="{{ server.instance_type }}"
-{%- else %}
-INSTANCE_TYPE="t3.micro"
-{%- endif %}
-
-SERVER_ID=$(^hcloud server create \
-  --name "{{ server.name }}" \
-  --type $INSTANCE_TYPE \
-  --query 'id' \
-  --output text 2>/dev/null)
-
-if [ -z "$SERVER_ID" ]; then
-  echo "Failed to create server {{ server.name }}"
-  exit 1
-fi
-
-echo "✓ Server {{ server.name }} created: $SERVER_ID"
-
-  {%- endif %}
-{%- endfor %}
-
-echo "Server provisioning complete"
-
-

Validation

-
cd provisioning/extensions/providers/{PROVIDER}
-
-for template in templates/*.j2; do
-    bash -n <(sed 's/{%.*%}//' "$template" | sed 's/{{.*}}/x/g')
-done
-
-echo "✅ Templates valid"
-
-
-

Tarea 4: Nickel Schema Validation

-
cd provisioning/extensions/providers/{PROVIDER}
-
-nickel typecheck nickel/contracts.ncl || exit 1
-nickel typecheck nickel/defaults.ncl || exit 1
-nickel typecheck nickel/main.ncl || exit 1
-nickel typecheck nickel/version.ncl || exit 1
-
-nickel export nickel/main.ncl || exit 1
-
-echo "✅ Nickel schemas validated"
-
-
-

Complete Validation Script

-
#!/bin/bash
-set -e
-
-PROVIDER="hetzner"
-PROV="provisioning/extensions/providers/$PROVIDER"
-
-echo "=== Provider Completeness Check: $PROVIDER ==="
-
-echo ""
-echo "✓ Tarea 4: Validating Nickel..."
-nickel typecheck "$PROV/nickel/main.ncl"
-
-echo "✓ Tarea 1: Checking Nushell..."
-[ $(grep -r "try {" "$PROV/nulib" 2>/dev/null | wc -l) -eq 0 ]
-[ $(grep -r "let mut " "$PROV/nulib" 2>/dev/null | wc -l) -eq 0 ]
-echo "  - No deprecated patterns ✓"
-
-echo "✓ Tarea 3: Validating templates..."
-for f in "$PROV"/templates/*.j2; do
-    bash -n <(sed 's/{%.*%}//' "$f" | sed 's/{{.*}}/x/g')
-done
-
-echo "✓ Tarea 2: Running tests..."
-nu "$PROV/tests/run_${PROVIDER}_tests.nu"
-
-echo ""
-echo "╔════════════════════════════════════════╗"
-echo "║  ✅ ALL TASKS COMPLETE                 ║"
-echo "║     PRODUCTION READY                   ║"
-echo "╚════════════════════════════════════════╝"
-
-
-

Reference Implementations

-
    -
  • Hetzner: provisioning/extensions/providers/hetzner/
  • -
  • UpCloud: provisioning/extensions/providers/upcloud/
  • -
  • AWS: provisioning/extensions/providers/aws/
  • -
-

Use these as templates for new providers.

-
-

Quick Start

-
cd provisioning/extensions/providers/{PROVIDER}
-
-# Validate completeness
-nickel typecheck nickel/main.ncl && \
-[ $(grep -r "try {" nulib/ 2>/dev/null | wc -l) -eq 0 ] && \
-nu tests/run_{provider}_tests.nu && \
-for f in templates/*.j2; do bash -n <(sed 's/{%.*%}//' "$f"); done && \
-echo "✅ PRODUCTION READY"
-
-

Provider Distribution Guide

-

Strategic Guide for Provider Management and Distribution

-

This guide explains the two complementary approaches for managing providers in the provisioning system and when to use each.

-
-

Table of Contents

- -
-

Overview

-

The provisioning system supports two complementary approaches for provider management:

-
    -
  1. Module-Loader: Symlink-based local development with dynamic discovery
  2. -
  3. Provider Packs: Versioned, distributable artifacts for production
  4. -
-

Both approaches work seamlessly together and serve different phases of the development lifecycle.

-
-

Module-Loader Approach

-

Purpose

-

Fast, local development with direct access to provider source code.

-

How It Works

-
# Install provider for infrastructure (creates symlinks)
-provisioning providers install upcloud wuji
-
-# Internal Process:
-# 1. Discovers provider in extensions/providers/upcloud/
-# 2. Creates symlink: workspace/infra/wuji/.nickel-modules/upcloud_prov -> extensions/providers/upcloud/nickel/
-# 3. Updates workspace/infra/wuji/manifest.toml with local path dependency
-# 4. Updates workspace/infra/wuji/providers.manifest.yaml
-
-

Key Features

-

Instant Changes: Edit code in extensions/providers/, immediately available in infrastructure -✅ Auto-Discovery: Automatically finds all providers in extensions/ -✅ Simple Commands: providers install/remove/list/validate -✅ Easy Debugging: Direct access to source code -✅ No Packaging: Skip build/package step during development

-

Best Use Cases

-
    -
  • 🔧 Active Development: Writing new provider features
  • -
  • 🧪 Testing: Rapid iteration and testing cycles
  • -
  • 🏠 Local Infrastructure: Single machine or small team
  • -
  • 📝 Debugging: Need to modify and test provider code
  • -
  • 🎓 Learning: Understanding how providers work
  • -
-

Example Workflow

-
# 1. List available providers
-provisioning providers list
-
-# 2. Install provider for infrastructure
-provisioning providers install upcloud wuji
-
-# 3. Verify installation
-provisioning providers validate wuji
-
-# 4. Edit provider code
-vim extensions/providers/upcloud/nickel/server_upcloud.ncl
-
-# 5. Test changes immediately (no repackaging!)
-cd workspace/infra/wuji
-nickel export main.ncl
-
-# 6. Remove when done
-provisioning providers remove upcloud wuji
-
-

File Structure

-
extensions/providers/upcloud/
-├── nickel/
-│   ├── manifest.toml
-│   ├── server_upcloud.ncl
-│   └── network_upcloud.ncl
-└── README.md
-
-workspace/infra/wuji/
-├── .nickel-modules/
-│   └── upcloud_prov -> ../../../../extensions/providers/upcloud/nickel/  # Symlink
-├── manifest.toml        # Updated with local path dependency
-├── providers.manifest.yaml  # Tracks installed providers
-└── schemas/
-    └── servers.ncl
-
-
-

Provider Packs Approach

-

Purpose

-

Create versioned, distributable artifacts for production deployments and team collaboration.

-

How It Works

-
# Package providers into distributable artifacts
-export PROVISIONING=/Users/Akasha/project-provisioning/provisioning
-./provisioning/core/cli/pack providers
-
-# Internal Process:
-# 1. Enters each provider's nickel/ directory
-# 2. Runs: nickel export . --format json (generates JSON for distribution)
-# 3. Creates: upcloud_prov_0.0.1.tar
-# 4. Generates metadata: distribution/registry/upcloud_prov.json
-
-

Key Features

-

Versioned Artifacts: Immutable, reproducible packages -✅ Portable: Share across teams and environments -✅ Registry Publishing: Push to artifact registries -✅ Metadata: Version, maintainer, license information -✅ Production-Ready: What you package is what you deploy

-

Best Use Cases

-
    -
  • 🚀 Production Deployments: Stable, tested provider versions
  • -
  • 📦 Distribution: Share across teams or organizations
  • -
  • 🔄 CI/CD Pipelines: Automated build and deploy
  • -
  • 📊 Version Control: Track provider versions explicitly
  • -
  • 🌐 Registry Publishing: Publish to artifact registries
  • -
  • 🔒 Compliance: Immutable artifacts for auditing
  • -
-

Example Workflow

-
# Set environment variable
-export PROVISIONING=/Users/Akasha/project-provisioning/provisioning
-
-# 1. Package all providers
-./provisioning/core/cli/pack providers
-
-# Output:
-# ✅ Creates: distribution/packages/upcloud_prov_0.0.1.tar
-# ✅ Creates: distribution/packages/aws_prov_0.0.1.tar
-# ✅ Creates: distribution/packages/local_prov_0.0.1.tar
-# ✅ Metadata: distribution/registry/*.json
-
-# 2. List packaged modules
-./provisioning/core/cli/pack list
-
-# 3. Package only core schemas
-./provisioning/core/cli/pack core
-
-# 4. Clean old packages (keep latest 3 versions)
-./provisioning/core/cli/pack clean --keep-latest 3
-
-# 5. Upload to registry (your implementation)
-# rsync distribution/packages/*.tar repo.jesusperez.pro:/registry/
-
-

File Structure

-
provisioning/
-├── distribution/
-│   ├── packages/
-│   │   ├── provisioning_0.0.1.tar       # Core schemas
-│   │   ├── upcloud_prov_0.0.1.tar       # Provider packages
-│   │   ├── aws_prov_0.0.1.tar
-│   │   └── local_prov_0.0.1.tar
-│   └── registry/
-│       ├── provisioning_core.json       # Metadata
-│       ├── upcloud_prov.json
-│       ├── aws_prov.json
-│       └── local_prov.json
-└── extensions/providers/                # Source code
-
-

Package Metadata Example

-
{
-  "name": "upcloud_prov",
-  "version": "0.0.1",
-  "package_file": "/path/to/upcloud_prov_0.0.1.tar",
-  "created": "2025-09-29 20:47:21",
-  "maintainer": "JesusPerezLorenzo",
-  "repository": "https://repo.jesusperez.pro/provisioning",
-  "license": "MIT",
-  "homepage": "https://github.com/jesusperezlorenzo/provisioning"
-}
-
-
-

Comparison Matrix

-
- - - - - - - - - - - - -
FeatureModule-LoaderProvider Packs
Speed⚡ Instant (symlinks)📦 Requires packaging
Versioning❌ No explicit versions✅ Semantic versioning
Portability❌ Local filesystem only✅ Distributable archives
Development✅ Excellent (live reload)⚠️ Need repackage cycle
Production⚠️ Mutable source✅ Immutable artifacts
Discovery✅ Auto-discovery⚠️ Manual tracking
Team Sharing⚠️ Git repository only✅ Registry + Git
Debugging✅ Direct source access❌ Need to unpack
Rollback⚠️ Git revert✅ Version pinning
Compliance❌ Hard to audit✅ Signed artifacts
Setup Time⚡ Seconds⏱️ Minutes
CI/CD⚠️ Not ideal✅ Perfect
-
-
- -

Development Phase

-
# 1. Start with module-loader for development
-provisioning providers list
-provisioning providers install upcloud wuji
-
-# 2. Develop and iterate quickly
-vim extensions/providers/upcloud/nickel/server_upcloud.ncl
-# Test immediately - no packaging needed
-
-# 3. Validate before release
-provisioning providers validate wuji
-nickel export workspace/infra/wuji/main.ncl
-
-

Release Phase

-
# 4. Create release packages
-export PROVISIONING=/Users/Akasha/project-provisioning/provisioning
-./provisioning/core/cli/pack providers
-
-# 5. Verify packages
-./provisioning/core/cli/pack list
-
-# 6. Tag release
-git tag v0.0.2
-git push origin v0.0.2
-
-# 7. Publish to registry (your workflow)
-rsync distribution/packages/*.tar user@repo.jesusperez.pro:/registry/v0.0.2/
-
-

Production Deployment

-
# 8. Download specific version from registry
-wget https://repo.jesusperez.pro/registry/v0.0.2/upcloud_prov_0.0.2.tar
-
-# 9. Extract and install
-tar -xf upcloud_prov_0.0.2.tar -C infrastructure/providers/
-
-# 10. Use in production infrastructure
-# (Configure manifest.toml to point to extracted package)
-
-
-

Command Reference

-

Module-Loader Commands

-
# List all available providers
-provisioning providers list [--kcl] [--format table|json|yaml]
-
-# Show provider information
-provisioning providers info <provider> [--kcl]
-
-# Install provider for infrastructure
-provisioning providers install <provider> <infra> [--version 0.0.1]
-
-# Remove provider from infrastructure
-provisioning providers remove <provider> <infra> [--force]
-
-# List installed providers
-provisioning providers installed <infra> [--format table|json|yaml]
-
-# Validate provider installation
-provisioning providers validate <infra>
-
-# Sync KCL dependencies
-./provisioning/core/cli/module-loader sync-kcl <infra>
-
-

Provider Pack Commands

-
# Set environment variable (required)
-export PROVISIONING=/path/to/provisioning
-
-# Package core provisioning schemas
-./provisioning/core/cli/pack core [--output dir] [--version 0.0.1]
-
-# Package single provider
-./provisioning/core/cli/pack provider <name> [--output dir] [--version 0.0.1]
-
-# Package all providers
-./provisioning/core/cli/pack providers [--output dir]
-
-# List all packages
-./provisioning/core/cli/pack list [--format table|json|yaml]
-
-# Clean old packages
-./provisioning/core/cli/pack clean [--keep-latest 3] [--dry-run]
-
-
-

Real-World Scenarios

-

Scenario 1: Solo Developer - Local Infrastructure

-

Situation: Working alone on local infrastructure projects

-

Recommendation: Module-Loader only

-
# Simple and fast
-providers install upcloud homelab
-providers install aws cloud-backup
-# Edit and test freely
-
-

Why: No need for versioning, packaging overhead unnecessary.

-
-

Scenario 2: Small Team - Shared Development

-

Situation: 2-5 developers sharing code via Git

-

Recommendation: Module-Loader + Git

-
# Each developer
-git clone repo
-providers install upcloud project-x
-# Make changes, commit to Git
-git commit -m "Add upcloud GPU support"
-git push
-# Others pull changes
-git pull
-# Changes immediately available via symlinks
-
-

Why: Git provides version control, symlinks provide instant updates.

-
-

Scenario 3: Medium Team - Multiple Projects

-

Situation: 10+ developers, multiple infrastructure projects

-

Recommendation: Hybrid (Module-Loader dev + Provider Packs releases)

-
# Development (team member)
-providers install upcloud staging-env
-# Make changes...
-
-# Release (release engineer)
-pack providers                    # Create v0.2.0
-git tag v0.2.0
-# Upload to internal registry
-
-# Other projects
-# Download upcloud_prov_0.2.0.tar
-# Use stable, tested version
-
-

Why: Developers iterate fast, other teams use stable versions.

-
-

Scenario 4: Enterprise - Production Infrastructure

-

Situation: Critical production systems, compliance requirements

-

Recommendation: Provider Packs only

-
# CI/CD Pipeline
-pack providers                    # Build artifacts
-# Run tests on packages
-# Sign packages
-# Publish to artifact registry
-
-# Production Deployment
-# Download signed upcloud_prov_1.0.0.tar
-# Verify signature
-# Deploy immutable artifact
-# Document exact versions for compliance
-
-

Why: Immutability, auditability, and rollback capabilities required.

-
-

Scenario 5: Open Source - Public Distribution

-

Situation: Sharing providers with community

-

Recommendation: Provider Packs + Registry

-
# Maintainer
-pack providers
-# Create release on GitHub
-gh release create v1.0.0 distribution/packages/*.tar
-
-# Community User
-# Download from GitHub releases
-wget https://github.com/project/releases/v1.0.0/upcloud_prov_1.0.0.tar
-# Extract and use
-
-

Why: Easy distribution, versioning, and downloading for users.

-
-

Best Practices

-

For Development

-
    -
  1. -

    Use Module-Loader by default

    -
      -
    • Fast iteration is crucial during development
    • -
    • Symlinks allow immediate testing
    • -
    -
  2. -
  3. -

    Keep providers.manifest.yaml in Git

    -
      -
    • Documents which providers are used
    • -
    • Team members can sync easily
    • -
    -
  4. -
  5. -

    Validate before committing

    -
    providers validate wuji
    -nickel eval defs/servers.ncl
    -
    -
  6. -
-

For Releases

-
    -
  1. -

    Version Everything

    -
      -
    • Use semantic versioning (0.1.0, 0.2.0, 1.0.0)
    • -
    • Update version in kcl.mod before packing
    • -
    -
  2. -
  3. -

    Create Packs for Releases

    -
    pack providers --version 0.2.0
    -git tag v0.2.0
    -
    -
  4. -
  5. -

    Test Packs Before Publishing

    -
      -
    • Extract and test packages
    • -
    • Verify metadata is correct
    • -
    -
  6. -
-

For Production

-
    -
  1. -

    Pin Versions

    -
      -
    • Use exact versions in production kcl.mod
    • -
    • Never use “latest” or symlinks
    • -
    -
  2. -
  3. -

    Maintain Artifact Registry

    -
      -
    • Store all production versions
    • -
    • Keep old versions for rollback
    • -
    -
  4. -
  5. -

    Document Deployments

    -
      -
    • Record which versions deployed when
    • -
    • Maintain change log
    • -
    -
  6. -
-

For CI/CD

-
    -
  1. -

    Automate Pack Creation

    -
    # .github/workflows/release.yml
    -- name: Pack Providers
    -  run: |
    -    export PROVISIONING=$GITHUB_WORKSPACE/provisioning
    -    ./provisioning/core/cli/pack providers
    -
    -
  2. -
  3. -

    Run Tests on Packs

    -
      -
    • Extract packages
    • -
    • Run validation tests
    • -
    • Ensure they work in isolation
    • -
    -
  4. -
  5. -

    Publish Automatically

    -
      -
    • Upload to artifact registry on tag
    • -
    • Update package index
    • -
    -
  6. -
-
-

Migration Path

-

From Module-Loader to Packs

-

When you’re ready to move to production:

-
# 1. Clean up development setup
-providers remove upcloud wuji
-
-# 2. Create release pack
-pack providers --version 1.0.0
-
-# 3. Extract pack in infrastructure
-cd workspace/infra/wuji
-tar -xf ../../../distribution/packages/upcloud_prov_1.0.0.tar vendor/
-
-# 4. Update kcl.mod to use vendored path
-# Change from: upcloud_prov = { path = "./.kcl-modules/upcloud_prov" }
-# To: upcloud_prov = { path = "./vendor/upcloud_prov", version = "1.0.0" }
-
-# 5. Test
-nickel eval defs/servers.ncl
-
-

From Packs Back to Module-Loader

-

When you need to debug or develop:

-
# 1. Remove vendored version
-rm -rf workspace/infra/wuji/vendor/upcloud_prov
-
-# 2. Install via module-loader
-providers install upcloud wuji
-
-# 3. Make changes in extensions/providers/upcloud/kcl/
-
-# 4. Test immediately
-cd workspace/infra/wuji
-nickel eval defs/servers.ncl
-
-
-

Configuration

-

Environment Variables

-
# Required for pack commands
-export PROVISIONING=/path/to/provisioning
-
-# Alternative
-export PROVISIONING_CONFIG=/path/to/provisioning
-
-

Config Files

-

Distribution settings in provisioning/config/config.defaults.toml:

-
[distribution]
-pack_path = "{{paths.base}}/distribution/packages"
-registry_path = "{{paths.base}}/distribution/registry"
-cache_path = "{{paths.base}}/distribution/cache"
-registry_type = "local"
-
-[distribution.metadata]
-maintainer = "JesusPerezLorenzo"
-repository = "https://repo.jesusperez.pro/provisioning"
-license = "MIT"
-homepage = "https://github.com/jesusperezlorenzo/provisioning"
-
-[kcl]
-core_module = "{{paths.base}}/kcl"
-core_version = "0.0.1"
-core_package_name = "provisioning_core"
-use_module_loader = true
-modules_dir = ".kcl-modules"
-
-
-

Troubleshooting

-

Module-Loader Issues

-

Problem: Provider not found after install

-
# Check provider exists
-providers list | grep upcloud
-
-# Validate installation
-providers validate wuji
-
-# Check symlink
-ls -la workspace/infra/wuji/.kcl-modules/
-
-

Problem: Changes not reflected

-
# Verify symlink is correct
-readlink workspace/infra/wuji/.kcl-modules/upcloud_prov
-
-# Should point to extensions/providers/upcloud/kcl/
-
-

Provider Pack Issues

-

Problem: No .tar file created

-
# Check KCL version (need 0.11.3+)
-kcl version
-
-# Check kcl.mod exists
-ls extensions/providers/upcloud/kcl/kcl.mod
-
-

Problem: PROVISIONING environment variable not set

-
# Set it
-export PROVISIONING=/Users/Akasha/project-provisioning/provisioning
-
-# Or add to shell profile
-echo 'export PROVISIONING=/path/to/provisioning' >> ~/.zshrc
-
-
-

Conclusion

-

Both approaches are valuable and complementary:

-
    -
  • Module-Loader: Development velocity, rapid iteration
  • -
  • Provider Packs: Production stability, version control
  • -
-

Default Strategy:

-
    -
  • Use Module-Loader for day-to-day development
  • -
  • Create Provider Packs for releases and production
  • -
  • Both systems work seamlessly together
  • -
-

The system is designed for flexibility - choose the right tool for your current phase of work!

-
-

Additional Resources

- -
-

Document Version: 1.0.0 -Last Updated: 2025-09-29 -Maintained by: JesusPerezLorenzo

-

Provider Comparison Matrix

-

This document provides a comprehensive comparison of supported cloud providers: Hetzner, UpCloud, AWS, and DigitalOcean. Use this matrix to make -informed decisions about which provider is best suited for your workloads.

-

Feature Comparison

-

Compute

-
- - - - - - - - -
FeatureHetznerUpCloudAWSDigitalOcean
Product NameCloud ServersServersEC2Droplets
Instance SizingStandard, dedicated cores2-32 vCPUsExtensive (t2, t3, m5, c5, etc)1-48 vCPUs
Custom CPU/RAMLimited
Hourly Billing
Monthly Discount30%25%~30% (RI)~25%
GPU Instances
Auto-scalingVia APIVia APINative (ASG)Via API
Bare Metal✓ (EC2)
-
-

Block Storage

-
- - - - - - - - -
FeatureHetznerUpCloudAWSDigitalOcean
Product NameVolumesStorageEBSVolumes
SSD Volumes✓ (gp3, io1)
HDD Volumes✓ (st1, sc1)
Max Volume Size10 TBUnlimited16 TB100 TB
IOPS ProvisioningLimited
Snapshots
Encryption
Backup Service✓ (AWS Backup)
-
-

Object Storage

-
- - - - - - - - -
FeatureHetznerUpCloudAWSDigitalOcean
Product NameObject StorageS3Spaces
API CompatibilityS3-compatibleS3 (native)S3-compatible
Pricing (per GB)€0.025N/A$0.023$0.015
Regions2N/A30+4
VersioningN/A
Lifecycle RulesN/A
CDN IntegrationN/A✓ (CloudFront)✓ (CDN add-on)
Access ControlBucket policiesN/AIAM + bucket policiesToken-based
-
-

Load Balancing

-
- - - - - - - - - -
FeatureHetznerUpCloudAWSDigitalOcean
Product NameLoad BalancerLoad BalancerELB/ALB/NLBLoad Balancer
TypeLayer 4/7Layer 4Layer 4/7Layer 4/7
Health Checks
SSL/TLS TerminationLimited
Path-based Routing✓ (ALB)
Host-based Routing✓ (ALB)
Sticky Sessions
Geographic Distribution✓ (multi-region)
DDoS ProtectionBasic✓ (Shield)
-
-

Managed Databases

-
- - - - - - - - -
FeatureHetznerUpCloudAWSDigitalOcean
PostgreSQL✓ (RDS)
MySQL✓ (RDS)
Redis✓ (ElastiCache)
MongoDB✓ (DocumentDB)
Multi-AZN/AN/A
Automatic BackupsN/AN/A
Read ReplicasN/AN/A
Param GroupsN/AN/A
-
-

Kubernetes

-
- - - - - - - -
FeatureHetznerUpCloudAWSDigitalOcean
ServiceManual K8sManual K8sEKSDOKS
Managed Service
Control Plane Managed
Node Management✓ (node groups)✓ (node pools)
Multi-AZ
Ingress SupportVia add-onVia add-on✓ (ALB)
Storage ClassesVia add-onVia add-on✓ (EBS)
-
-

CDN/Edge

-
- - - - - - - -
FeatureHetznerUpCloudAWSDigitalOcean
CDN Service✓ (CloudFront)
Edge Locations600+12+
Geographic Routing
Cache Invalidation
OriginsAnyHTTP/S, Object Storage
SSL/TLS
DDoS Protection✓ (Shield)
-
-

DNS

-
- - - - - - -
FeatureHetznerUpCloudAWSDigitalOcean
DNS Service✓ (Basic)✓ (Route53)
ZonesN/A
FailoverManualN/A✓ (health checks)✓ (health checks)
GeolocationN/A
DNSSECN/A
API ManagementLimitedN/AFullFull
-
-

Pricing Comparison

-

Compute Pricing (Monthly)

-

Comparison for 1-year term where applicable:

-
- - - - - -
ConfigurationHetznerUpCloudAWS*DigitalOcean
1 vCPU, 1 GB RAM€3.29$5$18 (t3.micro)$6
2 vCPU, 4 GB RAM€6.90$15$36 (t3.small)$24
4 vCPU, 8 GB RAM€13.80$30$73 (t3.medium)$48
8 vCPU, 16 GB RAM€27.60$60$146 (t3.large)$96
16 vCPU, 32 GB RAM€55.20$120$291 (t3.xlarge)$192
-
-

*AWS pricing: on-demand; reserved instances 25-30% discount

-

Storage Pricing (Monthly)

-

Per GB for block storage:

-
- - - - -
ProviderPrice/GBMonthly Cost (100 GB)
Hetzner€0.026€2.60
UpCloud$0.025$2.50
AWS EBS$0.10$10.00
DigitalOcean$0.10$10.00
-
-

Data Transfer Pricing

-

Outbound data transfer (per GB):

-
- - - - -
ProviderFirst 1 TBBeyond 1 TB
HetznerIncluded€0.12/GB
UpCloud$0.02/GB$0.01/GB
AWS$0.09/GB$0.085/GB
DigitalOcean$0.01/GB$0.01/GB
-
-

Total Cost of Ownership (TCO) Examples

-

Small Application (2 servers, 100 GB storage)

-
- - - - -
ProviderComputeStorageData TransferMonthly
Hetzner€13.80€2.60Included€16.40
UpCloud$30$2.50$20$52.50
AWS$72$10$45$127
DigitalOcean$48$10Included$58
-
-

Medium Application (5 servers, 500 GB storage, 10 TB data transfer)

-
- - - - -
ProviderComputeStorageData TransferMonthly
Hetzner€69€13€1,200€1,282
UpCloud$150$12.50$200$362.50
AWS$360$50$900$1,310
DigitalOcean$240$50Included$290
-
-

Regional Availability

-

Hetzner Regions

-
- - - - -
RegionLocationData CenterHighlights
nbg1Nuremberg, Germany3EU hub, good performance
fsn1Falkenstein, Germany1Lower latency, German regulations
hel1Helsinki, Finland1Nordic region option
ashAshburn, USA1North American presence
-
-

UpCloud Regions

-
- - - - - - -
RegionLocationHighlights
fi-hel1Helsinki, FinlandPrimary EU location
de-fra1Frankfurt, GermanyEU alternative
gb-lon1London, UKEuropean coverage
us-nyc1New York, USANorth America
sg-sin1SingaporeAsia Pacific
jp-tok1Tokyo, JapanAPAC alternative
-
-

AWS Regions (Selection)

-
- - - - - -
RegionLocationAvailability ZonesHighlights
us-east-1N. Virginia, USA6Largest, most services
eu-west-1Ireland3EU primary, GDPR compliant
eu-central-1Frankfurt, Germany3German data residency
ap-southeast-1Singapore3APAC primary
ap-northeast-1Tokyo, Japan4Asia alternative
-
-

DigitalOcean Regions

-
- - - - - - -
RegionLocationHighlights
nyc3New York, USAPrimary US location
sfo3San Francisco, USAUS West Coast
lon1London, UKEuropean hub
fra1Frankfurt, GermanyGerman regulations
sgp1SingaporeAPAC coverage
blr1Bangalore, IndiaIndia region
-
-

Regional Coverage Summary

-

Best Global Coverage: AWS (30+ regions, most services) -Best EU Coverage: All providers have good EU options -Best APAC Coverage: AWS (most regions), DigitalOcean (Singapore) -Best North America: All providers have coverage -Emerging Markets: DigitalOcean (India via Bangalore)

-

Compliance and Certifications

-

Security Standards

-
- - - - - - -
StandardHetznerUpCloudAWSDigitalOcean
GDPR
CCPA
SOC 2 Type II
ISO 27001
ISO 9001
FedRAMP
-
-

Industry-Specific Compliance

-
- - - - - -
StandardHetznerUpCloudAWSDigitalOcean
HIPAA✓**
PCI-DSS
HITRUST
FIPS 140-2
SOX (Sarbanes-Oxley)LimitedLimitedLimited
-
-

**DigitalOcean: Requires BAA for HIPAA compliance

-

Data Residency Support

-
- - - - - - - -
RegionHetznerUpCloudAWSDigitalOcean
EU (GDPR)✓ DE,FI✓ FI,DE,GB✓ (multiple)✓ (multiple)
Germany (NIS2)
UK (Post-Brexit)✓ GB
USA (CCPA)
Canada
Australia
India
-
-

Use Case Recommendations

-

1. Cost-Sensitive Startups

-

Recommended: Hetzner primary + DigitalOcean backup

-

Rationale:

-
    -
  • Hetzner has best price/performance ratio
  • -
  • DigitalOcean for geographic diversification
  • -
  • Both have simple interfaces and good documentation
  • -
  • Monthly cost: $30-80 for basic HA setup
  • -
-

Example Setup:

-
    -
  • Primary: Hetzner cx31 (2 vCPU, 4 GB)
  • -
  • Backup: DigitalOcean $24/month droplet
  • -
  • Database: Self-managed PostgreSQL or Hetzner volume
  • -
  • Total: ~$35/month
  • -
-

2. Enterprise Production

-

Recommended: AWS primary + UpCloud backup

-

Rationale:

-
    -
  • AWS for managed services and compliance
  • -
  • UpCloud for cost-effective disaster recovery
  • -
  • AWS compliance certifications (HIPAA, FIPS, SOC2)
  • -
  • Multiple regions within AWS
  • -
  • Mature enterprise support
  • -
-

Example Setup:

-
    -
  • Primary: AWS RDS (managed DB)
  • -
  • Secondary: UpCloud for compute burst
  • -
  • Compliance: Full audit trail and encryption
  • -
-

3. High-Performance Computing

-

Recommended: Hetzner + AWS spot instances

-

Rationale:

-
    -
  • Hetzner for sustained compute (good price)
  • -
  • AWS spot for burst workloads (70-90% discount)
  • -
  • Hetzner bare metal for specialized workloads
  • -
  • Cost-effective scaling
  • -
-

4. Multi-Region Global Application

-

Recommended: AWS + DigitalOcean + Hetzner

-

Rationale:

-
    -
  • AWS for primary regions and managed services
  • -
  • DigitalOcean for edge locations and simpler regions
  • -
  • Hetzner for EU cost optimization
  • -
  • Geographic redundancy across 3 providers
  • -
-

Example Setup:

-
    -
  • US: AWS (primary region)
  • -
  • EU: Hetzner (cost-optimized)
  • -
  • APAC: DigitalOcean (Singapore)
  • -
  • Global: CloudFront CDN
  • -
-

5. Database-Heavy Applications

-

Recommended: AWS RDS/ElastiCache + DigitalOcean Spaces

-

Rationale:

-
    -
  • AWS managed databases are feature-rich
  • -
  • DigitalOcean managed DB for simpler needs
  • -
  • Both support replicas and backups
  • -
  • Cost: $60-200/month for medium database
  • -
-

6. Web Applications

-

Recommended: DigitalOcean + AWS

-

Rationale:

-
    -
  • DigitalOcean for simplicity and speed
  • -
  • Droplets easy to manage and scale
  • -
  • AWS for advanced features and multi-region
  • -
  • Good community and documentation
  • -
-

Provider Strength Matrix

-

Performance ⚡

-
- - - - -
CategoryWinnerNotes
CPU PerformanceHetznerDedicated cores, good specs per price
Network BandwidthAWS1Gbps+ guaranteed in multiple regions
Storage IOPSAWSgp3 with 16K IOPS provisioning
Latency (Global)AWSMost regions, best infrastructure
-
-

Cost 💰

-
- - - - -
CategoryWinnerNotes
ComputeHetzner50% cheaper than AWS on-demand
Managed ServicesAWSOnly provider with full managed stack
Data TransferDigitalOceanIncluded with many services
StorageHetzner Object Storage€0.025/GB vs AWS S3 $0.023/GB
-
-

Ease of Use 🎯

-
- - - - -
CategoryWinnerNotes
UI/DashboardDigitalOceanSimple, intuitive, clear pricing
CLI ToolsAWSComprehensive aws-cli (but steep)
API DocumentationDigitalOceanClear examples, community-driven
Getting StartedDigitalOceanFastest path to first deployment
-
-

Enterprise Features 🏢

-
- - - - -
CategoryWinnerNotes
Managed ServicesAWSRDS, ElastiCache, SQS, SNS, etc
ComplianceAWSMost certifications (HIPAA, FIPS, etc)
SupportAWS24/7 support with paid plans
ScaleAWSBest for 1000+ servers
-
-

Decision Matrix

-

Use this matrix to quickly select a provider:

-
If you need:                           Then use:
-─────────────────────────────────────────────────────────────
-Lowest cost compute                    Hetzner
-Simplest interface                     DigitalOcean
-Managed databases                      AWS or DigitalOcean
-Global multi-region                    AWS
-Compliance (HIPAA/FIPS)                AWS
-European data residency                Hetzner or DigitalOcean
-High performance compute               Hetzner or AWS (bare metal)
-Disaster recovery setup                UpCloud or Hetzner
-Quick startup                          DigitalOcean
-Enterprise SLA                         AWS or UpCloud
-
-

Conclusion

-
    -
  • Hetzner: Best for cost-conscious teams, European focus, good performance
  • -
  • UpCloud: Mid-market option, Nordic/EU focus, reliable alternative
  • -
  • AWS: Enterprise standard, global coverage, most services, highest cost
  • -
  • DigitalOcean: Developer-friendly, simplicity-focused, good value
  • -
-

For most organizations, a multi-provider strategy combining Hetzner (compute), AWS (managed services), and DigitalOcean (edge) provides the best -balance of cost, capability, and resilience.

-

Taskserv Quick Guide

-

🚀 Quick Start

-

Create a New Taskserv (Interactive)

-
nu provisioning/tools/create-taskserv-helper.nu interactive
-
-

Create a New Taskserv (Direct)

-
nu provisioning/tools/create-taskserv-helper.nu create my-api \
-  --category development \
-  --port 8080 \
-  --description "My REST API service"
-
-

📋 5-Minute Setup

-

1. Choose Your Method

-
    -
  • Interactive: nu provisioning/tools/create-taskserv-helper.nu interactive
  • -
  • Command Line: Use the direct command above
  • -
  • Manual: Follow the structure guide below
  • -
-

2. Basic Structure

-
my-service/
-├── nickel/
-│   ├── manifest.toml   # Package definition
-│   ├── my-service.ncl  # Main schema
-│   └── version.ncl     # Version info
-├── default/
-│   ├── defs.toml       # Default config
-│   └── install-*.sh    # Install script
-└── README.md           # Documentation
-
-

3. Essential Files

-

manifest.toml (package definition):

-
[package]
-name = "my-service"
-version = "1.0.0"
-description = "My service"
-
-[dependencies]
-k8s = { oci = "oci://ghcr.io/kcl-lang/k8s", tag = "1.30" }
-
-

my-service.ncl (main schema):

-
let MyService = {
-    name | String,
-    version | String,
-    port | Number,
-    replicas | Number,
-} in
-
-{
-    my_service_config = {
-        name = "my-service",
-        version = "latest",
-        port = 8080,
-        replicas = 1,
-    }
-}
-
-

4. Test Your Taskserv

-
# Discover your taskserv
-nu -c "use provisioning/core/nulib/taskservs/discover.nu *; get-taskserv-info my-service"
-
-# Test layer resolution
-nu -c "use provisioning/workspace/tools/layer-utils.nu *; test_layer_resolution my-service wuji upcloud"
-
-# Deploy with check
-provisioning/core/cli/provisioning taskserv create my-service --infra wuji --check
-
-

🎯 Common Patterns

-

Web Service

-
let WebService = {
-    name | String,
-    version | String | default = "latest",
-    port | Number | default = 8080,
-    replicas | Number | default = 1,
-    ingress | {
-        enabled | Bool | default = true,
-        hostname | String,
-        tls | Bool | default = false,
-    },
-    resources | {
-        cpu | String | default = "100m",
-        memory | String | default = "128Mi",
-    },
-} in
-WebService
-
-

Database Service

-
let DatabaseService = {
-    name | String,
-    version | String | default = "latest",
-    port | Number | default = 5432,
-    persistence | {
-        enabled | Bool | default = true,
-        size | String | default = "10Gi",
-        storage_class | String | default = "ssd",
-    },
-    auth | {
-        database | String | default = "app",
-        username | String | default = "user",
-        password_secret | String,
-    },
-} in
-DatabaseService
-
-

Background Worker

-
let BackgroundWorker = {
-    name | String,
-    version | String | default = "latest",
-    replicas | Number | default = 1,
-    job | {
-        schedule | String | optional,  # Cron format for scheduled jobs
-        parallelism | Number | default = 1,
-        completions | Number | default = 1,
-    },
-    resources | {
-        cpu | String | default = "500m",
-        memory | String | default = "512Mi",
-    },
-} in
-BackgroundWorker
-
-

🛠️ CLI Shortcuts

-

Discovery

-
# List all taskservs
-nu -c "use provisioning/core/nulib/taskservs/discover.nu *; discover-taskservs | select name group"
-
-# Search taskservs
-nu -c "use provisioning/core/nulib/taskservs/discover.nu *; search-taskservs redis"
-
-# Show stats
-nu -c "use provisioning/workspace/tools/layer-utils.nu *; show_layer_stats"
-
-

Development

-
# Check Nickel syntax
-nickel typecheck provisioning/extensions/taskservs/{category}/{name}/schemas/{name}.ncl
-
-# Generate configuration
-provisioning/core/cli/provisioning taskserv generate {name} --infra {infra}
-
-# Version management
-provisioning/core/cli/provisioning taskserv versions {name}
-provisioning/core/cli/provisioning taskserv check-updates
-
-

Testing

-
# Dry run deployment
-provisioning/core/cli/provisioning taskserv create {name} --infra {infra} --check
-
-# Layer resolution debug
-nu -c "use provisioning/workspace/tools/layer-utils.nu *; test_layer_resolution {name} {infra} {provider}"
-
-

📚 Categories Reference

-
- - - - - - - -
CategoryExamplesUse Case
container-runtimecontainerd, crio, podmanContainer runtime engines
databasespostgres, redisDatabase services
developmentcoder, gitea, desktopDevelopment tools
infrastructurekms, webhook, osSystem infrastructure
kuberneteskubernetesKubernetes orchestration
networkingcilium, coredns, etcdNetwork services
storagerook-ceph, external-nfsStorage solutions
-
-

🔧 Troubleshooting

-

Taskserv Not Found

-
# Check if discovered
-nu -c "use provisioning/core/nulib/taskservs/discover.nu *; discover-taskservs | where name == my-service"
-
-# Verify kcl.mod exists
-ls provisioning/extensions/taskservs/{category}/my-service/kcl/kcl.mod
-
-

Layer Resolution Issues

-
# Debug resolution
-nu -c "use provisioning/workspace/tools/layer-utils.nu *; test_layer_resolution my-service wuji upcloud"
-
-# Check template exists
-ls provisioning/workspace/templates/taskservs/{category}/my-service.ncl
-
-

Nickel Syntax Errors

-
# Check syntax
-nickel typecheck provisioning/extensions/taskservs/{category}/my-service/schemas/my-service.ncl
-
-# Format code
-nickel format provisioning/extensions/taskservs/{category}/my-service/schemas/
-
-

💡 Pro Tips

-
    -
  1. Use existing taskservs as templates - Copy and modify similar services
  2. -
  3. Test with –check first - Always use dry run before actual deployment
  4. -
  5. Follow naming conventions - Use kebab-case for consistency
  6. -
  7. Document thoroughly - Good docs save time later
  8. -
  9. Version your schemas - Include version.ncl for compatibility tracking
  10. -
-

🔗 Next Steps

-
    -
  1. Read the full Taskserv Developer Guide
  2. -
  3. Explore existing taskservs in provisioning/extensions/taskservs/
  4. -
  5. Check out templates in provisioning/workspace/templates/taskservs/
  6. -
  7. Join the development community for support
  8. -
-

Taskserv Categorization Plan

-

Categories and Taskservs (38 total)

-

kubernetes/ (1)

-
    -
  • kubernetes
  • -
-

networking/ (6)

-
    -
  • cilium
  • -
  • coredns
  • -
  • etcd
  • -
  • ip-aliases
  • -
  • proxy
  • -
  • resolv
  • -
-

container-runtime/ (6)

-
    -
  • containerd
  • -
  • crio
  • -
  • crun
  • -
  • podman
  • -
  • runc
  • -
  • youki
  • -
-

storage/ (4)

-
    -
  • external-nfs
  • -
  • mayastor
  • -
  • oci-reg
  • -
  • rook-ceph
  • -
-

databases/ (2)

-
    -
  • postgres
  • -
  • redis
  • -
-

development/ (6)

-
    -
  • coder
  • -
  • desktop
  • -
  • gitea
  • -
  • nushell
  • -
  • oras
  • -
  • radicle
  • -
-

infrastructure/ (6)

-
    -
  • kms
  • -
  • os
  • -
  • provisioning
  • -
  • polkadot
  • -
  • webhook
  • -
  • kubectl
  • -
-

misc/ (1)

-
    -
  • generate
  • -
-

Keep in root/ (6)

-
    -
  • info.md
  • -
  • manifest.toml
  • -
  • manifest.lock
  • -
  • README.md
  • -
  • REFERENCE.md
  • -
  • version.ncl
  • -
-

Total categorized: 32 taskservs + 6 root files = 38 items ✓

-

Platform Deployment Guide

-

Version: 1.0.0 -Last Updated: 2026-01-05 -Target Audience: DevOps Engineers, Platform Operators -Status: Production Ready

-

Practical guide for deploying the 9-service provisioning platform in any environment using mode-based configuration.

-

Table of Contents

-
    -
  1. Prerequisites
  2. -
  3. Deployment Modes
  4. -
  5. Quick Start
  6. -
  7. Solo Mode Deployment
  8. -
  9. Multiuser Mode Deployment
  10. -
  11. CICD Mode Deployment
  12. -
  13. Enterprise Mode Deployment
  14. -
  15. Service Management
  16. -
  17. Health Checks & Monitoring
  18. -
  19. Troubleshooting
  20. -
-
-

Prerequisites

-

Required Software

-
    -
  • Rust: 1.70+ (for building services)
  • -
  • Nickel: Latest (for config validation)
  • -
  • Nushell: 0.109.1+ (for scripts)
  • -
  • Cargo: Included with Rust
  • -
  • Git: For cloning and pulling updates
  • -
-

Required Tools (Mode-Dependent)

-
- - - - - -
ToolSoloMultiuserCICDEnterprise
Docker/PodmanNoOptionalYesYes
SurrealDBNoYesNoNo
EtcdNoNoNoYes
PostgreSQLNoOptionalNoOptional
OpenAI/Anthropic APINoOptionalYesYes
-
-

System Requirements

-
- - - - -
ResourceSoloMultiuserCICDEnterprise
CPU Cores2+4+8+16+
Memory2 GB4 GB8 GB16 GB
Disk10 GB50 GB100 GB500 GB
NetworkLocalLocal/CloudCloudHA Cloud
-
-

Directory Structure

-
# Ensure base directories exist
-mkdir -p provisioning/schemas/platform
-mkdir -p provisioning/platform/logs
-mkdir -p provisioning/platform/data
-mkdir -p provisioning/.typedialog/platform
-mkdir -p provisioning/config/runtime
-
-
-

Deployment Modes

-

Mode Selection Matrix

-
- - - - -
RequirementRecommended Mode
Development & testingsolo
Team environment (2-10 people)multiuser
CI/CD pipelines & automationcicd
Production with HAenterprise
-
-

Mode Characteristics

-

Solo Mode

-

Use Case: Development, testing, demonstration

-

Characteristics:

-
    -
  • All services run locally with minimal resources
  • -
  • Filesystem-based storage (no external databases)
  • -
  • No TLS/SSL required
  • -
  • Embedded/in-memory backends
  • -
  • Single machine only
  • -
-

Services Configuration:

-
    -
  • 2-4 workers per service
  • -
  • 30-60 second timeouts
  • -
  • No replication or clustering
  • -
  • Debug-level logging enabled
  • -
-

Startup Time: ~2-5 minutes -Data Persistence: Local files only

-
-

Multiuser Mode

-

Use Case: Team environments, shared infrastructure

-

Characteristics:

-
    -
  • Shared database backends (SurrealDB)
  • -
  • Multiple concurrent users
  • -
  • CORS and multi-user features enabled
  • -
  • Optional TLS support
  • -
  • 2-4 machines (or containerized)
  • -
-

Services Configuration:

-
    -
  • 4-6 workers per service
  • -
  • 60-120 second timeouts
  • -
  • Basic replication available
  • -
  • Info-level logging
  • -
-

Startup Time: ~3-8 minutes (database dependent) -Data Persistence: SurrealDB (shared)

-
-

CICD Mode

-

Use Case: CI/CD pipelines, ephemeral environments

-

Characteristics:

-
    -
  • Ephemeral storage (memory, temporary)
  • -
  • High throughput
  • -
  • RAG system disabled
  • -
  • Minimal logging
  • -
  • Stateless services
  • -
-

Services Configuration:

-
    -
  • 8-12 workers per service
  • -
  • 10-30 second timeouts
  • -
  • No persistence
  • -
  • Warn-level logging
  • -
-

Startup Time: ~1-2 minutes -Data Persistence: None (ephemeral)

-
-

Enterprise Mode

-

Use Case: Production, high availability, compliance

-

Characteristics:

-
    -
  • Distributed, replicated backends
  • -
  • High availability (HA) clustering
  • -
  • TLS/SSL encryption
  • -
  • Audit logging
  • -
  • Full monitoring and observability
  • -
-

Services Configuration:

-
    -
  • 16-32 workers per service
  • -
  • 120-300 second timeouts
  • -
  • Active replication across 3+ nodes
  • -
  • Info-level logging with audit trails
  • -
-

Startup Time: ~5-15 minutes (cluster initialization) -Data Persistence: Replicated across cluster

-
-

Quick Start

-

1. Clone Repository

-
git clone https://github.com/your-org/project-provisioning.git
-cd project-provisioning
-
-

2. Select Deployment Mode

-

Choose your mode based on use case:

-
# For development
-export DEPLOYMENT_MODE=solo
-
-# For team environments
-export DEPLOYMENT_MODE=multiuser
-
-# For CI/CD
-export DEPLOYMENT_MODE=cicd
-
-# For production
-export DEPLOYMENT_MODE=enterprise
-
-

3. Set Environment Variables

-

All services use mode-specific TOML configs automatically loaded via environment variables:

-
# Vault Service
-export VAULT_MODE=$DEPLOYMENT_MODE
-
-# Extension Registry
-export REGISTRY_MODE=$DEPLOYMENT_MODE
-
-# RAG System
-export RAG_MODE=$DEPLOYMENT_MODE
-
-# AI Service
-export AI_SERVICE_MODE=$DEPLOYMENT_MODE
-
-# Provisioning Daemon
-export DAEMON_MODE=$DEPLOYMENT_MODE
-
-

4. Build All Services

-
# Build all platform crates
-cargo build --release -p vault-service \
-                      -p extension-registry \
-                      -p provisioning-rag \
-                      -p ai-service \
-                      -p provisioning-daemon \
-                      -p orchestrator \
-                      -p control-center \
-                      -p mcp-server \
-                      -p installer
-
-

5. Start Services (Order Matters)

-
# Start in dependency order:
-
-# 1. Core infrastructure (KMS, storage)
-cargo run --release -p vault-service &
-
-# 2. Configuration and extensions
-cargo run --release -p extension-registry &
-
-# 3. AI/RAG layer
-cargo run --release -p provisioning-rag &
-cargo run --release -p ai-service &
-
-# 4. Orchestration layer
-cargo run --release -p orchestrator &
-cargo run --release -p control-center &
-cargo run --release -p mcp-server &
-
-# 5. Background operations
-cargo run --release -p provisioning-daemon &
-
-# 6. Installer (optional, for new deployments)
-cargo run --release -p installer &
-
-

6. Verify Services

-
# Check all services are running
-pgrep -l "vault-service|extension-registry|provisioning-rag|ai-service"
-
-# Test endpoints
-curl http://localhost:8200/health   # Vault
-curl http://localhost:8081/health   # Registry
-curl http://localhost:8083/health   # RAG
-curl http://localhost:8082/health   # AI Service
-curl http://localhost:9090/health   # Orchestrator
-curl http://localhost:8080/health   # Control Center
-
-
-

Solo Mode Deployment

-

Perfect for: Development, testing, learning

-

Step 1: Verify Solo Configuration Files

-
# Check that solo schemas are available
-ls -la provisioning/schemas/platform/defaults/deployment/solo-defaults.ncl
-
-# Available schemas for each service:
-# - provisioning/schemas/platform/schemas/vault-service.ncl
-# - provisioning/schemas/platform/schemas/extension-registry.ncl
-# - provisioning/schemas/platform/schemas/rag.ncl
-# - provisioning/schemas/platform/schemas/ai-service.ncl
-# - provisioning/schemas/platform/schemas/provisioning-daemon.ncl
-
-

Step 2: Set Solo Environment Variables

-
# Set all services to solo mode
-export VAULT_MODE=solo
-export REGISTRY_MODE=solo
-export RAG_MODE=solo
-export AI_SERVICE_MODE=solo
-export DAEMON_MODE=solo
-
-# Verify settings
-echo $VAULT_MODE  # Should output: solo
-
-

Step 3: Build Services

-
# Build in release mode for better performance
-cargo build --release
-
-

Step 4: Create Local Data Directories

-
# Create storage directories for solo mode
-mkdir -p /tmp/provisioning-solo/{vault,registry,rag,ai,daemon}
-chmod 755 /tmp/provisioning-solo/{vault,registry,rag,ai,daemon}
-
-

Step 5: Start Services

-
# Start each service in a separate terminal or use tmux:
-
-# Terminal 1: Vault
-cargo run --release -p vault-service
-
-# Terminal 2: Registry
-cargo run --release -p extension-registry
-
-# Terminal 3: RAG
-cargo run --release -p provisioning-rag
-
-# Terminal 4: AI Service
-cargo run --release -p ai-service
-
-# Terminal 5: Orchestrator
-cargo run --release -p orchestrator
-
-# Terminal 6: Control Center
-cargo run --release -p control-center
-
-# Terminal 7: Daemon
-cargo run --release -p provisioning-daemon
-
-

Step 6: Test Services

-
# Wait 10-15 seconds for services to start, then test
-
-# Check service health
-curl -s http://localhost:8200/health | jq .
-curl -s http://localhost:8081/health | jq .
-curl -s http://localhost:8083/health | jq .
-
-# Try a simple operation
-curl -X GET http://localhost:9090/api/v1/health
-
-

Step 7: Verify Persistence (Optional)

-
# Check that data is stored locally
-ls -la /tmp/provisioning-solo/vault/
-ls -la /tmp/provisioning-solo/registry/
-
-# Data should accumulate as you use the services
-
-

Cleanup

-
# Stop all services
-pkill -f "cargo run --release"
-
-# Remove temporary data (optional)
-rm -rf /tmp/provisioning-solo
-
-
-

Multiuser Mode Deployment

-

Perfect for: Team environments, shared infrastructure

-

Prerequisites

-
    -
  • SurrealDB: Running and accessible at http://surrealdb:8000
  • -
  • Network Access: All machines can reach SurrealDB
  • -
  • DNS/Hostnames: Services accessible via hostnames (not just localhost)
  • -
-

Step 1: Deploy SurrealDB

-
# Using Docker (recommended)
-docker run -d \
-  --name surrealdb \
-  -p 8000:8000 \
-  surrealdb/surrealdb:latest \
-  start --user root --pass root
-
-# Or using native installation:
-surreal start --user root --pass root
-
-

Step 2: Verify SurrealDB Connectivity

-
# Test SurrealDB connection
-curl -s http://localhost:8000/health
-
-# Should return: {"version":"v1.x.x"}
-
-

Step 3: Set Multiuser Environment Variables

-
# Configure all services for multiuser mode
-export VAULT_MODE=multiuser
-export REGISTRY_MODE=multiuser
-export RAG_MODE=multiuser
-export AI_SERVICE_MODE=multiuser
-export DAEMON_MODE=multiuser
-
-# Set database connection
-export SURREALDB_URL=http://surrealdb:8000
-export SURREALDB_USER=root
-export SURREALDB_PASS=root
-
-# Set service hostnames (if not localhost)
-export VAULT_SERVICE_HOST=vault.internal
-export REGISTRY_HOST=registry.internal
-export RAG_HOST=rag.internal
-
-

Step 4: Build Services

-
cargo build --release
-
-

Step 5: Create Shared Data Directories

-
# Create directories on shared storage (NFS, etc.)
-mkdir -p /mnt/provisioning-data/{vault,registry,rag,ai}
-chmod 755 /mnt/provisioning-data/{vault,registry,rag,ai}
-
-# Or use local directories if on separate machines
-mkdir -p /var/lib/provisioning/{vault,registry,rag,ai}
-
-

Step 6: Start Services on Multiple Machines

-
# Machine 1: Infrastructure services
-ssh ops@machine1
-export VAULT_MODE=multiuser
-cargo run --release -p vault-service &
-cargo run --release -p extension-registry &
-
-# Machine 2: AI services
-ssh ops@machine2
-export RAG_MODE=multiuser
-export AI_SERVICE_MODE=multiuser
-cargo run --release -p provisioning-rag &
-cargo run --release -p ai-service &
-
-# Machine 3: Orchestration
-ssh ops@machine3
-cargo run --release -p orchestrator &
-cargo run --release -p control-center &
-
-# Machine 4: Background tasks
-ssh ops@machine4
-export DAEMON_MODE=multiuser
-cargo run --release -p provisioning-daemon &
-
-

Step 7: Test Multi-Machine Setup

-
# From any machine, test cross-machine connectivity
-curl -s http://machine1:8200/health
-curl -s http://machine2:8083/health
-curl -s http://machine3:9090/health
-
-# Test integration
-curl -X POST http://machine3:9090/api/v1/provision \
-  -H "Content-Type: application/json" \
-  -d '{"workspace": "test"}'
-
-

Step 8: Enable User Access

-
# Create shared credentials
-export VAULT_TOKEN=s.xxxxxxxxxxx
-
-# Configure TLS (optional but recommended)
-# Update configs to use https:// URLs
-export VAULT_MODE=multiuser
-# Edit provisioning/schemas/platform/schemas/vault-service.ncl
-# Add TLS configuration in the schema definition
-# See: provisioning/schemas/platform/validators/ for constraints
-
-

Monitoring Multiuser Deployment

-
# Check all services are connected to SurrealDB
-for host in machine1 machine2 machine3 machine4; do
-  ssh ops@$host "curl -s http://localhost/api/v1/health | jq .database_connected"
-done
-
-# Monitor SurrealDB
-curl -s http://surrealdb:8000/version
-
-
-

CICD Mode Deployment

-

Perfect for: GitHub Actions, GitLab CI, Jenkins, cloud automation

-

Step 1: Understand Ephemeral Nature

-

CICD mode services:

-
    -
  • Don’t persist data between runs
  • -
  • Use in-memory storage
  • -
  • Have RAG disabled
  • -
  • Optimize for startup speed
  • -
  • Suitable for containerized deployments
  • -
-

Step 2: Set CICD Environment Variables

-
# Use cicd mode for all services
-export VAULT_MODE=cicd
-export REGISTRY_MODE=cicd
-export RAG_MODE=cicd
-export AI_SERVICE_MODE=cicd
-export DAEMON_MODE=cicd
-
-# Disable TLS (not needed in CI)
-export CI_ENVIRONMENT=true
-
-

Step 3: Containerize Services (Optional)

-
# Dockerfile for CICD deployments
-FROM rust:1.75-slim
-
-WORKDIR /app
-COPY . .
-
-# Build all services
-RUN cargo build --release
-
-# Set CICD mode
-ENV VAULT_MODE=cicd
-ENV REGISTRY_MODE=cicd
-ENV RAG_MODE=cicd
-ENV AI_SERVICE_MODE=cicd
-
-# Expose ports
-EXPOSE 8200 8081 8083 8082 9090 8080
-
-# Run services
-CMD ["sh", "-c", "\
-  cargo run --release -p vault-service & \
-  cargo run --release -p extension-registry & \
-  cargo run --release -p provisioning-rag & \
-  cargo run --release -p ai-service & \
-  cargo run --release -p orchestrator & \
-  wait"]
-
-

Step 4: GitHub Actions Example

-
name: CICD Platform Deployment
-
-on:
-  push:
-    branches: [main, develop]
-
-jobs:
-  test-deployment:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Install Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: 1.75
-          profile: minimal
-
-      - name: Set CICD Mode
-        run: |
-          echo "VAULT_MODE=cicd" >> $GITHUB_ENV
-          echo "REGISTRY_MODE=cicd" >> $GITHUB_ENV
-          echo "RAG_MODE=cicd" >> $GITHUB_ENV
-          echo "AI_SERVICE_MODE=cicd" >> $GITHUB_ENV
-          echo "DAEMON_MODE=cicd" >> $GITHUB_ENV
-
-      - name: Build Services
-        run: cargo build --release
-
-      - name: Run Integration Tests
-        run: |
-          # Start services in background
-          cargo run --release -p vault-service &
-          cargo run --release -p extension-registry &
-          cargo run --release -p orchestrator &
-
-          # Wait for startup
-          sleep 10
-
-          # Run tests
-          cargo test --release
-
-      - name: Health Checks
-        run: |
-          curl -f http://localhost:8200/health
-          curl -f http://localhost:8081/health
-          curl -f http://localhost:9090/health
-
-  deploy:
-    needs: test-deployment
-    runs-on: ubuntu-latest
-    if: github.ref == 'refs/heads/main'
-    steps:
-      - uses: actions/checkout@v3
-      - name: Deploy to Production
-        run: |
-          # Deploy production enterprise cluster
-          ./scripts/deploy-enterprise.sh
-
-

Step 5: Run CICD Tests

-
# Simulate CI environment locally
-export VAULT_MODE=cicd
-export CI_ENVIRONMENT=true
-
-# Build
-cargo build --release
-
-# Run short-lived services for testing
-timeout 30 cargo run --release -p vault-service &
-timeout 30 cargo run --release -p extension-registry &
-timeout 30 cargo run --release -p orchestrator &
-
-# Run tests while services are running
-sleep 5
-cargo test --release
-
-# Services auto-cleanup after timeout
-
-
-

Enterprise Mode Deployment

-

Perfect for: Production, high availability, compliance

-

Prerequisites

-
    -
  • 3+ Machines: Minimum 3 for HA
  • -
  • Etcd Cluster: For distributed consensus
  • -
  • Load Balancer: HAProxy, nginx, or cloud LB
  • -
  • TLS Certificates: Valid certificates for all services
  • -
  • Monitoring: Prometheus, ELK, or cloud monitoring
  • -
  • Backup System: Daily snapshots to S3 or similar
  • -
-

Step 1: Deploy Infrastructure

-

1.1 Deploy Etcd Cluster

-
# Node 1, 2, 3
-etcd --name=node-1 \
-     --listen-client-urls=http://0.0.0.0:2379 \
-     --advertise-client-urls=http://node-1.internal:2379 \
-     --initial-cluster="node-1=http://node-1.internal:2380,node-2=http://node-2.internal:2380,node-3=http://node-3.internal:2380" \
-     --initial-cluster-state=new
-
-# Verify cluster
-etcdctl --endpoints=http://localhost:2379 member list
-
-

1.2 Deploy Load Balancer

-
# HAProxy configuration for vault-service (example)
-frontend vault_frontend
-    bind *:8200
-    mode tcp
-    default_backend vault_backend
-
-backend vault_backend
-    mode tcp
-    balance roundrobin
-    server vault-1 10.0.1.10:8200 check
-    server vault-2 10.0.1.11:8200 check
-    server vault-3 10.0.1.12:8200 check
-
-

1.3 Configure TLS

-
# Generate certificates (or use existing)
-mkdir -p /etc/provisioning/tls
-
-# For each service:
-openssl req -x509 -newkey rsa:4096 \
-  -keyout /etc/provisioning/tls/vault-key.pem \
-  -out /etc/provisioning/tls/vault-cert.pem \
-  -days 365 -nodes \
-  -subj "/CN=vault.provisioning.prod"
-
-# Set permissions
-chmod 600 /etc/provisioning/tls/*-key.pem
-chmod 644 /etc/provisioning/tls/*-cert.pem
-
-

Step 2: Set Enterprise Environment Variables

-
# All machines: Set enterprise mode
-export VAULT_MODE=enterprise
-export REGISTRY_MODE=enterprise
-export RAG_MODE=enterprise
-export AI_SERVICE_MODE=enterprise
-export DAEMON_MODE=enterprise
-
-# Database cluster
-export SURREALDB_URL="ws://surrealdb-cluster.internal:8000"
-export SURREALDB_REPLICAS=3
-
-# Etcd cluster
-export ETCD_ENDPOINTS="http://node-1.internal:2379,http://node-2.internal:2379,http://node-3.internal:2379"
-
-# TLS configuration
-export TLS_CERT_PATH=/etc/provisioning/tls
-export TLS_VERIFY=true
-export TLS_CA_CERT=/etc/provisioning/tls/ca.crt
-
-# Monitoring
-export PROMETHEUS_URL=http://prometheus.internal:9090
-export METRICS_ENABLED=true
-export AUDIT_LOG_ENABLED=true
-
-

Step 3: Deploy Services Across Cluster

-
# Ansible playbook (simplified)
----
-- hosts: provisioning_cluster
-  tasks:
-    - name: Build services
-      shell: cargo build --release
-
-    - name: Start vault-service (machine 1-3)
-      shell: "cargo run --release -p vault-service"
-      when: "'vault' in group_names"
-
-    - name: Start orchestrator (machine 2-3)
-      shell: "cargo run --release -p orchestrator"
-      when: "'orchestrator' in group_names"
-
-    - name: Start daemon (machine 3)
-      shell: "cargo run --release -p provisioning-daemon"
-      when: "'daemon' in group_names"
-
-    - name: Verify cluster health
-      uri:
-        url: "https://{{ inventory_hostname }}:9090/health"
-        validate_certs: yes
-
-

Step 4: Monitor Cluster Health

-
# Check cluster status
-curl -s https://vault.internal:8200/health | jq .state
-
-# Check replication
-curl -s https://orchestrator.internal:9090/api/v1/cluster/status
-
-# Monitor etcd
-etcdctl --endpoints=https://node-1.internal:2379 endpoint health
-
-# Check leader election
-etcdctl --endpoints=https://node-1.internal:2379 election list
-
-

Step 5: Enable Monitoring & Alerting

-
# Prometheus configuration
-global:
-  scrape_interval: 30s
-  evaluation_interval: 30s
-
-scrape_configs:
-  - job_name: 'vault-service'
-    scheme: https
-    tls_config:
-      ca_file: /etc/provisioning/tls/ca.crt
-    static_configs:
-      - targets: ['vault-1.internal:8200', 'vault-2.internal:8200', 'vault-3.internal:8200']
-
-  - job_name: 'orchestrator'
-    scheme: https
-    static_configs:
-      - targets: ['orch-1.internal:9090', 'orch-2.internal:9090', 'orch-3.internal:9090']
-
-

Step 6: Backup & Recovery

-
# Daily backup script
-#!/bin/bash
-BACKUP_DIR="/mnt/provisioning-backups"
-DATE=$(date +%Y%m%d_%H%M%S)
-
-# Backup etcd
-etcdctl --endpoints=https://node-1.internal:2379 \
-  snapshot save "$BACKUP_DIR/etcd-$DATE.db"
-
-# Backup SurrealDB
-curl -X POST https://surrealdb.internal:8000/backup \
-  -H "Authorization: Bearer $SURREALDB_TOKEN" \
-  > "$BACKUP_DIR/surreal-$DATE.sql"
-
-# Upload to S3
-aws s3 cp "$BACKUP_DIR/etcd-$DATE.db" \
-  s3://provisioning-backups/etcd/
-
-# Cleanup old backups (keep 30 days)
-find "$BACKUP_DIR" -mtime +30 -delete
-
-
-

Service Management

-

Starting Services

-

Individual Service Startup

-
# Start one service
-export VAULT_MODE=enterprise
-cargo run --release -p vault-service
-
-# In another terminal
-export REGISTRY_MODE=enterprise
-cargo run --release -p extension-registry
-
-

Batch Startup

-
# Start all services (dependency order)
-#!/bin/bash
-set -e
-
-MODE=${1:-solo}
-export VAULT_MODE=$MODE
-export REGISTRY_MODE=$MODE
-export RAG_MODE=$MODE
-export AI_SERVICE_MODE=$MODE
-export DAEMON_MODE=$MODE
-
-echo "Starting provisioning platform in $MODE mode..."
-
-# Core services first
-echo "Starting infrastructure..."
-cargo run --release -p vault-service &
-VAULT_PID=$!
-
-echo "Starting extension registry..."
-cargo run --release -p extension-registry &
-REGISTRY_PID=$!
-
-# AI layer
-echo "Starting AI services..."
-cargo run --release -p provisioning-rag &
-RAG_PID=$!
-
-cargo run --release -p ai-service &
-AI_PID=$!
-
-# Orchestration
-echo "Starting orchestration..."
-cargo run --release -p orchestrator &
-ORCH_PID=$!
-
-echo "All services started. PIDs: $VAULT_PID $REGISTRY_PID $RAG_PID $AI_PID $ORCH_PID"
-
-

Stopping Services

-
# Stop all services gracefully
-pkill -SIGTERM -f "cargo run --release -p"
-
-# Wait for graceful shutdown
-sleep 5
-
-# Force kill if needed
-pkill -9 -f "cargo run --release -p"
-
-# Verify all stopped
-pgrep -f "cargo run --release -p" && echo "Services still running" || echo "All stopped"
-
-

Restarting Services

-
# Restart single service
-pkill -SIGTERM vault-service
-sleep 2
-cargo run --release -p vault-service &
-
-# Restart all services
-./scripts/restart-all.sh $MODE
-
-# Restart with config reload
-export VAULT_MODE=multiuser
-pkill -SIGTERM vault-service
-sleep 2
-cargo run --release -p vault-service &
-
-

Checking Service Status

-
# Check running processes
-pgrep -a "cargo run --release"
-
-# Check listening ports
-netstat -tlnp | grep -E "8200|8081|8083|8082|9090|8080"
-
-# Or using ss (modern alternative)
-ss -tlnp | grep -E "8200|8081|8083|8082|9090|8080"
-
-# Health endpoint checks
-for service in vault registry rag ai orchestrator; do
-  echo "=== $service ==="
-  curl -s http://localhost:${port[$service]}/health | jq .
-done
-
-
-

Health Checks & Monitoring

-

Manual Health Verification

-
# Vault Service
-curl -s http://localhost:8200/health | jq .
-# Expected: {"status":"ok","uptime":123.45}
-
-# Extension Registry
-curl -s http://localhost:8081/health | jq .
-
-# RAG System
-curl -s http://localhost:8083/health | jq .
-# Expected: {"status":"ok","embeddings":"ready","vector_db":"connected"}
-
-# AI Service
-curl -s http://localhost:8082/health | jq .
-
-# Orchestrator
-curl -s http://localhost:9090/health | jq .
-
-# Control Center
-curl -s http://localhost:8080/health | jq .
-
-

Service Integration Tests

-
# Test vault <-> registry integration
-curl -X POST http://localhost:8200/api/encrypt \
-  -H "Content-Type: application/json" \
-  -d '{"plaintext":"secret"}' | jq .
-
-# Test RAG system
-curl -X POST http://localhost:8083/api/ingest \
-  -H "Content-Type: application/json" \
-  -d '{"document":"test.md","content":"# Test"}' | jq .
-
-# Test orchestrator
-curl -X GET http://localhost:9090/api/v1/status | jq .
-
-# End-to-end workflow
-curl -X POST http://localhost:9090/api/v1/provision \
-  -H "Content-Type: application/json" \
-  -d '{
-    "workspace": "test",
-    "services": ["vault", "registry"],
-    "mode": "solo"
-  }' | jq .
-
-

Monitoring Dashboards

-

Prometheus Metrics

-
# Query service uptime
-curl -s 'http://prometheus:9090/api/v1/query?query=up' | jq .
-
-# Query request rate
-curl -s 'http://prometheus:9090/api/v1/query?query=rate(http_requests_total[5m])' | jq .
-
-# Query error rate
-curl -s 'http://prometheus:9090/api/v1/query?query=rate(http_errors_total[5m])' | jq .
-
-

Log Aggregation

-
# Follow vault logs
-tail -f /var/log/provisioning/vault-service.log
-
-# Follow all service logs
-tail -f /var/log/provisioning/*.log
-
-# Search for errors
-grep -r "ERROR" /var/log/provisioning/
-
-# Follow with filtering
-tail -f /var/log/provisioning/orchestrator.log | grep -E "ERROR|WARN"
-
-

Alerting

-
# AlertManager configuration
-groups:
-  - name: provisioning
-    rules:
-      - alert: ServiceDown
-        expr: up{job=~"vault|registry|rag|orchestrator"} == 0
-        for: 5m
-        annotations:
-          summary: "{{ $labels.job }} is down"
-
-      - alert: HighErrorRate
-        expr: rate(http_errors_total[5m]) > 0.05
-        annotations:
-          summary: "High error rate detected"
-
-      - alert: DiskSpaceWarning
-        expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.2
-        annotations:
-          summary: "Disk space below 20%"
-
-
-

Troubleshooting

-

Service Won’t Start

-

Problem: error: failed to bind to port 8200

-

Solutions:

-
# Check if port is in use
-lsof -i :8200
-ss -tlnp | grep 8200
-
-# Kill existing process
-pkill -9 -f vault-service
-
-# Or use different port
-export VAULT_SERVER_PORT=8201
-cargo run --release -p vault-service
-
-

Configuration Loading Fails

-

Problem: error: failed to load config from mode file

-

Solutions:

-
# Verify schemas exist
-ls -la provisioning/schemas/platform/schemas/vault-service.ncl
-
-# Validate schema syntax
-nickel typecheck provisioning/schemas/platform/schemas/vault-service.ncl
-
-# Check defaults are present
-nickel typecheck provisioning/schemas/platform/defaults/vault-service-defaults.ncl
-
-# Verify deployment mode overlay exists
-ls -la provisioning/schemas/platform/defaults/deployment/$VAULT_MODE-defaults.ncl
-
-# Run service with explicit mode
-export VAULT_MODE=solo
-cargo run --release -p vault-service
-
-

Database Connection Issues

-

Problem: error: failed to connect to database

-

Solutions:

-
# Verify database is running
-curl http://surrealdb:8000/health
-etcdctl --endpoints=http://etcd:2379 endpoint health
-
-# Check connectivity
-nc -zv surrealdb 8000
-nc -zv etcd 2379
-
-# Update connection string
-export SURREALDB_URL=ws://surrealdb:8000
-export ETCD_ENDPOINTS=http://etcd:2379
-
-# Restart service with new config
-pkill -9 vault-service
-cargo run --release -p vault-service
-
-

Service Crashes on Startup

-

Problem: Service exits with code 1 or 139

-

Solutions:

-
# Run with verbose logging
-RUST_LOG=debug cargo run -p vault-service 2>&1 | head -50
-
-# Check system resources
-free -h
-df -h
-
-# Check for core dumps
-coredumpctl list
-
-# Run under debugger (if crash suspected)
-rust-gdb --args target/release/vault-service
-
-

High Memory Usage

-

Problem: Service consuming > expected memory

-

Solutions:

-
# Check memory usage
-ps aux | grep vault-service | grep -v grep
-
-# Monitor over time
-watch -n 1 'ps aux | grep vault-service | grep -v grep'
-
-# Reduce worker count
-export VAULT_SERVER_WORKERS=2
-cargo run --release -p vault-service
-
-# Check for memory leaks
-valgrind --leak-check=full target/release/vault-service
-
-

Network/DNS Issues

-

Problem: error: failed to resolve hostname

-

Solutions:

-
# Test DNS resolution
-nslookup vault.internal
-dig vault.internal
-
-# Test connectivity to service
-curl -v http://vault.internal:8200/health
-
-# Add to /etc/hosts if needed
-echo "10.0.1.10 vault.internal" >> /etc/hosts
-
-# Check network interface
-ip addr show
-netstat -nr
-
-

Data Persistence Issues

-

Problem: Data lost after restart

-

Solutions:

-
# Verify backup exists
-ls -la /mnt/provisioning-backups/
-ls -la /var/lib/provisioning/
-
-# Check disk space
-df -h /var/lib/provisioning
-
-# Verify file permissions
-ls -l /var/lib/provisioning/vault/
-chmod 755 /var/lib/provisioning/vault/*
-
-# Restore from backup
-./scripts/restore-backup.sh /mnt/provisioning-backups/vault-20260105.sql
-
-

Debugging Checklist

-

When troubleshooting, use this systematic approach:

-
# 1. Check service is running
-pgrep -f vault-service || echo "Service not running"
-
-# 2. Check port is listening
-ss -tlnp | grep 8200 || echo "Port not listening"
-
-# 3. Check logs for errors
-tail -20 /var/log/provisioning/vault-service.log | grep -i error
-
-# 4. Test HTTP endpoint
-curl -i http://localhost:8200/health
-
-# 5. Check dependencies
-curl http://surrealdb:8000/health
-etcdctl --endpoints=http://etcd:2379 endpoint health
-
-# 6. Check schema definition
-nickel typecheck provisioning/schemas/platform/schemas/vault-service.ncl
-
-# 7. Verify environment variables
-env | grep -E "VAULT_|SURREALDB_|ETCD_"
-
-# 8. Check system resources
-free -h && df -h && top -bn1 | head -10
-
-
-

Configuration Updates

-

Updating Service Configuration

-
# 1. Edit the schema definition
-vim provisioning/schemas/platform/schemas/vault-service.ncl
-
-# 2. Update defaults if needed
-vim provisioning/schemas/platform/defaults/vault-service-defaults.ncl
-
-# 3. Validate syntax
-nickel typecheck provisioning/schemas/platform/schemas/vault-service.ncl
-
-# 4. Re-export configuration from schemas
-./provisioning/.typedialog/platform/scripts/generate-configs.nu vault-service multiuser
-
-# 5. Restart affected service (no downtime for clients)
-pkill -SIGTERM vault-service
-sleep 2
-cargo run --release -p vault-service &
-
-# 4. Verify configuration loaded
-curl http://localhost:8200/api/config | jq .
-
-

Mode Migration

-
# Migrate from solo to multiuser:
-
-# 1. Stop services
-pkill -SIGTERM -f "cargo run"
-sleep 5
-
-# 2. Backup current data
-tar -czf /backup/provisioning-solo-$(date +%s).tar.gz /var/lib/provisioning/
-
-# 3. Set new mode
-export VAULT_MODE=multiuser
-export REGISTRY_MODE=multiuser
-export RAG_MODE=multiuser
-
-# 4. Start services with new config
-cargo run --release -p vault-service &
-cargo run --release -p extension-registry &
-
-# 5. Verify new mode
-curl http://localhost:8200/api/config | jq .deployment_mode
-
-
-

Production Checklist

-

Before deploying to production:

-
    -
  • -All services compiled in release mode (--release)
  • -
  • -TLS certificates installed and valid
  • -
  • -Database cluster deployed and healthy
  • -
  • -Load balancer configured and routing traffic
  • -
  • -Monitoring and alerting configured
  • -
  • -Backup system tested and working
  • -
  • -High availability verified (failover tested)
  • -
  • -Security hardening applied (firewall rules, etc.)
  • -
  • -Documentation updated for your environment
  • -
  • -Team trained on deployment procedures
  • -
  • -Runbooks created for common operations
  • -
  • -Disaster recovery plan tested
  • -
-
-

Getting Help

-

Community Resources

-
    -
  • GitHub Issues: Report bugs at github.com/your-org/provisioning/issues
  • -
  • Documentation: Full docs at provisioning/docs/
  • -
  • Slack Channel: #provisioning-platform
  • -
-

Internal Support

-
    -
  • Platform Team: platform@your-org.com
  • -
  • On-Call: Check PagerDuty for active rotation
  • -
  • Escalation: Contact infrastructure leadership
  • -
-

Useful Commands Reference

-
# View all available commands
-cargo run -- --help
-
-# View service schemas
-ls -la provisioning/schemas/platform/schemas/
-ls -la provisioning/schemas/platform/defaults/
-
-# List running services
-ps aux | grep cargo
-
-# Monitor service logs in real-time
-journalctl -fu provisioning-vault
-
-# Generate diagnostics bundle
-./scripts/generate-diagnostics.sh > /tmp/diagnostics-$(date +%s).tar.gz
-
-

Service Management Guide

-

Version: 1.0.0 -Last Updated: 2025-10-06

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Service Architecture
  4. -
  5. Service Registry
  6. -
  7. Platform Commands
  8. -
  9. Service Commands
  10. -
  11. Deployment Modes
  12. -
  13. Health Monitoring
  14. -
  15. Dependency Management
  16. -
  17. Pre-flight Checks
  18. -
  19. Troubleshooting
  20. -
-
-

Overview

-

The Service Management System provides comprehensive lifecycle management for all platform services (orchestrator, control-center, CoreDNS, Gitea, OCI -registry, MCP server, API gateway).

-

Key Features

-
    -
  • Unified Service Management: Single interface for all services
  • -
  • Automatic Dependency Resolution: Start services in correct order
  • -
  • Health Monitoring: Continuous health checks with automatic recovery
  • -
  • Multiple Deployment Modes: Binary, Docker, Docker Compose, Kubernetes, Remote
  • -
  • Pre-flight Checks: Validate prerequisites before operations
  • -
  • Service Registry: Centralized service configuration
  • -
-

Supported Services

-
- - - - - - - -
ServiceTypeCategoryDescription
orchestratorPlatformOrchestrationRust-based workflow coordinator
control-centerPlatformUIWeb-based management interface
corednsInfrastructureDNSLocal DNS resolution
giteaInfrastructureGitSelf-hosted Git service
oci-registryInfrastructureRegistryOCI-compliant container registry
mcp-serverPlatformAPIModel Context Protocol server
api-gatewayPlatformAPIUnified REST API gateway
-
-
-

Service Architecture

-

System Architecture

-
┌─────────────────────────────────────────┐
-│         Service Management CLI          │
-│  (platform/services commands)           │
-└─────────────────┬───────────────────────┘
-                  │
-       ┌──────────┴──────────┐
-       │                     │
-       ▼                     ▼
-┌──────────────┐    ┌───────────────┐
-│   Manager    │    │   Lifecycle   │
-│   (Core)     │    │   (Start/Stop)│
-└──────┬───────┘    └───────┬───────┘
-       │                    │
-       ▼                    ▼
-┌──────────────┐    ┌───────────────┐
-│   Health     │    │  Dependencies │
-│   (Checks)   │    │  (Resolution) │
-└──────────────┘    └───────────────┘
-       │                    │
-       └────────┬───────────┘
-                │
-                ▼
-       ┌────────────────┐
-       │   Pre-flight   │
-       │   (Validation) │
-       └────────────────┘
-
-

Component Responsibilities

-

Manager (manager.nu)

-
    -
  • Service registry loading
  • -
  • Service status tracking
  • -
  • State persistence
  • -
-

Lifecycle (lifecycle.nu)

-
    -
  • Service start/stop operations
  • -
  • Deployment mode handling
  • -
  • Process management
  • -
-

Health (health.nu)

-
    -
  • Health check execution
  • -
  • HTTP/TCP/Command/File checks
  • -
  • Continuous monitoring
  • -
-

Dependencies (dependencies.nu)

-
    -
  • Dependency graph analysis
  • -
  • Topological sorting
  • -
  • Startup order calculation
  • -
-

Pre-flight (preflight.nu)

-
    -
  • Prerequisite validation
  • -
  • Conflict detection
  • -
  • Auto-start orchestration
  • -
-
-

Service Registry

-

Configuration File

-

Location: provisioning/config/services.toml

-

Service Definition Structure

-
[services.<service-name>]
-name = "<service-name>"
-type = "platform" | "infrastructure" | "utility"
-category = "orchestration" | "auth" | "dns" | "git" | "registry" | "api" | "ui"
-description = "Service description"
-required_for = ["operation1", "operation2"]
-dependencies = ["dependency1", "dependency2"]
-conflicts = ["conflicting-service"]
-
-[services.<service-name>.deployment]
-mode = "binary" | "docker" | "docker-compose" | "kubernetes" | "remote"
-
-# Mode-specific configuration
-[services.<service-name>.deployment.binary]
-binary_path = "/path/to/binary"
-args = ["--arg1", "value1"]
-working_dir = "/working/directory"
-env = { KEY = "value" }
-
-[services.<service-name>.health_check]
-type = "http" | "tcp" | "command" | "file" | "none"
-interval = 10
-retries = 3
-timeout = 5
-
-[services.<service-name>.health_check.http]
-endpoint = "http://localhost:9090/health"
-expected_status = 200
-method = "GET"
-
-[services.<service-name>.startup]
-auto_start = true
-start_timeout = 30
-start_order = 10
-restart_on_failure = true
-max_restarts = 3
-
-

Example: Orchestrator Service

-
[services.orchestrator]
-name = "orchestrator"
-type = "platform"
-category = "orchestration"
-description = "Rust-based orchestrator for workflow coordination"
-required_for = ["server", "taskserv", "cluster", "workflow", "batch"]
-
-[services.orchestrator.deployment]
-mode = "binary"
-
-[services.orchestrator.deployment.binary]
-binary_path = "${HOME}/.provisioning/bin/provisioning-orchestrator"
-args = ["--port", "8080", "--data-dir", "${HOME}/.provisioning/orchestrator/data"]
-
-[services.orchestrator.health_check]
-type = "http"
-
-[services.orchestrator.health_check.http]
-endpoint = "http://localhost:9090/health"
-expected_status = 200
-
-[services.orchestrator.startup]
-auto_start = true
-start_timeout = 30
-start_order = 10
-
-
-

Platform Commands

-

Platform commands manage all services as a cohesive system.

-

Start Platform

-

Start all auto-start services or specific services:

-
# Start all auto-start services
-provisioning platform start
-
-# Start specific services (with dependencies)
-provisioning platform start orchestrator control-center
-
-# Force restart if already running
-provisioning platform start --force orchestrator
-
-

Behavior:

-
    -
  1. Resolves dependencies
  2. -
  3. Calculates startup order (topological sort)
  4. -
  5. Starts services in correct order
  6. -
  7. Waits for health checks
  8. -
  9. Reports success/failure
  10. -
-

Stop Platform

-

Stop all running services or specific services:

-
# Stop all running services
-provisioning platform stop
-
-# Stop specific services
-provisioning platform stop orchestrator control-center
-
-# Force stop (kill -9)
-provisioning platform stop --force orchestrator
-
-

Behavior:

-
    -
  1. Checks for dependent services
  2. -
  3. Stops in reverse dependency order
  4. -
  5. Updates service state
  6. -
  7. Cleans up PID files
  8. -
-

Restart Platform

-

Restart running services:

-
# Restart all running services
-provisioning platform restart
-
-# Restart specific services
-provisioning platform restart orchestrator
-
-

Platform Status

-

Show status of all services:

-
provisioning platform status
-
-

Output:

-
Platform Services Status
-
-Running: 3/7
-
-=== ORCHESTRATION ===
-  🟢 orchestrator - running (uptime: 3600s) ✅
-
-=== UI ===
-  🟢 control-center - running (uptime: 3550s) ✅
-
-=== DNS ===
-  ⚪ coredns - stopped ❓
-
-=== GIT ===
-  ⚪ gitea - stopped ❓
-
-=== REGISTRY ===
-  ⚪ oci-registry - stopped ❓
-
-=== API ===
-  🟢 mcp-server - running (uptime: 3540s) ✅
-  ⚪ api-gateway - stopped ❓
-
-

Platform Health

-

Check health of all running services:

-
provisioning platform health
-
-

Output:

-
Platform Health Check
-
-✅ orchestrator: Healthy - HTTP health check passed
-✅ control-center: Healthy - HTTP status 200 matches expected
-⚪ coredns: Not running
-✅ mcp-server: Healthy - HTTP health check passed
-
-Summary: 3 healthy, 0 unhealthy, 4 not running
-
-

Platform Logs

-

View service logs:

-
# View last 50 lines
-provisioning platform logs orchestrator
-
-# View last 100 lines
-provisioning platform logs orchestrator --lines 100
-
-# Follow logs in real-time
-provisioning platform logs orchestrator --follow
-
-
-

Service Commands

-

Individual service management commands.

-

List Services

-
# List all services
-provisioning services list
-
-# List only running services
-provisioning services list --running
-
-# Filter by category
-provisioning services list --category orchestration
-
-

Output:

-
name             type          category       status   deployment_mode  auto_start
-orchestrator     platform      orchestration  running  binary          true
-control-center   platform      ui             stopped  binary          false
-coredns          infrastructure dns           stopped  docker          false
-
-

Service Status

-

Get detailed status of a service:

-
provisioning services status orchestrator
-
-

Output:

-
Service: orchestrator
-Type: platform
-Category: orchestration
-Status: running
-Deployment: binary
-Health: healthy
-Auto-start: true
-PID: 12345
-Uptime: 3600s
-Dependencies: []
-
-

Start Service

-
# Start service (with pre-flight checks)
-provisioning services start orchestrator
-
-# Force start (skip checks)
-provisioning services start orchestrator --force
-
-

Pre-flight Checks:

-
    -
  1. Validate prerequisites (binary exists, Docker running, etc.)
  2. -
  3. Check for conflicts
  4. -
  5. Verify dependencies are running
  6. -
  7. Auto-start dependencies if needed
  8. -
-

Stop Service

-
# Stop service (with dependency check)
-provisioning services stop orchestrator
-
-# Force stop (ignore dependents)
-provisioning services stop orchestrator --force
-
-

Restart Service

-
provisioning services restart orchestrator
-
-

Service Health

-

Check service health:

-
provisioning services health orchestrator
-
-

Output:

-
Service: orchestrator
-Status: healthy
-Healthy: true
-Message: HTTP health check passed
-Check type: http
-Check duration: 15 ms
-
-

Service Logs

-
# View logs
-provisioning services logs orchestrator
-
-# Follow logs
-provisioning services logs orchestrator --follow
-
-# Custom line count
-provisioning services logs orchestrator --lines 200
-
-

Check Required Services

-

Check which services are required for an operation:

-
provisioning services check server
-
-

Output:

-
Operation: server
-Required services: orchestrator
-All running: true
-
-

Service Dependencies

-

View dependency graph:

-
# View all dependencies
-provisioning services dependencies
-
-# View specific service dependencies
-provisioning services dependencies control-center
-
-

Validate Services

-

Validate all service configurations:

-
provisioning services validate
-
-

Output:

-
Total services: 7
-Valid: 6
-Invalid: 1
-
-Invalid services:
-  ❌ coredns:
-    - Docker is not installed or not running
-
-

Readiness Report

-

Get platform readiness report:

-
provisioning services readiness
-
-

Output:

-
Platform Readiness Report
-
-Total services: 7
-Running: 3
-Ready to start: 6
-
-Services:
-  🟢 orchestrator - platform - orchestration
-  🟢 control-center - platform - ui
-  🔴 coredns - infrastructure - dns
-      Issues: 1
-  🟡 gitea - infrastructure - git
-
-

Monitor Service

-

Continuous health monitoring:

-
# Monitor with default interval (30s)
-provisioning services monitor orchestrator
-
-# Custom interval
-provisioning services monitor orchestrator --interval 10
-
-
-

Deployment Modes

-

Binary Deployment

-

Run services as native binaries.

-

Configuration:

-
[services.orchestrator.deployment]
-mode = "binary"
-
-[services.orchestrator.deployment.binary]
-binary_path = "${HOME}/.provisioning/bin/provisioning-orchestrator"
-args = ["--port", "8080"]
-working_dir = "${HOME}/.provisioning/orchestrator"
-env = { RUST_LOG = "info" }
-
-

Process Management:

-
    -
  • PID tracking in ~/.provisioning/services/pids/
  • -
  • Log output to ~/.provisioning/services/logs/
  • -
  • State tracking in ~/.provisioning/services/state/
  • -
-

Docker Deployment

-

Run services as Docker containers.

-

Configuration:

-
[services.coredns.deployment]
-mode = "docker"
-
-[services.coredns.deployment.docker]
-image = "coredns/coredns:1.11.1"
-container_name = "provisioning-coredns"
-ports = ["5353:53/udp"]
-volumes = ["${HOME}/.provisioning/coredns/Corefile:/Corefile:ro"]
-restart_policy = "unless-stopped"
-
-

Prerequisites:

-
    -
  • Docker daemon running
  • -
  • Docker CLI installed
  • -
-

Docker Compose Deployment

-

Run services via Docker Compose.

-

Configuration:

-
[services.platform.deployment]
-mode = "docker-compose"
-
-[services.platform.deployment.docker_compose]
-compose_file = "${HOME}/.provisioning/platform/docker-compose.yaml"
-service_name = "orchestrator"
-project_name = "provisioning"
-
-

File: provisioning/platform/docker-compose.yaml

-

Kubernetes Deployment

-

Run services on Kubernetes.

-

Configuration:

-
[services.orchestrator.deployment]
-mode = "kubernetes"
-
-[services.orchestrator.deployment.kubernetes]
-namespace = "provisioning"
-deployment_name = "orchestrator"
-manifests_path = "${HOME}/.provisioning/k8s/orchestrator/"
-
-

Prerequisites:

-
    -
  • kubectl installed and configured
  • -
  • Kubernetes cluster accessible
  • -
-

Remote Deployment

-

Connect to remotely-running services.

-

Configuration:

-
[services.orchestrator.deployment]
-mode = "remote"
-
-[services.orchestrator.deployment.remote]
-endpoint = "https://orchestrator.example.com"
-tls_enabled = true
-auth_token_path = "${HOME}/.provisioning/tokens/orchestrator.token"
-
-
-

Health Monitoring

-

Health Check Types

-

HTTP Health Check

-
[services.orchestrator.health_check]
-type = "http"
-
-[services.orchestrator.health_check.http]
-endpoint = "http://localhost:9090/health"
-expected_status = 200
-method = "GET"
-
-

TCP Health Check

-
[services.coredns.health_check]
-type = "tcp"
-
-[services.coredns.health_check.tcp]
-host = "localhost"
-port = 5353
-
-

Command Health Check

-
[services.custom.health_check]
-type = "command"
-
-[services.custom.health_check.command]
-command = "systemctl is-active myservice"
-expected_exit_code = 0
-
-

File Health Check

-
[services.custom.health_check]
-type = "file"
-
-[services.custom.health_check.file]
-path = "/var/run/myservice.pid"
-must_exist = true
-
-

Health Check Configuration

-
    -
  • interval: Seconds between checks (default: 10)
  • -
  • retries: Max retry attempts (default: 3)
  • -
  • timeout: Check timeout in seconds (default: 5)
  • -
-

Continuous Monitoring

-
provisioning services monitor orchestrator --interval 30
-
-

Output:

-
Starting health monitoring for orchestrator (interval: 30s)
-Press Ctrl+C to stop
-2025-10-06 14:30:00 ✅ orchestrator: HTTP health check passed
-2025-10-06 14:30:30 ✅ orchestrator: HTTP health check passed
-2025-10-06 14:31:00 ✅ orchestrator: HTTP health check passed
-
-
-

Dependency Management

-

Dependency Graph

-

Services can depend on other services:

-
[services.control-center]
-dependencies = ["orchestrator"]
-
-[services.api-gateway]
-dependencies = ["orchestrator", "control-center", "mcp-server"]
-
-

Startup Order

-

Services start in topological order:

-
orchestrator (order: 10)
-  └─> control-center (order: 20)
-       └─> api-gateway (order: 45)
-
-

Dependency Resolution

-

Automatic dependency resolution when starting services:

-
# Starting control-center automatically starts orchestrator first
-provisioning services start control-center
-
-

Output:

-
Starting dependency: orchestrator
-✅ Started orchestrator with PID 12345
-Waiting for orchestrator to become healthy...
-✅ Service orchestrator is healthy
-Starting service: control-center
-✅ Started control-center with PID 12346
-✅ Service control-center is healthy
-
-

Conflicts

-

Services can conflict with each other:

-
[services.coredns]
-conflicts = ["dnsmasq", "systemd-resolved"]
-
-

Attempting to start a conflicting service will fail:

-
provisioning services start coredns
-
-

Output:

-
❌ Pre-flight check failed: conflicts
-Conflicting services running: dnsmasq
-
-

Reverse Dependencies

-

Check which services depend on a service:

-
provisioning services dependencies orchestrator
-
-

Output:

-
## orchestrator
-- Type: platform
-- Category: orchestration
-- Required by:
-  - control-center
-  - mcp-server
-  - api-gateway
-
-

Safe Stop

-

System prevents stopping services with running dependents:

-
provisioning services stop orchestrator
-
-

Output:

-
❌ Cannot stop orchestrator:
-  Dependent services running: control-center, mcp-server, api-gateway
-  Use --force to stop anyway
-
-
-

Pre-flight Checks

-

Purpose

-

Pre-flight checks ensure services can start successfully before attempting to start them.

-

Check Types

-
    -
  1. Prerequisites: Binary exists, Docker running, etc.
  2. -
  3. Conflicts: No conflicting services running
  4. -
  5. Dependencies: All dependencies available
  6. -
-

Automatic Checks

-

Pre-flight checks run automatically when starting services:

-
provisioning services start orchestrator
-
-

Check Process:

-
Running pre-flight checks for orchestrator...
-✅ Binary found: /Users/user/.provisioning/bin/provisioning-orchestrator
-✅ No conflicts detected
-✅ All dependencies available
-Starting service: orchestrator
-
-

Manual Validation

-

Validate all services:

-
provisioning services validate
-
-

Validate specific service:

-
provisioning services status orchestrator
-
-

Auto-Start

-

Services with auto_start = true can be started automatically when needed:

-
# Orchestrator auto-starts if needed for server operations
-provisioning server create
-
-

Output:

-
Starting required services...
-✅ Orchestrator started
-Creating server...
-
-
-

Troubleshooting

-

Service Won’t Start

-

Check prerequisites:

-
provisioning services validate
-provisioning services status <service>
-
-

Common issues:

-
    -
  • Binary not found: Check binary_path in config
  • -
  • Docker not running: Start Docker daemon
  • -
  • Port already in use: Check for conflicting processes
  • -
  • Dependencies not running: Start dependencies first
  • -
-

Service Health Check Failing

-

View health status:

-
provisioning services health <service>
-
-

Check logs:

-
provisioning services logs <service> --follow
-
-

Common issues:

-
    -
  • Service not fully initialized: Wait longer or increase start_timeout
  • -
  • Wrong health check endpoint: Verify endpoint in config
  • -
  • Network issues: Check firewall, port bindings
  • -
-

Dependency Issues

-

View dependency tree:

-
provisioning services dependencies <service>
-
-

Check dependency status:

-
provisioning services status <dependency>
-
-

Start with dependencies:

-
provisioning platform start <service>
-
-

Circular Dependencies

-

Validate dependency graph:

-
# This is done automatically but you can check manually
-nu -c "use lib_provisioning/services/mod.nu *; validate-dependency-graph"
-
-

PID File Stale

-

If service reports running but isn’t:

-
# Manual cleanup
-rm ~/.provisioning/services/pids/<service>.pid
-
-# Force restart
-provisioning services restart <service>
-
-

Port Conflicts

-

Find process using port:

-
lsof -i :9090
-
-

Kill conflicting process:

-
kill <PID>
-
-

Docker Issues

-

Check Docker status:

-
docker ps
-docker info
-
-

View container logs:

-
docker logs provisioning-<service>
-
-

Restart Docker daemon:

-
# macOS
-killall Docker && open /Applications/Docker.app
-
-# Linux
-systemctl restart docker
-
-

Service Logs

-

View recent logs:

-
tail -f ~/.provisioning/services/logs/<service>.log
-
-

Search logs:

-
grep "ERROR" ~/.provisioning/services/logs/<service>.log
-
-
-

Advanced Usage

-

Custom Service Registration

-

Add custom services by editing provisioning/config/services.toml.

-

Integration with Workflows

-

Services automatically start when required by workflows:

-
# Orchestrator starts automatically if not running
-provisioning workflow submit my-workflow
-
-

CI/CD Integration

-
# GitLab CI
-before_script:
-  - provisioning platform start orchestrator
-  - provisioning services health orchestrator
-
-test:
-  script:
-    - provisioning test quick kubernetes
-
-

Monitoring Integration

-

Services can integrate with monitoring systems via health endpoints.

-
- - -
-

Quick Reference

-

Version: 1.0.0

-

Platform Commands (Manage All Services)

-
# Start all auto-start services
-provisioning platform start
-
-# Start specific services with dependencies
-provisioning platform start control-center mcp-server
-
-# Stop all running services
-provisioning platform stop
-
-# Stop specific services
-provisioning platform stop orchestrator
-
-# Restart services
-provisioning platform restart
-
-# Show platform status
-provisioning platform status
-
-# Check platform health
-provisioning platform health
-
-# View service logs
-provisioning platform logs orchestrator --follow
-
-
-

Service Commands (Individual Services)

-
# List all services
-provisioning services list
-
-# List only running services
-provisioning services list --running
-
-# Filter by category
-provisioning services list --category orchestration
-
-# Service status
-provisioning services status orchestrator
-
-# Start service (with pre-flight checks)
-provisioning services start orchestrator
-
-# Force start (skip checks)
-provisioning services start orchestrator --force
-
-# Stop service
-provisioning services stop orchestrator
-
-# Force stop (ignore dependents)
-provisioning services stop orchestrator --force
-
-# Restart service
-provisioning services restart orchestrator
-
-# Check health
-provisioning services health orchestrator
-
-# View logs
-provisioning services logs orchestrator --follow --lines 100
-
-# Monitor health continuously
-provisioning services monitor orchestrator --interval 30
-
-
-

Dependency & Validation

-
# View dependency graph
-provisioning services dependencies
-
-# View specific service dependencies
-provisioning services dependencies control-center
-
-# Validate all services
-provisioning services validate
-
-# Check readiness
-provisioning services readiness
-
-# Check required services for operation
-provisioning services check server
-
-
-

Registered Services

-
- - - - - - - -
ServicePortTypeAuto-StartDependencies
orchestrator8080PlatformYes-
control-center8081PlatformNoorchestrator
coredns5353InfrastructureNo-
gitea3000, 222InfrastructureNo-
oci-registry5000InfrastructureNo-
mcp-server8082PlatformNoorchestrator
api-gateway8083PlatformNoorchestrator, control-center, mcp-server
-
-
-

Docker Compose

-
# Start all services
-cd provisioning/platform
-docker-compose up -d
-
-# Start specific services
-docker-compose up -d orchestrator control-center
-
-# Check status
-docker-compose ps
-
-# View logs
-docker-compose logs -f orchestrator
-
-# Stop all services
-docker-compose down
-
-# Stop and remove volumes
-docker-compose down -v
-
-
-

Service State Directories

-
~/.provisioning/services/
-├── pids/          # Process ID files
-├── state/         # Service state (JSON)
-└── logs/          # Service logs
-
-
-

Health Check Endpoints

-
- - - - - - - -
ServiceEndpointType
orchestratorhttp://localhost:9090/healthHTTP
control-centerhttp://localhost:9080/healthHTTP
corednslocalhost:5353TCP
giteahttp://localhost:3000/api/healthzHTTP
oci-registryhttp://localhost:5000/v2/HTTP
mcp-serverhttp://localhost:8082/healthHTTP
api-gatewayhttp://localhost:8083/healthHTTP
-
-
-

Common Workflows

-

Start Platform for Development

-
# Start core services
-provisioning platform start orchestrator
-
-# Check status
-provisioning platform status
-
-# Check health
-provisioning platform health
-
-

Start Full Platform Stack

-
# Use Docker Compose
-cd provisioning/platform
-docker-compose up -d
-
-# Verify
-docker-compose ps
-provisioning platform health
-
-

Debug Service Issues

-
# Check service status
-provisioning services status <service>
-
-# View logs
-provisioning services logs <service> --follow
-
-# Check health
-provisioning services health <service>
-
-# Validate prerequisites
-provisioning services validate
-
-# Restart service
-provisioning services restart <service>
-
-

Safe Service Shutdown

-
# Check dependents
-nu -c "use lib_provisioning/services/mod.nu *; can-stop-service orchestrator"
-
-# Stop with dependency check
-provisioning services stop orchestrator
-
-# Force stop if needed
-provisioning services stop orchestrator --force
-
-
-

Troubleshooting

-

Service Won’t Start

-
# 1. Check prerequisites
-provisioning services validate
-
-# 2. View detailed status
-provisioning services status <service>
-
-# 3. Check logs
-provisioning services logs <service>
-
-# 4. Verify binary/image exists
-ls ~/.provisioning/bin/<service>
-docker images | grep <service>
-
-

Health Check Failing

-
# Check endpoint manually
-curl http://localhost:9090/health
-
-# View health details
-provisioning services health <service>
-
-# Monitor continuously
-provisioning services monitor <service> --interval 10
-
-

PID File Stale

-
# Remove stale PID file
-rm ~/.provisioning/services/pids/<service>.pid
-
-# Restart service
-provisioning services restart <service>
-
-

Port Already in Use

-
# Find process using port
-lsof -i :9090
-
-# Kill process
-kill <PID>
-
-# Restart service
-provisioning services start <service>
-
-
-

Integration with Operations

-

Server Operations

-
# Orchestrator auto-starts if needed
-provisioning server create
-
-# Manual check
-provisioning services check server
-
-

Workflow Operations

-
# Orchestrator auto-starts
-provisioning workflow submit my-workflow
-
-# Check status
-provisioning services status orchestrator
-
-

Test Operations

-
# Orchestrator required for test environments
-provisioning test quick kubernetes
-
-# Pre-flight check
-provisioning services check test-env
-
-
-

Advanced Usage

-

Custom Service Startup Order

-

Services start based on:

-
    -
  1. Dependency order (topological sort)
  2. -
  3. start_order field (lower = earlier)
  4. -
-

Auto-Start Configuration

-

Edit provisioning/config/services.toml:

-
[services.<service>.startup]
-auto_start = true  # Enable auto-start
-start_timeout = 30 # Timeout in seconds
-start_order = 10   # Startup priority
-
-

Health Check Configuration

-
[services.<service>.health_check]
-type = "http"      # http, tcp, command, file
-interval = 10      # Seconds between checks
-retries = 3        # Max retry attempts
-timeout = 5        # Check timeout
-
-[services.<service>.health_check.http]
-endpoint = "http://localhost:9090/health"
-expected_status = 200
-
-
-

Key Files

-
    -
  • Service Registry: provisioning/config/services.toml
  • -
  • KCL Schema: provisioning/kcl/services.k
  • -
  • Docker Compose: provisioning/platform/docker-compose.yaml
  • -
  • User Guide: docs/user/SERVICE_MANAGEMENT_GUIDE.md
  • -
-
-

Getting Help

-
# View documentation
-cat docs/user/SERVICE_MANAGEMENT_GUIDE.md | less
-
-# Run verification
-nu provisioning/core/nulib/tests/verify_services.nu
-
-# Check readiness
-provisioning services readiness
-
-
-

Quick Tip: Use --help flag with any command for detailed usage information.

-
-

Maintained By: Platform Team -Support: GitHub Issues

-

Service Monitoring & Alerting Setup

-

Complete guide for monitoring the 9-service platform with Prometheus, Grafana, and AlertManager

-

Version: 1.0.0 -Last Updated: 2026-01-05 -Target Audience: DevOps Engineers, Platform Operators -Status: Production Ready

-
-

Overview

-

This guide provides complete setup instructions for monitoring and alerting on the provisioning platform using industry-standard tools:

-
    -
  • Prometheus: Metrics collection and time-series database
  • -
  • Grafana: Visualization and dashboarding
  • -
  • AlertManager: Alert routing and notification
  • -
-
-

Architecture

-
Services (metrics endpoints)
-    ↓
-Prometheus (scrapes every 30s)
-    ↓
-AlertManager (evaluates rules)
-    ↓
-Notification Channels (email, slack, pagerduty)
-
-Prometheus Data
-    ↓
-Grafana (queries)
-    ↓
-Dashboards & Visualization
-
-
-

Prerequisites

-

Software Requirements

-
# Prometheus (for metrics)
-wget https://github.com/prometheus/prometheus/releases/download/v2.48.0/prometheus-2.48.0.linux-amd64.tar.gz
-tar xvfz prometheus-2.48.0.linux-amd64.tar.gz
-sudo mv prometheus-2.48.0.linux-amd64 /opt/prometheus
-
-# Grafana (for dashboards)
-sudo apt-get install -y grafana-server
-
-# AlertManager (for alerting)
-wget https://github.com/prometheus/alertmanager/releases/download/v0.26.0/alertmanager-0.26.0.linux-amd64.tar.gz
-tar xvfz alertmanager-0.26.0.linux-amd64.tar.gz
-sudo mv alertmanager-0.26.0.linux-amd64 /opt/alertmanager
-
-

System Requirements

-
    -
  • CPU: 2+ cores
  • -
  • Memory: 4 GB minimum, 8 GB recommended
  • -
  • Disk: 100 GB for metrics retention (30 days)
  • -
  • Network: Access to all service endpoints
  • -
-

Ports

-
- - - - -
ComponentPortPurpose
Prometheus9090Web UI & API
Grafana3000Web UI
AlertManager9093Web UI & API
Node Exporter9100System metrics
-
-
-

Service Metrics Endpoints

-

All platform services expose metrics on the /metrics endpoint:

-
# Health and metrics endpoints for each service
-curl http://localhost:8200/health    # Vault health
-curl http://localhost:8200/metrics   # Vault metrics (Prometheus format)
-
-curl http://localhost:8081/health    # Registry health
-curl http://localhost:8081/metrics   # Registry metrics
-
-curl http://localhost:8083/health    # RAG health
-curl http://localhost:8083/metrics   # RAG metrics
-
-curl http://localhost:8082/health    # AI Service health
-curl http://localhost:8082/metrics   # AI Service metrics
-
-curl http://localhost:9090/health    # Orchestrator health
-curl http://localhost:9090/metrics   # Orchestrator metrics
-
-curl http://localhost:8080/health    # Control Center health
-curl http://localhost:8080/metrics   # Control Center metrics
-
-curl http://localhost:8084/health    # MCP Server health
-curl http://localhost:8084/metrics   # MCP Server metrics
-
-
-

Prometheus Configuration

-

1. Create Prometheus Config

-
# /etc/prometheus/prometheus.yml
-global:
-  scrape_interval: 30s
-  evaluation_interval: 30s
-  external_labels:
-    monitor: 'provisioning-platform'
-    environment: 'production'
-
-alerting:
-  alertmanagers:
-    - static_configs:
-        - targets:
-            - localhost:9093
-
-rule_files:
-  - '/etc/prometheus/rules/*.yml'
-
-scrape_configs:
-  # Core Platform Services
-  - job_name: 'vault-service'
-    metrics_path: '/metrics'
-    static_configs:
-      - targets: ['localhost:8200']
-    relabel_configs:
-      - source_labels: [__address__]
-        target_label: instance
-        replacement: 'vault-service'
-
-  - job_name: 'extension-registry'
-    metrics_path: '/metrics'
-    static_configs:
-      - targets: ['localhost:8081']
-    relabel_configs:
-      - source_labels: [__address__]
-        target_label: instance
-        replacement: 'registry'
-
-  - job_name: 'rag-service'
-    metrics_path: '/metrics'
-    static_configs:
-      - targets: ['localhost:8083']
-    relabel_configs:
-      - source_labels: [__address__]
-        target_label: instance
-        replacement: 'rag'
-
-  - job_name: 'ai-service'
-    metrics_path: '/metrics'
-    static_configs:
-      - targets: ['localhost:8082']
-    relabel_configs:
-      - source_labels: [__address__]
-        target_label: instance
-        replacement: 'ai-service'
-
-  - job_name: 'orchestrator'
-    metrics_path: '/metrics'
-    static_configs:
-      - targets: ['localhost:9090']
-    relabel_configs:
-      - source_labels: [__address__]
-        target_label: instance
-        replacement: 'orchestrator'
-
-  - job_name: 'control-center'
-    metrics_path: '/metrics'
-    static_configs:
-      - targets: ['localhost:8080']
-    relabel_configs:
-      - source_labels: [__address__]
-        target_label: instance
-        replacement: 'control-center'
-
-  - job_name: 'mcp-server'
-    metrics_path: '/metrics'
-    static_configs:
-      - targets: ['localhost:8084']
-    relabel_configs:
-      - source_labels: [__address__]
-        target_label: instance
-        replacement: 'mcp-server'
-
-  # System Metrics (Node Exporter)
-  - job_name: 'node'
-    static_configs:
-      - targets: ['localhost:9100']
-        labels:
-          instance: 'system'
-
-  # SurrealDB (if multiuser/enterprise)
-  - job_name: 'surrealdb'
-    metrics_path: '/metrics'
-    static_configs:
-      - targets: ['surrealdb:8000']
-
-  # Etcd (if enterprise)
-  - job_name: 'etcd'
-    metrics_path: '/metrics'
-    static_configs:
-      - targets: ['etcd:2379']
-
-

2. Start Prometheus

-
# Create necessary directories
-sudo mkdir -p /etc/prometheus /var/lib/prometheus
-sudo mkdir -p /etc/prometheus/rules
-
-# Start Prometheus
-cd /opt/prometheus
-sudo ./prometheus --config.file=/etc/prometheus/prometheus.yml \
-  --storage.tsdb.path=/var/lib/prometheus \
-  --web.console.templates=consoles \
-  --web.console.libraries=console_libraries
-
-# Or as systemd service
-sudo tee /etc/systemd/system/prometheus.service > /dev/null << EOF
-[Unit]
-Description=Prometheus
-Wants=network-online.target
-After=network-online.target
-
-[Service]
-User=prometheus
-Type=simple
-ExecStart=/opt/prometheus/prometheus \
-  --config.file=/etc/prometheus/prometheus.yml \
-  --storage.tsdb.path=/var/lib/prometheus
-
-Restart=on-failure
-RestartSec=10
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-sudo systemctl daemon-reload
-sudo systemctl enable prometheus
-sudo systemctl start prometheus
-
-

3. Verify Prometheus

-
# Check Prometheus is running
-curl -s http://localhost:9090/-/healthy
-
-# List scraped targets
-curl -s http://localhost:9090/api/v1/targets | jq .
-
-# Query test metric
-curl -s 'http://localhost:9090/api/v1/query?query=up' | jq .
-
-
-

Alert Rules Configuration

-

1. Create Alert Rules

-
# /etc/prometheus/rules/platform-alerts.yml
-groups:
-  - name: platform_availability
-    interval: 30s
-    rules:
-      - alert: ServiceDown
-        expr: up{job=~"vault-service|registry|rag|ai-service|orchestrator"} == 0
-        for: 5m
-        labels:
-          severity: critical
-          service: '{{ $labels.job }}'
-        annotations:
-          summary: "{{ $labels.job }} is DOWN"
-          description: "{{ $labels.job }} has been down for 5+ minutes"
-
-      - alert: ServiceSlowResponse
-        expr: histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m])) > 1
-        for: 5m
-        labels:
-          severity: warning
-          service: '{{ $labels.job }}'
-        annotations:
-          summary: "{{ $labels.job }} slow response times"
-          description: "95th percentile latency above 1 second"
-
-  - name: platform_errors
-    interval: 30s
-    rules:
-      - alert: HighErrorRate
-        expr: rate(http_requests_total{status=~"5.."}[5m]) > 0.05
-        for: 5m
-        labels:
-          severity: warning
-          service: '{{ $labels.job }}'
-        annotations:
-          summary: "{{ $labels.job }} high error rate"
-          description: "Error rate above 5% for 5 minutes"
-
-      - alert: DatabaseConnectionError
-        expr: increase(database_connection_errors_total[5m]) > 10
-        for: 2m
-        labels:
-          severity: critical
-          component: database
-        annotations:
-          summary: "Database connection failures detected"
-          description: "{{ $value }} connection errors in last 5 minutes"
-
-      - alert: QueueBacklog
-        expr: orchestrator_queue_depth > 1000
-        for: 5m
-        labels:
-          severity: warning
-          component: orchestrator
-        annotations:
-          summary: "Orchestrator queue backlog growing"
-          description: "Queue depth: {{ $value }} tasks"
-
-  - name: platform_resources
-    interval: 30s
-    rules:
-      - alert: HighMemoryUsage
-        expr: container_memory_usage_bytes / container_spec_memory_limit_bytes > 0.9
-        for: 5m
-        labels:
-          severity: warning
-          resource: memory
-        annotations:
-          summary: "{{ $labels.container_name }} memory usage critical"
-          description: "Memory usage: {{ $value | humanizePercentage }}"
-
-      - alert: HighDiskUsage
-        expr: node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes < 0.1
-        for: 5m
-        labels:
-          severity: warning
-          resource: disk
-        annotations:
-          summary: "Disk space critically low"
-          description: "Available disk space: {{ $value | humanizePercentage }}"
-
-      - alert: HighCPUUsage
-        expr: (1 - avg(rate(node_cpu_seconds_total{mode="idle"}[5m])) by (instance)) > 0.9
-        for: 10m
-        labels:
-          severity: warning
-          resource: cpu
-        annotations:
-          summary: "High CPU usage detected"
-          description: "CPU usage: {{ $value | humanizePercentage }}"
-
-      - alert: DiskIOLatency
-        expr: node_disk_io_time_seconds_total > 100
-        for: 5m
-        labels:
-          severity: warning
-          resource: disk
-        annotations:
-          summary: "High disk I/O latency"
-          description: "I/O latency: {{ $value }}ms"
-
-  - name: platform_network
-    interval: 30s
-    rules:
-      - alert: HighNetworkLatency
-        expr: probe_duration_seconds > 0.5
-        for: 5m
-        labels:
-          severity: warning
-          component: network
-        annotations:
-          summary: "High network latency detected"
-          description: "Latency: {{ $value }}ms"
-
-      - alert: PacketLoss
-        expr: node_network_transmit_errors_total > 100
-        for: 5m
-        labels:
-          severity: warning
-          component: network
-        annotations:
-          summary: "Packet loss detected"
-          description: "Transmission errors: {{ $value }}"
-
-  - name: platform_services
-    interval: 30s
-    rules:
-      - alert: VaultSealed
-        expr: vault_core_unsealed == 0
-        for: 1m
-        labels:
-          severity: critical
-          service: vault
-        annotations:
-          summary: "Vault is sealed"
-          description: "Vault instance is sealed and requires unseal operation"
-
-      - alert: RegistryAuthError
-        expr: increase(registry_auth_failures_total[5m]) > 5
-        for: 2m
-        labels:
-          severity: warning
-          service: registry
-        annotations:
-          summary: "Registry authentication failures"
-          description: "{{ $value }} auth failures in last 5 minutes"
-
-      - alert: RAGVectorDBDown
-        expr: rag_vectordb_connection_status == 0
-        for: 2m
-        labels:
-          severity: critical
-          service: rag
-        annotations:
-          summary: "RAG Vector Database disconnected"
-          description: "Vector DB connection lost"
-
-      - alert: AIServiceMCPError
-        expr: increase(ai_service_mcp_errors_total[5m]) > 10
-        for: 2m
-        labels:
-          severity: warning
-          service: ai_service
-        annotations:
-          summary: "AI Service MCP integration errors"
-          description: "{{ $value }} errors in last 5 minutes"
-
-      - alert: OrchestratorLeaderElectionIssue
-        expr: orchestrator_leader_elected == 0
-        for: 5m
-        labels:
-          severity: critical
-          service: orchestrator
-        annotations:
-          summary: "Orchestrator leader election failed"
-          description: "No leader elected in cluster"
-
-

2. Validate Alert Rules

-
# Check rule syntax
-/opt/prometheus/promtool check rules /etc/prometheus/rules/platform-alerts.yml
-
-# Reload Prometheus with new rules (without restart)
-curl -X POST http://localhost:9090/-/reload
-
-
-

AlertManager Configuration

-

1. Create AlertManager Config

-
# /etc/alertmanager/alertmanager.yml
-global:
-  resolve_timeout: 5m
-  slack_api_url: 'YOUR_SLACK_WEBHOOK_URL'
-  pagerduty_url: 'https://events.pagerduty.com/v2/enqueue'
-
-route:
-  receiver: 'platform-notifications'
-  group_by: ['alertname', 'service', 'severity']
-  group_wait: 10s
-  group_interval: 10s
-  repeat_interval: 12h
-
-  routes:
-    # Critical alerts go to PagerDuty
-    - match:
-        severity: critical
-      receiver: 'pagerduty-critical'
-      group_wait: 0s
-      repeat_interval: 5m
-
-    # Warnings go to Slack
-    - match:
-        severity: warning
-      receiver: 'slack-warnings'
-      repeat_interval: 1h
-
-    # Service-specific routing
-    - match:
-        service: vault
-      receiver: 'vault-team'
-      group_by: ['service', 'severity']
-
-    - match:
-        service: orchestrator
-      receiver: 'orchestrator-team'
-      group_by: ['service', 'severity']
-
-receivers:
-  - name: 'platform-notifications'
-    slack_configs:
-      - channel: '#platform-alerts'
-        title: 'Platform Alert'
-        text: '{{ range .Alerts }}{{ .Annotations.description }}{{ end }}'
-        send_resolved: true
-
-  - name: 'slack-warnings'
-    slack_configs:
-      - channel: '#platform-warnings'
-        title: 'Warning: {{ .GroupLabels.alertname }}'
-        text: '{{ range .Alerts }}{{ .Annotations.description }}{{ end }}'
-
-  - name: 'pagerduty-critical'
-    pagerduty_configs:
-      - service_key: 'YOUR_PAGERDUTY_SERVICE_KEY'
-        description: '{{ .GroupLabels.alertname }}'
-        details:
-          firing: '{{ template "pagerduty.default.instances" .Alerts.Firing }}'
-
-  - name: 'vault-team'
-    email_configs:
-      - to: 'vault-team@company.com'
-        from: 'alertmanager@company.com'
-        smarthost: 'smtp.company.com:587'
-        auth_username: 'alerts@company.com'
-        auth_password: 'PASSWORD'
-        headers:
-          Subject: 'Vault Alert: {{ .GroupLabels.alertname }}'
-
-  - name: 'orchestrator-team'
-    email_configs:
-      - to: 'orchestrator-team@company.com'
-        from: 'alertmanager@company.com'
-        smarthost: 'smtp.company.com:587'
-
-inhibit_rules:
-  # Don't alert on errors if service is already down
-  - source_match:
-      severity: 'critical'
-      alertname: 'ServiceDown'
-    target_match_re:
-      severity: 'warning|info'
-    equal: ['service', 'instance']
-
-  # Don't alert on resource exhaustion if service is down
-  - source_match:
-      alertname: 'ServiceDown'
-    target_match_re:
-      alertname: 'HighMemoryUsage|HighCPUUsage'
-    equal: ['instance']
-
-

2. Start AlertManager

-
cd /opt/alertmanager
-sudo ./alertmanager --config.file=/etc/alertmanager/alertmanager.yml \
-  --storage.path=/var/lib/alertmanager
-
-# Or as systemd service
-sudo tee /etc/systemd/system/alertmanager.service > /dev/null << EOF
-[Unit]
-Description=AlertManager
-Wants=network-online.target
-After=network-online.target
-
-[Service]
-User=alertmanager
-Type=simple
-ExecStart=/opt/alertmanager/alertmanager \
-  --config.file=/etc/alertmanager/alertmanager.yml \
-  --storage.path=/var/lib/alertmanager
-
-Restart=on-failure
-RestartSec=10
-
-[Install]
-WantedBy=multi-user.target
-EOF
-
-sudo systemctl daemon-reload
-sudo systemctl enable alertmanager
-sudo systemctl start alertmanager
-
-

3. Verify AlertManager

-
# Check AlertManager is running
-curl -s http://localhost:9093/-/healthy
-
-# List active alerts
-curl -s http://localhost:9093/api/v1/alerts | jq .
-
-# Check configuration
-curl -s http://localhost:9093/api/v1/status | jq .
-
-
-

Grafana Dashboards

-

1. Install Grafana

-
# Install Grafana
-sudo apt-get install -y grafana-server
-
-# Start Grafana
-sudo systemctl enable grafana-server
-sudo systemctl start grafana-server
-
-# Access at http://localhost:3000
-# Default: admin/admin
-
-

2. Add Prometheus Data Source

-
# Via API
-curl -X POST http://localhost:3000/api/datasources \
-  -H "Content-Type: application/json" \
-  -u admin:admin \
-  -d '{
-    "name": "Prometheus",
-    "type": "prometheus",
-    "url": "http://localhost:9090",
-    "access": "proxy",
-    "isDefault": true
-  }'
-
-

3. Create Platform Overview Dashboard

-
{
-  "dashboard": {
-    "title": "Platform Overview",
-    "description": "9-service provisioning platform metrics",
-    "tags": ["platform", "overview"],
-    "timezone": "browser",
-    "panels": [
-      {
-        "title": "Service Status",
-        "type": "stat",
-        "targets": [
-          {
-            "expr": "up{job=~\"vault-service|registry|rag|ai-service|orchestrator|control-center|mcp-server\"}"
-          }
-        ],
-        "fieldConfig": {
-          "defaults": {
-            "mappings": [
-              {
-                "type": "value",
-                "value": "1",
-                "text": "UP"
-              },
-              {
-                "type": "value",
-                "value": "0",
-                "text": "DOWN"
-              }
-            ]
-          }
-        }
-      },
-      {
-        "title": "Request Rate",
-        "type": "graph",
-        "targets": [
-          {
-            "expr": "rate(http_requests_total[5m])"
-          }
-        ]
-      },
-      {
-        "title": "Error Rate",
-        "type": "graph",
-        "targets": [
-          {
-            "expr": "rate(http_requests_total{status=~\"5..\"}[5m])"
-          }
-        ]
-      },
-      {
-        "title": "Latency (p95)",
-        "type": "graph",
-        "targets": [
-          {
-            "expr": "histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))"
-          }
-        ]
-      },
-      {
-        "title": "Memory Usage",
-        "type": "graph",
-        "targets": [
-          {
-            "expr": "container_memory_usage_bytes / 1024 / 1024"
-          }
-        ]
-      },
-      {
-        "title": "Disk Usage",
-        "type": "gauge",
-        "targets": [
-          {
-            "expr": "(1 - (node_filesystem_avail_bytes / node_filesystem_size_bytes)) * 100"
-          }
-        ]
-      }
-    ]
-  }
-}
-
-

4. Import Dashboard via API

-
# Save dashboard JSON to file
-cat > platform-overview.json << 'EOF'
-{
-  "dashboard": { ... }
-}
-EOF
-
-# Import dashboard
-curl -X POST http://localhost:3000/api/dashboards/db \
-  -H "Content-Type: application/json" \
-  -u admin:admin \
-  -d @platform-overview.json
-
-
-

Health Check Monitoring

-

1. Service Health Check Script

-
#!/bin/bash
-# scripts/check-service-health.sh
-
-SERVICES=(
-  "vault:8200"
-  "registry:8081"
-  "rag:8083"
-  "ai-service:8082"
-  "orchestrator:9090"
-  "control-center:8080"
-  "mcp-server:8084"
-)
-
-UNHEALTHY=0
-
-for service in "${SERVICES[@]}"; do
-  IFS=':' read -r name port <<< "$service"
-
-  response=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:$port/health)
-
-  if [ "$response" = "200" ]; then
-    echo "✓ $name is healthy"
-  else
-    echo "✗ $name is UNHEALTHY (HTTP $response)"
-    ((UNHEALTHY++))
-  fi
-done
-
-if [ $UNHEALTHY -gt 0 ]; then
-  echo ""
-  echo "WARNING: $UNHEALTHY service(s) unhealthy"
-  exit 1
-fi
-
-exit 0
-
-

2. Liveness Probe Configuration

-
# For Kubernetes deployments
-apiVersion: v1
-kind: Pod
-metadata:
-  name: vault-service
-spec:
-  containers:
-  - name: vault-service
-    image: vault-service:latest
-    livenessProbe:
-      httpGet:
-        path: /health
-        port: 8200
-      initialDelaySeconds: 30
-      periodSeconds: 10
-      failureThreshold: 3
-
-    readinessProbe:
-      httpGet:
-        path: /health
-        port: 8200
-      initialDelaySeconds: 10
-      periodSeconds: 5
-      failureThreshold: 2
-
-
-

Log Aggregation (ELK Stack)

-

1. Elasticsearch Setup

-
# Install Elasticsearch
-wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-8.11.0-linux-x86_64.tar.gz
-tar xvfz elasticsearch-8.11.0-linux-x86_64.tar.gz
-cd elasticsearch-8.11.0/bin
-./elasticsearch
-
-

2. Filebeat Configuration

-
# /etc/filebeat/filebeat.yml
-filebeat.inputs:
-  - type: log
-    enabled: true
-    paths:
-      - /var/log/provisioning/*.log
-    fields:
-      service: provisioning-platform
-      environment: production
-
-output.elasticsearch:
-  hosts: ["localhost:9200"]
-  username: "elastic"
-  password: "changeme"
-
-logging.level: info
-logging.to_files: true
-logging.files:
-  path: /var/log/filebeat
-
-

3. Kibana Dashboard

-
# Access at http://localhost:5601
-# Create index pattern: provisioning-*
-# Create visualizations for:
-# - Error rate over time
-# - Service availability
-# - Performance metrics
-# - Request volume
-
-
-

Monitoring Dashboard Queries

-

Common Prometheus Queries

-
# Service availability (last hour)
-avg(increase(up[1h])) by (job)
-
-# Request rate per service
-sum(rate(http_requests_total[5m])) by (job)
-
-# Error rate per service
-sum(rate(http_requests_total{status=~"5.."}[5m])) by (job)
-
-# Latency percentiles
-histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))
-histogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m]))
-
-# Memory usage per service
-container_memory_usage_bytes / 1024 / 1024 / 1024
-
-# CPU usage per service
-rate(container_cpu_usage_seconds_total[5m]) * 100
-
-# Disk I/O operations
-rate(node_disk_io_time_seconds_total[5m])
-
-# Network throughput
-rate(node_network_transmit_bytes_total[5m])
-
-# Queue depth (Orchestrator)
-orchestrator_queue_depth
-
-# Task processing rate
-rate(orchestrator_tasks_total[5m])
-
-# Task failure rate
-rate(orchestrator_tasks_failed_total[5m])
-
-# Cache hit ratio
-rate(service_cache_hits_total[5m]) / (rate(service_cache_hits_total[5m]) + rate(service_cache_misses_total[5m]))
-
-# Database connection pool status
-database_connection_pool_usage{job="orchestrator"}
-
-# TLS certificate expiration
-(ssl_certificate_expiry - time()) / 86400
-
-
-

Alert Testing

-

1. Test Alert Firing

-
# Manually fire test alert
-curl -X POST http://localhost:9093/api/v1/alerts \
-  -H 'Content-Type: application/json' \
-  -d '[
-    {
-      "status": "firing",
-      "labels": {
-        "alertname": "TestAlert",
-        "severity": "critical"
-      },
-      "annotations": {
-        "summary": "This is a test alert",
-        "description": "Test alert to verify notification routing"
-      }
-    }
-  ]'
-
-

2. Stop Service to Trigger Alert

-
# Stop a service to trigger ServiceDown alert
-pkill -9 vault-service
-
-# Within 5 minutes, alert should fire
-# Check AlertManager UI: http://localhost:9093
-
-# Restart service
-cargo run --release -p vault-service &
-
-# Alert should resolve after service is back up
-
-

3. Generate Load to Test Error Alerts

-
# Generate request load
-ab -n 10000 -c 100 http://localhost:9090/api/v1/health
-
-# Monitor error rate in Prometheus
-curl -s 'http://localhost:9090/api/v1/query?query=rate(http_requests_total{status=~"5.."}[5m])' | jq .
-
-
-

Backup & Retention Policies

-

1. Prometheus Data Backup

-
#!/bin/bash
-# scripts/backup-prometheus-data.sh
-
-BACKUP_DIR="/backups/prometheus"
-RETENTION_DAYS=30
-
-# Create snapshot
-curl -X POST http://localhost:9090/api/v1/admin/tsdb/snapshot
-
-# Backup snapshot
-SNAPSHOT=$(ls -t /var/lib/prometheus/snapshots | head -1)
-tar -czf "$BACKUP_DIR/prometheus-$SNAPSHOT.tar.gz" \
-  "/var/lib/prometheus/snapshots/$SNAPSHOT"
-
-# Upload to S3
-aws s3 cp "$BACKUP_DIR/prometheus-$SNAPSHOT.tar.gz" \
-  s3://backups/prometheus/
-
-# Clean old backups
-find "$BACKUP_DIR" -mtime +$RETENTION_DAYS -delete
-
-

2. Prometheus Retention Configuration

-
# Keep metrics for 15 days
-/opt/prometheus/prometheus \
-  --storage.tsdb.retention.time=15d \
-  --storage.tsdb.retention.size=50 GB
-
-
-

Maintenance & Troubleshooting

-

Common Issues

-

Prometheus Won’t Scrape Service

-
# Check configuration
-/opt/prometheus/promtool check config /etc/prometheus/prometheus.yml
-
-# Verify service is accessible
-curl http://localhost:8200/metrics
-
-# Check Prometheus targets
-curl -s http://localhost:9090/api/v1/targets | jq '.data.activeTargets[] | select(.job=="vault-service")'
-
-# Check scrape error
-curl -s http://localhost:9090/api/v1/targets | jq '.data.activeTargets[] | .lastError'
-
-

AlertManager Not Sending Notifications

-
# Verify AlertManager config
-/opt/alertmanager/amtool config routes
-
-# Test webhook
-curl -X POST http://localhost:3012/ -d '{"test": "alert"}'
-
-# Check AlertManager logs
-journalctl -u alertmanager -n 100 -f
-
-# Verify notification channels configured
-curl -s http://localhost:9093/api/v1/receivers
-
-

High Memory Usage

-
# Reduce Prometheus retention
-prometheus --storage.tsdb.retention.time=7d --storage.tsdb.max-block-duration=2h
-
-# Disable unused scrape jobs
-# Edit prometheus.yml and remove unused jobs
-
-# Monitor memory
-ps aux | grep prometheus | grep -v grep
-
-
-

Production Deployment Checklist

-
    -
  • -Prometheus installed and running
  • -
  • -AlertManager installed and running
  • -
  • -Grafana installed and configured
  • -
  • -Prometheus scraping all 8 services
  • -
  • -Alert rules deployed and validated
  • -
  • -Notification channels configured (Slack, email, PagerDuty)
  • -
  • -AlertManager webhooks tested
  • -
  • -Grafana dashboards created
  • -
  • -Log aggregation stack deployed (optional)
  • -
  • -Backup scripts configured
  • -
  • -Retention policies set
  • -
  • -Health checks configured
  • -
  • -Team notified of alerting setup
  • -
  • -Runbooks created for common alerts
  • -
  • -Alert testing procedure documented
  • -
-
-

Quick Commands Reference

-
# Prometheus
-curl http://localhost:9090/api/v1/targets           # List scrape targets
-curl 'http://localhost:9090/api/v1/query?query=up' # Query metric
-curl -X POST http://localhost:9090/-/reload         # Reload config
-
-# AlertManager
-curl http://localhost:9093/api/v1/alerts            # List active alerts
-curl http://localhost:9093/api/v1/receivers         # List receivers
-curl http://localhost:9093/api/v2/status            # Check status
-
-# Grafana
-curl -u admin:admin http://localhost:3000/api/datasources  # List data sources
-curl -u admin:admin http://localhost:3000/api/dashboards   # List dashboards
-
-# Validation
-promtool check config /etc/prometheus/prometheus.yml
-promtool check rules /etc/prometheus/rules/platform-alerts.yml
-amtool config routes
-
-
-

Documentation & Runbooks

-

Sample Runbook: Service Down

-
# Service Down Alert
-
-## Detection
-Alert fires when service is unreachable for 5+ minutes
-
-## Immediate Actions
-1. Check service is running: pgrep -f service-name
-2. Check service port: ss -tlnp | grep 8200
-3. Check service logs: tail -100 /var/log/provisioning/service.log
-
-## Diagnosis
-1. Service crashed: look for panic/error in logs
-2. Port conflict: lsof -i :8200
-3. Configuration issue: validate config file
-4. Dependency down: check database/cache connectivity
-
-## Remediation
-1. Restart service: pkill service && cargo run --release -p service &
-2. Check health: curl http://localhost:8200/health
-3. Verify dependencies: curl http://localhost:5432/health
-
-## Escalation
-If service doesn't recover after restart, escalate to on-call engineer
-
-
-

Resources

- -
-

Last Updated: 2026-01-05 -Version: 1.0.0 -Status: Production Ready ✅

-

CoreDNS Integration Guide

-

Version: 1.0.0 -Date: 2025-10-06 -Author: CoreDNS Integration Agent

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Installation
  4. -
  5. Configuration
  6. -
  7. CLI Commands
  8. -
  9. Zone Management
  10. -
  11. Record Management
  12. -
  13. Docker Deployment
  14. -
  15. Integration
  16. -
  17. Troubleshooting
  18. -
  19. Advanced Topics
  20. -
-
-

Overview

-

The CoreDNS integration provides comprehensive DNS management capabilities for the provisioning system. It supports:

-
    -
  • Local DNS service - Run CoreDNS as binary or Docker container
  • -
  • Dynamic DNS updates - Automatic registration of infrastructure changes
  • -
  • Multi-zone support - Manage multiple DNS zones
  • -
  • Provider integration - Seamless integration with orchestrator
  • -
  • REST API - Programmatic DNS management
  • -
  • Docker deployment - Containerized CoreDNS with docker-compose
  • -
-

Key Features

-

Automatic Server Registration - Servers automatically registered in DNS on creation -✅ Zone File Management - Create, update, and manage zone files programmatically -✅ Multiple Deployment Modes - Binary, Docker, remote, or hybrid -✅ Health Monitoring - Built-in health checks and metrics -✅ CLI Interface - Comprehensive command-line tools -✅ API Integration - REST API for external integration

-
-

Installation

-

Prerequisites

-
    -
  • Nushell 0.107+ - For CLI and scripts
  • -
  • Docker (optional) - For containerized deployment
  • -
  • dig (optional) - For DNS queries
  • -
-

Install CoreDNS Binary

-
# Install latest version
-provisioning dns install
-
-# Install specific version
-provisioning dns install 1.11.1
-
-# Check mode
-provisioning dns install --check
-
-

The binary will be installed to ~/.provisioning/bin/coredns.

-

Verify Installation

-
# Check CoreDNS version
-~/.provisioning/bin/coredns -version
-
-# Verify installation
-ls -lh ~/.provisioning/bin/coredns
-
-
-

Configuration

-

Nickel Configuration Schema

-

Add CoreDNS configuration to your infrastructure config:

-
# In workspace/infra/{name}/config.ncl
-let coredns_config = {
-  mode = "local",
-
-  local = {
-    enabled = true,
-    deployment_type = "binary",  # or "docker"
-    binary_path = "~/.provisioning/bin/coredns",
-    config_path = "~/.provisioning/coredns/Corefile",
-    zones_path = "~/.provisioning/coredns/zones",
-    port = 5353,
-    auto_start = true,
-    zones = ["provisioning.local", "workspace.local"],
-  },
-
-  dynamic_updates = {
-    enabled = true,
-    api_endpoint = "http://localhost:9090/dns",
-    auto_register_servers = true,
-    auto_unregister_servers = true,
-    ttl = 300,
-  },
-
-  upstream = ["8.8.8.8", "1.1.1.1"],
-  default_ttl = 3600,
-  enable_logging = true,
-  enable_metrics = true,
-  metrics_port = 9153,
-} in
-coredns_config
-
-

Configuration Modes

-

Local Mode (Binary)

-

Run CoreDNS as a local binary process:

-
let coredns_config = {
-  mode = "local",
-  local = {
-    deployment_type = "binary",
-    auto_start = true,
-  },
-} in
-coredns_config
-
-

Local Mode (Docker)

-

Run CoreDNS in Docker container:

-
let coredns_config = {
-  mode = "local",
-  local = {
-    deployment_type = "docker",
-    docker = {
-      image = "coredns/coredns:1.11.1",
-      container_name = "provisioning-coredns",
-      restart_policy = "unless-stopped",
-    },
-  },
-} in
-coredns_config
-
-

Remote Mode

-

Connect to external CoreDNS service:

-
let coredns_config = {
-  mode = "remote",
-  remote = {
-    enabled = true,
-    endpoints = ["https://dns1.example.com", "https://dns2.example.com"],
-    zones = ["production.local"],
-    verify_tls = true,
-  },
-} in
-coredns_config
-
-

Disabled Mode

-

Disable CoreDNS integration:

-
let coredns_config = {
-  mode = "disabled",
-} in
-coredns_config
-
-
-

CLI Commands

-

Service Management

-
# Check status
-provisioning dns status
-
-# Start service
-provisioning dns start
-
-# Start in foreground (for debugging)
-provisioning dns start --foreground
-
-# Stop service
-provisioning dns stop
-
-# Restart service
-provisioning dns restart
-
-# Reload configuration (graceful)
-provisioning dns reload
-
-# View logs
-provisioning dns logs
-
-# Follow logs
-provisioning dns logs --follow
-
-# Show last 100 lines
-provisioning dns logs --lines 100
-
-

Health & Monitoring

-
# Check health
-provisioning dns health
-
-# View configuration
-provisioning dns config show
-
-# Validate configuration
-provisioning dns config validate
-
-# Generate new Corefile
-provisioning dns config generate
-
-
-

Zone Management

-

List Zones

-
# List all zones
-provisioning dns zone list
-
-

Output:

-
DNS Zones
-=========
-  • provisioning.local ✓
-  • workspace.local ✓
-
-

Create Zone

-
# Create new zone
-provisioning dns zone create myapp.local
-
-# Check mode
-provisioning dns zone create myapp.local --check
-
-

Show Zone Details

-
# Show all records in zone
-provisioning dns zone show provisioning.local
-
-# JSON format
-provisioning dns zone show provisioning.local --format json
-
-# YAML format
-provisioning dns zone show provisioning.local --format yaml
-
-

Delete Zone

-
# Delete zone (with confirmation)
-provisioning dns zone delete myapp.local
-
-# Force deletion (skip confirmation)
-provisioning dns zone delete myapp.local --force
-
-# Check mode
-provisioning dns zone delete myapp.local --check
-
-
-

Record Management

-

Add Records

-

A Record (IPv4)

-
provisioning dns record add server-01 A 10.0.1.10
-
-# With custom TTL
-provisioning dns record add server-01 A 10.0.1.10 --ttl 600
-
-# With comment
-provisioning dns record add server-01 A 10.0.1.10 --comment "Web server"
-
-# Different zone
-provisioning dns record add server-01 A 10.0.1.10 --zone myapp.local
-
-

AAAA Record (IPv6)

-
provisioning dns record add server-01 AAAA 2001:db8::1
-
-

CNAME Record

-
provisioning dns record add web CNAME server-01.provisioning.local
-
-

MX Record

-
provisioning dns record add @ MX mail.example.com --priority 10
-
-

TXT Record

-
provisioning dns record add @ TXT "v=spf1 mx -all"
-
-

Remove Records

-
# Remove record
-provisioning dns record remove server-01
-
-# Different zone
-provisioning dns record remove server-01 --zone myapp.local
-
-# Check mode
-provisioning dns record remove server-01 --check
-
-

Update Records

-
# Update record value
-provisioning dns record update server-01 A 10.0.1.20
-
-# With new TTL
-provisioning dns record update server-01 A 10.0.1.20 --ttl 1800
-
-

List Records

-
# List all records in zone
-provisioning dns record list
-
-# Different zone
-provisioning dns record list --zone myapp.local
-
-# JSON format
-provisioning dns record list --format json
-
-# YAML format
-provisioning dns record list --format yaml
-
-

Example Output:

-
DNS Records - Zone: provisioning.local
-
-╭───┬──────────────┬──────┬─────────────┬─────╮
-│ # │     name     │ type │    value    │ ttl │
-├───┼──────────────┼──────┼─────────────┼─────┤
-│ 0 │ server-01    │ A    │ 10.0.1.10   │ 300 │
-│ 1 │ server-02    │ A    │ 10.0.1.11   │ 300 │
-│ 2 │ db-01        │ A    │ 10.0.2.10   │ 300 │
-│ 3 │ web          │ CNAME│ server-01   │ 300 │
-╰───┴──────────────┴──────┴─────────────┴─────╯
-
-
-

Docker Deployment

-

Prerequisites

-

Ensure Docker and docker-compose are installed:

-
docker --version
-docker-compose --version
-
-

Start CoreDNS in Docker

-
# Start CoreDNS container
-provisioning dns docker start
-
-# Check mode
-provisioning dns docker start --check
-
-

Manage Docker Container

-
# Check status
-provisioning dns docker status
-
-# View logs
-provisioning dns docker logs
-
-# Follow logs
-provisioning dns docker logs --follow
-
-# Restart container
-provisioning dns docker restart
-
-# Stop container
-provisioning dns docker stop
-
-# Check health
-provisioning dns docker health
-
-

Update Docker Image

-
# Pull latest image
-provisioning dns docker pull
-
-# Pull specific version
-provisioning dns docker pull --version 1.11.1
-
-# Update and restart
-provisioning dns docker update
-
-

Remove Container

-
# Remove container (with confirmation)
-provisioning dns docker remove
-
-# Remove with volumes
-provisioning dns docker remove --volumes
-
-# Force remove (skip confirmation)
-provisioning dns docker remove --force
-
-# Check mode
-provisioning dns docker remove --check
-
-

View Configuration

-
# Show docker-compose config
-provisioning dns docker config
-
-
-

Integration

-

Automatic Server Registration

-

When dynamic DNS is enabled, servers are automatically registered:

-
# Create server (automatically registers in DNS)
-provisioning server create web-01 --infra myapp
-
-# Server gets DNS record: web-01.provisioning.local -> <server-ip>
-
-

Manual Registration

-
use lib_provisioning/coredns/integration.nu *
-
-# Register server
-register-server-in-dns "web-01" "10.0.1.10"
-
-# Unregister server
-unregister-server-from-dns "web-01"
-
-# Bulk register
-bulk-register-servers [
-    {hostname: "web-01", ip: "10.0.1.10"}
-    {hostname: "web-02", ip: "10.0.1.11"}
-    {hostname: "db-01", ip: "10.0.2.10"}
-]
-
-

Sync Infrastructure with DNS

-
# Sync all servers in infrastructure with DNS
-provisioning dns sync myapp
-
-# Check mode
-provisioning dns sync myapp --check
-
-

Service Registration

-
use lib_provisioning/coredns/integration.nu *
-
-# Register service
-register-service-in-dns "api" "10.0.1.10"
-
-# Unregister service
-unregister-service-from-dns "api"
-
-
-

Query DNS

-

Using CLI

-
# Query A record
-provisioning dns query server-01
-
-# Query specific type
-provisioning dns query server-01 --type AAAA
-
-# Query different server
-provisioning dns query server-01 --server 8.8.8.8 --port 53
-
-# Query from local CoreDNS
-provisioning dns query server-01 --server 127.0.0.1 --port 5353
-
-

Using dig

-
# Query from local CoreDNS
-dig @127.0.0.1 -p 5353 server-01.provisioning.local
-
-# Query CNAME
-dig @127.0.0.1 -p 5353 web.provisioning.local CNAME
-
-# Query MX
-dig @127.0.0.1 -p 5353 example.com MX
-
-
-

Troubleshooting

-

CoreDNS Not Starting

-

Symptoms: dns start fails or service doesn’t respond

-

Solutions:

-
    -
  1. -

    Check if port is in use:

    -
    lsof -i :5353
    -netstat -an | grep 5353
    -
    -
  2. -
  3. -

    Validate Corefile:

    -
    provisioning dns config validate
    -
    -
  4. -
  5. -

    Check logs:

    -
    provisioning dns logs
    -tail -f ~/.provisioning/coredns/coredns.log
    -
    -
  6. -
  7. -

    Verify binary exists:

    -
    ls -lh ~/.provisioning/bin/coredns
    -provisioning dns install
    -
    -
  8. -
-

DNS Queries Not Working

-

Symptoms: dig returns SERVFAIL or timeout

-

Solutions:

-
    -
  1. -

    Check CoreDNS is running:

    -
    provisioning dns status
    -provisioning dns health
    -
    -
  2. -
  3. -

    Verify zone file exists:

    -
    ls -lh ~/.provisioning/coredns/zones/
    -cat ~/.provisioning/coredns/zones/provisioning.local.zone
    -
    -
  4. -
  5. -

    Test with dig:

    -
    dig @127.0.0.1 -p 5353 provisioning.local SOA
    -
    -
  6. -
  7. -

    Check firewall:

    -
    # macOS
    -sudo pfctl -sr | grep 5353
    -
    -# Linux
    -sudo iptables -L -n | grep 5353
    -
    -
  8. -
-

Zone File Validation Errors

-

Symptoms: dns config validate shows errors

-

Solutions:

-
    -
  1. -

    Backup zone file:

    -
    cp ~/.provisioning/coredns/zones/provisioning.local.zone \
    -   ~/.provisioning/coredns/zones/provisioning.local.zone.backup
    -
    -
  2. -
  3. -

    Regenerate zone:

    -
    provisioning dns zone create provisioning.local --force
    -
    -
  4. -
  5. -

    Check syntax manually:

    -
    cat ~/.provisioning/coredns/zones/provisioning.local.zone
    -
    -
  6. -
  7. -

    Increment serial:

    -
      -
    • Edit zone file manually
    • -
    • Increase serial number in SOA record
    • -
    -
  8. -
-

Docker Container Issues

-

Symptoms: Docker container won’t start or crashes

-

Solutions:

-
    -
  1. -

    Check Docker logs:

    -
    provisioning dns docker logs
    -docker logs provisioning-coredns
    -
    -
  2. -
  3. -

    Verify volumes exist:

    -
    ls -lh ~/.provisioning/coredns/
    -
    -
  4. -
  5. -

    Check container status:

    -
    provisioning dns docker status
    -docker ps -a | grep coredns
    -
    -
  6. -
  7. -

    Recreate container:

    -
    provisioning dns docker stop
    -provisioning dns docker remove --volumes
    -provisioning dns docker start
    -
    -
  8. -
-

Dynamic Updates Not Working

-

Symptoms: Servers not auto-registered in DNS

-

Solutions:

-
    -
  1. -

    Check if enabled:

    -
    provisioning dns config show | grep -A 5 dynamic_updates
    -
    -
  2. -
  3. -

    Verify orchestrator running:

    -
    curl http://localhost:9090/health
    -
    -
  4. -
  5. -

    Check logs for errors:

    -
    provisioning dns logs | grep -i error
    -
    -
  6. -
  7. -

    Test manual registration:

    -
    use lib_provisioning/coredns/integration.nu *
    -register-server-in-dns "test-server" "10.0.0.1"
    -
    -
  8. -
-
-

Advanced Topics

-

Custom Corefile Plugins

-

Add custom plugins to Corefile:

-
use lib_provisioning/coredns/corefile.nu *
-
-# Add plugin to zone
-add-corefile-plugin \
-    "~/.provisioning/coredns/Corefile" \
-    "provisioning.local" \
-    "cache 30"
-
-

Backup and Restore

-
# Backup configuration
-tar czf coredns-backup.tar.gz ~/.provisioning/coredns/
-
-# Restore configuration
-tar xzf coredns-backup.tar.gz -C ~/
-
-

Zone File Backup

-
use lib_provisioning/coredns/zones.nu *
-
-# Backup zone
-backup-zone-file "provisioning.local"
-
-# Creates: ~/.provisioning/coredns/zones/provisioning.local.zone.YYYYMMDD-HHMMSS.bak
-
-

Metrics and Monitoring

-

CoreDNS exposes Prometheus metrics on port 9153:

-
# View metrics
-curl http://localhost:9153/metrics
-
-# Common metrics:
-# - coredns_dns_request_duration_seconds
-# - coredns_dns_requests_total
-# - coredns_dns_responses_total
-
-

Multi-Zone Setup

-
coredns_config: CoreDNSConfig = {
-    local = {
-        zones = [
-            "provisioning.local",
-            "workspace.local",
-            "dev.local",
-            "staging.local",
-            "prod.local"
-        ]
-    }
-}
-
-

Split-Horizon DNS

-

Configure different zones for internal/external:

-
coredns_config: CoreDNSConfig = {
-    local = {
-        zones = ["internal.local"]
-        port = 5353
-    }
-    remote = {
-        zones = ["external.com"]
-        endpoints = ["https://dns.external.com"]
-    }
-}
-
-
-

Configuration Reference

-

CoreDNSConfig Fields

-
- - - - - - - - - -
FieldTypeDefaultDescription
mode"local" | "remote" | "hybrid" | "disabled""local"Deployment mode
localLocalCoreDNS?-Local config (required for local mode)
remoteRemoteCoreDNS?-Remote config (required for remote mode)
dynamic_updatesDynamicDNS-Dynamic DNS configuration
upstream[str]["8.8.8.8", "1.1.1.1"]Upstream DNS servers
default_ttlint300Default TTL (seconds)
enable_loggingboolTrueEnable query logging
enable_metricsboolTrueEnable Prometheus metrics
metrics_portint9153Metrics port
-
-

LocalCoreDNS Fields

-
- - - - - - - - -
FieldTypeDefaultDescription
enabledboolTrueEnable local CoreDNS
deployment_type"binary" | "docker""binary"How to deploy
binary_pathstr"~/.provisioning/bin/coredns"Path to binary
config_pathstr"~/.provisioning/coredns/Corefile"Corefile path
zones_pathstr"~/.provisioning/coredns/zones"Zones directory
portint5353DNS listening port
auto_startboolTrueAuto-start on boot
zones[str]["provisioning.local"]Managed zones
-
-

DynamicDNS Fields

-
- - - - - - -
FieldTypeDefaultDescription
enabledboolTrueEnable dynamic updates
api_endpointstr"http://localhost:9090/dns"Orchestrator API
auto_register_serversboolTrueAuto-register on create
auto_unregister_serversboolTrueAuto-unregister on delete
ttlint300TTL for dynamic records
update_strategy"immediate" | "batched" | "scheduled""immediate"Update strategy
-
-
-

Examples

-

Complete Setup Example

-
# 1. Install CoreDNS
-provisioning dns install
-
-# 2. Generate configuration
-provisioning dns config generate
-
-# 3. Start service
-provisioning dns start
-
-# 4. Create custom zone
-provisioning dns zone create myapp.local
-
-# 5. Add DNS records
-provisioning dns record add web-01 A 10.0.1.10
-provisioning dns record add web-02 A 10.0.1.11
-provisioning dns record add api CNAME web-01.myapp.local --zone myapp.local
-
-# 6. Query records
-provisioning dns query web-01 --server 127.0.0.1 --port 5353
-
-# 7. Check status
-provisioning dns status
-provisioning dns health
-
-

Docker Deployment Example

-
# 1. Start CoreDNS in Docker
-provisioning dns docker start
-
-# 2. Check status
-provisioning dns docker status
-
-# 3. View logs
-provisioning dns docker logs --follow
-
-# 4. Add records (container must be running)
-provisioning dns record add server-01 A 10.0.1.10
-
-# 5. Query
-dig @127.0.0.1 -p 5353 server-01.provisioning.local
-
-# 6. Stop
-provisioning dns docker stop
-
-
-

Best Practices

-
    -
  1. Use TTL wisely - Lower TTL (300s) for frequently changing records, higher (3600s) for stable
  2. -
  3. Enable logging - Essential for troubleshooting
  4. -
  5. Regular backups - Backup zone files before major changes
  6. -
  7. Validate before reload - Always run dns config validate before reloading
  8. -
  9. Monitor metrics - Track DNS query rates and error rates
  10. -
  11. Use comments - Add comments to records for documentation
  12. -
  13. Separate zones - Use different zones for different environments (dev, staging, prod)
  14. -
-
-

See Also

- -
-

Quick Reference

-

Quick command reference for CoreDNS DNS management

-
-

Installation

-
# Install CoreDNS binary
-provisioning dns install
-
-# Install specific version
-provisioning dns install 1.11.1
-
-
-

Service Management

-
# Status
-provisioning dns status
-
-# Start
-provisioning dns start
-
-# Stop
-provisioning dns stop
-
-# Restart
-provisioning dns restart
-
-# Reload (graceful)
-provisioning dns reload
-
-# Logs
-provisioning dns logs
-provisioning dns logs --follow
-provisioning dns logs --lines 100
-
-# Health
-provisioning dns health
-
-
-

Zone Management

-
# List zones
-provisioning dns zone list
-
-# Create zone
-provisioning dns zone create myapp.local
-
-# Show zone records
-provisioning dns zone show provisioning.local
-provisioning dns zone show provisioning.local --format json
-
-# Delete zone
-provisioning dns zone delete myapp.local
-provisioning dns zone delete myapp.local --force
-
-
-

Record Management

-
# Add A record
-provisioning dns record add server-01 A 10.0.1.10
-
-# Add with custom TTL
-provisioning dns record add server-01 A 10.0.1.10 --ttl 600
-
-# Add with comment
-provisioning dns record add server-01 A 10.0.1.10 --comment "Web server"
-
-# Add to specific zone
-provisioning dns record add server-01 A 10.0.1.10 --zone myapp.local
-
-# Add CNAME
-provisioning dns record add web CNAME server-01.provisioning.local
-
-# Add MX
-provisioning dns record add @ MX mail.example.com --priority 10
-
-# Add TXT
-provisioning dns record add @ TXT "v=spf1 mx -all"
-
-# Remove record
-provisioning dns record remove server-01
-provisioning dns record remove server-01 --zone myapp.local
-
-# Update record
-provisioning dns record update server-01 A 10.0.1.20
-
-# List records
-provisioning dns record list
-provisioning dns record list --zone myapp.local
-provisioning dns record list --format json
-
-
-

DNS Queries

-
# Query A record
-provisioning dns query server-01
-
-# Query CNAME
-provisioning dns query web --type CNAME
-
-# Query from local CoreDNS
-provisioning dns query server-01 --server 127.0.0.1 --port 5353
-
-# Using dig
-dig @127.0.0.1 -p 5353 server-01.provisioning.local
-dig @127.0.0.1 -p 5353 provisioning.local SOA
-
-
-

Configuration

-
# Show configuration
-provisioning dns config show
-
-# Validate configuration
-provisioning dns config validate
-
-# Generate Corefile
-provisioning dns config generate
-
-
-

Docker Deployment

-
# Start Docker container
-provisioning dns docker start
-
-# Status
-provisioning dns docker status
-
-# Logs
-provisioning dns docker logs
-provisioning dns docker logs --follow
-
-# Restart
-provisioning dns docker restart
-
-# Stop
-provisioning dns docker stop
-
-# Health
-provisioning dns docker health
-
-# Remove
-provisioning dns docker remove
-provisioning dns docker remove --volumes
-provisioning dns docker remove --force
-
-# Pull image
-provisioning dns docker pull
-provisioning dns docker pull --version 1.11.1
-
-# Update
-provisioning dns docker update
-
-# Show config
-provisioning dns docker config
-
-
-

Common Workflows

-

Initial Setup

-
# 1. Install
-provisioning dns install
-
-# 2. Start
-provisioning dns start
-
-# 3. Verify
-provisioning dns status
-provisioning dns health
-
-

Add Server

-
# Add DNS record for new server
-provisioning dns record add web-01 A 10.0.1.10
-
-# Verify
-provisioning dns query web-01
-
-

Create Custom Zone

-
# 1. Create zone
-provisioning dns zone create myapp.local
-
-# 2. Add records
-provisioning dns record add web-01 A 10.0.1.10 --zone myapp.local
-provisioning dns record add api CNAME web-01.myapp.local --zone myapp.local
-
-# 3. List records
-provisioning dns record list --zone myapp.local
-
-# 4. Query
-dig @127.0.0.1 -p 5353 web-01.myapp.local
-
-

Docker Setup

-
# 1. Start container
-provisioning dns docker start
-
-# 2. Check status
-provisioning dns docker status
-
-# 3. Add records
-provisioning dns record add server-01 A 10.0.1.10
-
-# 4. Query
-dig @127.0.0.1 -p 5353 server-01.provisioning.local
-
-
-

Troubleshooting

-
# Check if CoreDNS is running
-provisioning dns status
-ps aux | grep coredns
-
-# Check port usage
-lsof -i :5353
-netstat -an | grep 5353
-
-# View logs
-provisioning dns logs
-tail -f ~/.provisioning/coredns/coredns.log
-
-# Validate configuration
-provisioning dns config validate
-
-# Test DNS query
-dig @127.0.0.1 -p 5353 provisioning.local SOA
-
-# Restart service
-provisioning dns restart
-
-# For Docker
-provisioning dns docker logs
-provisioning dns docker health
-docker ps -a | grep coredns
-
-
-

File Locations

-
# Binary
-~/.provisioning/bin/coredns
-
-# Corefile
-~/.provisioning/coredns/Corefile
-
-# Zone files
-~/.provisioning/coredns/zones/
-
-# Logs
-~/.provisioning/coredns/coredns.log
-
-# PID file
-~/.provisioning/coredns/coredns.pid
-
-# Docker compose
-provisioning/config/coredns/docker-compose.yml
-
-
-

Configuration Example

-
import provisioning.coredns as dns
-
-coredns_config: dns.CoreDNSConfig = {
-    mode = "local"
-    local = {
-        enabled = True
-        deployment_type = "binary"  # or "docker"
-        port = 5353
-        zones = ["provisioning.local", "myapp.local"]
-    }
-    dynamic_updates = {
-        enabled = True
-        auto_register_servers = True
-    }
-    upstream = ["8.8.8.8", "1.1.1.1"]
-}
-
-
-

Environment Variables

-
# None required - configuration via Nickel
-
-
-

Default Values

-
- - - - - - - - - -
SettingDefault
Port5353
Zones[“provisioning.local”]
Upstream[“8.8.8.8”, “1.1.1.1”]
TTL300
Deploymentbinary
Auto-starttrue
Loggingenabled
Metricsenabled
Metrics Port9153
-
-
-

See Also

-
    -
  • Complete Guide - Full documentation
  • -
  • Implementation Summary - Technical details
  • -
  • Nickel Schema - Configuration schema
  • -
-
-

Last Updated: 2025-10-06 -Version: 1.0.0

-

Production Readiness Checklist

-

Status: ✅ PRODUCTION READY -Version: 1.0.0 -Last Verified: 2025-12-09

-

Executive Summary

-

The Provisioning Setup System is production-ready for enterprise deployment. All components have been tested, validated, and verified to meet -production standards.

-

Quality Metrics

-
    -
  • Code Quality: 100% Nushell 0.109 compliant
  • -
  • Test Coverage: 33/33 tests passing (100% pass rate)
  • -
  • Security: Enterprise-grade security controls
  • -
  • Performance: Sub-second response times
  • -
  • Documentation: Comprehensive user and admin guides
  • -
  • Reliability: Graceful error handling and fallbacks
  • -
-
-

Pre-Deployment Verification

-

1. System Requirements ✅

-
    -
  • -Nushell 0.109.0 or higher
  • -
  • -bash shell available
  • -
  • -One deployment tool (Docker/Kubernetes/SSH/systemd)
  • -
  • -2+ CPU cores (4+ recommended)
  • -
  • -4+ GB RAM (8+ recommended)
  • -
  • -Network connectivity (optional for offline mode)
  • -
-

2. Code Quality ✅

-
    -
  • -All 9 modules passing syntax validation
  • -
  • -46 total issues identified and resolved
  • -
  • -Nushell 0.109 compatibility verified
  • -
  • -Code style guidelines followed
  • -
  • -No hardcoded credentials or secrets
  • -
-

3. Testing ✅

-
    -
  • -Unit tests: 33/33 passing
  • -
  • -Integration tests: All passing
  • -
  • -E2E tests: All passing
  • -
  • -Health check: Operational
  • -
  • -Deployment validation: Working
  • -
-

4. Security ✅

-
    -
  • -Configuration encryption ready
  • -
  • -Credential management secure
  • -
  • -No sensitive data in logs
  • -
  • -GDPR-compliant audit logging
  • -
  • -Role-based access control (RBAC) ready
  • -
-

5. Documentation ✅

-
    -
  • -User Quick Start Guide
  • -
  • -Comprehensive Setup Guide
  • -
  • -Installation Guide
  • -
  • -Troubleshooting Guide
  • -
  • -API Documentation
  • -
-

6. Deployment Readiness ✅

-
    -
  • -Installation script tested
  • -
  • -Health check script operational
  • -
  • -Configuration validation working
  • -
  • -Backup/restore functionality verified
  • -
  • -Migration path available
  • -
-
-

Pre-Production Checklist

-

Team Preparation

-
    -
  • -Team trained on provisioning basics
  • -
  • -Admin team trained on configuration management
  • -
  • -Support team trained on troubleshooting
  • -
  • -Operations team ready for deployment
  • -
  • -Security team reviewed security controls
  • -
-

Infrastructure Preparation

-
    -
  • -Target deployment environment prepared
  • -
  • -Network connectivity verified
  • -
  • -Required tools installed and tested
  • -
  • -Backup systems in place
  • -
  • -Monitoring configured
  • -
-

Configuration Preparation

-
    -
  • -Provider credentials securely stored
  • -
  • -Network configuration planned
  • -
  • -Workspace structure defined
  • -
  • -Deployment strategy documented
  • -
  • -Rollback plan prepared
  • -
-

Testing in Production-Like Environment

-
    -
  • -System installed on staging environment
  • -
  • -All capabilities tested
  • -
  • -Health checks passing
  • -
  • -Full deployment scenario tested
  • -
  • -Failover procedures tested
  • -
-
-

Deployment Steps

-

Phase 1: Installation (30 minutes)

-
# 1. Run installation script
-./scripts/install-provisioning.sh
-
-# 2. Verify installation
-provisioning -v
-
-# 3. Run health check
-nu scripts/health-check.nu
-
-

Phase 2: Initial Configuration (15 minutes)

-
# 1. Run setup wizard
-provisioning setup system --interactive
-
-# 2. Validate configuration
-provisioning setup validate
-
-# 3. Test health
-provisioning platform health
-
-

Phase 3: Workspace Setup (10 minutes)

-
# 1. Create production workspace
-provisioning setup workspace production
-
-# 2. Configure providers
-provisioning setup provider upcloud --config config.toml
-
-# 3. Validate workspace
-provisioning setup validate
-
-

Phase 4: Verification (10 minutes)

-
# 1. Run comprehensive health check
-provisioning setup validate --verbose
-
-# 2. Test deployment (dry-run)
-provisioning server create --check
-
-# 3. Verify no errors
-# Review output and confirm readiness
-
-
-

Post-Deployment Verification

-

Immediate (Within 1 hour)

-
    -
  • -All services running and healthy
  • -
  • -Configuration loaded correctly
  • -
  • -First test deployment successful
  • -
  • -Monitoring and logging working
  • -
  • -Backup system operational
  • -
-

Daily (First week)

-
    -
  • -Run health checks daily
  • -
  • -Monitor error logs
  • -
  • -Verify backup operations
  • -
  • -Check workspace synchronization
  • -
  • -Validate credentials refresh
  • -
-

Weekly (First month)

-
    -
  • -Run comprehensive validation
  • -
  • -Test backup/restore procedures
  • -
  • -Review audit logs
  • -
  • -Performance analysis
  • -
  • -Security review
  • -
-

Ongoing (Production)

-
    -
  • -Weekly health checks
  • -
  • -Monthly comprehensive validation
  • -
  • -Quarterly security review
  • -
  • -Annual disaster recovery test
  • -
-
-

Troubleshooting Reference

-

Issue: Setup wizard won’t start

-

Solution:

-
# Check Nushell installation
-nu --version
-
-# Run with debug
-provisioning -x setup system --interactive
-
-

Issue: Configuration validation fails

-

Solution:

-
# Check configuration
-provisioning setup validate --verbose
-
-# View configuration paths
-provisioning info paths
-
-# Reset and reconfigure
-provisioning setup reset --confirm
-provisioning setup system --interactive
-
-

Issue: Health check shows warnings

-

Solution:

-
# Run detailed health check
-nu scripts/health-check.nu
-
-# Check specific service
-provisioning platform status
-
-# Restart services if needed
-provisioning platform restart
-
-

Issue: Deployment fails

-

Solution:

-
# Dry-run to see what would happen
-provisioning server create --check
-
-# Check logs
-provisioning logs tail -f
-
-# Verify provider credentials
-provisioning setup validate provider upcloud
-
-
-

Performance Baselines

-

Expected performance on modern hardware (4+ cores, 8+ GB RAM):

-
- - - - - -
OperationExpected TimeMaximum Time
Setup system2-5 seconds10 seconds
Health check< 3 seconds5 seconds
Configuration validation< 500 ms1 second
Server creation< 30 seconds60 seconds
Workspace switch< 100 ms500 ms
-
-
-

Support and Escalation

-

Level 1 Support (Team)

-
    -
  • Review troubleshooting guide
  • -
  • Check system health
  • -
  • Review logs
  • -
  • Restart services if needed
  • -
-

Level 2 Support (Engineering)

-
    -
  • Review configuration
  • -
  • Analyze performance metrics
  • -
  • Check resource constraints
  • -
  • Plan optimization
  • -
-

Level 3 Support (Development)

-
    -
  • Code-level debugging
  • -
  • Feature requests
  • -
  • Bug fixes
  • -
  • Architecture changes
  • -
-
-

Rollback Procedure

-

If issues occur post-deployment:

-
# 1. Take backup of current configuration
-provisioning setup backup --path rollback-$(date +%Y%m%d-%H%M%S).tar.gz
-
-# 2. Stop running deployments
-provisioning workflow stop --all
-
-# 3. Restore from previous backup
-provisioning setup restore --path <previous-backup>
-
-# 4. Verify restoration
-provisioning setup validate --verbose
-
-# 5. Run health check
-nu scripts/health-check.nu
-
-
-

Success Criteria

-

System is production-ready when:

-
    -
  • ✅ All tests passing
  • -
  • ✅ Health checks show no critical issues
  • -
  • ✅ Configuration validates successfully
  • -
  • ✅ Team trained and ready
  • -
  • ✅ Documentation complete
  • -
  • ✅ Backup and recovery tested
  • -
  • ✅ Monitoring configured
  • -
  • ✅ Support procedures established
  • -
-
-

Sign-Off

-
    -
  • -Technical Lead: System validated and tested
  • -
  • -Operations: Infrastructure ready and monitored
  • -
  • -Security: Security controls reviewed and approved
  • -
  • -Management: Deployment approved for production
  • -
-
-

Verification Date: 2025-12-09 -Status: ✅ APPROVED FOR PRODUCTION DEPLOYMENT -Next Review: 2025-12-16 (Weekly)

-

Break-Glass Emergency Access - Training Guide

-

Version: 1.0.0 -Date: 2025-10-08 -Audience: Platform Administrators, SREs, Security Team -Training Duration: 45-60 minutes -Certification: Required annually

-
-

🚨 What is Break-Glass

-

Break-glass is an emergency access procedure that allows authorized personnel to bypass normal security controls during critical incidents (for -example, production outages, security breaches, data loss).

-

Key Principles

-
    -
  1. Last Resort Only: Use only when normal access is insufficient
  2. -
  3. Multi-Party Approval: Requires 2+ approvers from different teams
  4. -
  5. Time-Limited: Maximum 4 hours, auto-revokes
  6. -
  7. Enhanced Audit: 7-year retention, immutable logs
  8. -
  9. Real-Time Alerts: Security team notified immediately
  10. -
-
-

📋 Table of Contents

-
    -
  1. When to Use Break-Glass
  2. -
  3. When NOT to Use
  4. -
  5. Roles & Responsibilities
  6. -
  7. Break-Glass Workflow
  8. -
  9. Using the System
  10. -
  11. Examples
  12. -
  13. Auditing & Compliance
  14. -
  15. Post-Incident Review
  16. -
  17. FAQ
  18. -
  19. Emergency Contacts
  20. -
-
-

When to Use Break-Glass

-

✅ Valid Emergency Scenarios

-
- - - - - -
ScenarioExampleUrgency
Production OutageDatabase cluster unresponsive, affecting all usersCritical
Security IncidentActive breach detected, need immediate containmentCritical
Data LossAccidental deletion of critical data, need restoreHigh
System FailureInfrastructure failure requiring emergency fixesHigh
Locked OutNormal admin accounts compromised, need recoveryHigh
-
-

Criteria Checklist

-

Use break-glass if ALL apply:

-
    -
  • -Production systems affected OR security incident
  • -
  • -Normal access insufficient OR unavailable
  • -
  • -Immediate action required (cannot wait for approval process)
  • -
  • -Clear justification for emergency access
  • -
  • -Incident properly documented
  • -
-
-

When NOT to Use

-

❌ Invalid Scenarios (Do NOT Use Break-Glass)

-
- - - - - -
ScenarioWhy NotAlternative
Forgot passwordNot an emergencyUse password reset
Routine maintenanceCan be scheduledUse normal change process
ConvenienceNormal process “too slow”Follow standard approval
Deadline pressureBusiness pressure ≠ emergencyPlan ahead
TestingWant to test emergency accessUse dev environment
-
-

Consequences of Misuse

-
    -
  • Immediate suspension of break-glass privileges
  • -
  • Security team investigation
  • -
  • Disciplinary action (up to termination)
  • -
  • All actions audited and reviewed
  • -
-
-

Roles & Responsibilities

-

Requester

-

Who: Platform Admin, SRE on-call, Security Officer -Responsibilities:

-
    -
  • Assess if situation warrants emergency access
  • -
  • Provide clear justification and reason
  • -
  • Document incident timeline
  • -
  • Use access only for stated purpose
  • -
  • Revoke access immediately after resolution
  • -
-

Approvers

-

Who: 2+ from different teams (Security, Platform, Engineering Leadership) -Responsibilities:

-
    -
  • Verify emergency is genuine
  • -
  • Assess risk of granting access
  • -
  • Review requester’s justification
  • -
  • Monitor usage during active session
  • -
  • Participate in post-incident review
  • -
-

Security Team

-

Who: Security Operations team -Responsibilities:

-
    -
  • Monitor all break-glass activations (real-time)
  • -
  • Review audit logs during session
  • -
  • Alert on suspicious activity
  • -
  • Lead post-incident review
  • -
  • Update policies based on learnings
  • -
-
-

Break-Glass Workflow

-

Phase 1: Request (5 minutes)

-
┌─────────────────────────────────────────────────────────┐
-│ 1. Requester submits emergency access request          │
-│    - Reason: "Production database cluster down"        │
-│    - Justification: "Need direct SSH to diagnose"      │
-│    - Duration: 2 hours                                  │
-│    - Resources: ["database/*"]                          │
-└─────────────────────────────────────────────────────────┘
-                          ↓
-┌─────────────────────────────────────────────────────────┐
-│ 2. System creates request ID: BG-20251008-001          │
-│    - Sends notifications to approver pool               │
-│    - Starts approval timeout (1 hour)                   │
-└─────────────────────────────────────────────────────────┘
-
-

Phase 2: Approval (10-15 minutes)

-
┌─────────────────────────────────────────────────────────┐
-│ 3. First approver reviews request                      │
-│    - Verifies emergency is real                         │
-│    - Checks requester's justification                   │
-│    - Approves with reason                               │
-└─────────────────────────────────────────────────────────┘
-                          ↓
-┌─────────────────────────────────────────────────────────┐
-│ 4. Second approver (different team) reviews             │
-│    - Independent verification                            │
-│    - Approves with reason                               │
-└─────────────────────────────────────────────────────────┘
-                          ↓
-┌─────────────────────────────────────────────────────────┐
-│ 5. System validates approvals                           │
-│    - ✓ Min 2 approvers                                  │
-│    - ✓ Different teams                                  │
-│    - ✓ Within approval window                           │
-│    - Status → APPROVED                                  │
-└─────────────────────────────────────────────────────────┘
-
-

Phase 3: Activation (1-2 minutes)

-
┌─────────────────────────────────────────────────────────┐
-│ 6. Requester activates approved session                │
-│    - Receives emergency JWT token                       │
-│    - Token valid for 2 hours (or requested duration)    │
-│    - All actions logged with session ID                 │
-└─────────────────────────────────────────────────────────┘
-                          ↓
-┌─────────────────────────────────────────────────────────┐
-│ 7. Security team notified                               │
-│    - Real-time alert: "Break-glass activated"           │
-│    - Monitoring dashboard shows active session          │
-└─────────────────────────────────────────────────────────┘
-
-

Phase 4: Usage (Variable)

-
┌─────────────────────────────────────────────────────────┐
-│ 8. Requester performs emergency actions                │
-│    - Uses emergency token for access                    │
-│    - Every action audited                               │
-│    - Security team monitors in real-time                │
-└─────────────────────────────────────────────────────────┘
-                          ↓
-┌─────────────────────────────────────────────────────────┐
-│ 9. Background monitoring                                │
-│    - Checks for suspicious activity                     │
-│    - Enforces inactivity timeout (30 min)               │
-│    - Alerts on unusual patterns                         │
-└─────────────────────────────────────────────────────────┘
-
-

Phase 5: Revocation (Immediate)

-
┌─────────────────────────────────────────────────────────┐
-│ 10. Session ends (one of):                             │
-│     - Manual revocation by requester                    │
-│     - Expiration (max 4 hours)                          │
-│     - Inactivity timeout (30 minutes)                   │
-│     - Security team revocation                          │
-└─────────────────────────────────────────────────────────┘
-                          ↓
-┌─────────────────────────────────────────────────────────┐
-│ 11. System audit                                        │
-│     - All actions logged (7-year retention)             │
-│     - Incident report generated                         │
-│     - Post-incident review scheduled                    │
-└─────────────────────────────────────────────────────────┘
-
-
-

Using the System

-

CLI Commands

-

1. Request Emergency Access

-
provisioning break-glass request \
-  "Production database cluster unresponsive" \
-  --justification "Need direct SSH access to diagnose PostgreSQL failure. \
-  Monitoring shows cluster down. Application offline affecting 10,000+ users." \
-  --resources '["database/*", "server/db-*"]' \
-  --duration 2hr
-
-# Output:
-# ✓ Break-glass request created
-# Request ID: BG-20251008-001
-# Status: Pending Approval
-# Approvers needed: 2
-# Expires: 2025-10-08 11:30:00 (1 hour)
-#
-# Notifications sent to:
-# - security-team@example.com
-# - platform-admin@example.com
-
-

2. Approve Request (Approver)

-
# First approver (Security team)
-provisioning break-glass approve BG-20251008-001 \
-  --reason "Emergency verified via incident INC-2025-234. Database cluster confirmed down, affecting production."
-
-# Output:
-# ✓ Approval granted
-# Approver: alice@example.com (Security Team)
-# Approvals: 1/2
-# Status: Pending (need 1 more approval)
-
-
# Second approver (Platform team)
-provisioning break-glass approve BG-20251008-001 \
-  --reason "Confirmed with monitoring. PostgreSQL master node unreachable. Emergency access justified."
-
-# Output:
-# ✓ Approval granted
-# Approver: bob@example.com (Platform Team)
-# Approvals: 2/2
-# Status: APPROVED
-#
-# Requester can now activate session
-
-

3. Activate Session

-
provisioning break-glass activate BG-20251008-001
-
-# Output:
-# ✓ Emergency session activated
-# Session ID: BGS-20251008-001
-# Token: eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9...
-# Expires: 2025-10-08 12:30:00 (2 hours)
-# Max inactivity: 30 minutes
-#
-# ⚠️  WARNING ⚠️
-# - All actions are logged and monitored
-# - Security team has been notified
-# - Session will auto-revoke after 2 hours
-# - Use ONLY for stated emergency purpose
-#
-# Export token:
-export EMERGENCY_TOKEN="eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9..."
-
-

4. Use Emergency Access

-
# SSH to database server
-provisioning ssh connect db-master-01 \
-  --token $EMERGENCY_TOKEN
-
-# Execute emergency commands
-sudo systemctl status postgresql
-sudo tail -f /var/log/postgresql/postgresql.log
-
-# Diagnose issue...
-# Fix issue...
-
-

5. Revoke Session

-
# When done, immediately revoke
-provisioning break-glass revoke BGS-20251008-001 \
-  --reason "Database cluster restored. PostgreSQL master node restarted successfully. All services online."
-
-# Output:
-# ✓ Emergency session revoked
-# Duration: 47 minutes
-# Actions performed: 23
-# Audit log: /var/log/provisioning/break-glass/BGS-20251008-001.json
-#
-# Post-incident review scheduled: 2025-10-09 10:00am
-
-

Web UI (Control Center)

-

Request Flow

-
    -
  1. Navigate: Control Center → Security → Break-Glass
  2. -
  3. Click: “Request Emergency Access”
  4. -
  5. Fill Form: -
      -
    • Reason: “Production database cluster down”
    • -
    • Justification: (detailed description)
    • -
    • Duration: 2 hours
    • -
    • Resources: Select from dropdown or wildcard
    • -
    -
  6. -
  7. Submit: Request sent to approvers
  8. -
-

Approver Flow

-
    -
  1. Receive: Email/Slack notification
  2. -
  3. Navigate: Control Center → Break-Glass → Pending Requests
  4. -
  5. Review: Request details, reason, justification
  6. -
  7. Decision: Approve or Deny
  8. -
  9. Reason: Provide approval/denial reason
  10. -
-

Monitor Active Sessions

-
    -
  1. Navigate: Control Center → Security → Break-Glass → Active Sessions
  2. -
  3. View: Real-time dashboard of active sessions -
      -
    • Who, What, When, How long
    • -
    • Actions performed (live)
    • -
    • Inactivity timer
    • -
    -
  4. -
  5. Revoke: Emergency revoke button (if needed)
  6. -
-
-

Examples

-

Example 1: Production Database Outage

-

Scenario: PostgreSQL cluster unresponsive, affecting all users

-

Request:

-
provisioning break-glass request \
-  "Production PostgreSQL cluster completely unresponsive" \
-  --justification "Database cluster (3 nodes) not responding. \
-  All services offline, 10,000+ users affected. Need SSH to diagnose. \
-  Monitoring shows all nodes down. Last state: replication failure during backup." \
-  --resources '["database/*", "server/db-prod-*"]' \
-  --duration 2hr
-
-

Approval 1 (Security):

-
-

“Verified incident INC-2025-234. Database monitoring confirms cluster down. Application completely offline. Emergency justified.”

-
-

Approval 2 (Platform):

-
-

“Confirmed. PostgreSQL master and replicas unreachable. On-call SRE needs immediate access. Approved.”

-
-

Actions Taken:

-
    -
  1. SSH to db-prod-01, db-prod-02, db-prod-03
  2. -
  3. Check PostgreSQL status: systemctl status postgresql
  4. -
  5. Review logs: /var/log/postgresql/
  6. -
  7. Diagnose: Disk full on master node
  8. -
  9. Fix: Clear old WAL files, restart PostgreSQL
  10. -
  11. Verify: Cluster restored, replication working
  12. -
  13. Revoke access
  14. -
-

Outcome: Cluster restored in 47 minutes. Root cause: Backup retention not working.

-
-

Example 2: Security Incident

-

Scenario: Suspicious activity detected, need immediate containment

-

Request:

-
provisioning break-glass request \
-  "Active security breach detected - need immediate containment" \
-  --justification "IDS alerts show unauthorized access from IP 203.0.113.42 to API. \
-  Multiple failed sudo attempts. Isolate affected servers and investigate. \
-  Potential data exfiltration in progress." \
-  --resources '["server/api-prod-*", "firewall/*", "network/*"]' \
-  --duration 4hr
-
-

Approval 1 (Security):

-
-

“Security incident SI-2025-089 confirmed. IDS shows sustained attack from external IP. Immediate containment required. Approved.”

-
-

Approval 2 (Engineering Director):

-
-

“Concur with security assessment. Production impact acceptable vs risk of data breach. Approved.”

-
-

Actions Taken:

-
    -
  1. Firewall block on 203.0.113.42
  2. -
  3. Isolate affected API servers
  4. -
  5. Snapshot servers for forensics
  6. -
  7. Review access logs
  8. -
  9. Identify compromised service account
  10. -
  11. Rotate credentials
  12. -
  13. Restore from clean backup
  14. -
  15. Re-enable servers with patched vulnerability
  16. -
-

Outcome: Breach contained in 3h 15 min. No data loss. Vulnerability patched across fleet.

-
-

Example 3: Accidental Data Deletion

-

Scenario: Critical production data accidentally deleted

-

Request:

-
provisioning break-glass request \
-  "Critical customer data accidentally deleted from production" \
-  --justification "Database migration script ran against production instead of staging. \
-  50,000+ customer records deleted. Need immediate restore from backup. \
-  Normal restore requires 4-6 hours for approval. Time-critical window." \
-  --resources '["database/customers", "backup/*"]' \
-  --duration 3hr
-
-

Approval 1 (Platform):

-
-

“Verified data deletion in production database. 50,284 records deleted at 10:42am. Backup available from 10:00am (42 minutes ago). Time-critical -restore needed. Approved.”

-
-

Approval 2 (Security):

-
-

“Risk assessment: Restore from trusted backup less risky than data loss. Emergency justified. Ensure post-incident review of deployment process. -Approved.”

-
-

Actions Taken:

-
    -
  1. Stop application writes to affected tables
  2. -
  3. Identify latest good backup (10:00am)
  4. -
  5. Restore deleted records from backup
  6. -
  7. Verify data integrity
  8. -
  9. Compare record counts
  10. -
  11. Re-enable application writes
  12. -
  13. Notify affected users (if any noticed)
  14. -
-

Outcome: Data restored in 1h 38 min. Only 42 minutes of data lost (from backup to deletion). Zero customer impact.

-
-

Auditing & Compliance

-

What is Logged

-

Every break-glass session logs:

-
    -
  1. -

    Request Details:

    -
      -
    • Requester identity
    • -
    • Reason and justification
    • -
    • Requested resources
    • -
    • Requested duration
    • -
    • Timestamp
    • -
    -
  2. -
  3. -

    Approval Process:

    -
      -
    • Each approver identity
    • -
    • Approval/denial reason
    • -
    • Approval timestamp
    • -
    • Team affiliation
    • -
    -
  4. -
  5. -

    Session Activity:

    -
      -
    • Activation timestamp
    • -
    • Every action performed
    • -
    • Resources accessed
    • -
    • Commands executed
    • -
    • Inactivity periods
    • -
    -
  6. -
  7. -

    Revocation:

    -
      -
    • Revocation reason
    • -
    • Who revoked (system or manual)
    • -
    • Total duration
    • -
    • Final status
    • -
    -
  8. -
-

Retention

-
    -
  • Break-glass logs: 7 years (immutable)
  • -
  • Cannot be deleted: Only anonymized for GDPR
  • -
  • Exported to SIEM: Real-time
  • -
-

Compliance Reports

-
# Generate break-glass usage report
-provisioning break-glass audit \
-  --from "2025-01-01" \
-  --to "2025-12-31" \
-  --format pdf \
-  --output break-glass-2025-report.pdf
-
-# Report includes:
-# - Total break-glass activations
-# - Average duration
-# - Most common reasons
-# - Approval times
-# - Incidents resolved
-# - Misuse incidents (if any)
-
-
-

Post-Incident Review

-

Within 24 Hours

-

Required attendees:

-
    -
  • Requester
  • -
  • Approvers
  • -
  • Security team
  • -
  • Incident commander
  • -
-

Agenda:

-
    -
  1. Timeline Review: What happened, when
  2. -
  3. Actions Taken: What was done with emergency access
  4. -
  5. Outcome: Was issue resolved? Any side effects?
  6. -
  7. Process: Did break-glass work as intended?
  8. -
  9. Lessons Learned: What can be improved?
  10. -
-

Review Checklist

-
    -
  • -Was break-glass appropriate for this incident?
  • -
  • -Were approvals granted timely?
  • -
  • -Was access used only for stated purpose?
  • -
  • -Were any security policies violated?
  • -
  • -Could incident be prevented in future?
  • -
  • -Do we need policy updates?
  • -
  • -Do we need system changes?
  • -
-

Output

-

Incident Report:

-
# Break-Glass Incident Report: BG-20251008-001
-
-**Incident**: Production database cluster outage
-**Duration**: 47 minutes
-**Impact**: 10,000+ users, complete service outage
-
-## Timeline
-- 10:15: Incident detected
-- 10:17: Break-glass requested
-- 10:25: Approved (2/2)
-- 10:27: Activated
-- 11:02: Database restored
-- 11:04: Session revoked
-
-## Actions Taken
-1. SSH access to database servers
-2. Diagnosed disk full issue
-3. Cleared old WAL files
-4. Restarted PostgreSQL
-5. Verified replication
-
-## Root Cause
-Backup retention job failed silently for 2 weeks, causing WAL files to accumulate until disk full.
-
-## Prevention
-- ✅ Add disk space monitoring alerts
-- ✅ Fix backup retention job
-- ✅ Test recovery procedures
-- ✅ Implement WAL archiving to S3
-
-## Break-Glass Assessment
-- ✓ Appropriate use
-- ✓ Timely approvals
-- ✓ No policy violations
-- ✓ Access revoked promptly
-
-
-

FAQ

-

Q: How quickly can break-glass be activated

-

A: Typically 15-20 minutes:

-
    -
  • 5 min: Request submission
  • -
  • 10 min: Approvals (2 people)
  • -
  • 2 min: Activation
  • -
-

In extreme emergencies, approvers can be on standby.

-

Q: Can I use break-glass for scheduled maintenance

-

A: No. Break-glass is for emergencies only. Schedule maintenance through normal change process.

-

Q: What if I can’t get 2 approvers

-

A: System requires 2 approvers from different teams. If unavailable:

-
    -
  1. Escalate to on-call manager
  2. -
  3. Contact security team directly
  4. -
  5. Use emergency contact list
  6. -
-

Q: Can approvers be from the same team

-

A: No. System enforces team diversity to prevent collusion.

-

Q: What if security team revokes my session

-

A: Security team can revoke for:

-
    -
  • Suspicious activity
  • -
  • Policy violation
  • -
  • Incident resolved
  • -
  • Misuse detected
  • -
-

You’ll receive immediate notification. Contact security team for details.

-

Q: Can I extend an active session

-

A: No. Maximum duration is 4 hours. If you need more time, submit a new request with updated justification.

-

Q: What happens if I forget to revoke

-

A: Session auto-revokes after:

-
    -
  • Maximum duration (4 hours), OR
  • -
  • Inactivity timeout (30 minutes)
  • -
-

Always manually revoke when done.

-

Q: Is break-glass monitored

-

A: Yes. Security team monitors in real-time:

-
    -
  • Session activation alerts
  • -
  • Action logging
  • -
  • Suspicious activity detection
  • -
  • Compliance verification
  • -
-

Q: Can I practice break-glass

-

A: Yes, in development environment only:

-
PROVISIONING_ENV=dev provisioning break-glass request "Test emergency access procedure"
-
-

Never practice in staging or production.

-
-

Emergency Contacts

-

During Incident

-
- - - -
RoleContactResponse Time
Security On-Call+1-555-SECURITY5 minutes
Platform On-Call+1-555-PLATFORM5 minutes
Engineering Director+1-555-ENG-DIR15 minutes
-
-

Escalation Path

-
    -
  1. L1: On-call SRE
  2. -
  3. L2: Platform team lead
  4. -
  5. L3: Engineering manager
  6. -
  7. L4: Director of Engineering
  8. -
  9. L5: CTO
  10. -
-

Communication Channels

-
    -
  • Incident Slack: #incidents
  • -
  • Security Slack: #security-alerts
  • -
  • Email: security-team@example.com
  • -
  • PagerDuty: Break-glass policy
  • -
-
-

Training Certification

-

I certify that I have:

-
    -
  • -Read and understood this training guide
  • -
  • -Understand when to use (and not use) break-glass
  • -
  • -Know the approval workflow
  • -
  • -Can use the CLI commands
  • -
  • -Understand auditing and compliance requirements
  • -
  • -Will follow post-incident review process
  • -
-

Signature: _________________________ -Date: _________________________ -Next Training Due: _________________________ (1 year)

-
-

Version: 1.0.0 -Maintained By: Security Team -Last Updated: 2025-10-08 -Next Review: 2026-10-08

-

Cedar Policies Production Guide

-

Version: 1.0.0 -Date: 2025-10-08 -Audience: Platform Administrators, Security Teams -Prerequisites: Understanding of Cedar policy language, Provisioning platform architecture

-
-

Table of Contents

-
    -
  1. Introduction
  2. -
  3. Cedar Policy Basics
  4. -
  5. Production Policy Strategy
  6. -
  7. Policy Templates
  8. -
  9. Policy Development Workflow
  10. -
  11. Testing Policies
  12. -
  13. Deployment
  14. -
  15. Monitoring & Auditing
  16. -
  17. Troubleshooting
  18. -
  19. Best Practices
  20. -
-
-

Introduction

-

Cedar policies control who can do what in the Provisioning platform. This guide helps you create, test, and deploy production-ready Cedar policies -that balance security with operational efficiency.

-

Why Cedar

-
    -
  • Fine-grained: Control access at resource + action level
  • -
  • Context-aware: Decisions based on MFA, IP, time, approvals
  • -
  • Auditable: Every decision is logged with policy ID
  • -
  • Hot-reload: Update policies without restarting services
  • -
  • Type-safe: Schema validation prevents errors
  • -
-
-

Cedar Policy Basics

-

Core Concepts

-
permit (
-  principal,    # Who (user, team, role)
-  action,       # What (create, delete, deploy)
-  resource      # Where (server, cluster, environment)
-) when {
-  condition     # Context (MFA, IP, time)
-};
-
-

Entities

-
- - - - - -
TypeExamplesDescription
UserUser::"alice"Individual users
TeamTeam::"platform-admin"User groups
RoleRole::"Admin"Permission levels
ResourceServer::"web-01"Infrastructure resources
EnvironmentEnvironment::"production"Deployment targets
-
-

Actions

-
- - - - -
CategoryActions
Readread, list
Writecreate, update, delete
Deploydeploy, rollback
Adminssh, execute, admin
-
-
-

Production Policy Strategy

-

Security Levels

-

Level 1: Development (Permissive)

-
// Developers have full access to dev environment
-permit (
-  principal in Team::"developers",
-  action,
-  resource in Environment::"development"
-);
-
-

Level 2: Staging (MFA Required)

-
// All operations require MFA
-permit (
-  principal in Team::"developers",
-  action,
-  resource in Environment::"staging"
-) when {
-  context.mfa_verified == true
-};
-
-

Level 3: Production (MFA + Approval)

-
// Deployments require MFA + approval
-permit (
-  principal in Team::"platform-admin",
-  action in [Action::"deploy", Action::"delete"],
-  resource in Environment::"production"
-) when {
-  context.mfa_verified == true &&
-  context has approval_id &&
-  context.approval_id.startsWith("APPROVAL-")
-};
-
-

Level 4: Critical (Break-Glass Only)

-
// Only emergency access
-permit (
-  principal,
-  action,
-  resource in Resource::"production-database"
-) when {
-  context.emergency_access == true &&
-  context.session_approved == true
-};
-
-
-

Policy Templates

-

1. Role-Based Access Control (RBAC)

-
// Admin: Full access
-permit (
-  principal in Role::"Admin",
-  action,
-  resource
-);
-
-// Operator: Server management + read clusters
-permit (
-  principal in Role::"Operator",
-  action in [
-    Action::"create",
-    Action::"update",
-    Action::"delete"
-  ],
-  resource is Server
-);
-
-permit (
-  principal in Role::"Operator",
-  action in [Action::"read", Action::"list"],
-  resource is Cluster
-);
-
-// Viewer: Read-only everywhere
-permit (
-  principal in Role::"Viewer",
-  action in [Action::"read", Action::"list"],
-  resource
-);
-
-// Auditor: Read audit logs only
-permit (
-  principal in Role::"Auditor",
-  action in [Action::"read", Action::"list"],
-  resource is AuditLog
-);
-
-

2. Team-Based Policies

-
// Platform team: Infrastructure management
-permit (
-  principal in Team::"platform",
-  action in [
-    Action::"create",
-    Action::"update",
-    Action::"delete",
-    Action::"deploy"
-  ],
-  resource in [Server, Cluster, Taskserv]
-);
-
-// Security team: Access control + audit
-permit (
-  principal in Team::"security",
-  action,
-  resource in [User, Role, AuditLog, BreakGlass]
-);
-
-// DevOps team: Application deployments
-permit (
-  principal in Team::"devops",
-  action == Action::"deploy",
-  resource in Environment::"production"
-) when {
-  context.mfa_verified == true &&
-  context.has_approval == true
-};
-
-

3. Time-Based Restrictions

-
// Deployments only during business hours
-permit (
-  principal,
-  action == Action::"deploy",
-  resource in Environment::"production"
-) when {
-  context.time.hour >= 9 &&
-  context.time.hour <= 17 &&
-  context.time.weekday in ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday"]
-};
-
-// Maintenance window
-permit (
-  principal in Team::"platform",
-  action,
-  resource
-) when {
-  context.maintenance_window == true
-};
-
-

4. IP-Based Restrictions

-
// Production access only from office network
-permit (
-  principal,
-  action,
-  resource in Environment::"production"
-) when {
-  context.ip_address.isInRange("10.0.0.0/8") ||
-  context.ip_address.isInRange("192.168.1.0/24")
-};
-
-// VPN access for remote work
-permit (
-  principal,
-  action,
-  resource in Environment::"production"
-) when {
-  context.vpn_connected == true &&
-  context.mfa_verified == true
-};
-
-

5. Resource-Specific Policies

-
// Database servers: Extra protection
-forbid (
-  principal,
-  action == Action::"delete",
-  resource in Resource::"database-*"
-) unless {
-  context.emergency_access == true
-};
-
-// Critical clusters: Require multiple approvals
-permit (
-  principal,
-  action in [Action::"update", Action::"delete"],
-  resource in Resource::"k8s-production-*"
-) when {
-  context.approval_count >= 2 &&
-  context.mfa_verified == true
-};
-
-

6. Self-Service Policies

-
// Users can manage their own MFA devices
-permit (
-  principal,
-  action in [Action::"create", Action::"delete"],
-  resource is MfaDevice
-) when {
-  resource.owner == principal
-};
-
-// Users can view their own audit logs
-permit (
-  principal,
-  action == Action::"read",
-  resource is AuditLog
-) when {
-  resource.user_id == principal.id
-};
-
-
-

Policy Development Workflow

-

Step 1: Define Requirements

-

Document:

-
    -
  • Who needs access? (roles, teams, individuals)
  • -
  • To what resources? (servers, clusters, environments)
  • -
  • What actions? (read, write, deploy, delete)
  • -
  • Under what conditions? (MFA, IP, time, approvals)
  • -
-

Example Requirements Document:

-
# Requirement: Production Deployment
-
-**Who**: DevOps team members
-**What**: Deploy applications to production
-**When**: Business hours (9am-5pm Mon-Fri)
-**Conditions**:
-- MFA verified
-- Change request approved
-- From office network or VPN
-
-

Step 2: Write Policy

-
@id("prod-deploy-devops")
-@description("DevOps can deploy to production during business hours with approval")
-permit (
-  principal in Team::"devops",
-  action == Action::"deploy",
-  resource in Environment::"production"
-) when {
-  context.mfa_verified == true &&
-  context has approval_id &&
-  context.time.hour >= 9 &&
-  context.time.hour <= 17 &&
-  context.time.weekday in ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday"] &&
-  (context.ip_address.isInRange("10.0.0.0/8") || context.vpn_connected == true)
-};
-
-

Step 3: Validate Syntax

-
# Use Cedar CLI to validate
-cedar validate \
-  --policies provisioning/config/cedar-policies/production.cedar \
-  --schema provisioning/config/cedar-policies/schema.cedar
-
-# Expected output: ✓ Policy is valid
-
-

Step 4: Test in Development

-
# Deploy to development environment first
-cp production.cedar provisioning/config/cedar-policies/development.cedar
-
-# Restart orchestrator to load new policies
-systemctl restart provisioning-orchestrator
-
-# Test with real requests
-provisioning server create test-server --check
-
-

Step 5: Review & Approve

-

Review Checklist:

-
    -
  • -Policy syntax valid
  • -
  • -Policy ID unique
  • -
  • -Description clear
  • -
  • -Conditions appropriate for security level
  • -
  • -Tested in development
  • -
  • -Reviewed by security team
  • -
  • -Documented in change log
  • -
-

Step 6: Deploy to Production

-
# Backup current policies
-cp provisioning/config/cedar-policies/production.cedar \
-   provisioning/config/cedar-policies/production.cedar.backup.$(date +%Y%m%d)
-
-# Deploy new policy
-cp new-production.cedar provisioning/config/cedar-policies/production.cedar
-
-# Hot reload (no restart needed)
-provisioning cedar reload
-
-# Verify loaded
-provisioning cedar list
-
-
-

Testing Policies

-

Unit Testing

-

Create test cases for each policy:

-
# tests/cedar/prod-deploy-devops.yaml
-policy_id: prod-deploy-devops
-
-test_cases:
-  - name: "DevOps can deploy with approval and MFA"
-    principal: { type: "Team", id: "devops" }
-    action: "deploy"
-    resource: { type: "Environment", id: "production" }
-    context:
-      mfa_verified: true
-      approval_id: "APPROVAL-123"
-      time: { hour: 10, weekday: "Monday" }
-      ip_address: "10.0.1.5"
-    expected: Allow
-
-  - name: "DevOps cannot deploy without MFA"
-    principal: { type: "Team", id: "devops" }
-    action: "deploy"
-    resource: { type: "Environment", id: "production" }
-    context:
-      mfa_verified: false
-      approval_id: "APPROVAL-123"
-      time: { hour: 10, weekday: "Monday" }
-    expected: Deny
-
-  - name: "DevOps cannot deploy outside business hours"
-    principal: { type: "Team", id: "devops" }
-    action: "deploy"
-    resource: { type: "Environment", id: "production" }
-    context:
-      mfa_verified: true
-      approval_id: "APPROVAL-123"
-      time: { hour: 22, weekday: "Monday" }
-    expected: Deny
-
-

Run tests:

-
provisioning cedar test tests/cedar/
-
-

Integration Testing

-

Test with real API calls:

-
# Setup test user
-export TEST_USER="alice"
-export TEST_TOKEN=$(provisioning login --user $TEST_USER --output token)
-
-# Test allowed action
-curl -H "Authorization: Bearer $TEST_TOKEN" \
-  http://localhost:9090/api/v1/servers \
-  -X POST -d '{"name": "test-server"}'
-
-# Expected: 200 OK
-
-# Test denied action (without MFA)
-curl -H "Authorization: Bearer $TEST_TOKEN" \
-  http://localhost:9090/api/v1/servers/prod-server-01 \
-  -X DELETE
-
-# Expected: 403 Forbidden (MFA required)
-
-

Load Testing

-

Verify policy evaluation performance:

-
# Generate load
-provisioning cedar bench \
-  --policies production.cedar \
-  --requests 10000 \
-  --concurrency 100
-
-# Expected: <10 ms per evaluation
-
-
-

Deployment

-

Development → Staging → Production

-
#!/bin/bash
-# deploy-policies.sh
-
-ENVIRONMENT=$1  # dev, staging, prod
-
-# Validate policies
-cedar validate \
-  --policies provisioning/config/cedar-policies/$ENVIRONMENT.cedar \
-  --schema provisioning/config/cedar-policies/schema.cedar
-
-if [ $? -ne 0 ]; then
-  echo "❌ Policy validation failed"
-  exit 1
-fi
-
-# Backup current policies
-BACKUP_DIR="provisioning/config/cedar-policies/backups/$ENVIRONMENT"
-mkdir -p $BACKUP_DIR
-cp provisioning/config/cedar-policies/$ENVIRONMENT.cedar \
-   $BACKUP_DIR/$ENVIRONMENT.cedar.$(date +%Y%m%d-%H%M%S)
-
-# Deploy new policies
-scp provisioning/config/cedar-policies/$ENVIRONMENT.cedar \
-    $ENVIRONMENT-orchestrator:/etc/provisioning/cedar-policies/production.cedar
-
-# Hot reload on remote
-ssh $ENVIRONMENT-orchestrator "provisioning cedar reload"
-
-echo "✅ Policies deployed to $ENVIRONMENT"
-
-

Rollback Procedure

-
# List backups
-ls -ltr provisioning/config/cedar-policies/backups/production/
-
-# Restore previous version
-cp provisioning/config/cedar-policies/backups/production/production.cedar.20251008-143000 \
-   provisioning/config/cedar-policies/production.cedar
-
-# Reload
-provisioning cedar reload
-
-# Verify
-provisioning cedar list
-
-
-

Monitoring & Auditing

-

Monitor Authorization Decisions

-
# Query denied requests (last 24 hours)
-provisioning audit query \
-  --action authorization_denied \
-  --from "24h" \
-  --out table
-
-# Expected output:
-# ┌─────────┬────────┬──────────┬────────┬────────────────┐
-# │ Time    │ User   │ Action   │ Resour │ Reason         │
-# ├─────────┼────────┼──────────┼────────┼────────────────┤
-# │ 10:15am │ bob    │ deploy   │ prod   │ MFA not verif  │
-# │ 11:30am │ alice  │ delete   │ db-01  │ No approval    │
-# └─────────┴────────┴──────────┴────────┴────────────────┘
-
-

Alert on Suspicious Activity

-
# alerts/cedar-policies.yaml
-alerts:
-  - name: "High Denial Rate"
-    query: "authorization_denied"
-    threshold: 10
-    window: "5m"
-    action: "notify:security-team"
-
-  - name: "Policy Bypass Attempt"
-    query: "action:deploy AND result:denied"
-    user: "critical-users"
-    action: "page:oncall"
-
-

Policy Usage Statistics

-
# Which policies are most used?
-provisioning cedar stats --top 10
-
-# Example output:
-# Policy ID              | Uses  | Allows | Denies
-# ---------------------- | ------- | -------- | -------
-# prod-deploy-devops    | 1,234 | 1,100  | 134
-# admin-full-access     |   892 |   892  | 0
-# viewer-read-only      | 5,421 | 5,421  | 0
-
-
-

Troubleshooting

-

Policy Not Applying

-

Symptom: Policy changes not taking effect

-

Solutions:

-
    -
  1. -

    Verify hot reload:

    -
    provisioning cedar reload
    -provisioning cedar list  # Should show updated timestamp
    -
    -
  2. -
  3. -

    Check orchestrator logs:

    -
    journalctl -u provisioning-orchestrator -f | grep cedar
    -
    -
  4. -
  5. -

    Restart orchestrator:

    -
    systemctl restart provisioning-orchestrator
    -
    -
  6. -
-

Unexpected Denials

-

Symptom: User denied access when policy should allow

-

Debug:

-
# Enable debug mode
-export PROVISIONING_DEBUG=1
-
-# View authorization decision
-provisioning audit query \
-  --user alice \
-  --action deploy \
-  --from "1h" \
-  --out json | jq '.authorization'
-
-# Shows which policy evaluated, context used, reason for denial
-
-

Policy Conflicts

-

Symptom: Multiple policies match, unclear which applies

-

Resolution:

-
    -
  • Cedar uses deny-override: If any forbid matches, request denied
  • -
  • Use @priority annotations (higher number = higher priority)
  • -
  • Make policies more specific to avoid conflicts
  • -
-
@priority(100)
-permit (
-  principal in Role::"Admin",
-  action,
-  resource
-);
-
-@priority(50)
-forbid (
-  principal,
-  action == Action::"delete",
-  resource is Database
-);
-
-// Admin can do anything EXCEPT delete databases
-
-
-

Best Practices

-

1. Start Restrictive, Loosen Gradually

-
// ❌ BAD: Too permissive initially
-permit (principal, action, resource);
-
-// ✅ GOOD: Explicit allow, expand as needed
-permit (
-  principal in Role::"Admin",
-  action in [Action::"read", Action::"list"],
-  resource
-);
-
-

2. Use Annotations

-
@id("prod-deploy-mfa")
-@description("Production deployments require MFA verification")
-@owner("platform-team")
-@reviewed("2025-10-08")
-@expires("2026-10-08")
-permit (
-  principal in Team::"platform-admin",
-  action == Action::"deploy",
-  resource in Environment::"production"
-) when {
-  context.mfa_verified == true
-};
-
-

3. Principle of Least Privilege

-

Give users minimum permissions needed:

-
// ❌ BAD: Overly broad
-permit (principal in Team::"developers", action, resource);
-
-// ✅ GOOD: Specific permissions
-permit (
-  principal in Team::"developers",
-  action in [Action::"read", Action::"create", Action::"update"],
-  resource in Environment::"development"
-);
-
-

4. Document Context Requirements

-
// Context required for this policy:
-// - mfa_verified: boolean (from JWT claims)
-// - approval_id: string (from request header)
-// - ip_address: IpAddr (from connection)
-permit (
-  principal in Role::"Operator",
-  action == Action::"deploy",
-  resource in Environment::"production"
-) when {
-  context.mfa_verified == true &&
-  context has approval_id &&
-  context.ip_address.isInRange("10.0.0.0/8")
-};
-
-

5. Separate Policies by Concern

-

File organization:

-
cedar-policies/
-├── schema.cedar              # Entity/action definitions
-├── rbac.cedar                # Role-based policies
-├── teams.cedar               # Team-based policies
-├── time-restrictions.cedar   # Time-based policies
-├── ip-restrictions.cedar     # Network-based policies
-├── production.cedar          # Production-specific
-└── development.cedar         # Development-specific
-
-

6. Version Control

-
# Git commit each policy change
-git add provisioning/config/cedar-policies/production.cedar
-git commit -m "feat(cedar): Add MFA requirement for prod deployments
-
-- Require MFA for all production deployments
-- Applies to devops and platform-admin teams
-- Effective 2025-10-08
-
-Policy ID: prod-deploy-mfa
-Reviewed by: security-team
-Ticket: SEC-1234"
-
-git push
-
-

7. Regular Policy Audits

-

Quarterly review:

-
    -
  • -Remove unused policies
  • -
  • -Tighten overly permissive policies
  • -
  • -Update for new resources/actions
  • -
  • -Verify team memberships current
  • -
  • -Test break-glass procedures
  • -
-
-

Quick Reference

-

Common Policy Patterns

-
# Allow all
-permit (principal, action, resource);
-
-# Deny all
-forbid (principal, action, resource);
-
-# Role-based
-permit (principal in Role::"Admin", action, resource);
-
-# Team-based
-permit (principal in Team::"platform", action, resource);
-
-# Resource-based
-permit (principal, action, resource in Environment::"production");
-
-# Action-based
-permit (principal, action in [Action::"read", Action::"list"], resource);
-
-# Condition-based
-permit (principal, action, resource) when { context.mfa_verified == true };
-
-# Complex
-permit (
-  principal in Team::"devops",
-  action == Action::"deploy",
-  resource in Environment::"production"
-) when {
-  context.mfa_verified == true &&
-  context has approval_id &&
-  context.time.hour >= 9 &&
-  context.time.hour <= 17
-};
-
-

Useful Commands

-
# Validate policies
-provisioning cedar validate
-
-# Reload policies (hot reload)
-provisioning cedar reload
-
-# List active policies
-provisioning cedar list
-
-# Test policies
-provisioning cedar test tests/
-
-# Query denials
-provisioning audit query --action authorization_denied
-
-# Policy statistics
-provisioning cedar stats
-
-
-

Support

-
    -
  • Documentation: docs/architecture/CEDAR_AUTHORIZATION_IMPLEMENTATION.md
  • -
  • Policy Examples: provisioning/config/cedar-policies/
  • -
  • Issues: Report to platform-team
  • -
  • Emergency: Use break-glass procedure
  • -
-
-

Version: 1.0.0 -Maintained By: Platform Team -Last Updated: 2025-10-08

-

MFA Admin Setup Guide - Production Operations Manual

-

Document Version: 1.0.0 -Last Updated: 2025-10-08 -Target Audience: Platform Administrators, Security Team -Prerequisites: Control Center deployed, admin user created

-
-

📋 Table of Contents

-
    -
  1. Overview
  2. -
  3. MFA Requirements
  4. -
  5. Admin Enrollment Process
  6. -
  7. TOTP Setup (Authenticator Apps)
  8. -
  9. WebAuthn Setup (Hardware Keys)
  10. -
  11. Enforcing MFA via Cedar Policies
  12. -
  13. Backup Codes Management
  14. -
  15. Recovery Procedures
  16. -
  17. Troubleshooting
  18. -
  19. Best Practices
  20. -
  21. Audit and Compliance
  22. -
-
-

Overview

-

What is MFA

-

Multi-Factor Authentication (MFA) adds a second layer of security beyond passwords. Admins must provide:

-
    -
  1. Something they know: Password
  2. -
  3. Something they have: TOTP code (authenticator app) or WebAuthn device (YubiKey, Touch ID)
  4. -
-

Why MFA for Admins

-

Administrators have elevated privileges including:

-
    -
  • Server creation/deletion
  • -
  • Production deployments
  • -
  • Secret management
  • -
  • User management
  • -
  • Break-glass approval
  • -
-

MFA protects against:

-
    -
  • Password compromise (phishing, leaks, brute force)
  • -
  • Unauthorized access to critical systems
  • -
  • Compliance violations (SOC2, ISO 27001)
  • -
-

MFA Methods Supported

-
- - - -
MethodTypeExamplesRecommended For
TOTPSoftwareGoogle Authenticator, Authy, 1PasswordAll admins (primary)
WebAuthn/FIDO2HardwareYubiKey, Touch ID, Windows HelloHigh-security admins
Backup CodesOne-time10 single-use codesEmergency recovery
-
-
-

MFA Requirements

-

Mandatory MFA Enforcement

-

All administrators MUST enable MFA for:

-
    -
  • Production environment access
  • -
  • Server creation/deletion operations
  • -
  • Deployment to production clusters
  • -
  • Secret access (KMS, dynamic secrets)
  • -
  • Break-glass approval
  • -
  • User management operations
  • -
-

Grace Period

-
    -
  • Development: MFA optional (not recommended)
  • -
  • Staging: MFA recommended, not enforced
  • -
  • Production: MFA mandatory (enforced by Cedar policies)
  • -
-

Timeline for Rollout

-
Week 1-2: Pilot Program
-  ├─ Platform admins enable MFA
-  ├─ Document issues and refine process
-  └─ Create training materials
-
-Week 3-4: Full Deployment
-  ├─ All admins enable MFA
-  ├─ Cedar policies enforce MFA for production
-  └─ Monitor compliance
-
-Week 5+: Maintenance
-  ├─ Regular MFA device audits
-  ├─ Backup code rotation
-  └─ User support for MFA issues
-
-
-

Admin Enrollment Process

-

Step 1: Initial Login (Password Only)

-
# Login with username/password
-provisioning login --user admin@example.com --workspace production
-
-# Response (partial token, MFA not yet verified):
-{
-  "status": "mfa_required",
-  "partial_token": "eyJhbGci...",  # Limited access token
-  "message": "MFA enrollment required for production access"
-}
-
-

Partial token limitations:

-
    -
  • Cannot access production resources
  • -
  • Can only access MFA enrollment endpoints
  • -
  • Expires in 15 minutes
  • -
-

Step 2: Choose MFA Method

-
# Check available MFA methods
-provisioning mfa methods
-
-# Output:
-Available MFA Methods:
-  • TOTP (Authenticator apps) - Recommended for all users
-  • WebAuthn (Hardware keys) - Recommended for high-security roles
-  • Backup Codes - Emergency recovery only
-
-# Check current MFA status
-provisioning mfa status
-
-# Output:
-MFA Status:
-  TOTP: Not enrolled
-  WebAuthn: Not enrolled
-  Backup Codes: Not generated
-  MFA Required: Yes (production workspace)
-
-

Step 3: Enroll MFA Device

-

Choose one or both methods (TOTP + WebAuthn recommended):

- -

Step 4: Verify and Activate

-

After enrollment, login again with MFA:

-
# Login (returns partial token)
-provisioning login --user admin@example.com --workspace production
-
-# Verify MFA code (returns full access token)
-provisioning mfa verify 123456
-
-# Response:
-{
-  "status": "authenticated",
-  "access_token": "eyJhbGci...",      # Full access token (15 min)
-  "refresh_token": "eyJhbGci...",     # Refresh token (7 days)
-  "mfa_verified": true,
-  "expires_in": 900
-}
-
-
-

TOTP Setup (Authenticator Apps)

-

Supported Authenticator Apps

-
- - - - - -
AppPlatformNotes
Google AuthenticatoriOS, AndroidSimple, widely used
AuthyiOS, Android, DesktopCloud backup, multi-device
1PasswordAll platformsIntegrated with password manager
Microsoft AuthenticatoriOS, AndroidEnterprise integration
BitwardenAll platformsOpen source
-
-

Step-by-Step TOTP Enrollment

-

1. Initiate TOTP Enrollment

-
provisioning mfa totp enroll
-
-

Output:

-
╔════════════════════════════════════════════════════════════╗
-║                   TOTP ENROLLMENT                          ║
-╚════════════════════════════════════════════════════════════╝
-
-Scan this QR code with your authenticator app:
-
-█████████████████████████████████
-█████████████████████████████████
-████ ▄▄▄▄▄ █▀ █▀▀██ ▄▄▄▄▄ ████
-████ █   █ █▀▄ ▀ ▄█ █   █ ████
-████ █▄▄▄█ █ ▀▀ ▀▀█ █▄▄▄█ ████
-████▄▄▄▄▄▄▄█ █▀█ ▀ █▄▄▄▄▄▄████
-█████████████████████████████████
-█████████████████████████████████
-
-Manual entry (if QR code doesn't work):
-  Secret: JBSWY3DPEHPK3PXP
-  Account: admin@example.com
-  Issuer: Provisioning Platform
-
-TOTP Configuration:
-  Algorithm: SHA1
-  Digits: 6
-  Period: 30 seconds
-
-

2. Add to Authenticator App

-

Option A: Scan QR Code (Recommended)

-
    -
  1. Open authenticator app (Google Authenticator, Authy, etc.)
  2. -
  3. Tap “+” or “Add Account”
  4. -
  5. Select “Scan QR Code”
  6. -
  7. Point camera at QR code displayed in terminal
  8. -
  9. Account added automatically
  10. -
-

Option B: Manual Entry

-
    -
  1. Open authenticator app
  2. -
  3. Tap “+” or “Add Account”
  4. -
  5. Select “Enter a setup key” or “Manual entry”
  6. -
  7. Enter: -
      -
    • Account name: admin@example.com
    • -
    • Key: JBSWY3DPEHPK3PXP (secret shown above)
    • -
    • Type of key: Time-based
    • -
    -
  8. -
  9. Save account
  10. -
-

3. Verify TOTP Code

-
# Get current code from authenticator app (6 digits, changes every 30s)
-# Example code: 123456
-
-provisioning mfa totp verify 123456
-
-

Success Response:

-
✓ TOTP verified successfully!
-
-Backup Codes (SAVE THESE SECURELY):
-  1. A3B9-C2D7-E1F4
-  2. G8H5-J6K3-L9M2
-  3. N4P7-Q1R8-S5T2
-  4. U6V3-W9X1-Y7Z4
-  5. A2B8-C5D1-E9F3
-  6. G7H4-J2K6-L8M1
-  7. N3P9-Q5R2-S7T4
-  8. U1V6-W3X8-Y2Z5
-  9. A9B4-C7D2-E5F1
- 10. G3H8-J1K5-L6M9
-
-⚠ Store backup codes in a secure location (password manager, encrypted file)
-⚠ Each code can only be used once
-⚠ These codes allow access if you lose your authenticator device
-
-TOTP enrollment complete. MFA is now active for your account.
-
-

4. Save Backup Codes

-

Critical: Store backup codes in a secure location:

-
# Copy backup codes to password manager or encrypted file
-# NEVER store in plaintext, email, or cloud storage
-
-# Example: Store in encrypted file
-provisioning mfa backup-codes --save-encrypted ~/secure/mfa-backup-codes.enc
-
-# Or display again (requires existing MFA verification)
-provisioning mfa backup-codes --show
-
-

5. Test TOTP Login

-
# Logout to test full login flow
-provisioning logout
-
-# Login with password (returns partial token)
-provisioning login --user admin@example.com --workspace production
-
-# Get current TOTP code from authenticator app
-# Verify with TOTP code (returns full access token)
-provisioning mfa verify 654321
-
-# ✓ Full access granted
-
-
-

WebAuthn Setup (Hardware Keys)

-

Supported WebAuthn Devices

-
- - - - -
Device TypeExamplesSecurity Level
USB Security KeysYubiKey 5, SoloKey, Titan KeyHighest
NFC KeysYubiKey 5 NFC, Google TitanHigh (mobile compatible)
BiometricTouch ID (macOS), Windows Hello, Face IDHigh (convenience)
Platform AuthenticatorsBuilt-in laptop/phone biometricsMedium-High
-
-

Step-by-Step WebAuthn Enrollment

-

1. Check WebAuthn Support

-
# Verify WebAuthn support on your system
-provisioning mfa webauthn check
-
-# Output:
-WebAuthn Support:
-  ✓ Browser: Chrome 120.0 (WebAuthn supported)
-  ✓ Platform: macOS 14.0 (Touch ID available)
-  ✓ USB: YubiKey 5 NFC detected
-
-

2. Initiate WebAuthn Registration

-
provisioning mfa webauthn register --device-name "YubiKey-Admin-Primary"
-
-

Output:

-
╔════════════════════════════════════════════════════════════╗
-║               WEBAUTHN DEVICE REGISTRATION                 ║
-╚════════════════════════════════════════════════════════════╝
-
-Device Name: YubiKey-Admin-Primary
-Relying Party: provisioning.example.com
-
-⚠ Please insert your security key and touch it when it blinks
-
-Waiting for device interaction...
-
-

3. Complete Device Registration

-

For USB Security Keys (YubiKey, SoloKey):

-
    -
  1. Insert USB key into computer
  2. -
  3. Terminal shows “Touch your security key”
  4. -
  5. Touch the gold/silver contact on the key (it will blink)
  6. -
  7. Registration completes
  8. -
-

For Touch ID (macOS):

-
    -
  1. Terminal shows “Touch ID prompt will appear”
  2. -
  3. Touch ID dialog appears on screen
  4. -
  5. Place finger on Touch ID sensor
  6. -
  7. Registration completes
  8. -
-

For Windows Hello:

-
    -
  1. Terminal shows “Windows Hello prompt”
  2. -
  3. Windows Hello biometric prompt appears
  4. -
  5. Complete biometric scan (fingerprint/face)
  6. -
  7. Registration completes
  8. -
-

Success Response:

-
✓ WebAuthn device registered successfully!
-
-Device Details:
-  Name: YubiKey-Admin-Primary
-  Type: USB Security Key
-  AAGUID: 2fc0579f-8113-47ea-b116-bb5a8 d9202a
-  Credential ID: kZj8C3bx...
-  Registered: 2025-10-08T14:32:10Z
-
-You can now use this device for authentication.
-
-

4. Register Additional Devices (Optional)

-

Best Practice: Register 2+ WebAuthn devices (primary + backup)

-
# Register backup YubiKey
-provisioning mfa webauthn register --device-name "YubiKey-Admin-Backup"
-
-# Register Touch ID (for convenience on personal laptop)
-provisioning mfa webauthn register --device-name "MacBook-TouchID"
-
-

5. List Registered Devices

-
provisioning mfa webauthn list
-
-# Output:
-Registered WebAuthn Devices:
-
-  1. YubiKey-Admin-Primary (USB Security Key)
-     Registered: 2025-10-08T14:32:10Z
-     Last Used: 2025-10-08T14:32:10Z
-
-  2. YubiKey-Admin-Backup (USB Security Key)
-     Registered: 2025-10-08T14:35:22Z
-     Last Used: Never
-
-  3. MacBook-TouchID (Platform Authenticator)
-     Registered: 2025-10-08T14:40:15Z
-     Last Used: 2025-10-08T15:20:05Z
-
-Total: 3 devices
-
-

6. Test WebAuthn Login

-
# Logout to test
-provisioning logout
-
-# Login with password (partial token)
-provisioning login --user admin@example.com --workspace production
-
-# Authenticate with WebAuthn
-provisioning mfa webauthn verify
-
-# Output:
-⚠ Insert and touch your security key
-[Touch YubiKey when it blinks]
-
-✓ WebAuthn verification successful
-✓ Full access granted
-
-
-

Enforcing MFA via Cedar Policies

-

Production MFA Enforcement Policy

-

Location: provisioning/config/cedar-policies/production.cedar

-
// Production operations require MFA verification
-permit (
-  principal,
-  action in [
-    Action::"server:create",
-    Action::"server:delete",
-    Action::"cluster:deploy",
-    Action::"secret:read",
-    Action::"user:manage"
-  ],
-  resource in Environment::"production"
-) when {
-  // MFA MUST be verified
-  context.mfa_verified == true
-};
-
-// Admin role requires MFA for ALL production actions
-permit (
-  principal in Role::"Admin",
-  action,
-  resource in Environment::"production"
-) when {
-  context.mfa_verified == true
-};
-
-// Break-glass approval requires MFA
-permit (
-  principal,
-  action == Action::"break_glass:approve",
-  resource
-) when {
-  context.mfa_verified == true &&
-  principal.role in [Role::"Admin", Role::"SecurityLead"]
-};
-
- -

Location: provisioning/config/cedar-policies/development.cedar

-
// Development: MFA recommended but not enforced
-permit (
-  principal,
-  action,
-  resource in Environment::"dev"
-) when {
-  // MFA not required for dev, but logged if missing
-  true
-};
-
-// Staging: MFA recommended for destructive operations
-permit (
-  principal,
-  action in [Action::"server:delete", Action::"cluster:delete"],
-  resource in Environment::"staging"
-) when {
-  // Allow without MFA but log warning
-  context.mfa_verified == true || context has mfa_warning_acknowledged
-};
-
-

Policy Deployment

-
# Validate Cedar policies
-provisioning cedar validate --policies config/cedar-policies/
-
-# Test policies with sample requests
-provisioning cedar test --policies config/cedar-policies/ \
-  --test-file tests/cedar-test-cases.yaml
-
-# Deploy to production (requires MFA + approval)
-provisioning cedar deploy production --policies config/cedar-policies/production.cedar
-
-# Verify policy is active
-provisioning cedar status production
-
-

Testing MFA Enforcement

-
# Test 1: Production access WITHOUT MFA (should fail)
-provisioning login --user admin@example.com --workspace production
-provisioning server create web-01 --plan medium --check
-
-# Expected: Authorization denied (MFA not verified)
-
-# Test 2: Production access WITH MFA (should succeed)
-provisioning login --user admin@example.com --workspace production
-provisioning mfa verify 123456
-provisioning server create web-01 --plan medium --check
-
-# Expected: Server creation initiated
-
-
-

Backup Codes Management

-

Generating Backup Codes

-

Backup codes are automatically generated during first MFA enrollment:

-
# View existing backup codes (requires MFA verification)
-provisioning mfa backup-codes --show
-
-# Regenerate backup codes (invalidates old ones)
-provisioning mfa backup-codes --regenerate
-
-# Output:
-⚠ WARNING: Regenerating backup codes will invalidate all existing codes.
-Continue? (yes/no): yes
-
-New Backup Codes:
-  1. X7Y2-Z9A4-B6C1
-  2. D3E8-F5G2-H9J4
-  3. K6L1-M7N3-P8Q2
-  4. R4S9-T6U1-V3W7
-  5. X2Y5-Z8A3-B9C4
-  6. D7E1-F4G6-H2J8
-  7. K5L9-M3N6-P1Q4
-  8. R8S2-T5U7-V9W3
-  9. X4Y6-Z1A8-B3C5
- 10. D9E2-F7G4-H6J1
-
-✓ Backup codes regenerated successfully
-⚠ Save these codes in a secure location
-
-

Using Backup Codes

-

When to use backup codes:

-
    -
  • Lost authenticator device (phone stolen, broken)
  • -
  • WebAuthn key not available (traveling, left at office)
  • -
  • Authenticator app not working (time sync issue)
  • -
-

Login with backup code:

-
# Login (partial token)
-provisioning login --user admin@example.com --workspace production
-
-# Use backup code instead of TOTP/WebAuthn
-provisioning mfa verify-backup X7Y2-Z9A4-B6C1
-
-# Output:
-✓ Backup code verified
-⚠ Backup code consumed (9 remaining)
-⚠ Enroll a new MFA device as soon as possible
-✓ Full access granted (temporary)
-
-

Backup Code Storage Best Practices

-

✅ DO:

-
    -
  • Store in password manager (1Password, Bitwarden, LastPass)
  • -
  • Print and store in physical safe
  • -
  • Encrypt and store in secure cloud storage (with encryption key stored separately)
  • -
  • Share with trusted IT team member (encrypted)
  • -
-

❌ DON’T:

-
    -
  • Email to yourself
  • -
  • Store in plaintext file on laptop
  • -
  • Save in browser notes/bookmarks
  • -
  • Share via Slack/Teams/unencrypted chat
  • -
  • Screenshot and save to Photos
  • -
-

Example: Encrypted Storage:

-
# Encrypt backup codes with Age
-provisioning mfa backup-codes --export | \
-  age -p -o ~/secure/mfa-backup-codes.age
-
-# Decrypt when needed
-age -d ~/secure/mfa-backup-codes.age
-
-
-

Recovery Procedures

-

Scenario 1: Lost Authenticator Device (TOTP)

-

Situation: Phone stolen/broken, authenticator app not accessible

-

Recovery Steps:

-
# Step 1: Use backup code to login
-provisioning login --user admin@example.com --workspace production
-provisioning mfa verify-backup X7Y2-Z9A4-B6C1
-
-# Step 2: Remove old TOTP enrollment
-provisioning mfa totp unenroll
-
-# Step 3: Enroll new TOTP device
-provisioning mfa totp enroll
-# [Scan QR code with new phone/authenticator app]
-provisioning mfa totp verify 654321
-
-# Step 4: Generate new backup codes
-provisioning mfa backup-codes --regenerate
-
-

Scenario 2: Lost WebAuthn Key (YubiKey)

-

Situation: YubiKey lost, stolen, or damaged

-

Recovery Steps:

-
# Step 1: Login with alternative method (TOTP or backup code)
-provisioning login --user admin@example.com --workspace production
-provisioning mfa verify 123456  # TOTP from authenticator app
-
-# Step 2: List registered WebAuthn devices
-provisioning mfa webauthn list
-
-# Step 3: Remove lost device
-provisioning mfa webauthn remove "YubiKey-Admin-Primary"
-
-# Output:
-⚠ Remove WebAuthn device "YubiKey-Admin-Primary"?
-This cannot be undone. (yes/no): yes
-
-✓ Device removed
-
-# Step 4: Register new WebAuthn device
-provisioning mfa webauthn register --device-name "YubiKey-Admin-Replacement"
-
-

Scenario 3: All MFA Methods Lost

-

Situation: Lost phone (TOTP), lost YubiKey, no backup codes

-

Recovery Steps (Requires Admin Assistance):

-
# User contacts Security Team / Platform Admin
-
-# Admin performs MFA reset (requires 2+ admin approval)
-provisioning admin mfa-reset admin@example.com \
-  --reason "Employee lost all MFA devices (phone + YubiKey)" \
-  --ticket SUPPORT-12345
-
-# Output:
-⚠ MFA Reset Request Created
-
-Reset Request ID: MFA-RESET-20251008-001
-User: admin@example.com
-Reason: Employee lost all MFA devices (phone + YubiKey)
-Ticket: SUPPORT-12345
-
-Required Approvals: 2
-Approvers: 0/2
-
-# Two other admins approve (with their own MFA)
-provisioning admin mfa-reset approve MFA-RESET-20251008-001 \
-  --reason "Verified via video call + employee badge"
-
-# After 2 approvals, MFA is reset
-✓ MFA reset approved (2/2 approvals)
-✓ User admin@example.com can now re-enroll MFA devices
-
-# User re-enrolls TOTP and WebAuthn
-provisioning mfa totp enroll
-provisioning mfa webauthn register --device-name "YubiKey-New"
-
-

Scenario 4: Backup Codes Depleted

-

Situation: Used 9 out of 10 backup codes

-

Recovery Steps:

-
# Login with last backup code
-provisioning login --user admin@example.com --workspace production
-provisioning mfa verify-backup D9E2-F7G4-H6J1
-
-# Output:
-⚠ WARNING: This is your LAST backup code!
-✓ Backup code verified
-⚠ Regenerate backup codes immediately!
-
-# Immediately regenerate backup codes
-provisioning mfa backup-codes --regenerate
-
-# Save new codes securely
-
-
-

Troubleshooting

-

Issue 1: “Invalid TOTP code” Error

-

Symptoms:

-
provisioning mfa verify 123456
-✗ Error: Invalid TOTP code
-
-

Possible Causes:

-
    -
  1. Time sync issue (most common)
  2. -
  3. Wrong secret key entered during enrollment
  4. -
  5. Code expired (30-second window)
  6. -
-

Solutions:

-
# Check time sync (device clock must be accurate)
-# macOS:
-sudo sntp -sS time.apple.com
-
-# Linux:
-sudo ntpdate pool.ntp.org
-
-# Verify TOTP configuration
-provisioning mfa totp status
-
-# Output:
-TOTP Configuration:
-  Algorithm: SHA1
-  Digits: 6
-  Period: 30 seconds
-  Time Window: ±1 period (90 seconds total)
-
-# Check system time vs NTP
-date && curl -s http://worldtimeapi.org/api/ip | grep datetime
-
-# If time is off by >30 seconds, sync time and retry
-
-

Issue 2: WebAuthn Not Detected

-

Symptoms:

-
provisioning mfa webauthn register
-✗ Error: No WebAuthn authenticator detected
-
-

Solutions:

-
# Check USB connection (for hardware keys)
-# macOS:
-system_profiler SPUSBDataType | grep -i yubikey
-
-# Linux:
-lsusb | grep -i yubico
-
-# Check browser WebAuthn support
-provisioning mfa webauthn check
-
-# Try different USB port (USB-A vs USB-C)
-
-# For Touch ID: Ensure finger is enrolled in System Preferences
-# For Windows Hello: Ensure biometrics are configured in Settings
-
-

Issue 3: “MFA Required” Despite Verification

-

Symptoms:

-
provisioning server create web-01
-✗ Error: Authorization denied (MFA verification required)
-
-

Cause: Access token expired (15 min) or MFA verification not in token claims

-

Solution:

-
# Check token expiration
-provisioning auth status
-
-# Output:
-Authentication Status:
-  Logged in: Yes
-  User: admin@example.com
-  Access Token: Expired (issued 16 minutes ago)
-  MFA Verified: Yes (but token expired)
-
-# Re-authenticate (will prompt for MFA again)
-provisioning login --user admin@example.com --workspace production
-provisioning mfa verify 654321
-
-# Verify MFA claim in token
-provisioning auth decode-token
-
-# Output (JWT claims):
-{
-  "sub": "admin@example.com",
-  "role": "Admin",
-  "mfa_verified": true,  # ← Must be true
-  "mfa_method": "totp",
-  "iat": 1696766400,
-  "exp": 1696767300
-}
-
-

Issue 4: QR Code Not Displaying

-

Symptoms: QR code appears garbled or doesn’t display in terminal

-

Solutions:

-
# Use manual entry instead
-provisioning mfa totp enroll --manual
-
-# Output (no QR code):
-Manual TOTP Setup:
-  Secret: JBSWY3DPEHPK3PXP
-  Account: admin@example.com
-  Issuer: Provisioning Platform
-
-Enter this secret manually in your authenticator app.
-
-# Or export QR code to image file
-provisioning mfa totp enroll --qr-image ~/mfa-qr.png
-open ~/mfa-qr.png  # View in image viewer
-
-

Issue 5: Backup Code Not Working

-

Symptoms:

-
provisioning mfa verify-backup X7Y2-Z9A4-B6C1
-✗ Error: Invalid or already used backup code
-
-

Possible Causes:

-
    -
  1. Code already used (single-use only)
  2. -
  3. Backup codes regenerated (old codes invalidated)
  4. -
  5. Typo in code entry
  6. -
-

Solutions:

-
# Check backup code status (requires alternative login method)
-provisioning mfa backup-codes --status
-
-# Output:
-Backup Codes Status:
-  Total Generated: 10
-  Used: 3
-  Remaining: 7
-  Last Used: 2025-10-05T10:15:30Z
-
-# Contact admin for MFA reset if all codes used
-# Or use alternative MFA method (TOTP, WebAuthn)
-
-
-

Best Practices

-

For Individual Admins

-

1. Use Multiple MFA Methods

-

✅ Recommended Setup:

-
    -
  • Primary: TOTP (Google Authenticator, Authy)
  • -
  • Backup: WebAuthn (YubiKey or Touch ID)
  • -
  • Emergency: Backup codes (stored securely)
  • -
-
# Enroll all three
-provisioning mfa totp enroll
-provisioning mfa webauthn register --device-name "YubiKey-Primary"
-provisioning mfa backup-codes --save-encrypted ~/secure/codes.enc
-
-

2. Secure Backup Code Storage

-
# Store in password manager (1Password example)
-provisioning mfa backup-codes --show | \
-  op item create --category "Secure Note" \
-    --title "Provisioning MFA Backup Codes" \
-    --vault "Work"
-
-# Or encrypted file
-provisioning mfa backup-codes --export | \
-  age -p -o ~/secure/mfa-backup-codes.age
-
-

3. Regular Device Audits

-
# Monthly: Review registered devices
-provisioning mfa devices --all
-
-# Remove unused/old devices
-provisioning mfa webauthn remove "Old-YubiKey"
-provisioning mfa totp remove "Old-Phone"
-
-

4. Test Recovery Procedures

-
# Quarterly: Test backup code login
-provisioning logout
-provisioning login --user admin@example.com --workspace dev
-provisioning mfa verify-backup [test-code]
-
-# Verify backup codes are accessible
-cat ~/secure/mfa-backup-codes.enc | age -d
-
-

For Security Teams

-

1. MFA Enrollment Verification

-
# Generate MFA enrollment report
-provisioning admin mfa-report --format csv > mfa-enrollment.csv
-
-# Output (CSV):
-# User,MFA_Enabled,TOTP,WebAuthn,Backup_Codes,Last_MFA_Login,Role
-# admin@example.com,Yes,Yes,Yes,10,2025-10-08T14:00:00Z,Admin
-# dev@example.com,No,No,No,0,Never,Developer
-
-

2. Enforce MFA Deadlines

-
# Set MFA enrollment deadline
-provisioning admin mfa-deadline set 2025-11-01 \
-  --roles Admin,Developer \
-  --environment production
-
-# Send reminder emails
-provisioning admin mfa-remind \
-  --users-without-mfa \
-  --template "MFA enrollment required by Nov 1"
-
-

3. Monitor MFA Usage

-
# Audit: Find production logins without MFA
-provisioning audit query \
-  --action "auth:login" \
-  --filter 'mfa_verified == false && environment == "production"' \
-  --since 7d
-
-# Alert on repeated MFA failures
-provisioning monitoring alert create \
-  --name "MFA Brute Force" \
-  --condition "mfa_failures > 5 in 5 min" \
-  --action "notify security-team"
-
-

4. MFA Reset Policy

-

MFA Reset Requirements:

-
    -
  • User verification (video call + ID check)
  • -
  • Support ticket created (incident tracking)
  • -
  • 2+ admin approvals (different teams)
  • -
  • Time-limited reset window (24 hours)
  • -
  • Mandatory re-enrollment before production access
  • -
-
# MFA reset workflow
-provisioning admin mfa-reset create user@example.com \
-  --reason "Lost all devices" \
-  --ticket SUPPORT-12345 \
-  --expires-in 24h
-
-# Requires 2 approvals
-provisioning admin mfa-reset approve MFA-RESET-001
-
-

For Platform Admins

-

1. Cedar Policy Best Practices

-
// Require MFA for high-risk actions
-permit (
-  principal,
-  action in [
-    Action::"server:delete",
-    Action::"cluster:delete",
-    Action::"secret:delete",
-    Action::"user:delete"
-  ],
-  resource
-) when {
-  context.mfa_verified == true &&
-  context.mfa_age_seconds < 300  // MFA verified within last 5 minutes
-};
-
-

2. MFA Grace Periods (For Rollout)

-
# Development: No MFA required
-export PROVISIONING_MFA_REQUIRED=false
-
-# Staging: MFA recommended (warnings only)
-export PROVISIONING_MFA_REQUIRED=warn
-
-# Production: MFA mandatory (strict enforcement)
-export PROVISIONING_MFA_REQUIRED=true
-
-

3. Backup Admin Account

-

Emergency Admin (break-glass scenario):

-
    -
  • Separate admin account with MFA enrollment
  • -
  • Credentials stored in physical safe
  • -
  • Only used when primary admins locked out
  • -
  • Requires incident report after use
  • -
-
# Create emergency admin
-provisioning admin create emergency-admin@example.com \
-  --role EmergencyAdmin \
-  --mfa-required true \
-  --max-concurrent-sessions 1
-
-# Print backup codes and store in safe
-provisioning mfa backup-codes --show --user emergency-admin@example.com > emergency-codes.txt
-# [Print and store in physical safe]
-
-
-

Audit and Compliance

-

MFA Audit Logging

-

All MFA events are logged to the audit system:

-
# View MFA enrollment events
-provisioning audit query \
-  --action-type "mfa:*" \
-  --since 30d
-
-# Output (JSON):
-[
-  {
-    "timestamp": "2025-10-08T14:32:10Z",
-    "action": "mfa:totp:enroll",
-    "user": "admin@example.com",
-    "result": "success",
-    "device_type": "totp",
-    "ip_address": "203.0.113.42"
-  },
-  {
-    "timestamp": "2025-10-08T14:35:22Z",
-    "action": "mfa:webauthn:register",
-    "user": "admin@example.com",
-    "result": "success",
-    "device_name": "YubiKey-Admin-Primary",
-    "ip_address": "203.0.113.42"
-  }
-]
-
-

Compliance Reports

-

SOC2 Compliance (Access Control)

-
# Generate SOC2 access control report
-provisioning compliance report soc2 \
-  --control "CC6.1" \
-  --period "2025-Q3"
-
-# Output:
-SOC2 Trust Service Criteria - CC6.1 (Logical Access)
-
-MFA Enforcement:
-  ✓ MFA enabled for 100% of production admins (15/15)
-  ✓ MFA verified for 98.7% of production logins (2,453/2,485)
-  ✓ MFA policies enforced via Cedar authorization
-  ✓ Failed MFA attempts logged and monitored
-
-Evidence:
-  - Cedar policy: production.cedar (lines 15-25)
-  - Audit logs: mfa-verification-logs-2025-q3.json
-  - Enrollment report: mfa-enrollment-status.csv
-
-

ISO 27001 Compliance (A.9.4.2 - Secure Log-on)

-
# ISO 27001 A.9.4.2 compliance report
-provisioning compliance report iso27001 \
-  --control "A.9.4.2" \
-  --format pdf \
-  --output iso27001-a942-mfa-report.pdf
-
-# Report Sections:
-# 1. MFA Implementation Details
-# 2. Enrollment Procedures
-# 3. Audit Trail
-# 4. Policy Enforcement
-# 5. Recovery Procedures
-
-

GDPR Compliance (MFA Data Handling)

-
# GDPR data subject request (MFA data export)
-provisioning compliance gdpr export admin@example.com \
-  --include mfa
-
-# Output (JSON):
-{
-  "user": "admin@example.com",
-  "mfa_data": {
-    "totp_enrolled": true,
-    "totp_enrollment_date": "2025-10-08T14:32:10Z",
-    "webauthn_devices": [
-      {
-        "name": "YubiKey-Admin-Primary",
-        "registered": "2025-10-08T14:35:22Z",
-        "last_used": "2025-10-08T16:20:05Z"
-      }
-    ],
-    "backup_codes_remaining": 7,
-    "mfa_login_history": [...]  # Last 90 days
-  }
-}
-
-# GDPR deletion (MFA data removal after account deletion)
-provisioning compliance gdpr delete admin@example.com --include-mfa
-
-

MFA Metrics Dashboard

-
# Generate MFA metrics
-provisioning admin mfa-metrics --period 30d
-
-# Output:
-MFA Metrics (Last 30 Days)
-
-Enrollment:
-  Total Users: 42
-  MFA Enabled: 38 (90.5%)
-  TOTP Only: 22 (57.9%)
-  WebAuthn Only: 3 (7.9%)
-  Both TOTP + WebAuthn: 13 (34.2%)
-  No MFA: 4 (9.5%) ⚠
-
-Authentication:
-  Total Logins: 3,847
-  MFA Verified: 3,802 (98.8%)
-  MFA Failed: 45 (1.2%)
-  Backup Code Used: 7 (0.2%)
-
-Devices:
-  TOTP Devices: 35
-  WebAuthn Devices: 47
-  Backup Codes Remaining (avg): 8.3
-
-Incidents:
-  MFA Resets: 2
-  Lost Devices: 3
-  Lockouts: 1
-
-
-

Quick Reference Card

-

Daily Admin Operations

-
# Login with MFA
-provisioning login --user admin@example.com --workspace production
-provisioning mfa verify 123456
-
-# Check MFA status
-provisioning mfa status
-
-# View registered devices
-provisioning mfa devices
-
-

MFA Management

-
# TOTP
-provisioning mfa totp enroll              # Enroll TOTP
-provisioning mfa totp verify 123456       # Verify TOTP code
-provisioning mfa totp unenroll            # Remove TOTP
-
-# WebAuthn
-provisioning mfa webauthn register --device-name "YubiKey"  # Register key
-provisioning mfa webauthn list            # List devices
-provisioning mfa webauthn remove "YubiKey"  # Remove device
-
-# Backup Codes
-provisioning mfa backup-codes --show      # View codes
-provisioning mfa backup-codes --regenerate  # Generate new codes
-provisioning mfa verify-backup X7Y2-Z9A4-B6C1  # Use backup code
-
-

Emergency Procedures

-
# Lost device recovery (use backup code)
-provisioning login --user admin@example.com
-provisioning mfa verify-backup [code]
-provisioning mfa totp enroll  # Re-enroll new device
-
-# MFA reset (admin only)
-provisioning admin mfa-reset user@example.com --reason "Lost all devices"
-
-# Check MFA compliance
-provisioning admin mfa-report
-
-
-

Summary Checklist

-

For New Admins

-
    -
  • -Complete initial login with password
  • -
  • -Enroll TOTP (Google Authenticator, Authy)
  • -
  • -Verify TOTP code successfully
  • -
  • -Save backup codes in password manager
  • -
  • -Register WebAuthn device (YubiKey or Touch ID)
  • -
  • -Test full login flow with MFA
  • -
  • -Store backup codes in secure location
  • -
  • -Verify production access works with MFA
  • -
-

For Security Team

-
    -
  • -Deploy Cedar MFA enforcement policies
  • -
  • -Verify 100% admin MFA enrollment
  • -
  • -Configure MFA audit logging
  • -
  • -Setup MFA compliance reports (SOC2, ISO 27001)
  • -
  • -Document MFA reset procedures
  • -
  • -Train admins on MFA usage
  • -
  • -Create emergency admin account (break-glass)
  • -
  • -Schedule quarterly MFA audits
  • -
-

For Platform Team

-
    -
  • -Configure MFA settings in config/mfa.toml
  • -
  • -Deploy Cedar policies with MFA requirements
  • -
  • -Setup monitoring for MFA failures
  • -
  • -Configure alerts for MFA bypass attempts
  • -
  • -Document MFA architecture in ADR
  • -
  • -Test MFA enforcement in all environments
  • -
  • -Verify audit logs capture MFA events
  • -
  • -Create runbooks for MFA incidents
  • -
-
-

Support and Resources

-

Documentation

-
    -
  • MFA Implementation: /docs/architecture/MFA_IMPLEMENTATION_SUMMARY.md
  • -
  • Cedar Policies: /docs/operations/CEDAR_POLICIES_PRODUCTION_GUIDE.md
  • -
  • Break-Glass: /docs/operations/BREAK_GLASS_TRAINING_GUIDE.md
  • -
  • Audit Logging: /docs/architecture/AUDIT_LOGGING_IMPLEMENTATION.md
  • -
-

Configuration Files

-
    -
  • MFA Config: provisioning/config/mfa.toml
  • -
  • Cedar Policies: provisioning/config/cedar-policies/production.cedar
  • -
  • Control Center: provisioning/platform/control-center/config.toml
  • -
-

CLI Help

-
provisioning mfa help          # MFA command help
-provisioning mfa totp --help   # TOTP-specific help
-provisioning mfa webauthn --help  # WebAuthn-specific help
-
-

Contact

- -
-

Document Status: ✅ Complete -Review Date: 2025-11-08 -Maintained By: Security Team, Platform Team

-

Provisioning Orchestrator

-

A Rust-based orchestrator service that coordinates infrastructure provisioning workflows with pluggable storage backends and comprehensive migration tools.

-
-

Source: provisioning/platform/orchestrator/

-
-

Architecture

-

The orchestrator implements a hybrid multi-storage approach:

-
    -
  • Rust Orchestrator: Handles coordination, queuing, and parallel execution
  • -
  • Nushell Scripts: Execute the actual provisioning logic
  • -
  • Pluggable Storage: Multiple storage backends with seamless migration
  • -
  • REST API: HTTP interface for workflow submission and monitoring
  • -
-

Key Features

-
    -
  • Multi-Storage Backends: Filesystem, SurrealDB Embedded, and SurrealDB Server options
  • -
  • Task Queue: Priority-based task scheduling with retry logic
  • -
  • Seamless Migration: Move data between storage backends with zero downtime
  • -
  • Feature Flags: Compile-time backend selection for minimal dependencies
  • -
  • Parallel Execution: Multiple tasks can run concurrently
  • -
  • Status Tracking: Real-time task status and progress monitoring
  • -
  • Advanced Features: Authentication, audit logging, and metrics (SurrealDB)
  • -
  • Nushell Integration: Seamless execution of existing provisioning scripts
  • -
  • RESTful API: HTTP endpoints for workflow management
  • -
  • Test Environment Service: Automated containerized testing for taskservs, servers, and clusters
  • -
  • Multi-Node Support: Test complex topologies including Kubernetes and etcd clusters
  • -
  • Docker Integration: Automated container lifecycle management via Docker API
  • -
-

Quick Start

-

Build and Run

-

Default Build (Filesystem Only):

-
cd provisioning/platform/orchestrator
-cargo build --release
-cargo run -- --port 8080 --data-dir ./data
-
-

With SurrealDB Support:

-
cargo build --release --features surrealdb
-
-# Run with SurrealDB embedded
-cargo run --features surrealdb -- --storage-type surrealdb-embedded --data-dir ./data
-
-# Run with SurrealDB server
-cargo run --features surrealdb -- --storage-type surrealdb-server \
-  --surrealdb-url ws://localhost:8000 \
-  --surrealdb-username admin --surrealdb-password secret
-
-

Submit Workflow

-
curl -X POST http://localhost:8080/workflows/servers/create \
-  -H "Content-Type: application/json" \
-  -d '{
-    "infra": "production",
-    "settings": "./settings.yaml",
-    "servers": ["web-01", "web-02"],
-    "check_mode": false,
-    "wait": true
-  }'
-
-

API Endpoints

-

Core Endpoints

-
    -
  • GET /health - Service health status
  • -
  • GET /tasks - List all tasks
  • -
  • GET /tasks/{id} - Get specific task status
  • -
-

Workflow Endpoints

-
    -
  • POST /workflows/servers/create - Submit server creation workflow
  • -
  • POST /workflows/taskserv/create - Submit taskserv creation workflow
  • -
  • POST /workflows/cluster/create - Submit cluster creation workflow
  • -
-

Test Environment Endpoints

-
    -
  • POST /test/environments/create - Create test environment
  • -
  • GET /test/environments - List all test environments
  • -
  • GET /test/environments/{id} - Get environment details
  • -
  • POST /test/environments/{id}/run - Run tests in environment
  • -
  • DELETE /test/environments/{id} - Cleanup test environment
  • -
  • GET /test/environments/{id}/logs - Get environment logs
  • -
-

Test Environment Service

-

The orchestrator includes a comprehensive test environment service for automated containerized testing.

-

Test Environment Types

-

1. Single Taskserv

-

Test individual taskserv in isolated container.

-

2. Server Simulation

-

Test complete server configurations with multiple taskservs.

-

3. Cluster Topology

-

Test multi-node cluster configurations (Kubernetes, etcd, etc.).

-

Nushell CLI Integration

-
# Quick test
-provisioning test quick kubernetes
-
-# Single taskserv test
-provisioning test env single postgres --auto-start --auto-cleanup
-
-# Server simulation
-provisioning test env server web-01 [containerd kubernetes cilium] --auto-start
-
-# Cluster from template
-provisioning test topology load kubernetes_3node | test env cluster kubernetes
-
-

Topology Templates

-

Predefined multi-node cluster topologies:

-
    -
  • kubernetes_3node: 3-node HA Kubernetes cluster
  • -
  • kubernetes_single: All-in-one Kubernetes node
  • -
  • etcd_cluster: 3-member etcd cluster
  • -
  • containerd_test: Standalone containerd testing
  • -
  • postgres_redis: Database stack testing
  • -
-

Storage Backends

-
- - - - - - -
FeatureFilesystemSurrealDB EmbeddedSurrealDB Server
DependenciesNoneLocal databaseRemote server
Auth/RBACBasicAdvancedAdvanced
Real-timeNoYesYes
ScalabilityLimitedMediumHigh
ComplexityLowMediumHigh
Best ForDevelopmentProductionDistributed
-
- - -

Hybrid Orchestrator Architecture (v3.0.0)

-

🚀 Orchestrator Implementation Completed (2025-09-25)

-

A production-ready hybrid Rust/Nushell orchestrator has been implemented to solve deep call stack limitations while preserving all Nushell business logic.

-

Architecture Overview

-
    -
  • Rust Orchestrator: High-performance coordination layer with REST API
  • -
  • Nushell Business Logic: All existing scripts preserved and enhanced
  • -
  • File-based Persistence: Reliable task queue using lightweight file storage
  • -
  • Priority Processing: Intelligent task scheduling with retry logic
  • -
  • Deep Call Stack Solution: Eliminates template.nu:71 “Type not supported” errors
  • -
-

Orchestrator Management

-
# Start orchestrator in background
-cd provisioning/platform/orchestrator
-./scripts/start-orchestrator.nu --background --provisioning-path "/usr/local/bin/provisioning"
-
-# Check orchestrator status
-./scripts/start-orchestrator.nu --check
-
-# Stop orchestrator
-./scripts/start-orchestrator.nu --stop
-
-# View logs
-tail -f ./data/orchestrator.log
-
-

Workflow System

-

The orchestrator provides comprehensive workflow management:

-

Server Workflows

-
# Submit server creation workflow
-nu -c "use core/nulib/workflows/server_create.nu *; server_create_workflow 'wuji' '' [] --check"
-
-# Traditional orchestrated server creation
-provisioning servers create --orchestrated --check
-
-

Taskserv Workflows

-
# Create taskserv workflow
-nu -c "use core/nulib/workflows/taskserv.nu *; taskserv create 'kubernetes' 'wuji' --check"
-
-# Other taskserv operations
-nu -c "use core/nulib/workflows/taskserv.nu *; taskserv delete 'kubernetes' 'wuji' --check"
-nu -c "use core/nulib/workflows/taskserv.nu *; taskserv generate 'kubernetes' 'wuji'"
-nu -c "use core/nulib/workflows/taskserv.nu *; taskserv check-updates"
-
-

Cluster Workflows

-
# Create cluster workflow
-nu -c "use core/nulib/workflows/cluster.nu *; cluster create 'buildkit' 'wuji' --check"
-
-# Delete cluster workflow
-nu -c "use core/nulib/workflows/cluster.nu *; cluster delete 'buildkit' 'wuji' --check"
-
-

Workflow Management

-
# List all workflows
-nu -c "use core/nulib/workflows/management.nu *; workflow list"
-
-# Get workflow statistics
-nu -c "use core/nulib/workflows/management.nu *; workflow stats"
-
-# Monitor workflow in real-time
-nu -c "use core/nulib/workflows/management.nu *; workflow monitor <task_id>"
-
-# Check orchestrator health
-nu -c "use core/nulib/workflows/management.nu *; workflow orchestrator"
-
-# Get specific workflow status
-nu -c "use core/nulib/workflows/management.nu *; workflow status <task_id>"
-
-

REST API Endpoints

-

The orchestrator exposes HTTP endpoints for external integration:

-
    -
  • Health: GET http://localhost:9090/v1/health
  • -
  • List Tasks: GET http://localhost:9090/v1/tasks
  • -
  • Task Status: GET http://localhost:9090/v1/tasks/{id}
  • -
  • Server Workflow: POST http://localhost:9090/v1/workflows/servers/create
  • -
  • Taskserv Workflow: POST http://localhost:9090/v1/workflows/taskserv/create
  • -
  • Cluster Workflow: POST http://localhost:9090/v1/workflows/cluster/create
  • -
-

Control Center - Cedar Policy Engine

-

A comprehensive Cedar policy engine implementation with advanced security features, compliance checking, and anomaly detection.

-
-

Source: provisioning/platform/control-center/

-
-

Key Features

-

Cedar Policy Engine

-
    -
  • Policy Evaluation: High-performance policy evaluation with context injection
  • -
  • Versioning: Complete policy versioning with rollback capabilities
  • -
  • Templates: Configuration-driven policy templates with variable substitution
  • -
  • Validation: Comprehensive policy validation with syntax and semantic checking
  • -
-

Security & Authentication

-
    -
  • JWT Authentication: Secure token-based authentication
  • -
  • Multi-Factor Authentication: MFA support for sensitive operations
  • -
  • Role-Based Access Control: Flexible RBAC with policy integration
  • -
  • Session Management: Secure session handling with timeouts
  • -
-

Compliance Framework

-
    -
  • SOC2 Type II: Complete SOC2 compliance validation
  • -
  • HIPAA: Healthcare data protection compliance
  • -
  • Audit Trail: Comprehensive audit logging and reporting
  • -
  • Impact Analysis: Policy change impact assessment
  • -
-

Anomaly Detection

-
    -
  • Statistical Analysis: Multiple statistical methods (Z-Score, IQR, Isolation Forest)
  • -
  • Real-time Detection: Continuous monitoring of policy evaluations
  • -
  • Alert Management: Configurable alerting through multiple channels
  • -
  • Baseline Learning: Adaptive baseline calculation for improved accuracy
  • -
-

Storage & Persistence

-
    -
  • SurrealDB Integration: High-performance graph database backend
  • -
  • Policy Storage: Versioned policy storage with metadata
  • -
  • Metrics Storage: Policy evaluation metrics and analytics
  • -
  • Compliance Records: Complete compliance audit trails
  • -
-

Quick Start

-

Installation

-
cd provisioning/platform/control-center
-cargo build --release
-
-

Configuration

-

Copy and edit the configuration:

-
cp config.toml.example config.toml
-
-

Configuration example:

-
[database]
-url = "surreal://localhost:8000"
-username = "root"
-password = "your-password"
-
-[auth]
-jwt_secret = "your-super-secret-key"
-require_mfa = true
-
-[compliance.soc2]
-enabled = true
-
-[anomaly]
-enabled = true
-detection_threshold = 2.5
-
-

Start Server

-
./target/release/control-center server --port 8080
-
-

Test Policy Evaluation

-
curl -X POST http://localhost:8080/policies/evaluate \
-  -H "Content-Type: application/json" \
-  -d '{
-    "principal": {"id": "user123", "roles": ["Developer"]},
-    "action": {"id": "access"},
-    "resource": {"id": "sensitive-db", "classification": "confidential"},
-    "context": {"mfa_enabled": true, "location": "US"}
-  }'
-
-

Policy Examples

-

Multi-Factor Authentication Policy

-
permit(
-    principal,
-    action == Action::"access",
-    resource
-) when {
-    resource has classification &&
-    resource.classification in ["sensitive", "confidential"] &&
-    principal has mfa_enabled &&
-    principal.mfa_enabled == true
-};
-
-

Production Approval Policy

-
permit(
-    principal,
-    action in [Action::"deploy", Action::"modify", Action::"delete"],
-    resource
-) when {
-    resource has environment &&
-    resource.environment == "production" &&
-    principal has approval &&
-    principal.approval.approved_by in ["ProductionAdmin", "SRE"]
-};
-
-

Geographic Restrictions

-
permit(
-    principal,
-    action,
-    resource
-) when {
-    context has geo &&
-    context.geo has country &&
-    context.geo.country in ["US", "CA", "GB", "DE"]
-};
-
-

CLI Commands

-

Policy Management

-
# Validate policies
-control-center policy validate policies/
-
-# Test policy with test data
-control-center policy test policies/mfa.cedar tests/data/mfa_test.json
-
-# Analyze policy impact
-control-center policy impact policies/new_policy.cedar
-
-

Compliance Checking

-
# Check SOC2 compliance
-control-center compliance soc2
-
-# Check HIPAA compliance
-control-center compliance hipaa
-
-# Generate compliance report
-control-center compliance report --format html
-
-

API Endpoints

-

Policy Evaluation

-
    -
  • POST /policies/evaluate - Evaluate policy decision
  • -
  • GET /policies - List all policies
  • -
  • POST /policies - Create new policy
  • -
  • PUT /policies/{id} - Update policy
  • -
  • DELETE /policies/{id} - Delete policy
  • -
-

Policy Versions

-
    -
  • GET /policies/{id}/versions - List policy versions
  • -
  • GET /policies/{id}/versions/{version} - Get specific version
  • -
  • POST /policies/{id}/rollback/{version} - Rollback to version
  • -
-

Compliance

-
    -
  • GET /compliance/soc2 - SOC2 compliance check
  • -
  • GET /compliance/hipaa - HIPAA compliance check
  • -
  • GET /compliance/report - Generate compliance report
  • -
-

Anomaly Detection

-
    -
  • GET /anomalies - List detected anomalies
  • -
  • GET /anomalies/{id} - Get anomaly details
  • -
  • POST /anomalies/detect - Trigger anomaly detection
  • -
-

Architecture

-

Core Components

-
    -
  1. -

    Policy Engine (src/policies/engine.rs)

    -
      -
    • Cedar policy evaluation
    • -
    • Context injection
    • -
    • Caching and optimization
    • -
    -
  2. -
  3. -

    Storage Layer (src/storage/)

    -
      -
    • SurrealDB integration
    • -
    • Policy versioning
    • -
    • Metrics storage
    • -
    -
  4. -
  5. -

    Compliance Framework (src/compliance/)

    -
      -
    • SOC2 checker
    • -
    • HIPAA validator
    • -
    • Report generation
    • -
    -
  6. -
  7. -

    Anomaly Detection (src/anomaly/)

    -
      -
    • Statistical analysis
    • -
    • Real-time monitoring
    • -
    • Alert management
    • -
    -
  8. -
  9. -

    Authentication (src/auth.rs)

    -
      -
    • JWT token management
    • -
    • Password hashing
    • -
    • Session handling
    • -
    -
  10. -
-

Configuration-Driven Design

-

The system follows PAP (Project Architecture Principles) with:

-
    -
  • No hardcoded values: All behavior controlled via configuration
  • -
  • Dynamic loading: Policies and rules loaded from configuration
  • -
  • Template-based: Policy generation through templates
  • -
  • Environment-aware: Different configs for dev/test/prod
  • -
-

Deployment

-

Docker

-
FROM rust:1.75 as builder
-WORKDIR /app
-COPY . .
-RUN cargo build --release
-
-FROM debian:bookworm-slim
-RUN apt-get update && apt-get install -y ca-certificates
-COPY --from=builder /app/target/release/control-center /usr/local/bin/
-EXPOSE 8080
-CMD ["control-center", "server"]
-
-

Kubernetes

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: control-center
-spec:
-  replicas: 3
-  template:
-    spec:
-      containers:
-      - name: control-center
-        image: control-center:latest
-        ports:
-        - containerPort: 8080
-        env:
-        - name: DATABASE_URL
-          value: "surreal://surrealdb:8000"
-
- - -

Provisioning Platform Installer

-

Interactive Ratatui-based installer for the Provisioning Platform with Nushell fallback for automation.

-
-

Source: provisioning/platform/installer/ -Status: COMPLETE - All 7 UI screens implemented (1,480 lines)

-
-

Features

-
    -
  • Rich Interactive TUI: Beautiful Ratatui interface with real-time feedback
  • -
  • Headless Mode: Automation-friendly with Nushell scripts
  • -
  • One-Click Deploy: Single command to deploy entire platform
  • -
  • Platform Agnostic: Supports Docker, Podman, Kubernetes, OrbStack
  • -
  • Live Progress: Real-time deployment progress and logs
  • -
  • Health Checks: Automatic service health verification
  • -
-

Installation

-
cd provisioning/platform/installer
-cargo build --release
-cargo install --path .
-
-

Usage

-

Interactive TUI (Default)

-
provisioning-installer
-
-

The TUI guides you through:

-
    -
  1. Platform detection (Docker, Podman, K8s, OrbStack)
  2. -
  3. Deployment mode selection (Solo, Multi-User, CI/CD, Enterprise)
  4. -
  5. Service selection (check/uncheck services)
  6. -
  7. Configuration (domain, ports, secrets)
  8. -
  9. Live deployment with progress tracking
  10. -
  11. Success screen with access URLs
  12. -
-

Headless Mode (Automation)

-
# Quick deploy with auto-detection
-provisioning-installer --headless --mode solo --yes
-
-# Fully specified
-provisioning-installer \
-  --headless \
-  --platform orbstack \
-  --mode solo \
-  --services orchestrator,control-center,coredns \
-  --domain localhost \
-  --yes
-
-# Use existing config file
-provisioning-installer --headless --config my-deployment.toml --yes
-
-

Configuration Generation

-
# Generate config without deploying
-provisioning-installer --config-only
-
-# Deploy later with generated config
-provisioning-installer --headless --config ~/.provisioning/installer-config.toml --yes
-
-

Deployment Platforms

-

Docker Compose

-
provisioning-installer --platform docker --mode solo
-
-

Requirements: Docker 20.10+, docker-compose 2.0+

-

OrbStack (macOS)

-
provisioning-installer --platform orbstack --mode solo
-
-

Requirements: OrbStack installed, 4 GB RAM, 2 CPU cores

-

Podman (Rootless)

-
provisioning-installer --platform podman --mode solo
-
-

Requirements: Podman 4.0+, systemd

-

Kubernetes

-
provisioning-installer --platform kubernetes --mode enterprise
-
-

Requirements: kubectl configured, Helm 3.0+

-

Deployment Modes

-

Solo Mode (Development)

-
    -
  • Services: 5 core services
  • -
  • Resources: 2 CPU cores, 4 GB RAM, 20 GB disk
  • -
  • Use case: Single developer, local testing
  • -
-

Multi-User Mode (Team)

-
    -
  • Services: 7 services
  • -
  • Resources: 4 CPU cores, 8 GB RAM, 50 GB disk
  • -
  • Use case: Team collaboration, shared infrastructure
  • -
-

CI/CD Mode (Automation)

-
    -
  • Services: 8-10 services
  • -
  • Resources: 8 CPU cores, 16 GB RAM, 100 GB disk
  • -
  • Use case: Automated pipelines, webhooks
  • -
-

Enterprise Mode (Production)

-
    -
  • Services: 15+ services
  • -
  • Resources: 16 CPU cores, 32 GB RAM, 500 GB disk
  • -
  • Use case: Production deployments, full observability
  • -
-

CLI Options

-
provisioning-installer [OPTIONS]
-
-OPTIONS:
-  --headless              Run in headless mode (no TUI)
-  --mode <MODE>           Deployment mode [solo|multi-user|cicd|enterprise]
-  --platform <PLATFORM>   Target platform [docker|podman|kubernetes|orbstack]
-  --services <SERVICES>   Comma-separated list of services
-  --domain <DOMAIN>       Domain/hostname (default: localhost)
-  --yes, -y               Skip confirmation prompts
-  --config-only           Generate config without deploying
-  --config <FILE>         Use existing config file
-  -h, --help              Print help
-  -V, --version           Print version
-
-

CI/CD Integration

-

GitLab CI

-
deploy_platform:
-  stage: deploy
-  script:
-    - provisioning-installer --headless --mode cicd --platform kubernetes --yes
-  only:
-    - main
-
-

GitHub Actions

-
- name: Deploy Provisioning Platform
-  run: |
-    provisioning-installer --headless --mode cicd --platform docker --yes
-
-

Nushell Scripts (Fallback)

-

If the Rust binary is unavailable:

-
cd provisioning/platform/installer/scripts
-nu deploy.nu --mode solo --platform orbstack --yes
-
- - -

Provisioning Platform Installer (v3.5.0)

-

🚀 Flexible Installation and Configuration System

-

A comprehensive installer system supporting interactive, headless, and unattended deployment modes with automatic configuration management via TOML -and MCP integration.

-

Installation Modes

-

1. Interactive TUI Mode

-

Beautiful terminal user interface with step-by-step guidance.

-
provisioning-installer
-
-

Features:

-
    -
  • 7 interactive screens with progress tracking
  • -
  • Real-time validation and error feedback
  • -
  • Visual feedback for each configuration step
  • -
  • Beautiful formatting with color and styling
  • -
  • Nushell fallback for unsupported terminals
  • -
-

Screens:

-
    -
  1. Welcome and prerequisites check
  2. -
  3. Deployment mode selection
  4. -
  5. Infrastructure provider selection
  6. -
  7. Configuration details
  8. -
  9. Resource allocation (CPU, memory)
  10. -
  11. Security settings
  12. -
  13. Review and confirm
  14. -
-

2. Headless Mode

-

CLI-only installation without interactive prompts, suitable for scripting.

-
provisioning-installer --headless --mode solo --yes
-
-

Features:

-
    -
  • Fully automated CLI options
  • -
  • All settings via command-line flags
  • -
  • No user interaction required
  • -
  • Perfect for CI/CD pipelines
  • -
  • Verbose output with progress tracking
  • -
-

Common Usage:

-
# Solo deployment
-provisioning-installer --headless --mode solo --provider upcloud --yes
-
-# Multi-user deployment
-provisioning-installer --headless --mode multiuser --cpu 4 --memory 8192 --yes
-
-# CI/CD mode
-provisioning-installer --headless --mode cicd --config ci-config.toml --yes
-
-

3. Unattended Mode

-

Zero-interaction mode using pre-defined configuration files, ideal for infrastructure automation.

-
provisioning-installer --unattended --config config.toml
-
-

Features:

-
    -
  • Load all settings from TOML file
  • -
  • Complete automation for GitOps workflows
  • -
  • No user interaction or prompts
  • -
  • Suitable for production deployments
  • -
  • Comprehensive logging and audit trails
  • -
-

Deployment Modes

-

Each mode configures resource allocation and features appropriately:

-
- - - - -
ModeCPUsMemoryUse Case
Solo24 GBSingle user development
MultiUser48 GBTeam development, testing
CICD816 GBCI/CD pipelines, testing
Enterprise1632 GBProduction deployment
-
-

Configuration System

-

TOML Configuration

-

Define installation parameters in TOML format for unattended mode:

-
[installation]
-mode = "solo"  # solo, multiuser, cicd, enterprise
-provider = "upcloud"  # upcloud, aws, etc.
-
-[resources]
-cpu = 2000  # millicores
-memory = 4096  # MB
-disk = 50  # GB
-
-[security]
-enable_mfa = true
-enable_audit = true
-tls_enabled = true
-
-[mcp]
-enabled = true
-endpoint = "http://localhost:9090"
-
-

Configuration Loading Priority

-

Settings are loaded in this order (highest priority wins):

-
    -
  1. CLI Arguments - Direct command-line flags
  2. -
  3. Environment Variables - PROVISIONING_* variables
  4. -
  5. Configuration File - TOML file specified via --config
  6. -
  7. MCP Integration - AI-powered intelligent defaults
  8. -
  9. Built-in Defaults - System defaults
  10. -
-

MCP Integration

-

Model Context Protocol integration provides intelligent configuration:

-

7 AI-Powered Settings Tools:

-
    -
  • Resource recommendation engine
  • -
  • Provider selection helper
  • -
  • Security policy suggester
  • -
  • Performance optimizer
  • -
  • Compliance checker
  • -
  • Network configuration advisor
  • -
  • Monitoring setup assistant
  • -
-
# Use MCP for intelligent config suggestion
-provisioning-installer --unattended --mcp-suggest > config.toml
-
-

Deployment Automation

-

Nushell Scripts

-

Complete deployment automation scripts for popular container runtimes:

-
# Docker deployment
-./provisioning/platform/installer/deploy/docker.nu --config config.toml
-
-# Podman deployment
-./provisioning/platform/installer/deploy/podman.nu --config config.toml
-
-# Kubernetes deployment
-./provisioning/platform/installer/deploy/kubernetes.nu --config config.toml
-
-# OrbStack deployment
-./provisioning/platform/installer/deploy/orbstack.nu --config config.toml
-
-

Self-Installation

-

Infrastructure components can query MCP and install themselves:

-
# Taskservs auto-install with dependencies
-taskserv install-self kubernetes
-taskserv install-self prometheus
-taskserv install-self cilium
-
-

Command Reference

-
# Show interactive installer
-provisioning-installer
-
-# Show help
-provisioning-installer --help
-
-# Show available modes
-provisioning-installer --list-modes
-
-# Show available providers
-provisioning-installer --list-providers
-
-# List available templates
-provisioning-installer --list-templates
-
-# Validate configuration file
-provisioning-installer --validate --config config.toml
-
-# Dry-run (check without installing)
-provisioning-installer --config config.toml --check
-
-# Full unattended installation
-provisioning-installer --unattended --config config.toml
-
-# Headless with specific settings
-provisioning-installer --headless --mode solo --provider upcloud --cpu 2 --memory 4096 --yes
-
-

Integration Examples

-

GitOps Workflow

-
# Define in Git
-cat > infrastructure/installer.toml << EOF
-[installation]
-mode = "multiuser"
-provider = "upcloud"
-
-[resources]
-cpu = 4
-memory = 8192
-EOF
-
-# Deploy via CI/CD
-provisioning-installer --unattended --config infrastructure/installer.toml
-
-

Terraform Integration

-
# Call installer as part of Terraform provisioning
-resource "null_resource" "provisioning_installer" {
-  provisioner "local-exec" {
-    command = "provisioning-installer --unattended --config ${var.config_file}"
-  }
-}
-
-

Ansible Integration

-
- name: Run provisioning installer
-  shell: provisioning-installer --unattended --config /tmp/config.toml
-  vars:
-    ansible_python_interpreter: /usr/bin/python3
-
-

Configuration Templates

-

Pre-built templates available in provisioning/config/installer-templates/:

-
    -
  • solo-dev.toml - Single developer setup
  • -
  • team-test.toml - Team testing environment
  • -
  • cicd-pipeline.toml - CI/CD integration
  • -
  • enterprise-prod.toml - Production deployment
  • -
  • kubernetes-ha.toml - High-availability Kubernetes
  • -
  • multicloud.toml - Multi-provider setup
  • -
-

Documentation

-
    -
  • User Guide: user/provisioning-installer-guide.md
  • -
  • Deployment Guide: operations/installer-deployment-guide.md
  • -
  • Configuration Guide: infrastructure/installer-configuration-guide.md
  • -
-

Help and Support

-
# Show installer help
-provisioning-installer --help
-
-# Show detailed documentation
-provisioning help installer
-
-# Validate your configuration
-provisioning-installer --validate --config your-config.toml
-
-# Get configuration suggestions from MCP
-provisioning-installer --config-suggest
-
-

Nushell Fallback

-

If Ratatui TUI is not available, the installer automatically falls back to:

-
    -
  • Interactive Nushell prompt system
  • -
  • Same functionality, text-based interface
  • -
  • Full feature parity with TUI version
  • -
-

Provisioning API Server

-

A comprehensive REST API server for remote provisioning operations, enabling thin clients and CI/CD pipeline integration.

-
-

Source: provisioning/platform/provisioning-server/

-
-

Features

-
    -
  • Comprehensive REST API: Complete provisioning operations via HTTP
  • -
  • JWT Authentication: Secure token-based authentication
  • -
  • RBAC System: Role-based access control (Admin, Operator, Developer, Viewer)
  • -
  • Async Operations: Long-running tasks with status tracking
  • -
  • Nushell Integration: Direct execution of provisioning CLI commands
  • -
  • Audit Logging: Complete operation tracking for compliance
  • -
  • Metrics: Prometheus-compatible metrics endpoint
  • -
  • CORS Support: Configurable cross-origin resource sharing
  • -
  • Health Checks: Built-in health and readiness endpoints
  • -
-

Architecture

-
┌─────────────────┐
-│  REST Client    │
-│  (curl, CI/CD)  │
-└────────┬────────┘
-         │ HTTPS/JWT
-         ▼
-┌─────────────────┐
-│  API Gateway    │
-│  - Routes       │
-│  - Auth         │
-│  - RBAC         │
-└────────┬────────┘
-         │
-         ▼
-┌─────────────────┐
-│ Async Task Mgr  │
-│ - Queue         │
-│  - Status       │
-└────────┬────────┘
-         │
-         ▼
-┌─────────────────┐
-│ Nushell Exec    │
-│ - CLI wrapper   │
-│ - Timeout       │
-└─────────────────┘
-
-

Installation

-
cd provisioning/platform/provisioning-server
-cargo build --release
-
-

Configuration

-

Create config.toml:

-
[server]
-host = "0.0.0.0"
-port = 8083
-cors_enabled = true
-
-[auth]
-jwt_secret = "your-secret-key-here"
-token_expiry_hours = 24
-refresh_token_expiry_hours = 168
-
-[provisioning]
-cli_path = "/usr/local/bin/provisioning"
-timeout_seconds = 300
-max_concurrent_operations = 10
-
-[logging]
-level = "info"
-json_format = false
-
-

Usage

-

Starting the Server

-
# Using config file
-provisioning-server --config config.toml
-
-# Custom settings
-provisioning-server \
-  --host 0.0.0.0 \
-  --port 8083 \
-  --jwt-secret "my-secret" \
-  --cli-path "/usr/local/bin/provisioning" \
-  --log-level debug
-
-

Authentication

-

Login

-
curl -X POST http://localhost:8083/v1/auth/login \
-  -H "Content-Type: application/json" \
-  -d '{
-    "username": "admin",
-    "password": "admin123"
-  }'
-
-

Response:

-
{
-  "token": "eyJhbGc...",
-  "refresh_token": "eyJhbGc...",
-  "expires_in": 86400
-}
-
-

Using Token

-
export TOKEN="eyJhbGc..."
-
-curl -X GET http://localhost:8083/v1/servers \
-  -H "Authorization: Bearer $TOKEN"
-
-

API Endpoints

-

Authentication

-
    -
  • POST /v1/auth/login - User login
  • -
  • POST /v1/auth/refresh - Refresh access token
  • -
-

Servers

-
    -
  • GET /v1/servers - List all servers
  • -
  • POST /v1/servers/create - Create new server
  • -
  • DELETE /v1/servers/{id} - Delete server
  • -
  • GET /v1/servers/{id}/status - Get server status
  • -
-

Taskservs

-
    -
  • GET /v1/taskservs - List all taskservs
  • -
  • POST /v1/taskservs/create - Create taskserv
  • -
  • DELETE /v1/taskservs/{id} - Delete taskserv
  • -
  • GET /v1/taskservs/{id}/status - Get taskserv status
  • -
-

Workflows

-
    -
  • POST /v1/workflows/submit - Submit workflow
  • -
  • GET /v1/workflows/{id} - Get workflow details
  • -
  • GET /v1/workflows/{id}/status - Get workflow status
  • -
  • POST /v1/workflows/{id}/cancel - Cancel workflow
  • -
-

Operations

-
    -
  • GET /v1/operations - List all operations
  • -
  • GET /v1/operations/{id} - Get operation status
  • -
  • POST /v1/operations/{id}/cancel - Cancel operation
  • -
-

System

-
    -
  • GET /health - Health check (no auth required)
  • -
  • GET /v1/version - Version information
  • -
  • GET /v1/metrics - Prometheus metrics
  • -
-

RBAC Roles

-

Admin Role

-

Full system access including all operations, workspace management, and system administration.

-

Operator Role

-

Infrastructure operations including create/delete servers, taskservs, clusters, and workflow management.

-

Developer Role

-

Read access plus SSH to servers, view workflows and operations.

-

Viewer Role

-

Read-only access to all resources and status information.

-

Security Best Practices

-
    -
  1. Change Default Credentials: Update all default usernames/passwords
  2. -
  3. Use Strong JWT Secret: Generate secure random string (32+ characters)
  4. -
  5. Enable TLS: Use HTTPS in production
  6. -
  7. Restrict CORS: Configure specific allowed origins
  8. -
  9. Enable mTLS: For client certificate authentication
  10. -
  11. Regular Token Rotation: Implement token refresh strategy
  12. -
  13. Audit Logging: Enable audit logs for compliance
  14. -
-

CI/CD Integration

-

GitHub Actions

-
- name: Deploy Infrastructure
-  run: |
-    TOKEN=$(curl -X POST https://api.example.com/v1/auth/login \
-      -H "Content-Type: application/json" \
-      -d '{"username":"${{ secrets.API_USER }}","password":"${{ secrets.API_PASS }}"}' \
-      | jq -r '.token')
-
-    curl -X POST https://api.example.com/v1/servers/create \
-      -H "Authorization: Bearer $TOKEN" \
-      -H "Content-Type: application/json" \
-      -d '{"workspace": "production", "provider": "upcloud", "plan": "2xCPU-4 GB"}'
-
- - -

Infrastructure Management Guide

-

This comprehensive guide covers creating, managing, and maintaining infrastructure using Infrastructure Automation.

-

What You’ll Learn

-
    -
  • Infrastructure lifecycle management
  • -
  • Server provisioning and management
  • -
  • Task service installation and configuration
  • -
  • Cluster deployment and orchestration
  • -
  • Scaling and optimization strategies
  • -
  • Monitoring and maintenance procedures
  • -
  • Cost management and optimization
  • -
-

Infrastructure Concepts

-

Infrastructure Components

-
- - - - - -
ComponentDescriptionExamples
ServersVirtual machines or containersWeb servers, databases, workers
Task ServicesSoftware installed on serversKubernetes, Docker, databases
ClustersGroups of related servicesWeb clusters, database clusters
NetworksConnectivity between resourcesVPCs, subnets, load balancers
StoragePersistent data storageBlock storage, object storage
-
-

Infrastructure Lifecycle

-
Plan → Create → Deploy → Monitor → Scale → Update → Retire
-
-

Each phase has specific commands and considerations.

-

Server Management

-

Understanding Server Configuration

-

Servers are defined in Nickel configuration files:

-
# Example server configuration
-import models.server
-
-servers: [
-    server.Server {
-        name = "web-01"
-        provider = "aws"          # aws, upcloud, local
-        plan = "t3.medium"        # Instance type/plan
-        os = "ubuntu-22.04"       # Operating system
-        zone = "us-west-2a"       # Availability zone
-
-        # Network configuration
-        vpc = "main"
-        subnet = "web"
-        security_groups = ["web", "ssh"]
-
-        # Storage configuration
-        storage = {
-            root_size = "50 GB"
-            additional = [
-                {name = "data", size = "100 GB", type = "gp3"}
-            ]
-        }
-
-        # Task services to install
-        taskservs = [
-            "containerd",
-            "kubernetes",
-            "monitoring"
-        ]
-
-        # Tags for organization
-        tags = {
-            environment = "production"
-            team = "platform"
-            cost_center = "engineering"
-        }
-    }
-]
-
-

Server Lifecycle Commands

-

Creating Servers

-
# Plan server creation (dry run)
-provisioning server create --infra my-infra --check
-
-# Create servers
-provisioning server create --infra my-infra
-
-# Create with specific parameters
-provisioning server create --infra my-infra --wait --yes
-
-# Create single server type
-provisioning server create web --infra my-infra
-
-

Managing Existing Servers

-
# List all servers
-provisioning server list --infra my-infra
-
-# Show detailed server information
-provisioning show servers --infra my-infra
-
-# Show specific server
-provisioning show servers web-01 --infra my-infra
-
-# Get server status
-provisioning server status web-01 --infra my-infra
-
-

Server Operations

-
# Start/stop servers
-provisioning server start web-01 --infra my-infra
-provisioning server stop web-01 --infra my-infra
-
-# Restart servers
-provisioning server restart web-01 --infra my-infra
-
-# Resize server
-provisioning server resize web-01 --plan t3.large --infra my-infra
-
-# Update server configuration
-provisioning server update web-01 --infra my-infra
-
-

SSH Access

-
# SSH to server
-provisioning server ssh web-01 --infra my-infra
-
-# SSH with specific user
-provisioning server ssh web-01 --user admin --infra my-infra
-
-# Execute command on server
-provisioning server exec web-01 "systemctl status kubernetes" --infra my-infra
-
-# Copy files to/from server
-provisioning server copy local-file.txt web-01:/tmp/ --infra my-infra
-provisioning server copy web-01:/var/log/app.log ./logs/ --infra my-infra
-
-

Server Deletion

-
# Plan server deletion (dry run)
-provisioning server delete --infra my-infra --check
-
-# Delete specific server
-provisioning server delete web-01 --infra my-infra
-
-# Delete with confirmation
-provisioning server delete web-01 --infra my-infra --yes
-
-# Delete but keep storage
-provisioning server delete web-01 --infra my-infra --keepstorage
-
-

Task Service Management

-

Understanding Task Services

-

Task services are software components installed on servers:

-
    -
  • Container Runtimes: containerd, cri-o, docker
  • -
  • Orchestration: kubernetes, nomad
  • -
  • Networking: cilium, calico, haproxy
  • -
  • Storage: rook-ceph, longhorn, nfs
  • -
  • Databases: postgresql, mysql, mongodb
  • -
  • Monitoring: prometheus, grafana, alertmanager
  • -
-

Task Service Configuration

-
# Task service configuration example
-taskservs: {
-    kubernetes: {
-        version = "1.28"
-        network_plugin = "cilium"
-        ingress_controller = "nginx"
-        storage_class = "gp3"
-
-        # Cluster configuration
-        cluster = {
-            name = "production"
-            pod_cidr = "10.244.0.0/16"
-            service_cidr = "10.96.0.0/12"
-        }
-
-        # Node configuration
-        nodes = {
-            control_plane = ["master-01", "master-02", "master-03"]
-            workers = ["worker-01", "worker-02", "worker-03"]
-        }
-    }
-
-    postgresql: {
-        version = "15"
-        port = 5432
-        max_connections = 200
-        shared_buffers = "256 MB"
-
-        # High availability
-        replication = {
-            enabled = true
-            replicas = 2
-            sync_mode = "synchronous"
-        }
-
-        # Backup configuration
-        backup = {
-            enabled = true
-            schedule = "0 2 * * *"  # Daily at 2 AM
-            retention = "30d"
-        }
-    }
-}
-
-

Task Service Commands

-

Installing Services

-
# Install single service
-provisioning taskserv create kubernetes --infra my-infra
-
-# Install multiple services
-provisioning taskserv create containerd kubernetes cilium --infra my-infra
-
-# Install with specific version
-provisioning taskserv create kubernetes --version 1.28 --infra my-infra
-
-# Install on specific servers
-provisioning taskserv create postgresql --servers db-01,db-02 --infra my-infra
-
-

Managing Services

-
# List available services
 provisioning taskserv list
 
-# List installed services
-provisioning taskserv list --infra my-infra --installed
-
-# Show service details
-provisioning taskserv show kubernetes --infra my-infra
-
-# Check service status
-provisioning taskserv status kubernetes --infra my-infra
-
-# Check service health
-provisioning taskserv health kubernetes --infra my-infra
+# Export state for backup
+provisioning workspace export > k8s-cluster-state.json
 
-

Service Operations

-
# Start/stop services
-provisioning taskserv start kubernetes --infra my-infra
-provisioning taskserv stop kubernetes --infra my-infra
+

Configuration Backup

+
# Backup workspace configuration
+tar -czf k8s-cluster-backup.tar.gz infra/ config/ runtime/
 
-# Restart services
-provisioning taskserv restart kubernetes --infra my-infra
-
-# Update services
-provisioning taskserv update kubernetes --infra my-infra
-
-# Configure services
-provisioning taskserv configure kubernetes --config cluster.yaml --infra my-infra
+# Store securely (encrypted)
+sops -e k8s-cluster-backup.tar.gz > k8s-cluster-backup.tar.gz.enc
 
-

Service Removal

-
# Remove service
-provisioning taskserv delete kubernetes --infra my-infra
-
-# Remove with data cleanup
-provisioning taskserv delete postgresql --cleanup-data --infra my-infra
-
-# Remove from specific servers
-provisioning taskserv delete kubernetes --servers worker-03 --infra my-infra
-
-

Version Management

-
# Check for updates
-provisioning taskserv check-updates --infra my-infra
-
-# Check specific service updates
-provisioning taskserv check-updates kubernetes --infra my-infra
-
-# Show available versions
-provisioning taskserv versions kubernetes
-
-# Upgrade to latest version
-provisioning taskserv upgrade kubernetes --infra my-infra
-
-# Upgrade to specific version
-provisioning taskserv upgrade kubernetes --version 1.29 --infra my-infra
-
-

Cluster Management

-

Understanding Clusters

-

Clusters are collections of services that work together to provide functionality:

-
# Cluster configuration example
-clusters: {
-    web_cluster: {
-        name = "web-application"
-        description = "Web application cluster"
-
-        # Services in the cluster
-        services = [
-            {
-                name = "nginx"
-                replicas = 3
-                image = "nginx:1.24"
-                ports = [80, 443]
-            }
-            {
-                name = "app"
-                replicas = 5
-                image = "myapp:latest"
-                ports = [8080]
-            }
-        ]
-
-        # Load balancer configuration
-        load_balancer = {
-            type = "application"
-            health_check = "/health"
-            ssl_cert = "wildcard.example.com"
-        }
-
-        # Auto-scaling
-        auto_scaling = {
-            min_replicas = 2
-            max_replicas = 10
-            target_cpu = 70
-            target_memory = 80
-        }
-    }
-}
-
-

Cluster Commands

-

Creating Clusters

-
# Create cluster
-provisioning cluster create web-cluster --infra my-infra
-
-# Create with specific configuration
-provisioning cluster create web-cluster --config cluster.yaml --infra my-infra
-
-# Create and deploy
-provisioning cluster create web-cluster --deploy --infra my-infra
-
-

Managing Clusters

-
# List available clusters
-provisioning cluster list
-
-# List deployed clusters
-provisioning cluster list --infra my-infra --deployed
-
-# Show cluster details
-provisioning cluster show web-cluster --infra my-infra
-
-# Get cluster status
-provisioning cluster status web-cluster --infra my-infra
-
-

Cluster Operations

-
# Deploy cluster
-provisioning cluster deploy web-cluster --infra my-infra
-
-# Scale cluster
-provisioning cluster scale web-cluster --replicas 10 --infra my-infra
-
-# Update cluster
-provisioning cluster update web-cluster --infra my-infra
-
-# Rolling update
-provisioning cluster update web-cluster --rolling --infra my-infra
-
-

Cluster Deletion

-
# Delete cluster
-provisioning cluster delete web-cluster --infra my-infra
-
-# Delete with data cleanup
-provisioning cluster delete web-cluster --cleanup --infra my-infra
-
-

Network Management

-

Network Configuration

-
# Network configuration
-network: {
-    vpc = {
-        cidr = "10.0.0.0/16"
-        enable_dns = true
-        enable_dhcp = true
-    }
-
-    subnets = [
-        {
-            name = "web"
-            cidr = "10.0.1.0/24"
-            zone = "us-west-2a"
-            public = true
-        }
-        {
-            name = "app"
-            cidr = "10.0.2.0/24"
-            zone = "us-west-2b"
-            public = false
-        }
-        {
-            name = "data"
-            cidr = "10.0.3.0/24"
-            zone = "us-west-2c"
-            public = false
-        }
-    ]
-
-    security_groups = [
-        {
-            name = "web"
-            rules = [
-                {protocol = "tcp", port = 80, source = "0.0.0.0/0"}
-                {protocol = "tcp", port = 443, source = "0.0.0.0/0"}
-            ]
-        }
-        {
-            name = "app"
-            rules = [
-                {protocol = "tcp", port = 8080, source = "10.0.1.0/24"}
-            ]
-        }
-    ]
-
-    load_balancers = [
-        {
-            name = "web-lb"
-            type = "application"
-            scheme = "internet-facing"
-            subnets = ["web"]
-            targets = ["web-01", "web-02"]
-        }
-    ]
-}
-
-

Network Commands

-
# Show network configuration
-provisioning network show --infra my-infra
-
-# Create network resources
-provisioning network create --infra my-infra
-
-# Update network configuration
-provisioning network update --infra my-infra
-
-# Test network connectivity
-provisioning network test --infra my-infra
-
-

Storage Management

-

Storage Configuration

-
# Storage configuration
-storage: {
-    # Block storage
-    volumes = [
-        {
-            name = "app-data"
-            size = "100 GB"
-            type = "gp3"
-            encrypted = true
-        }
-    ]
-
-    # Object storage
-    buckets = [
-        {
-            name = "app-assets"
-            region = "us-west-2"
-            versioning = true
-            encryption = "AES256"
-        }
-    ]
-
-    # Backup configuration
-    backup = {
-        schedule = "0 1 * * *"  # Daily at 1 AM
-        retention = {
-            daily = 7
-            weekly = 4
-            monthly = 12
-        }
-    }
-}
-
-

Storage Commands

-
# Create storage resources
-provisioning storage create --infra my-infra
-
-# List storage
-provisioning storage list --infra my-infra
-
-# Backup data
-provisioning storage backup --infra my-infra
-
-# Restore from backup
-provisioning storage restore --backup latest --infra my-infra
-
-

Monitoring and Observability

-

Monitoring Setup

-
# Install monitoring stack
-provisioning taskserv create prometheus --infra my-infra
-provisioning taskserv create grafana --infra my-infra
-provisioning taskserv create alertmanager --infra my-infra
-
-# Configure monitoring
-provisioning taskserv configure prometheus --config monitoring.yaml --infra my-infra
-
-

Health Checks

-
# Check overall infrastructure health
-provisioning health check --infra my-infra
-
-# Check specific components
-provisioning health check servers --infra my-infra
-provisioning health check taskservs --infra my-infra
-provisioning health check clusters --infra my-infra
-
-# Continuous monitoring
-provisioning health monitor --infra my-infra --watch
-
-

Metrics and Alerting

-
# Get infrastructure metrics
-provisioning metrics get --infra my-infra
-
-# Set up alerts
-provisioning alerts create --config alerts.yaml --infra my-infra
-
-# List active alerts
-provisioning alerts list --infra my-infra
-
-

Cost Management

-

Cost Monitoring

-
# Show current costs
-provisioning cost show --infra my-infra
-
-# Cost breakdown by component
-provisioning cost breakdown --infra my-infra
-
-# Cost trends
-provisioning cost trends --period 30d --infra my-infra
-
-# Set cost alerts
-provisioning cost alert --threshold 1000 --infra my-infra
-
-

Cost Optimization

-
# Analyze cost optimization opportunities
-provisioning cost optimize --infra my-infra
-
-# Show unused resources
-provisioning cost unused --infra my-infra
-
-# Right-size recommendations
-provisioning cost recommendations --infra my-infra
-
-

Scaling Strategies

-

Manual Scaling

-
# Scale servers
-provisioning server scale --count 5 --infra my-infra
-
-# Scale specific service
-provisioning taskserv scale kubernetes --nodes 3 --infra my-infra
-
-# Scale cluster
-provisioning cluster scale web-cluster --replicas 10 --infra my-infra
-
-

Auto-scaling Configuration

-
# Auto-scaling configuration
-auto_scaling: {
-    servers = {
-        min_count = 2
-        max_count = 10
-
-        # Scaling metrics
-        cpu_threshold = 70
-        memory_threshold = 80
-
-        # Scaling behavior
-        scale_up_cooldown = "5m"
-        scale_down_cooldown = "10m"
-    }
-
-    clusters = {
-        web_cluster = {
-            min_replicas = 3
-            max_replicas = 20
-            metrics = [
-                {type = "cpu", target = 70}
-                {type = "memory", target = 80}
-                {type = "requests", target = 1000}
-            ]
-        }
-    }
-}
-
-

Disaster Recovery

-

Backup Strategies

-
# Full infrastructure backup
-provisioning backup create --type full --infra my-infra
-
-# Incremental backup
-provisioning backup create --type incremental --infra my-infra
-
-# Schedule automated backups
-provisioning backup schedule --daily --time "02:00" --infra my-infra
-
-

Recovery Procedures

-
# List available backups
-provisioning backup list --infra my-infra
-
-# Restore infrastructure
-provisioning restore --backup latest --infra my-infra
-
-# Partial restore
-provisioning restore --backup latest --components servers --infra my-infra
-
-# Test restore (dry run)
-provisioning restore --backup latest --test --infra my-infra
-
-

Advanced Infrastructure Patterns

-

Multi-Region Deployment

-
# Multi-region configuration
-regions: {
-    primary = {
-        name = "us-west-2"
-        servers = ["web-01", "web-02", "db-01"]
-        availability_zones = ["us-west-2a", "us-west-2b"]
-    }
-
-    secondary = {
-        name = "us-east-1"
-        servers = ["web-03", "web-04", "db-02"]
-        availability_zones = ["us-east-1a", "us-east-1b"]
-    }
-
-    # Cross-region replication
-    replication = {
-        database = {
-            primary = "us-west-2"
-            replicas = ["us-east-1"]
-            sync_mode = "async"
-        }
-
-        storage = {
-            sync_schedule = "*/15 * * * *"  # Every 15 minutes
-        }
-    }
-}
-
-

Blue-Green Deployment

-
# Create green environment
-provisioning generate infra --from production --name production-green
-
-# Deploy to green
-provisioning server create --infra production-green
-provisioning taskserv create --infra production-green
-provisioning cluster deploy --infra production-green
-
-# Switch traffic to green
-provisioning network switch --from production --to production-green
-
-# Decommission blue
-provisioning server delete --infra production --yes
-
-

Canary Deployment

-
# Create canary environment
-provisioning cluster create web-cluster-canary --replicas 1 --infra my-infra
-
-# Route small percentage of traffic
-provisioning network route --target web-cluster-canary --weight 10 --infra my-infra
-
-# Monitor canary metrics
-provisioning metrics monitor web-cluster-canary --infra my-infra
-
-# Promote or rollback
-provisioning cluster promote web-cluster-canary --infra my-infra
-# or
-provisioning cluster rollback web-cluster-canary --infra my-infra
-
-

Troubleshooting Infrastructure

-

Common Issues

-

Server Creation Failures

-
# Check provider status
-provisioning provider status aws
-
-# Validate server configuration
-provisioning server validate web-01 --infra my-infra
-
-# Check quota limits
-provisioning provider quota --infra my-infra
-
-# Debug server creation
-provisioning --debug server create web-01 --infra my-infra
-
-

Service Installation Failures

-
# Check service prerequisites
-provisioning taskserv check kubernetes --infra my-infra
-
-# Validate service configuration
-provisioning taskserv validate kubernetes --infra my-infra
-
-# Check service logs
-provisioning taskserv logs kubernetes --infra my-infra
-
-# Debug service installation
-provisioning --debug taskserv create kubernetes --infra my-infra
-
-

Network Connectivity Issues

-
# Test network connectivity
-provisioning network test --infra my-infra
-
-# Check security groups
-provisioning network security-groups --infra my-infra
-
-# Trace network path
-provisioning network trace --from web-01 --to db-01 --infra my-infra
-
-

Performance Optimization

-
# Analyze performance bottlenecks
-provisioning performance analyze --infra my-infra
-
-# Get performance recommendations
-provisioning performance recommendations --infra my-infra
-
-# Monitor resource utilization
-provisioning performance monitor --infra my-infra --duration 1h
-
-

Testing Infrastructure

-

The provisioning system includes a comprehensive Test Environment Service for automated testing of infrastructure components before deployment.

-

Why Test Infrastructure

-

Testing infrastructure before production deployment helps:

+

What You’ve Learned

+

This deployment demonstrated:

    -
  • Validate taskserv configurations before installing on production servers
  • -
  • Test integration between multiple taskservs
  • -
  • Verify cluster topologies (Kubernetes, etcd, etc.) before deployment
  • -
  • Catch configuration errors early in the development cycle
  • -
  • Ensure compatibility between components
  • +
  • Workspace creation and configuration
  • +
  • Nickel schema structure for infrastructure-as-code
  • +
  • Type-safe configuration validation
  • +
  • Automatic dependency resolution
  • +
  • Multi-server provisioning
  • +
  • Task service installation with health checks
  • +
  • Kubernetes cluster deployment
  • +
  • Storage and networking configuration
  • +
  • Verification and testing workflows
  • +
  • State management and backup
-

Test Environment Types

-

1. Single Taskserv Testing

-

Test individual taskservs in isolated containers:

-
# Quick test (create, run, cleanup automatically)
-provisioning test quick kubernetes
-
-# Single taskserv with custom resources
-provisioning test env single postgres \
-  --cpu 2000 \
-  --memory 4096 \
-  --auto-start \
-  --auto-cleanup
-
-# Test with specific infrastructure context
-provisioning test env single redis --infra my-infra
-
-

2. Server Simulation

-

Test complete server configurations with multiple taskservs:

-
# Simulate web server with multiple taskservs
-provisioning test env server web-01 [containerd kubernetes cilium] \
-  --auto-start
-
-# Simulate database server
-provisioning test env server db-01 [postgres redis] \
-  --infra prod-stack \
-  --auto-start
-
-

3. Multi-Node Cluster Testing

-

Test complex cluster topologies before production deployment:

-
# Test 3-node Kubernetes cluster
-provisioning test topology load kubernetes_3node | \
-  test env cluster kubernetes --auto-start
-
-# Test etcd cluster
-provisioning test topology load etcd_cluster | \
-  test env cluster etcd --auto-start
-
-# Test single-node Kubernetes
-provisioning test topology load kubernetes_single | \
-  test env cluster kubernetes --auto-start
-
-

Managing Test Environments

-
# List all test environments
-provisioning test env list
-
-# Check environment status
-provisioning test env status <env-id>
-
-# View environment logs
-provisioning test env logs <env-id>
-
-# Cleanup environment when done
-provisioning test env cleanup <env-id>
-
-

Available Topology Templates

-

Pre-configured multi-node cluster templates:

-
- - - - - -
TemplateDescriptionUse Case
kubernetes_3node3-node HA K8s clusterProduction-like K8s testing
kubernetes_singleAll-in-one K8s nodeDevelopment K8s testing
etcd_cluster3-member etcd clusterDistributed consensus testing
containerd_testStandalone containerdContainer runtime testing
postgres_redisDatabase stackDatabase integration testing
-
-

Test Environment Workflow

-

Typical testing workflow:

-
# 1. Test new taskserv before deploying
-provisioning test quick kubernetes
-
-# 2. If successful, test server configuration
-provisioning test env server k8s-node [containerd kubernetes cilium] \
-  --auto-start
-
-# 3. Test complete cluster topology
-provisioning test topology load kubernetes_3node | \
-  test env cluster kubernetes --auto-start
-
-# 4. Deploy to production
-provisioning server create --infra production
-provisioning taskserv create kubernetes --infra production
-
-

CI/CD Integration

-

Integrate infrastructure testing into CI/CD pipelines:

-
# GitLab CI example
-test-infrastructure:
-  stage: test
-  script:
-    # Start orchestrator
-    - ./scripts/start-orchestrator.nu --background
-
-    # Test critical taskservs
-    - provisioning test quick kubernetes
-    - provisioning test quick postgres
-    - provisioning test quick redis
-
-    # Test cluster topology
-    - provisioning test topology load kubernetes_3node |
-        test env cluster kubernetes --auto-start
-
-  artifacts:
-    when: on_failure
-    paths:
-      - test-logs/
-
-

Prerequisites

-

Test environments require:

-
    -
  1. -

    Docker Running: Test environments use Docker containers

    -
    docker ps  # Should work without errors
    -
    -
  2. -
  3. -

    Orchestrator Running: The orchestrator manages test containers

    -
    cd provisioning/platform/orchestrator
    -./scripts/start-orchestrator.nu --background
    -
    -
  4. -
-

Advanced Testing

-

Custom Topology Testing

-

Create custom topology configurations:

-
# custom-topology.toml
-[my_cluster]
-name = "Custom Test Cluster"
-cluster_type = "custom"
-
-[[my_cluster.nodes]]
-name = "node-01"
-role = "primary"
-taskservs = ["postgres", "redis"]
-[my_cluster.nodes.resources]
-cpu_millicores = 2000
-memory_mb = 4096
-
-[[my_cluster.nodes]]
-name = "node-02"
-role = "replica"
-taskservs = ["postgres"]
-[my_cluster.nodes.resources]
-cpu_millicores = 1000
-memory_mb = 2048
-
-

Load and test custom topology:

-
provisioning test env cluster custom-app custom-topology.toml --auto-start
-
-

Integration Testing

-

Test taskserv dependencies:

-
# Test Kubernetes dependencies in order
-provisioning test quick containerd
-provisioning test quick etcd
-provisioning test quick kubernetes
-provisioning test quick cilium
-
-# Test complete stack
-provisioning test env server k8s-stack \
-  [containerd etcd kubernetes cilium] \
-  --auto-start
-
-

Documentation

-

For complete test environment documentation:

+

Next Steps

    -
  • Test Environment Guide: docs/user/test-environment-guide.md
  • -
  • Detailed Usage: docs/user/test-environment-usage.md
  • -
  • Orchestrator README: provisioning/platform/orchestrator/README.md
  • +
  • Verification - Comprehensive platform health checks
  • +
  • Workspace Management - Advanced workspace patterns
  • +
  • Batch Workflows - Multi-cloud orchestration
  • +
  • Security System - Secure your infrastructure
-

Best Practices

-

1. Infrastructure Design

-
    -
  • Principle of Least Privilege: Grant minimal necessary access
  • -
  • Defense in Depth: Multiple layers of security
  • -
  • High Availability: Design for failure resilience
  • -
  • Scalability: Plan for growth from the start
  • -
-

2. Operational Excellence

-
# Always validate before applying changes
-provisioning validate config --infra my-infra
-
-# Use check mode for dry runs
-provisioning server create --check --infra my-infra
-
-# Monitor continuously
-provisioning health monitor --infra my-infra
-
-# Regular backups
-provisioning backup schedule --daily --infra my-infra
-
-

3. Security

-
# Regular security updates
-provisioning taskserv update --security-only --infra my-infra
-
-# Encrypt sensitive data
-provisioning sops settings.ncl --infra my-infra
-
-# Audit access
-provisioning audit logs --infra my-infra
-
-

4. Cost Optimization

-
# Regular cost reviews
-provisioning cost analyze --infra my-infra
-
-# Right-size resources
-provisioning cost optimize --apply --infra my-infra
-
-# Use reserved instances for predictable workloads
-provisioning server reserve --infra my-infra
-
-

Next Steps

-

Now that you understand infrastructure management:

-
    -
  1. Learn about extensions: Extension Development Guide
  2. -
  3. Master configuration: Configuration Guide
  4. -
  5. Explore advanced examples: Examples and Tutorials
  6. -
  7. Set up monitoring and alerting
  8. -
  9. Implement automated scaling
  10. -
  11. Plan disaster recovery procedures
  12. -
-

You now have the knowledge to build and manage robust, scalable cloud infrastructure!

-

Infrastructure-from-Code (IaC) Guide

-

Overview

-

The Infrastructure-from-Code system automatically detects technologies in your project and infers infrastructure requirements based on -organization-specific rules. It consists of three main commands:

-
    -
  • detect: Scan a project and identify technologies
  • -
  • complete: Analyze gaps and recommend infrastructure components
  • -
  • ifc: Full-pipeline orchestration (workflow)
  • -
-

Quick Start

-

1. Detect Technologies in Your Project

-

Scan a project directory for detected technologies:

-
provisioning detect /path/to/project --out json
-
-

Output Example:

-
{
-  "detections": [
-    {"technology": "nodejs", "confidence": 0.95},
-    {"technology": "postgres", "confidence": 0.92}
-  ],
-  "overall_confidence": 0.93
-}
-
-

2. Analyze Infrastructure Gaps

-

Get a completeness assessment and recommendations:

-
provisioning complete /path/to/project --out json
-
-

Output Example:

-
{
-  "completeness": 1.0,
-  "changes_needed": 2,
-  "is_safe": true,
-  "change_summary": "+ Adding: postgres-backup, pg-monitoring"
-}
-
-

3. Run Full Workflow

-

Orchestrate detection → completion → assessment pipeline:

-
provisioning ifc /path/to/project --org default
-
-

Output:

-
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-🔄 Infrastructure-from-Code Workflow
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-
-STEP 1: Technology Detection
-────────────────────────────
-✓ Detected 2 technologies
-
-STEP 2: Infrastructure Completion
-─────────────────────────────────
-✓ Completeness: 1%
-
-✅ Workflow Complete
-
-

Command Reference

-

detect

-

Scan and detect technologies in a project.

-

Usage:

-
provisioning detect [PATH] [OPTIONS]
-
-

Arguments:

-
    -
  • PATH: Project directory to analyze (default: current directory)
  • -
-

Options:

-
    -
  • -o, --out TEXT: Output format - text, json, yaml (default: text)
  • -
  • -C, --high-confidence-only: Only show detections with confidence > 0.8
  • -
  • --pretty: Pretty-print JSON/YAML output
  • -
  • -x, --debug: Enable debug output
  • -
-

Examples:

-
# Detect with default text output
-provisioning detect /path/to/project
-
-# Get JSON output for parsing
-provisioning detect /path/to/project --out json | jq '.detections'
-
-# Show only high-confidence detections
-provisioning detect /path/to/project --high-confidence-only
-
-# Pretty-printed YAML output
-provisioning detect /path/to/project --out yaml --pretty
-
-

complete

-

Analyze infrastructure completeness and recommend changes.

-

Usage:

-
provisioning complete [PATH] [OPTIONS]
-
-

Arguments:

-
    -
  • PATH: Project directory to analyze (default: current directory)
  • -
-

Options:

-
    -
  • -o, --out TEXT: Output format - text, json, yaml (default: text)
  • -
  • -c, --check: Check mode (report only, no changes)
  • -
  • --pretty: Pretty-print JSON/YAML output
  • -
  • -x, --debug: Enable debug output
  • -
-

Examples:

-
# Analyze completeness
-provisioning complete /path/to/project
-
-# Get detailed JSON report
-provisioning complete /path/to/project --out json
-
-# Check mode (dry-run, no changes)
-provisioning complete /path/to/project --check
-
-

ifc (workflow)

-

Run the full Infrastructure-from-Code pipeline.

-

Usage:

-
provisioning ifc [PATH] [OPTIONS]
-
-

Arguments:

-
    -
  • PATH: Project directory to process (default: current directory)
  • -
-

Options:

-
    -
  • --org TEXT: Organization name for rule loading (default: default)
  • -
  • -o, --out TEXT: Output format - text, json (default: text)
  • -
  • --apply: Apply recommendations (future feature)
  • -
  • -v, --verbose: Verbose output with timing
  • -
  • --pretty: Pretty-print output
  • -
  • -x, --debug: Enable debug output
  • -
-

Examples:

-
# Run workflow with default rules
-provisioning ifc /path/to/project
-
-# Run with organization-specific rules
-provisioning ifc /path/to/project --org acme-corp
-
-# Verbose output with timing
-provisioning ifc /path/to/project --verbose
-
-# JSON output for automation
-provisioning ifc /path/to/project --out json
-
-

Organization-Specific Inference Rules

-

Customize how infrastructure is inferred for your organization.

-

Understanding Inference Rules

-

An inference rule tells the system: “If we detect technology X, we should recommend taskservice Y.”

-

Rule Structure:

-
version: "1.0.0"
-organization: "your-org"
-rules:
-  - name: "rule-name"
-    technology: ["detected-tech"]
-    infers: "required-taskserv"
-    confidence: 0.85
-    reason: "Why this taskserv is needed"
-    required: true
-
-

Creating Custom Rules

-

Create an organization-specific rules file:

-
# ACME Corporation rules
-cat > $PROVISIONING/config/inference-rules/acme-corp.yaml << 'EOF'
-version: "1.0.0"
-organization: "acme-corp"
-description: "ACME Corporation infrastructure standards"
-
-rules:
-  - name: "nodejs-to-redis"
-    technology: ["nodejs", "express"]
-    infers: "redis"
-    confidence: 0.85
-    reason: "Node.js applications need caching"
-    required: false
-
-  - name: "postgres-to-backup"
-    technology: ["postgres"]
-    infers: "postgres-backup"
-    confidence: 0.95
-    reason: "All databases require backup strategy"
-    required: true
-
-  - name: "all-services-monitoring"
-    technology: ["nodejs", "python", "postgres"]
-    infers: "monitoring"
-    confidence: 0.90
-    reason: "ACME requires monitoring on production services"
-    required: true
-EOF
-
-

Then use them:

-
provisioning ifc /path/to/project --org acme-corp
-
-

Default Rules

-

If no organization rules are found, the system uses sensible defaults:

-
    -
  • Node.js + Express → Redis (caching)
  • -
  • Node.js → Nginx (reverse proxy)
  • -
  • Database → Backup (data protection)
  • -
  • Docker → Kubernetes (orchestration)
  • -
  • Python → Gunicorn (WSGI server)
  • -
  • PostgreSQL → Monitoring (production safety)
  • -
-

Output Formats

-

Text Output (Default)

-

Human-readable format with visual indicators:

-
STEP 1: Technology Detection
-────────────────────────────
-✓ Detected 2 technologies
-
-STEP 2: Infrastructure Completion
-─────────────────────────────────
-✓ Completeness: 1%
-
-

JSON Output

-

Structured format for automation and parsing:

-
provisioning detect /path/to/project --out json | jq '.detections[0]'
-
-

Output:

-
{
-  "technology": "nodejs",
-  "confidence": 0.8333333134651184,
-  "evidence_count": 1
-}
-
-

YAML Output

-

Alternative structured format:

-
provisioning detect /path/to/project --out yaml
-
-

Practical Examples

-

Example 1: Node.js + PostgreSQL Project

-
# Step 1: Detect
-$ provisioning detect my-app
-✓ Detected: nodejs, express, postgres, docker
-
-# Step 2: Complete
-$ provisioning complete my-app
-✓ Changes needed: 3
-  - redis (caching)
-  - nginx (reverse proxy)
-  - pg-backup (database backup)
-
-# Step 3: Full workflow
-$ provisioning ifc my-app --org acme-corp
-
-

Example 2: Python Django Project

-
$ provisioning detect django-app --out json
-{
-  "detections": [
-    {"technology": "python", "confidence": 0.95},
-    {"technology": "django", "confidence": 0.92}
-  ]
-}
-
-# Inferred requirements (with gunicorn, monitoring, backup)
-
-

Example 3: Microservices Architecture

-
$ provisioning ifc microservices/ --org mycompany --verbose
-🔍 Processing microservices/
-  - service-a: nodejs + postgres
-  - service-b: python + redis
-  - service-c: go + mongodb
-
-✓ Detected common patterns
-✓ Applied 12 inference rules
-✓ Generated deployment plan
-
-

Integration with Automation

-

CI/CD Pipeline Example

-
#!/bin/bash
-# Check infrastructure completeness in CI/CD
-
-PROJECT_PATH=${1:-.}
-COMPLETENESS=$(provisioning complete $PROJECT_PATH --out json | jq '.completeness')
-
-if (( $(echo "$COMPLETENESS < 0.9" | bc -l) )); then
-    echo "❌ Infrastructure completeness too low: $COMPLETENESS"
-    exit 1
-fi
-
-echo "✅ Infrastructure is complete: $COMPLETENESS"
-
-

Configuration as Code Integration

-
# Generate JSON for infrastructure config
-provisioning detect /path/to/project --out json > infra-report.json
-
-# Use in your config processing
-cat infra-report.json | jq '.detections[]' | while read -r tech; do
-    echo "Processing technology: $tech"
-done
-
-

Troubleshooting

-

“Detector binary not found”

-

Solution: Ensure the provisioning project is properly built:

-
cd $PROVISIONING/platform
-cargo build --release --bin provisioning-detector
-
-

No technologies detected

-

Check:

-
    -
  1. Project path is correct: provisioning detect /actual/path
  2. -
  3. Project contains recognizable technologies (package.json, Dockerfile, requirements.txt, etc.)
  4. -
  5. Use --debug flag for more details: provisioning detect /path --debug
  6. -
-

Organization rules not being applied

-

Check:

-
    -
  1. Rules file exists: $PROVISIONING/config/inference-rules/{org}.yaml
  2. -
  3. Organization name is correct: provisioning ifc /path --org myorg
  4. -
  5. Verify rules structure with: cat $PROVISIONING/config/inference-rules/myorg.yaml
  6. -
-

Advanced Usage

-

Custom Rule Template

-

Generate a template for a new organization:

-
# Template will be created with proper structure
-provisioning rules create --org neworg
-
-

Validate Rule Files

-
# Check for syntax errors
-provisioning rules validate /path/to/rules.yaml
-
-

Export Rules for Integration

-

Export as Rust code for embedding:

-
provisioning rules export myorg --format rust > rules.rs
-
-

Best Practices

-
    -
  1. Organize by Organization: Keep separate rules for different organizations
  2. -
  3. High Confidence First: Start with rules you’re confident about (confidence > 0.8)
  4. -
  5. Document Reasons: Always fill in the reason field for maintainability
  6. -
  7. Test Locally: Run on sample projects before applying organization-wide
  8. -
  9. Version Control: Commit inference rules to version control
  10. -
  11. Review Changes: Always inspect recommendations with --check first
  12. -
- -
# View available taskservs that can be inferred
-provisioning taskserv list
-
-# Create inferred infrastructure
-provisioning taskserv create {inferred-name}
-
-# View current configuration
-provisioning env | grep PROVISIONING
-
-

Support and Documentation

-
    -
  • Full CLI Help: provisioning help
  • -
  • Specific Command Help: provisioning help detect
  • -
  • Configuration Guide: See CONFIG_ENCRYPTION_GUIDE.md
  • -
  • Task Services: See SERVICE_MANAGEMENT_GUIDE.md
  • -
-
-

Quick Reference

-

3-Step Workflow

-
# 1. Detect technologies
-provisioning detect /path/to/project
-
-# 2. Analyze infrastructure gaps
-provisioning complete /path/to/project
-
-# 3. Run full workflow (detect + complete)
-provisioning ifc /path/to/project --org myorg
-
-

Common Commands

-
- - - - - - - -
TaskCommand
Detect technologiesprovisioning detect /path
Get JSON outputprovisioning detect /path --out json
Check completenessprovisioning complete /path
Dry-run (check mode)provisioning complete /path --check
Full workflowprovisioning ifc /path --org myorg
Verbose outputprovisioning ifc /path --verbose
Debug modeprovisioning detect /path --debug
-
-

Output Formats

-
# Text (human-readable)
-provisioning detect /path --out text
-
-# JSON (for automation)
-provisioning detect /path --out json | jq '.detections'
-
-# YAML (for configuration)
-provisioning detect /path --out yaml
-
-

Organization Rules

-

Use Organization Rules

-
provisioning ifc /path --org acme-corp
-
-

Create Rules File

-
mkdir -p $PROVISIONING/config/inference-rules
-cat > $PROVISIONING/config/inference-rules/myorg.yaml << 'EOF'
-version: "1.0.0"
-organization: "myorg"
-rules:
-  - name: "nodejs-to-redis"
-    technology: ["nodejs"]
-    infers: "redis"
-    confidence: 0.85
-    reason: "Caching layer"
-    required: false
-EOF
-
-

Example: Node.js + PostgreSQL

-
$ provisioning detect myapp
-✓ Detected: nodejs, postgres
-
-$ provisioning complete myapp
-✓ Changes: +redis, +nginx, +pg-backup
-
-$ provisioning ifc myapp --org default
-✓ Detection: 2 technologies
-✓ Completion: recommended changes
-✅ Workflow complete
-
-

CI/CD Integration

-
#!/bin/bash
-# Check infrastructure is complete before deploy
-COMPLETENESS=$(provisioning complete . --out json | jq '.completeness')
-
-if (( $(echo "$COMPLETENESS < 0.9" | bc -l) )); then
-    echo "Infrastructure incomplete: $COMPLETENESS"
-    exit 1
-fi
-
-

JSON Output Examples

-

Detect Output

-
{
-  "detections": [
-    {"technology": "nodejs", "confidence": 0.95},
-    {"technology": "postgres", "confidence": 0.92}
-  ],
-  "overall_confidence": 0.93
-}
-
-

Complete Output

-
{
-  "completeness": 1.0,
-  "changes_needed": 2,
-  "is_safe": true,
-  "change_summary": "+ redis, + monitoring"
-}
-
-

Flag Reference

-
- - - - - - - -
FlagShortPurpose
--out TEXT-oOutput format: text, json, yaml
--debug-xEnable debug output
--prettyPretty-print JSON/YAML
--check-cDry-run (detect/complete)
--org TEXTOrganization name (ifc)
--verbose-vVerbose output (ifc)
--applyApply changes (ifc, future)
-
-

Troubleshooting

-
- - - - -
IssueSolution
“Detector binary not found”cd $PROVISIONING/platform && cargo build --release
No technologies detectedCheck file types (.py, .js, go.mod, package.json, etc.)
Organization rules not foundVerify file exists: $PROVISIONING/config/inference-rules/{org}.yaml
Invalid path errorUse absolute path: provisioning detect /full/path
-
-

Environment Variables

-
- - -
VariablePurpose
$PROVISIONINGPath to provisioning root
$PROVISIONING_ORGDefault organization (optional)
-
-

Default Inference Rules

-
    -
  • Node.js + Express → Redis (caching)
  • -
  • Node.js → Nginx (reverse proxy)
  • -
  • Database → Backup (data protection)
  • -
  • Docker → Kubernetes (orchestration)
  • -
  • Python → Gunicorn (WSGI)
  • -
  • PostgreSQL → Monitoring (production)
  • -
-

Useful Aliases

-
# Add to shell config
-alias detect='provisioning detect'
-alias complete='provisioning complete'
-alias ifc='provisioning ifc'
-
-# Usage
-detect /my/project
-complete /my/project
-ifc /my/project --org myorg
-
-

Tips & Tricks

-

Parse JSON in bash:

-
provisioning detect . --out json | \
-  jq '.detections[] | .technology' | \
-  sort | uniq
-
-

Watch for changes:

-
watch -n 5 'provisioning complete . --out json | jq ".completeness"'
-
-

Generate reports:

-
provisioning detect . --out yaml > detection-report.yaml
-provisioning complete . --out yaml > completion-report.yaml
-
-

Validate all organizations:

-
for org in $PROVISIONING/config/inference-rules/*.yaml; do
-    org_name=$(basename "$org" .yaml)
-    echo "Testing $org_name..."
-    provisioning ifc . --org "$org_name" --check
-done
-
- -
    -
  • Full guide: docs/user/INFRASTRUCTURE_FROM_CODE_GUIDE.md
  • -
  • Inference rules: docs/user/INFRASTRUCTURE_FROM_CODE_GUIDE.md#organization-specific-inference-rules
  • -
  • Service management: docs/user/SERVICE_MANAGEMENT_QUICKREF.md
  • -
  • Configuration: docs/user/CONFIG_ENCRYPTION_QUICKREF.md
  • -
-

Batch Workflow System (v3.1.0 - TOKEN-OPTIMIZED ARCHITECTURE)

-

🚀 Batch Workflow System Completed (2025-09-25)

-

A comprehensive batch workflow system has been implemented using 10 token-optimized agents achieving 85-90% token efficiency over monolithic -approaches. The system enables provider-agnostic batch operations with mixed provider support (UpCloud + AWS + local).

-

Key Achievements

-
    -
  • Provider-Agnostic Design: Single workflows supporting multiple cloud providers
  • -
  • Nickel Schema Integration: Type-safe workflow definitions with comprehensive validation
  • -
  • Dependency Resolution: Topological sorting with soft/hard dependency support
  • -
  • State Management: Checkpoint-based recovery with rollback capabilities
  • -
  • Real-time Monitoring: Live workflow progress tracking and health monitoring
  • -
  • Token Optimization: 85-90% efficiency using parallel specialized agents
  • -
-

Batch Workflow Commands

-
# Submit batch workflow from Nickel definition
-nu -c "use core/nulib/workflows/batch.nu *; batch submit workflows/example_batch.ncl"
-
-# Monitor batch workflow progress
-nu -c "use core/nulib/workflows/batch.nu *; batch monitor <workflow_id>"
-
-# List batch workflows with filtering
-nu -c "use core/nulib/workflows/batch.nu *; batch list --status Running"
-
-# Get detailed batch status
-nu -c "use core/nulib/workflows/batch.nu *; batch status <workflow_id>"
-
-# Initiate rollback for failed workflow
-nu -c "use core/nulib/workflows/batch.nu *; batch rollback <workflow_id>"
-
-# Show batch workflow statistics
-nu -c "use core/nulib/workflows/batch.nu *; batch stats"
-
-

Nickel Workflow Schema

-

Batch workflows are defined using Nickel configuration in schemas/workflows.ncl:

-
# Example batch workflow with mixed providers
-{
-  batch_workflow = {
-    name = "multi_cloud_deployment",
-    version = "1.0.0",
-    storage_backend = "surrealdb",  # or "filesystem"
-    parallel_limit = 5,
-    rollback_enabled = true,
-
-    operations = [
-      {
-        id = "upcloud_servers",
-        type = "server_batch",
-        provider = "upcloud",
-        dependencies = [],
-        server_configs = [
-          { name = "web-01", plan = "1xCPU-2 GB", zone = "de-fra1" },
-          { name = "web-02", plan = "1xCPU-2 GB", zone = "us-nyc1" }
-        ]
-      },
-      {
-        id = "aws_taskservs",
-        type = "taskserv_batch",
-        provider = "aws",
-        dependencies = ["upcloud_servers"],
-        taskservs = ["kubernetes", "cilium", "containerd"]
-      }
-    ]
-  }
-}
-
-

REST API Endpoints (Batch Operations)

-

Extended orchestrator API for batch workflow management:

-
    -
  • Submit Batch: POST http://localhost:9090/v1/workflows/batch/submit
  • -
  • Batch Status: GET http://localhost:9090/v1/workflows/batch/{id}
  • -
  • List Batches: GET http://localhost:9090/v1/workflows/batch
  • -
  • Monitor Progress: GET http://localhost:9090/v1/workflows/batch/{id}/progress
  • -
  • Initiate Rollback: POST http://localhost:9090/v1/workflows/batch/{id}/rollback
  • -
  • Batch Statistics: GET http://localhost:9090/v1/workflows/batch/stats
  • -
-

System Benefits

-
    -
  • Provider Agnostic: Mix UpCloud, AWS, and local providers in single workflows
  • -
  • Type Safety: Nickel schema validation prevents runtime errors
  • -
  • Dependency Management: Automatic resolution with failure handling
  • -
  • State Recovery: Checkpoint-based recovery from any failure point
  • -
  • Real-time Monitoring: Live progress tracking with detailed status
  • -
-

Multi-Provider Batch Workflow Examples

-

This document provides practical examples of orchestrating complex deployments and operations across multiple cloud providers using the batch workflow -system.

-

Table of Contents

- -

Overview

-

The batch workflow system enables declarative orchestration of operations across multiple providers with:

-
    -
  • Dependency Tracking: Define what must complete before what
  • -
  • Error Handling: Automatic rollback on failure
  • -
  • Idempotency: Safe to re-run workflows
  • -
  • Status Tracking: Real-time progress monitoring
  • -
  • Recovery Checkpoints: Resume from failure points
  • -
-

Workflow 1: Coordinated Multi-Provider Deployment

-

Use Case: Deploy web application across DigitalOcean, AWS, and Hetzner with proper sequencing and dependencies.

-

Workflow Characteristics:

-
    -
  • Database created first (dependencies)
  • -
  • Backup storage ready before compute
  • -
  • Web servers scale once database ready
  • -
  • Health checks before considering complete
  • -
-

Workflow Definition

-
# file: workflows/multi-provider-deployment.yml
-
-name: multi-provider-app-deployment
-version: "1.0"
-description: "Deploy web app across three cloud providers"
-
-parameters:
-  do_region: "nyc3"
-  aws_region: "us-east-1"
-  hetzner_location: "nbg1"
-  web_server_count: 3
-
-phases:
-  # Phase 1: Create backup storage first (independent)
-  - name: "provision-backup-storage"
-    provider: "hetzner"
-    description: "Create backup storage volume in Hetzner"
-
-    operations:
-      - id: "create-backup-volume"
-        action: "create-volume"
-        config:
-          name: "webapp-backups"
-          size: 500
-          location: "{{ hetzner_location }}"
-          format: "ext4"
-
-        tags: ["storage", "backup"]
-
-    on_failure: "alert"
-    on_success: "proceed"
-
-  # Phase 2: Create database (independent, but must complete before app)
-  - name: "provision-database"
-    provider: "aws"
-    description: "Create managed PostgreSQL database"
-    depends_on: []  # Can run in parallel with Phase 1
-
-    operations:
-      - id: "create-rds-instance"
-        action: "create-db-instance"
-        config:
-          identifier: "webapp-db"
-          engine: "postgres"
-          engine_version: "14.6"
-          instance_class: "db.t3.medium"
-          allocated_storage: 100
-          multi_az: true
-          backup_retention_days: 30
-
-        tags: ["database", "primary"]
-
-      - id: "create-security-group"
-        action: "create-security-group"
-        config:
-          name: "webapp-db-sg"
-          description: "Security group for RDS"
-
-        depends_on: ["create-rds-instance"]
-
-      - id: "configure-db-access"
-        action: "authorize-security-group"
-        config:
-          group_id: "{{ create-security-group.id }}"
-          protocol: "tcp"
-          port: 5432
-          cidr: "10.0.0.0/8"
-
-        depends_on: ["create-security-group"]
-
-        timeout: 60
-
-  # Phase 3: Create web tier (depends on database being ready)
-  - name: "provision-web-tier"
-    provider: "digitalocean"
-    description: "Create web servers and load balancer"
-    depends_on: ["provision-database"]  # Wait for database
-
-    operations:
-      - id: "create-droplets"
-        action: "create-droplet"
-        config:
-          name: "web-server"
-          size: "s-2vcpu-4gb"
-          region: "{{ do_region }}"
-          image: "ubuntu-22-04-x64"
-          count: "{{ web_server_count }}"
-          backups: true
-          monitoring: true
-
-        tags: ["web", "production"]
-
-        timeout: 300
-        retry:
-          max_attempts: 3
-          backoff: exponential
-
-      - id: "create-firewall"
-        action: "create-firewall"
-        config:
-          name: "web-firewall"
-          inbound_rules:
-            - protocol: "tcp"
-              ports: "22"
-              sources: ["0.0.0.0/0"]
-            - protocol: "tcp"
-              ports: "80"
-              sources: ["0.0.0.0/0"]
-            - protocol: "tcp"
-              ports: "443"
-              sources: ["0.0.0.0/0"]
-
-        depends_on: ["create-droplets"]
-
-      - id: "create-load-balancer"
-        action: "create-load-balancer"
-        config:
-          name: "web-lb"
-          algorithm: "round_robin"
-          region: "{{ do_region }}"
-          forwarding_rules:
-            - entry_protocol: "http"
-              entry_port: 80
-              target_protocol: "http"
-              target_port: 80
-            - entry_protocol: "https"
-              entry_port: 443
-              target_protocol: "http"
-              target_port: 80
-          health_check:
-            protocol: "http"
-            port: 80
-            path: "/health"
-            interval: 10
-
-        depends_on: ["create-droplets"]
-
-  # Phase 4: Network configuration (depends on all resources)
-  - name: "configure-networking"
-    description: "Setup VPN tunnels and security between providers"
-    depends_on: ["provision-web-tier"]
-
-    operations:
-      - id: "setup-vpn-tunnel-do-aws"
-        action: "create-vpn-tunnel"
-        config:
-          source_provider: "digitalocean"
-          destination_provider: "aws"
-          protocol: "ipsec"
-          encryption: "aes-256"
-
-        timeout: 120
-
-      - id: "setup-vpn-tunnel-aws-hetzner"
-        action: "create-vpn-tunnel"
-        config:
-          source_provider: "aws"
-          destination_provider: "hetzner"
-          protocol: "ipsec"
-          encryption: "aes-256"
-
-  # Phase 5: Validation and verification
-  - name: "verify-deployment"
-    description: "Verify all resources are operational"
-    depends_on: ["configure-networking"]
-
-    operations:
-      - id: "health-check-droplets"
-        action: "run-health-check"
-        config:
-          targets: "{{ create-droplets.ips }}"
-          endpoint: "/health"
-          expected_status: 200
-          timeout: 30
-
-        timeout: 300
-
-      - id: "health-check-database"
-        action: "verify-database"
-        config:
-          host: "{{ create-rds-instance.endpoint }}"
-          port: 5432
-          database: "postgres"
-          timeout: 30
-
-      - id: "health-check-backup"
-        action: "verify-volume"
-        config:
-          volume_id: "{{ create-backup-volume.id }}"
-          status: "available"
-
-# Rollback strategy: if any phase fails
-rollback:
-  strategy: "automatic"
-  on_phase_failure: "rollback-previous-phases"
-  preserve_data: true
-
-# Notifications
-notifications:
-  on_start: "slack:#deployments"
-  on_phase_complete: "slack:#deployments"
-  on_failure: "slack:#alerts"
-  on_success: "slack:#deployments"
-
-# Validation checks
-pre_flight:
-  - check: "credentials"
-    description: "Verify all provider credentials"
-  - check: "quotas"
-    description: "Verify sufficient quotas in each provider"
-  - check: "dependencies"
-    description: "Verify all dependencies are available"
-
-

Execution Flow

-
┌─────────────────────────────────────────────────────────┐
-│ Start Deployment                                        │
-└──────────────────┬──────────────────────────────────────┘
-                   │
-        ┌──────────┴──────────┐
-        │                     │
-        ▼                     ▼
-   ┌─────────────┐    ┌──────────────────┐
-   │  Hetzner    │    │      AWS         │
-   │  Backup     │    │   Database       │
-   │ (Phase 1)   │    │   (Phase 2)      │
-   └──────┬──────┘    └────────┬─────────┘
-          │                    │
-          │ Ready              │ Ready
-          └────────┬───────────┘
-                   │
-                   ▼
-            ┌──────────────────┐
-            │  DigitalOcean    │
-            │   Web Tier       │
-            │  (Phase 3)       │
-            │ - Droplets       │
-            │ - Firewall       │
-            │ - Load Balancer  │
-            └────────┬─────────┘
-                     │
-                     ▼
-            ┌──────────────────┐
-            │  Network Setup   │
-            │  (Phase 4)       │
-            │ - VPN Tunnels    │
-            └────────┬─────────┘
-                     │
-                     ▼
-            ┌──────────────────┐
-            │  Verification    │
-            │  (Phase 5)       │
-            │ - Health Checks  │
-            └────────┬─────────┘
-                     │
-                     ▼
-            ┌──────────────────┐
-            │  Deployment OK   │
-            │  (Ready to use)  │
-            └──────────────────┘
-
-

Workflow 2: Multi-Provider Disaster Recovery Failover

-

Use Case: Automated failover from primary provider (DigitalOcean) to backup provider (Hetzner) on detection of failure.

-

Workflow Characteristics:

-
    -
  • Continuous health monitoring
  • -
  • Automatic failover trigger
  • -
  • Database promotion
  • -
  • DNS update
  • -
  • Verification before considering complete
  • -
-

Workflow Definition

-
# file: workflows/multi-provider-dr-failover.yml
-
-name: multi-provider-dr-failover
-version: "1.0"
-description: "Automated failover from DigitalOcean to Hetzner"
-
-parameters:
-  primary_provider: "digitalocean"
-  backup_provider: "hetzner"
-  dns_provider: "aws"
-  health_check_threshold: 3
-
-phases:
-  # Phase 1: Monitor primary provider
-  - name: "monitor-primary"
-    description: "Continuous health monitoring of primary"
-
-    operations:
-      - id: "health-check-primary"
-        action: "run-health-check"
-        config:
-          provider: "{{ primary_provider }}"
-          resources: ["web-servers", "load-balancer"]
-          checks:
-            - type: "http"
-              endpoint: "/health"
-              expected_status: 200
-            - type: "database"
-              host: "db.primary.example.com"
-              query: "SELECT 1"
-            - type: "connectivity"
-              test: "ping"
-          interval: 30  # Check every 30 seconds
-
-        timeout: 300
-
-      - id: "aggregate-health"
-        action: "aggregate-metrics"
-        config:
-          source: "{{ health-check-primary.results }}"
-          failure_threshold: 3  # 3 consecutive failures trigger failover
-
-  # Phase 2: Trigger failover (conditional on failure)
-  - name: "trigger-failover"
-    description: "Activate disaster recovery if primary fails"
-    depends_on: ["monitor-primary"]
-    condition: "{{ aggregate-health.status }} == 'FAILED'"
-
-    operations:
-      - id: "alert-on-failure"
-        action: "send-notification"
-        config:
-          type: "critical"
-          message: "Primary provider ({{ primary_provider }}) has failed. Initiating failover..."
-          recipients: ["ops-team@example.com", "slack:#alerts"]
-
-      - id: "enable-backup-infrastructure"
-        action: "scale-up"
-        config:
-          provider: "{{ backup_provider }}"
-          target: "warm-standby-servers"
-          desired_count: 3
-          instance_type: "cx31"
-
-        timeout: 300
-        retry:
-          max_attempts: 3
-
-      - id: "promote-database-replica"
-        action: "promote-read-replica"
-        config:
-          provider: "aws"
-          replica_identifier: "backup-db-replica"
-          to_master: true
-
-        timeout: 600  # Allow time for promotion
-
-  # Phase 3: Network failover
-  - name: "network-failover"
-    description: "Switch traffic to backup provider"
-    depends_on: ["trigger-failover"]
-
-    operations:
-      - id: "update-load-balancer"
-        action: "reconfigure-load-balancer"
-        config:
-          provider: "{{ dns_provider }}"
-          record: "api.example.com"
-          old_backend: "do-lb-{{ primary_provider }}"
-          new_backend: "hz-lb-{{ backup_provider }}"
-
-      - id: "update-dns"
-        action: "update-dns-record"
-        config:
-          provider: "route53"
-          record: "example.com"
-          old_value: "do-lb-ip"
-          new_value: "hz-lb-ip"
-          ttl: 60
-
-      - id: "update-cdn"
-        action: "update-cdn-origin"
-        config:
-          cdn_provider: "cloudfront"
-          distribution_id: "E123456789ABCDEF"
-          new_origin: "backup-lb.hetzner.com"
-
-  # Phase 4: Verify failover
-  - name: "verify-failover"
-    description: "Verify backup provider is operational"
-    depends_on: ["network-failover"]
-
-    operations:
-      - id: "health-check-backup"
-        action: "run-health-check"
-        config:
-          provider: "{{ backup_provider }}"
-          resources: ["backup-servers"]
-          endpoint: "/health"
-          expected_status: 200
-          timeout: 30
-
-        timeout: 300
-
-      - id: "verify-database"
-        action: "verify-database"
-        config:
-          provider: "aws"
-          database: "backup-db-promoted"
-          query: "SELECT COUNT(*) FROM users"
-          expected_rows: "> 0"
-
-      - id: "verify-traffic"
-        action: "verify-traffic-flow"
-        config:
-          endpoint: "https://example.com"
-          expected_response_time: "< 500 ms"
-          expected_status: 200
-
-  # Phase 5: Activate backup fully
-  - name: "activate-backup"
-    description: "Run at full capacity on backup provider"
-    depends_on: ["verify-failover"]
-
-    operations:
-      - id: "scale-to-production"
-        action: "scale-up"
-        config:
-          provider: "{{ backup_provider }}"
-          target: "all-backup-servers"
-          desired_count: 6
-
-        timeout: 600
-
-      - id: "configure-persistence"
-        action: "enable-persistence"
-        config:
-          provider: "{{ backup_provider }}"
-          resources: ["backup-servers"]
-          persistence_type: "volume"
-
-# Recovery strategy for primary restoration
-recovery:
-  description: "Restore primary provider when recovered"
-  phases:
-    - name: "detect-primary-recovery"
-      operation: "health-check"
-      target: "primary-provider"
-      success_criteria: "3 consecutive successful checks"
-
-    - name: "resync-data"
-      operation: "database-resync"
-      direction: "backup-to-primary"
-      timeout: 3600
-
-    - name: "failback"
-      operation: "switch-traffic"
-      target: "primary-provider"
-      verification: "100% traffic restored"
-
-# Notifications
-notifications:
-  on_failover_start: "pagerduty:critical"
-  on_failover_complete: "slack:#ops"
-  on_failover_failed: ["pagerduty:critical", "email:cto@example.com"]
-  on_recovery_start: "slack:#ops"
-  on_recovery_complete: "slack:#ops"
-
-

Failover Timeline

-
Time    Event
-────────────────────────────────────────────────────
-00:00   Health check detects failure (3 consecutive failures)
-00:01   Alert sent to ops team
-00:02   Backup infrastructure scaled to 3 servers
-00:05   Database replica promoted to master
-00:10   DNS updated (TTL=60s, propagation ~2 minutes)
-00:12   Load balancer reconfigured
-00:15   Traffic verified flowing through backup
-00:20   Backup scaled to full production capacity (6 servers)
-00:25   Fully operational on backup provider
-
-Total RTO: 25 minutes (including DNS propagation)
-Data loss (RPO): < 5 minutes (database replication lag)
-
-

Workflow 3: Cost Optimization Workload Migration

-

Use Case: Migrate running workloads to cheaper provider (DigitalOcean to Hetzner) for cost reduction.

-

Workflow Characteristics:

-
    -
  • Parallel deployment on target provider
  • -
  • Gradual traffic migration
  • -
  • Rollback capability
  • -
  • Cost tracking
  • -
-

Workflow Definition

-
# file: workflows/cost-optimization-migration.yml
-
-name: cost-optimization-migration
-version: "1.0"
-description: "Migrate workload from DigitalOcean to Hetzner for cost savings"
-
-parameters:
-  source_provider: "digitalocean"
-  target_provider: "hetzner"
-  migration_speed: "gradual"  # or "aggressive"
-  traffic_split: [10, 25, 50, 75, 100]  # Gradual percentages
-
-phases:
-  # Phase 1: Create target infrastructure
-  - name: "create-target-infrastructure"
-    description: "Deploy identical workload on Hetzner"
-
-    operations:
-      - id: "provision-servers"
-        action: "create-server"
-        config:
-          provider: "{{ target_provider }}"
-          name: "migration-app"
-          server_type: "cpx21"  # Better price/performance than DO
-          count: 3
-
-        timeout: 300
-
-  # Phase 2: Verify target is ready
-  - name: "verify-target"
-    description: "Health checks on target infrastructure"
-    depends_on: ["create-target-infrastructure"]
-
-    operations:
-      - id: "health-check"
-        action: "run-health-check"
-        config:
-          provider: "{{ target_provider }}"
-          endpoint: "/health"
-
-        timeout: 300
-
-  # Phase 3: Gradual traffic migration
-  - name: "migrate-traffic"
-    description: "Gradually shift traffic to target provider"
-    depends_on: ["verify-target"]
-
-    operations:
-      - id: "set-traffic-10"
-        action: "set-traffic-split"
-        config:
-          source: "{{ source_provider }}"
-          target: "{{ target_provider }}"
-          percentage: 10
-          duration: 300
-
-      - id: "verify-10"
-        action: "verify-traffic-flow"
-        config:
-          target_percentage: 10
-          error_rate_threshold: 0.1
-
-      - id: "set-traffic-25"
-        action: "set-traffic-split"
-        config:
-          percentage: 25
-          duration: 600
-
-      - id: "set-traffic-50"
-        action: "set-traffic-split"
-        config:
-          percentage: 50
-          duration: 900
-
-      - id: "set-traffic-75"
-        action: "set-traffic-split"
-        config:
-          percentage: 75
-          duration: 900
-
-      - id: "set-traffic-100"
-        action: "set-traffic-split"
-        config:
-          percentage: 100
-          duration: 600
-
-  # Phase 4: Cleanup source
-  - name: "cleanup-source"
-    description: "Remove old infrastructure from source provider"
-    depends_on: ["migrate-traffic"]
-
-    operations:
-      - id: "verify-final"
-        action: "run-health-check"
-        config:
-          provider: "{{ target_provider }}"
-          duration: 3600  # Monitor for 1 hour
-
-      - id: "decommission-source"
-        action: "delete-resources"
-        config:
-          provider: "{{ source_provider }}"
-          resources: ["droplets", "load-balancer"]
-          preserve_backups: true
-
-# Cost tracking
-cost_tracking:
-  before:
-    provider: "{{ source_provider }}"
-    estimated_monthly: "$72"
-
-  after:
-    provider: "{{ target_provider }}"
-    estimated_monthly: "$42"
-
-  savings:
-    monthly: "$30"
-    annual: "$360"
-    percentage: "42%"
-
-

Workflow 4: Multi-Region Database Replication

-

Use Case: Setup database replication across multiple providers and regions for disaster recovery.

-

Workflow Characteristics:

-
    -
  • Create primary database
  • -
  • Setup read replicas in other providers
  • -
  • Configure replication
  • -
  • Monitor lag
  • -
-

Workflow Definition

-
# file: workflows/multi-region-replication.yml
-
-name: multi-region-replication
-version: "1.0"
-description: "Setup database replication across providers"
-
-phases:
-  # Primary database
-  - name: "create-primary"
-    provider: "aws"
-    operations:
-      - id: "create-rds"
-        action: "create-db-instance"
-        config:
-          identifier: "app-db-primary"
-          engine: "postgres"
-          instance_class: "db.t3.medium"
-          region: "us-east-1"
-
-  # Secondary replica
-  - name: "create-secondary-replica"
-    depends_on: ["create-primary"]
-    provider: "aws"
-    operations:
-      - id: "create-replica"
-        action: "create-read-replica"
-        config:
-          source: "app-db-primary"
-          region: "eu-west-1"
-          identifier: "app-db-secondary"
-
-  # Tertiary replica in different provider
-  - name: "create-tertiary-replica"
-    depends_on: ["create-primary"]
-    operations:
-      - id: "setup-replication"
-        action: "setup-external-replication"
-        config:
-          source_provider: "aws"
-          source_db: "app-db-primary"
-          target_provider: "hetzner"
-          replication_slot: "hetzner_replica"
-          replication_type: "logical"
-
-  # Monitor replication
-  - name: "monitor-replication"
-    depends_on: ["create-tertiary-replica"]
-    operations:
-      - id: "check-lag"
-        action: "monitor-replication-lag"
-        config:
-          replicas:
-            - name: "secondary"
-              warning_threshold: 300
-              critical_threshold: 600
-            - name: "tertiary"
-              warning_threshold: 1000
-              critical_threshold: 2000
-          interval: 60
-
-

Best Practices

-

1. Workflow Design

-
    -
  • Define Clear Dependencies: Explicitly state what must happen before what
  • -
  • Use Idempotent Operations: Workflows should be safe to re-run
  • -
  • Set Realistic Timeouts: Account for cloud provider delays
  • -
  • Plan for Failures: Define rollback strategies
  • -
  • Test Workflows: Run in staging before production
  • -
-

2. Orchestration

-
    -
  • Parallel Execution: Run independent phases in parallel for speed
  • -
  • Checkpoints: Add verification at each phase
  • -
  • Progressive Deployment: Use gradual traffic shifting
  • -
  • Monitoring Integration: Track metrics during workflow
  • -
  • Notifications: Alert team at key points
  • -
-

3. Cost Management

-
    -
  • Calculate ROI: Track cost savings from optimizations
  • -
  • Monitor Resource Usage: Watch for over-provisioning
  • -
  • Implement Cleanup: Remove old resources after migration
  • -
  • Review Regularly: Reassess provider choices
  • -
-

Troubleshooting

-

Issue: Workflow Stuck in Phase

-

Diagnosis:

-
provisioning workflow status workflow-id --verbose
-
-

Solution:

-
    -
  • Increase timeout if legitimate long operation
  • -
  • Check provider logs for actual status
  • -
  • Manually intervene if necessary
  • -
  • Use --skip-phase to skip problematic phase
  • -
-

Issue: Rollback Failed

-

Diagnosis:

-
provisioning workflow rollback workflow-id --dry-run
-
-

Solution:

-
    -
  • Review what resources were created
  • -
  • Manually delete resources if needed
  • -
  • Fix root cause of failure
  • -
  • Re-run workflow
  • -
-

Issue: Data Inconsistency After Failover

-

Diagnosis:

-
provisioning database verify-consistency
-
-

Solution:

-
    -
  • Check replication lag before failover
  • -
  • Manually resync if necessary
  • -
  • Use backup to restore consistency
  • -
  • Run validation queries
  • -
-

Summary

-

Batch workflows enable complex multi-provider orchestration with:

-
    -
  • Coordinated deployment across providers
  • -
  • Automated failover and recovery
  • -
  • Gradual workload migration
  • -
  • Cost optimization
  • -
  • Disaster recovery
  • -
-

Start with simple workflows and gradually add complexity as you gain confidence.

-

Modular CLI Architecture (v3.2.0 - MAJOR REFACTORING)

-

🚀 CLI Refactoring Completed (2025-09-30)

-

A comprehensive CLI refactoring transforming the monolithic 1,329-line script into a modular, maintainable architecture with domain-driven design.

-

Architecture Improvements

-
    -
  • Main File Reduction: 1,329 lines → 211 lines (84% reduction)
  • -
  • Domain Handlers: 7 focused modules (infrastructure, orchestration, development, workspace, configuration, utilities, generation)
  • -
  • Code Duplication: 50+ instances eliminated through centralized flag handling
  • -
  • Command Registry: 80+ shortcuts for improved user experience
  • -
  • Bi-directional Help: provisioning help ws = provisioning ws help
  • -
  • Test Coverage: Comprehensive test suite with 6 test groups
  • -
-

Command Shortcuts Reference

-

Infrastructure

-

[Full docs: provisioning help infra]

-
    -
  • sserver (create, delete, list, ssh, price)
  • -
  • t, tasktaskserv (create, delete, list, generate, check-updates)
  • -
  • clcluster (create, delete, list)
  • -
  • i, infrasinfra (list, validate)
  • -
-

Orchestration

-

[Full docs: provisioning help orch]

-
    -
  • wf, flowworkflow (list, status, monitor, stats, cleanup)
  • -
  • batbatch (submit, list, status, monitor, rollback, cancel, stats)
  • -
  • orchorchestrator (start, stop, status, health, logs)
  • -
-

Development

-

[Full docs: provisioning help dev]

-
    -
  • modmodule (discover, load, list, unload, sync-nickel)
  • -
  • lyrlayer (explain, show, test, stats)
  • -
  • version (check, show, updates, apply, taskserv)
  • -
  • pack (core, provider, list, clean)
  • -
-

Workspace

-

[Full docs: provisioning help ws]

-
    -
  • wsworkspace (init, create, validate, info, list, migrate)
  • -
  • tpl, tmpltemplate (list, types, show, apply, validate)
  • -
-

Configuration

-

[Full docs: provisioning help config]

-
    -
  • eenv (show environment variables)
  • -
  • valvalidate (validate configuration)
  • -
  • st, configsetup (setup wizard)
  • -
  • show (show configuration details)
  • -
  • init (initialize infrastructure)
  • -
  • allenv (show all config and environment)
  • -
-

Utilities

-
    -
  • l, ls, listlist (list resources)
  • -
  • ssh (SSH operations)
  • -
  • sops (edit encrypted files)
  • -
  • cache (cache management)
  • -
  • providers (provider operations)
  • -
  • nu (start Nushell session with provisioning library)
  • -
  • qr (QR code generation)
  • -
  • nuinfo (Nushell information)
  • -
  • plugin, plugins (plugin management)
  • -
-

Generation

-

[Full docs: provisioning generate help]

-
    -
  • g, gengenerate (server, taskserv, cluster, infra, new)
  • -
-

Special Commands

-
    -
  • ccreate (create resources)
  • -
  • ddelete (delete resources)
  • -
  • uupdate (update resources)
  • -
  • price, cost, costsprice (show pricing)
  • -
  • cst, cstscreate-server-task (create server with taskservs)
  • -
-

Bi-directional Help System

-

The help system works in both directions:

-
# All these work identically:
-provisioning help workspace
-provisioning workspace help
-provisioning ws help
-provisioning help ws
-
-# Same for all categories:
-provisioning help infra    = provisioning infra help
-provisioning help orch     = provisioning orch help
-provisioning help dev      = provisioning dev help
-provisioning help ws       = provisioning ws help
-provisioning help plat     = provisioning plat help
-provisioning help concept  = provisioning concept help
-
-

CLI Internal Architecture

-

File Structure:

-
provisioning/core/nulib/
-├── provisioning (211 lines) - Main entry point
-├── main_provisioning/
-│   ├── flags.nu (139 lines) - Centralized flag handling
-│   ├── dispatcher.nu (264 lines) - Command routing
-│   ├── help_system.nu - Categorized help
-│   └── commands/ - Domain-focused handlers
-│       ├── infrastructure.nu (117 lines)
-│       ├── orchestration.nu (64 lines)
-│       ├── development.nu (72 lines)
-│       ├── workspace.nu (56 lines)
-│       ├── generation.nu (78 lines)
-│       ├── utilities.nu (157 lines)
-│       └── configuration.nu (316 lines)
-
-

For Developers:

-
    -
  • Adding commands: Update appropriate domain handler in commands/
  • -
  • Adding shortcuts: Update command registry in dispatcher.nu
  • -
  • Flag changes: Modify centralized functions in flags.nu
  • -
  • Testing: Run nu tests/test_provisioning_refactor.nu
  • -
-

See ADR-006: CLI Refactoring for complete refactoring details.

-

Configuration System (v2.0.0)

-

⚠️ Migration Completed (2025-09-23)

-

The system has been migrated from ENV-based to config-driven architecture.

-
    -
  • 65+ files migrated across entire codebase
  • -
  • 200+ ENV variables replaced with 476 config accessors
  • -
  • 16 token-efficient agents used for systematic migration
  • -
  • 92% token efficiency achieved vs monolithic approach
  • -
-

Configuration Files

-
    -
  • Primary Config: config.defaults.toml (system defaults)
  • -
  • User Config: config.user.toml (user preferences)
  • -
  • Environment Configs: config.{dev,test,prod}.toml.example
  • -
  • Hierarchical Loading: defaults → user → project → infra → env → runtime
  • -
  • Interpolation: {{paths.base}}, {{env.HOME}}, {{now.date}}, {{git.branch}}
  • -
-

Essential Commands

-
    -
  • provisioning validate config - Validate configuration
  • -
  • provisioning env - Show environment variables
  • -
  • provisioning allenv - Show all config and environment
  • -
  • PROVISIONING_ENV=prod provisioning - Use specific environment
  • -
-

Configuration Architecture

-

See ADR-010: Configuration Format Strategy for complete rationale and design patterns.

-

Configuration Loading Hierarchy (Priority)

-

When loading configuration, precedence is (highest to lowest):

-
    -
  1. Runtime Arguments - CLI flags and direct user input
  2. -
  3. Environment Variables - PROVISIONING_* overrides
  4. -
  5. User Configuration - ~/.config/provisioning/user_config.yaml
  6. -
  7. Infrastructure Configuration - Nickel schemas, extensions, provider configs
  8. -
  9. System Defaults - provisioning/config/config.defaults.toml
  10. -
-

File Type Guidelines

-

For new configuration:

-
    -
  • Infrastructure/schemas → Use Nickel (type-safe, schema-validated)
  • -
  • Application settings → Use TOML (hierarchical, supports interpolation)
  • -
  • Kubernetes/CI-CD → Use YAML (standard, ecosystem-compatible)
  • -
-

For existing workspace configs:

-
    -
  • Nickel is the primary configuration language
  • -
  • All new workspaces use Nickel exclusively
  • -
-

CLI Reference

-

Complete command-line reference for Infrastructure Automation. This guide covers all commands, options, and usage patterns.

-

What You’ll Learn

-
    -
  • Complete command syntax and options
  • -
  • All available commands and subcommands
  • -
  • Usage examples and patterns
  • -
  • Scripting and automation
  • -
  • Integration with other tools
  • -
  • Advanced command combinations
  • -
-

Command Structure

-

All provisioning commands follow this structure:

-
provisioning [global-options] <command> [subcommand] [command-options] [arguments]
-
-

Global Options

-

These options can be used with any command:

-
- - - - - - - - -
OptionShortDescriptionExample
--infra-iSpecify infrastructure--infra production
--environmentEnvironment override--environment prod
--check-cDry run mode--check
--debug-xEnable debug output--debug
--yes-yAuto-confirm actions--yes
--wait-wWait for completion--wait
--outOutput format--out json
--help-hShow help--help
-
-

Output Formats

-
- - - - - -
FormatDescriptionUse Case
textHuman-readable textTerminal viewing
jsonJSON formatScripting, APIs
yamlYAML formatConfiguration files
tomlTOML formatSettings files
tableTabular formatReports, lists
-
-

Core Commands

-

help - Show Help Information

-

Display help information for the system or specific commands.

-
# General help
-provisioning help
-
-# Command-specific help
-provisioning help server
-provisioning help taskserv
-provisioning help cluster
-
-# Show all available commands
-provisioning help --all
-
-# Show help for subcommand
-provisioning server help create
-
-

Options:

-
    -
  • --all - Show all available commands
  • -
  • --detailed - Show detailed help with examples
  • -
-

version - Show Version Information

-

Display version information for the system and dependencies.

-
# Basic version
+

Verification

+

Validate the Provisioning platform installation and infrastructure health.

+

Installation Verification

+

CLI and Core Tools

+
# Check CLI version
 provisioning version
-provisioning --version
-provisioning -V
 
-# Detailed version with dependencies
-provisioning version --verbose
+# Verify Nushell
+nu --version  # 0.109.1+
 
-# Show version info with title
-provisioning --info
-provisioning -I
-
-

Options:

-
    -
  • --verbose - Show detailed version information
  • -
  • --dependencies - Include dependency versions
  • -
-

env - Environment Information

-

Display current environment configuration and settings.

-
# Show environment variables
-provisioning env
-
-# Show all environment and configuration
-provisioning allenv
-
-# Show specific environment
-provisioning env --environment prod
-
-# Export environment
-provisioning env --export
-
-

Output includes:

-
    -
  • Configuration file locations
  • -
  • Environment variables
  • -
  • Provider settings
  • -
  • Path configurations
  • -
-

Server Management Commands

-

server create - Create Servers

-

Create new server instances based on configuration.

-
# Create all servers in infrastructure
-provisioning server create --infra my-infra
-
-# Dry run (check mode)
-provisioning server create --infra my-infra --check
-
-# Create with confirmation
-provisioning server create --infra my-infra --yes
-
-# Create and wait for completion
-provisioning server create --infra my-infra --wait
-
-# Create specific server
-provisioning server create web-01 --infra my-infra
-
-# Create with custom settings
-provisioning server create --infra my-infra --settings custom.ncl
-
-

Options:

-
    -
  • --check, -c - Dry run mode (show what would be created)
  • -
  • --yes, -y - Auto-confirm creation
  • -
  • --wait, -w - Wait for servers to be fully ready
  • -
  • --settings, -s - Custom settings file
  • -
  • --template, -t - Use specific template
  • -
-

server delete - Delete Servers

-

Remove server instances and associated resources.

-
# Delete all servers
-provisioning server delete --infra my-infra
-
-# Delete with confirmation
-provisioning server delete --infra my-infra --yes
-
-# Delete but keep storage
-provisioning server delete --infra my-infra --keepstorage
-
-# Delete specific server
-provisioning server delete web-01 --infra my-infra
-
-# Dry run deletion
-provisioning server delete --infra my-infra --check
-
-

Options:

-
    -
  • --yes, -y - Auto-confirm deletion
  • -
  • --keepstorage - Preserve storage volumes
  • -
  • --force - Force deletion even if servers are running
  • -
-

server list - List Servers

-

Display information about servers.

-
# List all servers
-provisioning server list --infra my-infra
-
-# List with detailed information
-provisioning server list --infra my-infra --detailed
-
-# List in specific format
-provisioning server list --infra my-infra --out json
-
-# List servers across all infrastructures
-provisioning server list --all
-
-# Filter by status
-provisioning server list --infra my-infra --status running
-
-

Options:

-
    -
  • --detailed - Show detailed server information
  • -
  • --status - Filter by server status
  • -
  • --all - Show servers from all infrastructures
  • -
-

server ssh - SSH Access

-

Connect to servers via SSH.

-
# SSH to server
-provisioning server ssh web-01 --infra my-infra
-
-# SSH with specific user
-provisioning server ssh web-01 --user admin --infra my-infra
-
-# SSH with custom key
-provisioning server ssh web-01 --key ~/.ssh/custom_key --infra my-infra
-
-# Execute single command
-provisioning server ssh web-01 --command "systemctl status nginx" --infra my-infra
-
-

Options:

-
    -
  • --user - SSH username (default from configuration)
  • -
  • --key - SSH private key file
  • -
  • --command - Execute command and exit
  • -
  • --port - SSH port (default: 22)
  • -
-

server price - Cost Information

-

Display pricing information for servers.

-
# Show costs for all servers
-provisioning server price --infra my-infra
-
-# Show detailed cost breakdown
-provisioning server price --infra my-infra --detailed
-
-# Show monthly estimates
-provisioning server price --infra my-infra --monthly
-
-# Cost comparison between providers
-provisioning server price --infra my-infra --compare
-
-

Options:

-
    -
  • --detailed - Detailed cost breakdown
  • -
  • --monthly - Monthly cost estimates
  • -
  • --compare - Compare costs across providers
  • -
-

Task Service Commands

-

taskserv create - Install Services

-

Install and configure task services on servers.

-
# Install service on all eligible servers
-provisioning taskserv create kubernetes --infra my-infra
-
-# Install with check mode
-provisioning taskserv create kubernetes --infra my-infra --check
-
-# Install specific version
-provisioning taskserv create kubernetes --version 1.28 --infra my-infra
-
-# Install on specific servers
-provisioning taskserv create postgresql --servers db-01,db-02 --infra my-infra
-
-# Install with custom configuration
-provisioning taskserv create kubernetes --config k8s-config.yaml --infra my-infra
-
-

Options:

-
    -
  • --version - Specific version to install
  • -
  • --config - Custom configuration file
  • -
  • --servers - Target specific servers
  • -
  • --force - Force installation even if conflicts exist
  • -
-

taskserv delete - Remove Services

-

Remove task services from servers.

-
# Remove service
-provisioning taskserv delete kubernetes --infra my-infra
-
-# Remove with data cleanup
-provisioning taskserv delete postgresql --cleanup-data --infra my-infra
-
-# Remove from specific servers
-provisioning taskserv delete nginx --servers web-01,web-02 --infra my-infra
-
-# Dry run removal
-provisioning taskserv delete kubernetes --infra my-infra --check
-
-

Options:

-
    -
  • --cleanup-data - Remove associated data
  • -
  • --servers - Target specific servers
  • -
  • --force - Force removal
  • -
-

taskserv list - List Services

-

Display available and installed task services.

-
# List all available services
-provisioning taskserv list
-
-# List installed services
-provisioning taskserv list --infra my-infra --installed
-
-# List by category
-provisioning taskserv list --category database
-
-# List with versions
-provisioning taskserv list --versions
-
-# Search services
-provisioning taskserv list --search kubernetes
-
-

Options:

-
    -
  • --installed - Show only installed services
  • -
  • --category - Filter by service category
  • -
  • --versions - Include version information
  • -
  • --search - Search by name or description
  • -
-

taskserv generate - Generate Configurations

-

Generate configuration files for task services.

-
# Generate configuration
-provisioning taskserv generate kubernetes --infra my-infra
-
-# Generate with custom template
-provisioning taskserv generate kubernetes --template custom --infra my-infra
-
-# Generate for specific servers
-provisioning taskserv generate nginx --servers web-01,web-02 --infra my-infra
-
-# Generate and save to file
-provisioning taskserv generate postgresql --output db-config.yaml --infra my-infra
-
-

Options:

-
    -
  • --template - Use specific template
  • -
  • --output - Save to specific file
  • -
  • --servers - Target specific servers
  • -
-

taskserv check-updates - Version Management

-

Check for and manage service version updates.

-
# Check updates for all services
-provisioning taskserv check-updates --infra my-infra
-
-# Check specific service
-provisioning taskserv check-updates kubernetes --infra my-infra
-
-# Show available versions
-provisioning taskserv versions kubernetes
-
-# Update to latest version
-provisioning taskserv update kubernetes --infra my-infra
-
-# Update to specific version
-provisioning taskserv update kubernetes --version 1.29 --infra my-infra
-
-

Options:

-
    -
  • --version - Target specific version
  • -
  • --security-only - Only security updates
  • -
  • --dry-run - Show what would be updated
  • -
-

Cluster Management Commands

-

cluster create - Deploy Clusters

-

Deploy and configure application clusters.

-
# Create cluster
-provisioning cluster create web-cluster --infra my-infra
-
-# Create with check mode
-provisioning cluster create web-cluster --infra my-infra --check
-
-# Create with custom configuration
-provisioning cluster create web-cluster --config cluster.yaml --infra my-infra
-
-# Create and scale immediately
-provisioning cluster create web-cluster --replicas 5 --infra my-infra
-
-

Options:

-
    -
  • --config - Custom cluster configuration
  • -
  • --replicas - Initial replica count
  • -
  • --namespace - Kubernetes namespace
  • -
-

cluster delete - Remove Clusters

-

Remove application clusters and associated resources.

-
# Delete cluster
-provisioning cluster delete web-cluster --infra my-infra
-
-# Delete with data cleanup
-provisioning cluster delete web-cluster --cleanup --infra my-infra
-
-# Force delete
-provisioning cluster delete web-cluster --force --infra my-infra
-
-

Options:

-
    -
  • --cleanup - Remove associated data
  • -
  • --force - Force deletion
  • -
  • --keep-volumes - Preserve persistent volumes
  • -
-

cluster list - List Clusters

-

Display information about deployed clusters.

-
# List all clusters
-provisioning cluster list --infra my-infra
-
-# List with status
-provisioning cluster list --infra my-infra --status
-
-# List across all infrastructures
-provisioning cluster list --all
-
-# Filter by namespace
-provisioning cluster list --namespace production --infra my-infra
-
-

Options:

-
    -
  • --status - Include status information
  • -
  • --all - Show clusters from all infrastructures
  • -
  • --namespace - Filter by namespace
  • -
-

cluster scale - Scale Clusters

-

Adjust cluster size and resources.

-
# Scale cluster
-provisioning cluster scale web-cluster --replicas 10 --infra my-infra
-
-# Auto-scale configuration
-provisioning cluster scale web-cluster --auto-scale --min 3 --max 20 --infra my-infra
-
-# Scale specific component
-provisioning cluster scale web-cluster --component api --replicas 5 --infra my-infra
-
-

Options:

-
    -
  • --replicas - Target replica count
  • -
  • --auto-scale - Enable auto-scaling
  • -
  • --min, --max - Auto-scaling limits
  • -
  • --component - Scale specific component
  • -
-

Infrastructure Commands

-

generate - Generate Configurations

-

Generate infrastructure and configuration files.

-
# Generate new infrastructure
-provisioning generate infra --new my-infrastructure
-
-# Generate from template
-provisioning generate infra --template web-app --name my-app
-
-# Generate server configurations
-provisioning generate server --infra my-infra
-
-# Generate task service configurations
-provisioning generate taskserv --infra my-infra
-
-# Generate cluster configurations
-provisioning generate cluster --infra my-infra
-
-

Subcommands:

-
    -
  • infra - Infrastructure configurations
  • -
  • server - Server configurations
  • -
  • taskserv - Task service configurations
  • -
  • cluster - Cluster configurations
  • -
-

Options:

-
    -
  • --new - Create new infrastructure
  • -
  • --template - Use specific template
  • -
  • --name - Name for generated resources
  • -
  • --output - Output directory
  • -
-

show - Display Information

-

Show detailed information about infrastructure components.

-
# Show settings
-provisioning show settings --infra my-infra
-
-# Show servers
-provisioning show servers --infra my-infra
-
-# Show specific server
-provisioning show servers web-01 --infra my-infra
-
-# Show task services
-provisioning show taskservs --infra my-infra
-
-# Show costs
-provisioning show costs --infra my-infra
-
-# Show in different format
-provisioning show servers --infra my-infra --out json
-
-

Subcommands:

-
    -
  • settings - Configuration settings
  • -
  • servers - Server information
  • -
  • taskservs - Task service information
  • -
  • costs - Cost information
  • -
  • data - Raw infrastructure data
  • -
-

list - List Resources

-

List resource types (servers, networks, volumes, etc.).

-
# List providers
-provisioning list providers
-
-# List task services
-provisioning list taskservs
-
-# List clusters
-provisioning list clusters
-
-# List infrastructures
-provisioning list infras
-
-# List with selection interface
-provisioning list servers --select
-
-

Subcommands:

-
    -
  • providers - Available providers
  • -
  • taskservs - Available task services
  • -
  • clusters - Available clusters
  • -
  • infras - Available infrastructures
  • -
  • servers - Server instances
  • -
-

validate - Validate Configuration

-

Validate configuration files and infrastructure definitions.

-
# Validate configuration
-provisioning validate config --infra my-infra
-
-# Validate with detailed output
-provisioning validate config --detailed --infra my-infra
-
-# Validate specific file
-provisioning validate config settings.ncl --infra my-infra
-
-# Quick validation
-provisioning validate quick --infra my-infra
-
-# Validate interpolation
-provisioning validate interpolation --infra my-infra
-
-

Subcommands:

-
    -
  • config - Configuration validation
  • -
  • quick - Quick infrastructure validation
  • -
  • interpolation - Interpolation pattern validation
  • -
-

Options:

-
    -
  • --detailed - Show detailed validation results
  • -
  • --strict - Strict validation mode
  • -
  • --rules - Show validation rules
  • -
-

Configuration Commands

-

init - Initialize Configuration

-

Initialize user and project configurations.

-
# Initialize user configuration
-provisioning init config
-
-# Initialize with specific template
-provisioning init config dev
-
-# Initialize project configuration
-provisioning init project
-
-# Force overwrite existing
-provisioning init config --force
-
-

Subcommands:

-
    -
  • config - User configuration
  • -
  • project - Project configuration
  • -
-

Options:

-
    -
  • --template - Configuration template
  • -
  • --force - Overwrite existing files
  • -
-

template - Template Management

-

Manage configuration templates.

-
# List available templates
-provisioning template list
-
-# Show template content
-provisioning template show dev
-
-# Validate templates
-provisioning template validate
-
-# Create custom template
-provisioning template create my-template --from dev
-
-

Subcommands:

-
    -
  • list - List available templates
  • -
  • show - Display template content
  • -
  • validate - Validate templates
  • -
  • create - Create custom template
  • -
-

Advanced Commands

-

nu - Interactive Shell

-

Start interactive Nushell session with provisioning library loaded.

-
# Start interactive shell
-provisioning nu
-
-# Execute specific command
-provisioning nu -c "use lib_provisioning *; show_env"
-
-# Start with custom script
-provisioning nu --script my-script.nu
-
-

Options:

-
    -
  • -c - Execute command and exit
  • -
  • --script - Run specific script
  • -
  • --load - Load additional modules
  • -
-

sops - Secret Management

-

Edit encrypted configuration files using SOPS.

-
# Edit encrypted file
-provisioning sops settings.ncl --infra my-infra
-
-# Encrypt new file
-provisioning sops --encrypt new-secrets.ncl --infra my-infra
-
-# Decrypt for viewing
-provisioning sops --decrypt secrets.ncl --infra my-infra
-
-# Rotate keys
-provisioning sops --rotate-keys secrets.ncl --infra my-infra
-
-

Options:

-
    -
  • --encrypt - Encrypt file
  • -
  • --decrypt - Decrypt file
  • -
  • --rotate-keys - Rotate encryption keys
  • -
-

context - Context Management

-

Manage infrastructure contexts and environments.

-
# Show current context
-provisioning context
-
-# List available contexts
-provisioning context list
-
-# Switch context
-provisioning context switch production
-
-# Create new context
-provisioning context create staging --from development
-
-# Delete context
-provisioning context delete old-context
-
-

Subcommands:

-
    -
  • list - List contexts
  • -
  • switch - Switch active context
  • -
  • create - Create new context
  • -
  • delete - Delete context
  • -
-

Workflow Commands

-

workflows - Batch Operations

-

Manage complex workflows and batch operations.

-
# Submit batch workflow
-provisioning workflows batch submit my-workflow.ncl
-
-# Monitor workflow progress
-provisioning workflows batch monitor workflow-123
-
-# List workflows
-provisioning workflows batch list --status running
-
-# Get workflow status
-provisioning workflows batch status workflow-123
-
-# Rollback failed workflow
-provisioning workflows batch rollback workflow-123
-
-

Options:

-
    -
  • --status - Filter by workflow status
  • -
  • --follow - Follow workflow progress
  • -
  • --timeout - Set timeout for operations
  • -
-

orchestrator - Orchestrator Management

-

Control the hybrid orchestrator system.

-
# Start orchestrator
-provisioning orchestrator start
-
-# Check orchestrator status
-provisioning orchestrator status
-
-# Stop orchestrator
-provisioning orchestrator stop
-
-# Show orchestrator logs
-provisioning orchestrator logs
-
-# Health check
-provisioning orchestrator health
-
-

Scripting and Automation

-

Exit Codes

-

Provisioning uses standard exit codes:

-
    -
  • 0 - Success
  • -
  • 1 - General error
  • -
  • 2 - Invalid command or arguments
  • -
  • 3 - Configuration error
  • -
  • 4 - Permission denied
  • -
  • 5 - Resource not found
  • -
-

Environment Variables

-

Control behavior through environment variables:

-
# Enable debug mode
-export PROVISIONING_DEBUG=true
-
-# Set environment
-export PROVISIONING_ENV=production
-
-# Set output format
-export PROVISIONING_OUTPUT_FORMAT=json
-
-# Disable interactive prompts
-export PROVISIONING_NONINTERACTIVE=true
-
-

Batch Operations

-
#!/bin/bash
-# Example batch script
-
-# Set environment
-export PROVISIONING_ENV=production
-export PROVISIONING_NONINTERACTIVE=true
-
-# Validate first
-if ! provisioning validate config --infra production; then
-    echo "Configuration validation failed"
-    exit 1
-fi
-
-# Create infrastructure
-provisioning server create --infra production --yes --wait
-
-# Install services
-provisioning taskserv create kubernetes --infra production --yes
-provisioning taskserv create postgresql --infra production --yes
-
-# Deploy clusters
-provisioning cluster create web-app --infra production --yes
-
-echo "Deployment completed successfully"
-
-

JSON Output Processing

-
# Get server list as JSON
-servers=$(provisioning server list --infra my-infra --out json)
-
-# Process with jq
-echo "$servers" | jq '.[] | select(.status == "running") | .name'
-
-# Use in scripts
-for server in $(echo "$servers" | jq -r '.[] | select(.status == "running") | .name'); do
-    echo "Processing server: $server"
-    provisioning server ssh "$server" --command "uptime" --infra my-infra
-done
-
-

Command Chaining and Pipelines

-

Sequential Operations

-
# Chain commands with && (stop on failure)
-provisioning validate config --infra my-infra && \
-provisioning server create --infra my-infra --check && \
-provisioning server create --infra my-infra --yes
-
-# Chain with || (continue on failure)
-provisioning taskserv create kubernetes --infra my-infra || \
-echo "Kubernetes installation failed, continuing with other services"
-
-

Complex Workflows

-
# Full deployment workflow
-deploy_infrastructure() {
-    local infra_name=$1
-
-    echo "Deploying infrastructure: $infra_name"
-
-    # Validate
-    provisioning validate config --infra "$infra_name" || return 1
-
-    # Create servers
-    provisioning server create --infra "$infra_name" --yes --wait || return 1
-
-    # Install base services
-    for service in containerd kubernetes; do
-        provisioning taskserv create "$service" --infra "$infra_name" --yes || return 1
-    done
-
-    # Deploy applications
-    provisioning cluster create web-app --infra "$infra_name" --yes || return 1
-
-    echo "Deployment completed: $infra_name"
-}
-
-# Use the function
-deploy_infrastructure "production"
-
-

Integration with Other Tools

-

CI/CD Integration

-
# GitLab CI example
-deploy:
-  script:
-    - provisioning validate config --infra production
-    - provisioning server create --infra production --check
-    - provisioning server create --infra production --yes --wait
-    - provisioning taskserv create kubernetes --infra production --yes
-  only:
-    - main
-
-

Monitoring Integration

-
# Health check script
-#!/bin/bash
-
-# Check infrastructure health
-if provisioning health check --infra production --out json | jq -e '.healthy'; then
-    echo "Infrastructure healthy"
-    exit 0
-else
-    echo "Infrastructure unhealthy"
-    # Send alert
-    curl -X POST https://alerts.company.com/webhook \
-        -d '{"message": "Infrastructure health check failed"}'
-    exit 1
-fi
-
-

Backup Automation

-
# Backup script
-#!/bin/bash
-
-DATE=$(date +%Y%m%d_%H%M%S)
-BACKUP_DIR="/backups/provisioning/$DATE"
-
-# Create backup directory
-mkdir -p "$BACKUP_DIR"
-
-# Export configurations
-provisioning config export --format yaml > "$BACKUP_DIR/config.yaml"
-
-# Backup infrastructure definitions
-for infra in $(provisioning list infras --out json | jq -r '.[]'); do
-    provisioning show settings --infra "$infra" --out yaml > "$BACKUP_DIR/$infra.yaml"
-done
-
-echo "Backup completed: $BACKUP_DIR"
-
-

This CLI reference provides comprehensive coverage of all provisioning commands. Use it as your primary reference for command syntax, options, and -integration patterns.

-

Dynamic Secrets Guide

-

This guide covers generating and managing temporary credentials (dynamic secrets) instead of using static secrets. See the Quick Reference section -below for fast lookup.

-

Quick Reference

-

Quick Start: Generate temporary credentials instead of using static secrets

-

Quick Commands

-

Generate AWS Credentials (1 hour)

-
secrets generate aws --role deploy --workspace prod --purpose "deployment"
-
-

Generate SSH Key (2 hours)

-
secrets generate ssh --ttl 2 --workspace dev --purpose "server access"
-
-

Generate UpCloud Subaccount (2 hours)

-
secrets generate upcloud --workspace staging --purpose "testing"
-
-

List Active Secrets

-
secrets list
-
-

Revoke Secret

-
secrets revoke <secret-id> --reason "no longer needed"
-
-

View Statistics

-
secrets stats
-
-
-

Secret Types

-
- - - - -
TypeTTL RangeRenewableUse Case
AWS STS15 min - 12 h✅ YesCloud resource provisioning
SSH Keys10 min - 24 h❌ NoTemporary server access
UpCloud30 min - 8 h❌ NoUpCloud API operations
Vault5 min - 24 h✅ YesAny Vault-backed secret
-
-
-

REST API Endpoints

-

Base URL: http://localhost:9090/api/v1/secrets

-
# Generate secret
-POST /generate
-
-# Get secret
-GET /{id}
-
-# Revoke secret
-POST /{id}/revoke
-
-# Renew secret
-POST /{id}/renew
-
-# List secrets
-GET /list
-
-# List expiring
-GET /expiring
-
-# Statistics
-GET /stats
-
-
-

AWS STS Example

-
# Generate
-let creds = secrets generate aws `
-    --role deploy `
-    --region us-west-2 `
-    --workspace prod `
-    --purpose "Deploy servers"
-
-# Export to environment
-export-env {
-    AWS_ACCESS_KEY_ID: ($creds.credentials.access_key_id)
-    AWS_SECRET_ACCESS_KEY: ($creds.credentials.secret_access_key)
-    AWS_SESSION_TOKEN: ($creds.credentials.session_token)
-}
-
-# Use credentials
-provisioning server create
-
-# Cleanup
-secrets revoke ($creds.id) --reason "done"
-
-
-

SSH Key Example

-
# Generate
-let key = secrets generate ssh `
-    --ttl 4 `
-    --workspace dev `
-    --purpose "Debug issue"
-
-# Save key
-$key.credentials.private_key | save ~/.ssh/temp_key
-chmod 600 ~/.ssh/temp_key
-
-# Use key
-ssh -i ~/.ssh/temp_key user@server
-
-# Cleanup
-rm ~/.ssh/temp_key
-secrets revoke ($key.id) --reason "fixed"
-
-
-

Configuration

-

File: provisioning/platform/orchestrator/config.defaults.toml

-
[secrets]
-default_ttl_hours = 1
-max_ttl_hours = 12
-auto_revoke_on_expiry = true
-warning_threshold_minutes = 5
-
-aws_account_id = "123456789012"
-aws_default_region = "us-east-1"
-
-upcloud_username = "${UPCLOUD_USER}"
-upcloud_password = "${UPCLOUD_PASS}"
-
-
-

Troubleshooting

-

“Provider not found”

-

→ Check service initialization

-

“TTL exceeds maximum”

-

→ Reduce TTL or configure higher max

-

“Secret not renewable”

-

→ Generate new secret instead

-

“Missing required parameter”

-

→ Check provider requirements (for example, AWS needs ‘role’)

-
-

Security Features

-
    -
  • ✅ No static credentials stored
  • -
  • ✅ Automatic expiration (1-12 hours)
  • -
  • ✅ Auto-revocation on expiry
  • -
  • ✅ Full audit trail
  • -
  • ✅ Memory-only storage
  • -
  • ✅ TLS in transit
  • -
-
-

Support

-

Orchestrator logs: provisioning/platform/orchestrator/data/orchestrator.log

-

Debug secrets: secrets list | where is_expired == true

-

Mode System Quick Reference

-

Version: 1.0.0 | Date: 2025-10-06

-
-

Quick Start

-
# Check current mode
-provisioning mode current
-
-# List all available modes
-provisioning mode list
-
-# Switch to a different mode
-provisioning mode switch <mode-name>
-
-# Validate mode configuration
-provisioning mode validate
-
-
-

Available Modes

-
- - - - -
ModeUse CaseAuthOrchestratorOCI Registry
soloLocal developmentNoneLocal binaryLocal Zot (optional)
multi-userTeam collaborationToken (JWT)RemoteRemote Harbor
cicdCI/CD pipelinesToken (CI injected)RemoteRemote Harbor
enterpriseProductionmTLSKubernetes HAHarbor HA + DR
-
-
-

Mode Comparison

-

Solo Mode

-
    -
  • Best for: Individual developers
  • -
  • 🔐 Authentication: None
  • -
  • 🚀 Services: Local orchestrator only
  • -
  • 📦 Extensions: Local filesystem
  • -
  • 🔒 Workspace Locking: Disabled
  • -
  • 💾 Resource Limits: Unlimited
  • -
-

Multi-User Mode

-
    -
  • Best for: Development teams (5-20 developers)
  • -
  • 🔐 Authentication: Token (JWT, 24h expiry)
  • -
  • 🚀 Services: Remote orchestrator, control-center, DNS, git
  • -
  • 📦 Extensions: OCI registry (Harbor)
  • -
  • 🔒 Workspace Locking: Enabled (Gitea provider)
  • -
  • 💾 Resource Limits: 10 servers, 32 cores, 128 GB per user
  • -
-

CI/CD Mode

-
    -
  • Best for: Automated pipelines
  • -
  • 🔐 Authentication: Token (1h expiry, CI/CD injected)
  • -
  • 🚀 Services: Remote orchestrator, DNS, git
  • -
  • 📦 Extensions: OCI registry (always pull latest)
  • -
  • 🔒 Workspace Locking: Disabled (stateless)
  • -
  • 💾 Resource Limits: 5 servers, 16 cores, 64 GB per pipeline
  • -
-

Enterprise Mode

-
    -
  • Best for: Large enterprises with strict compliance
  • -
  • 🔐 Authentication: mTLS (TLS 1.3)
  • -
  • 🚀 Services: All services on Kubernetes (HA)
  • -
  • 📦 Extensions: OCI registry (signature verification)
  • -
  • 🔒 Workspace Locking: Required (etcd provider)
  • -
  • 💾 Resource Limits: 20 servers, 64 cores, 256 GB per user
  • -
-
-

Common Operations

-

Initialize Mode System

-
provisioning mode init
-
-

Check Current Mode

-
provisioning mode current
-
-# Output:
-# mode: solo
-# configured: true
-# config_file: ~/.provisioning/config/active-mode.yaml
-
-

List All Modes

-
provisioning mode list
-
-# Output:
-# ┌───────────────┬───────────────────────────────────┬─────────┐
-# │ mode          │ description                        │ current │
-# ├───────────────┼───────────────────────────────────┼─────────┤
-# │ solo          │ Single developer local development │ ●       │
-# │ multi-user    │ Team collaboration                 │         │
-# │ cicd          │ CI/CD pipeline execution           │         │
-# │ enterprise    │ Production enterprise deployment   │         │
-# └───────────────┴───────────────────────────────────┴─────────┘
-
-

Switch Mode

-
# Switch with confirmation
-provisioning mode switch multi-user
-
-# Dry run (preview changes)
-provisioning mode switch multi-user --dry-run
-
-# With validation
-provisioning mode switch multi-user --validate
-
-

Show Mode Details

-
# Show current mode
-provisioning mode show
-
-# Show specific mode
-provisioning mode show enterprise
-
-

Validate Mode

-
# Validate current mode
-provisioning mode validate
-
-# Validate specific mode
-provisioning mode validate cicd
-
-

Compare Modes

-
provisioning mode compare solo multi-user
-
-# Output shows differences in:
-# - Authentication
-# - Service deployments
-# - Extension sources
-# - Workspace locking
-# - Security settings
-
-
-

OCI Registry Management

-

Solo Mode Only

-
# Start local OCI registry
-provisioning mode oci-registry start
-
-# Check registry status
-provisioning mode oci-registry status
-
-# View registry logs
-provisioning mode oci-registry logs
-
-# Stop registry
-provisioning mode oci-registry stop
-
-

Note: OCI registry management only works in solo mode with local deployment.

-
-

Mode-Specific Workflows

-

Solo Mode Workflow

-
# 1. Initialize (defaults to solo)
-provisioning workspace init
-
-# 2. Start orchestrator
-cd provisioning/platform/orchestrator
-./scripts/start-orchestrator.nu --background
-
-# 3. (Optional) Start OCI registry
-provisioning mode oci-registry start
-
-# 4. Create infrastructure
-provisioning server create web-01 --check
-provisioning taskserv create kubernetes
-
-# Extensions loaded from local filesystem
-
-

Multi-User Mode Workflow

-
# 1. Switch to multi-user mode
-provisioning mode switch multi-user
-
-# 2. Authenticate
-provisioning auth login
-# Enter JWT token from team admin
-
-# 3. Lock workspace
-provisioning workspace lock my-infra
-
-# 4. Pull extensions from OCI registry
-provisioning extension pull upcloud
-provisioning extension pull kubernetes
-
-# 5. Create infrastructure
-provisioning server create web-01
-
-# 6. Unlock workspace
-provisioning workspace unlock my-infra
-
-

CI/CD Mode Workflow

-
# GitLab CI example
-deploy:
-  stage: deploy
-  script:
-    # Token injected by CI
-    - export PROVISIONING_MODE=cicd
-    - mkdir -p /var/run/secrets/provisioning
-    - echo "$PROVISIONING_TOKEN" > /var/run/secrets/provisioning/token
-
-    # Validate
-    - provisioning validate --all
-
-    # Test
-    - provisioning test quick kubernetes
-
-    # Deploy
-    - provisioning server create --check
-    - provisioning server create
-
-  after_script:
-    - provisioning workspace cleanup
-
-

Enterprise Mode Workflow

-
# 1. Switch to enterprise mode
-provisioning mode switch enterprise
-
-# 2. Verify Kubernetes connectivity
-kubectl get pods -n provisioning-system
-
-# 3. Login to Harbor
-docker login harbor.enterprise.local
-
-# 4. Request workspace (requires approval)
-provisioning workspace request prod-deployment
-# Approval from: platform-team, security-team
-
-# 5. After approval, lock workspace
-provisioning workspace lock prod-deployment --provider etcd
-
-# 6. Pull extensions (with signature verification)
-provisioning extension pull upcloud --verify-signature
-
-# 7. Deploy infrastructure
-provisioning infra create --check
-provisioning infra create
-
-# 8. Release workspace
-provisioning workspace unlock prod-deployment
-
-
-

Configuration Files

-

Mode Templates

-
workspace/config/modes/
-├── solo.yaml           # Solo mode configuration
-├── multi-user.yaml     # Multi-user mode configuration
-├── cicd.yaml           # CI/CD mode configuration
-└── enterprise.yaml     # Enterprise mode configuration
-
-

Active Mode Configuration

-
~/.provisioning/config/active-mode.yaml
-
-

This file is created/updated when you switch modes.

-
-

OCI Registry Namespaces

-

All modes use the following OCI registry namespaces:

-
- - - - -
NamespacePurposeExample
*-extensionsExtension artifactsprovisioning-extensions/upcloud:latest
*-schemasNickel schema artifactsprovisioning-schemas/lib:v1.0.0
*-platformPlatform service imagesprovisioning-platform/orchestrator:latest
*-testTest environment imagesprovisioning-test/ubuntu:22.04
-
-

Note: Prefix varies by mode (dev-, provisioning-, cicd-, prod-)

-
-

Troubleshooting

-

Mode switch fails

-
# Validate mode first
-provisioning mode validate <mode-name>
-
-# Check runtime requirements
-provisioning mode validate <mode-name> --check-requirements
-
-

Cannot start OCI registry (solo mode)

-
# Check if registry binary is installed
-which zot
-
-# Install Zot
-# macOS: brew install project-zot/tap/zot
-# Linux: Download from https://github.com/project-zot/zot/releases
-
-# Check if port 5000 is available
-lsof -i :5000
-
-

Authentication fails (multi-user/cicd/enterprise)

-
# Check token expiry
-provisioning auth status
-
-# Re-authenticate
-provisioning auth login
-
-# For enterprise mTLS, verify certificates
-ls -la /etc/provisioning/certs/
-# Should contain: client.crt, client.key, ca.crt
-
-

Workspace locking issues (multi-user/enterprise)

-
# Check lock status
-provisioning workspace lock-status <workspace-name>
-
-# Force unlock (use with caution)
-provisioning workspace unlock <workspace-name> --force
-
-# Check lock provider status
-# Multi-user: Check Gitea connectivity
-curl -I https://git.company.local
-
-# Enterprise: Check etcd cluster
-etcdctl endpoint health
-
-

OCI registry connection fails

-
# Test registry connectivity
-curl https://harbor.company.local/v2/
-
-# Check authentication token
-cat ~/.provisioning/tokens/oci
-
-# Verify network connectivity
-ping harbor.company.local
-
-# For Harbor, check credentials
-docker login harbor.company.local
-
-
-

Environment Variables

-
- - - -
VariablePurposeExample
PROVISIONING_MODEOverride active modeexport PROVISIONING_MODE=cicd
PROVISIONING_WORKSPACE_CONFIGOverride config location~/.provisioning/config
PROVISIONING_PROJECT_ROOTProject root directory/opt/project-provisioning
-
-
-

Best Practices

-

1. Use Appropriate Mode

-
    -
  • Solo: Individual development, experimentation
  • -
  • Multi-User: Team collaboration, shared infrastructure
  • -
  • CI/CD: Automated testing and deployment
  • -
  • Enterprise: Production deployments, compliance requirements
  • -
-

2. Validate Before Switching

-
provisioning mode validate <mode-name>
-
-

3. Backup Active Configuration

-
# Automatic backup created when switching
-ls ~/.provisioning/config/active-mode.yaml.backup
-
-

4. Use Check Mode

-
provisioning server create --check
-
-

5. Lock Workspaces in Multi-User/Enterprise

-
provisioning workspace lock <workspace-name>
-# ... make changes ...
-provisioning workspace unlock <workspace-name>
-
-

6. Pull Extensions from OCI (Multi-User/CI/CD/Enterprise)

-
# Don't use local extensions in shared modes
-provisioning extension pull <extension-name>
-
-
-

Security Considerations

-

Solo Mode

-
    -
  • ⚠️ No authentication (local development only)
  • -
  • ⚠️ No encryption (sensitive data should use SOPS)
  • -
  • ✅ Isolated environment
  • -
-

Multi-User Mode

-
    -
  • ✅ Token-based authentication
  • -
  • ✅ TLS in transit
  • -
  • ✅ Audit logging
  • -
  • ⚠️ No encryption at rest (configure as needed)
  • -
-

CI/CD Mode

-
    -
  • ✅ Token authentication (short expiry)
  • -
  • ✅ Full encryption (at rest + in transit)
  • -
  • ✅ KMS for secrets
  • -
  • ✅ Vulnerability scanning (critical threshold)
  • -
  • ✅ Image signing required
  • -
-

Enterprise Mode

-
    -
  • ✅ mTLS authentication
  • -
  • ✅ Full encryption (at rest + in transit)
  • -
  • ✅ KMS for all secrets
  • -
  • ✅ Vulnerability scanning (critical threshold)
  • -
  • ✅ Image signing + signature verification
  • -
  • ✅ Network isolation
  • -
  • ✅ Compliance policies (SOC2, ISO27001, HIPAA)
  • -
-
-

Support and Documentation

-
    -
  • Implementation Summary: MODE_SYSTEM_IMPLEMENTATION_SUMMARY.md
  • -
  • Nickel Schemas: provisioning/schemas/modes.ncl, provisioning/schemas/oci_registry.ncl
  • -
  • Mode Templates: workspace/config/modes/*.yaml
  • -
  • Commands: provisioning/core/nulib/lib_provisioning/mode/
  • -
-
-

Last Updated: 2025-10-06 | Version: 1.0.0

-

Configuration Rendering Guide

-

This guide covers the unified configuration rendering system in the CLI daemon that supports Nickel and Tera template engines.

-

Overview

-

The CLI daemon (cli-daemon) provides a high-performance REST API for rendering configurations in multiple formats:

-
    -
  • Nickel: Functional configuration language with lazy evaluation and type safety (primary choice)
  • -
  • Tera: Jinja2-compatible template engine (simple templating)
  • -
-

All renderers are accessible through a single unified API endpoint with intelligent caching to minimize latency.

-

Quick Start

-

Starting the Daemon

-

The daemon runs on port 9091 by default:

-
# Start in background
-./target/release/cli-daemon &
-
-# Check it's running
-curl http://localhost:9091/health
-
-

Simple Nickel Rendering

-
curl -X POST http://localhost:9091/config/render \
-  -H "Content-Type: application/json" \
-  -d '{
-    "language": "nickel",
-    "content": "{ name = \"my-server\", cpu = 4, memory = 8192 }",
-    "name": "server-config"
-  }'
-
-

Response:

-
{
-  "rendered": "{ name = \"my-server\", cpu = 4, memory = 8192 }",
-  "error": null,
-  "language": "nickel",
-  "execution_time_ms": 23
-}
-
-

REST API Reference

-

POST /config/render

-

Render a configuration in any supported language.

-

Request Headers:

-
Content-Type: application/json
-
-

Request Body:

-
{
-  "language": "nickel|tera",
-  "content": "...configuration content...",
-  "context": {
-    "key1": "value1",
-    "key2": 123
-  },
-  "name": "optional-config-name"
-}
-
-

Parameters:

-
- - - - -
ParameterTypeRequiredDescription
languagestringYesOne of: nickel, tera
contentstringYesThe configuration or template content to render
contextobjectNoVariables to pass to the configuration (JSON object)
namestringNoOptional name for logging purposes
-
-

Response (Success):

-
{
-  "rendered": "...rendered output...",
-  "error": null,
-  "language": "nickel",
-  "execution_time_ms": 23
-}
-
-

Response (Error):

-
{
-  "rendered": null,
-  "error": "Nickel evaluation failed: undefined variable 'name'",
-  "language": "nickel",
-  "execution_time_ms": 18
-}
-
-

Status Codes:

-
    -
  • 200 OK - Rendering completed (check error field in body for evaluation errors)
  • -
  • 400 Bad Request - Invalid request format
  • -
  • 500 Internal Server Error - Daemon error
  • -
-

GET /config/stats

-

Get rendering statistics across all languages.

-

Response:

-
{
-  "total_renders": 156,
-  "successful_renders": 154,
-  "failed_renders": 2,
-  "average_time_ms": 28,
-  "nickel_renders": 104,
-  "tera_renders": 52,
-  "nickel_cache_hits": 87,
-  "tera_cache_hits": 38
-}
-
-

POST /config/stats/reset

-

Reset all rendering statistics.

-

Response:

-
{
-  "status": "success",
-  "message": "Configuration rendering statistics reset"
-}
-
-

Nickel Rendering

-

Basic Nickel Configuration

-
curl -X POST http://localhost:9091/config/render \
-  -H "Content-Type: application/json" \
-  -d '{
-    "language": "nickel",
-    "content": "{
-  name = \"production-server\",
-  type = \"web\",
-  cpu = 4,
-  memory = 8192,
-  disk = 50,
-  tags = {
-    environment = \"production\",
-    team = \"platform\"
-  }
-}",
-    "name": "nickel-server-config"
-  }'
-
-

Nickel with Lazy Evaluation

-

Nickel excels at evaluating only what’s needed:

-
curl -X POST http://localhost:9091/config/render \
-  -H "Content-Type: application/json" \
-  -d '{
-    "language": "nickel",
-    "content": "{
-  server = {
-    name = \"db-01\",
-    # Expensive computation - only computed if accessed
-    health_check = std.array.fold
-      (fun acc x => acc + x)
-      0
-      [1, 2, 3, 4, 5]
-  },
-  networking = {
-    dns_servers = [\"8.8.8.8\", \"8.8.4.4\"],
-    firewall_rules = [\"allow_ssh\", \"allow_https\"]
-  }
-}",
-    "context": {
-      "only_server": true
-    }
-  }'
-
-

Expected Nickel Rendering Time

-
    -
  • First render (cache miss): 30-60 ms
  • -
  • Cached render (same content): 1-5 ms
  • -
  • Large configs with lazy evaluation: 40-80 ms
  • -
-

Advantage: Nickel only computes fields that are actually used in the output

-

Tera Template Rendering

-

Basic Tera Template

-
curl -X POST http://localhost:9091/config/render \
-  -H "Content-Type: application/json" \
-  -d '{
-    "language": "tera",
-    "content": "
-Server Configuration
-====================
-
-Name: {{ server_name }}
-Environment: {{ environment | default(value=\"development\") }}
-Type: {{ server_type }}
-
-Assigned Tasks:
-{% for task in tasks %}
-  - {{ task }}
-{% endfor %}
-
-{% if enable_monitoring %}
-Monitoring: ENABLED
-  - Prometheus: true
-  - Grafana: true
-{% else %}
-Monitoring: DISABLED
-{% endif %}
-",
-    "context": {
-      "server_name": "prod-web-01",
-      "environment": "production",
-      "server_type": "web",
-      "tasks": ["kubernetes", "prometheus", "cilium"],
-      "enable_monitoring": true
-    },
-    "name": "server-template"
-  }'
-
-

Tera Filters and Functions

-

Tera supports Jinja2-compatible filters and functions:

-
curl -X POST http://localhost:9091/config/render \
-  -H "Content-Type: application/json" \
-  -d '{
-    "language": "tera",
-    "content": "
-Configuration for {{ environment | upper }}
-Servers: {{ server_count | default(value=1) }}
-Cost estimate: \${{ monthly_cost | round(precision=2) }}
-
-{% for server in servers | reverse %}
-- {{ server.name }}: {{ server.cpu }} CPUs
-{% endfor %}
-",
-    "context": {
-      "environment": "production",
-      "server_count": 5,
-      "monthly_cost": 1234.567,
-      "servers": [
-        {"name": "web-01", "cpu": 4},
-        {"name": "db-01", "cpu": 8},
-        {"name": "cache-01", "cpu": 2}
-      ]
-    }
-  }'
-
-

Expected Tera Rendering Time

-
    -
  • Simple templates: 4-10 ms
  • -
  • Complex templates with loops: 10-20 ms
  • -
  • Always fast (template is pre-compiled)
  • -
-

Performance Characteristics

-

Caching Strategy

-

All three renderers use LRU (Least Recently Used) caching:

-
    -
  • Cache Size: 100 entries per renderer
  • -
  • Cache Key: SHA256 hash of (content + context)
  • -
  • Cache Hit: Typically < 5 ms
  • -
  • Cache Miss: Language-dependent (20-60 ms)
  • -
-

To maximize cache hits:

-
    -
  1. Render the same config multiple times → hits after first render
  2. -
  3. Use static content when possible → better cache reuse
  4. -
  5. Monitor cache hit ratio via /config/stats
  6. -
-

Benchmarks

-

Comparison of rendering times (on commodity hardware):

-
- - - - -
ScenarioNickelTera
Simple config (10 vars)30 ms5 ms
Medium config (50 vars)45 ms8 ms
Large config (100+ vars)50-80 ms10 ms
Cached render1-5 ms1-5 ms
-
-

Memory Usage

-
    -
  • Each renderer keeps 100 cached entries in memory
  • -
  • Average config size in cache: ~5 KB
  • -
  • Maximum memory per renderer: ~500 KB + overhead
  • -
-

Error Handling

-

Common Errors

-

Nickel Binary Not Found

-

Error Response:

-
{
-  "rendered": null,
-  "error": "Nickel binary not found in PATH. Install Nickel or set NICKEL_PATH environment variable",
-  "language": "nickel",
-  "execution_time_ms": 0
-}
-
-

Solution:

-
# Install Nickel
-nickel version
-
-# Or set explicit path
-export NICKEL_PATH=/usr/local/bin/nickel
-
-

Invalid Nickel Syntax

-

Error Response:

-
{
-  "rendered": null,
-  "error": "Nickel evaluation failed: Type mismatch at line 3: expected String, got Number",
-  "language": "nickel",
-  "execution_time_ms": 12
-}
-
-

Solution: Verify Nickel syntax. Run nickel typecheck file.ncl directly for better error messages.

-

Missing Context Variable

-

Error Response:

-
{
-  "rendered": null,
-  "error": "Nickel evaluation failed: undefined variable 'required_var'",
-  "language": "nickel",
-  "execution_time_ms": 8
-}
-
-

Solution: Provide required context variables or define fields with default values.

-

Invalid JSON in Context

-

HTTP Status: 400 Bad Request -Body: Error message about invalid JSON

-

Solution: Ensure context is valid JSON.

-

Integration Examples

-

Using with Nushell

-
# Render a Nickel config from Nushell
-let config = open workspace/config/provisioning.ncl | into string
-let response = curl -X POST http://localhost:9091/config/render \
-  -H "Content-Type: application/json" \
-  -d $"{{ language: \"nickel\", content: $config }}" | from json
-
-print $response.rendered
-
-

Using with Python

-
import requests
-import json
-
-def render_config(language, content, context=None, name=None):
-    payload = {
-        "language": language,
-        "content": content,
-        "context": context or {},
-        "name": name
-    }
-
-    response = requests.post(
-        "http://localhost:9091/config/render",
-        json=payload
-    )
-
-    return response.json()
-
-# Example usage
-result = render_config(
-    "nickel",
-    '{name = "server", cpu = 4}',
-    {"name": "prod-server"},
-    "my-config"
-)
-
-if result["error"]:
-    print(f"Error: {result['error']}")
-else:
-    print(f"Rendered in {result['execution_time_ms']}ms")
-    print(result["rendered"])
-
-

Using with Curl

-
#!/bin/bash
-
-# Function to render config
-render_config() {
-    local language=$1
-    local content=$2
-    local name=${3:-"unnamed"}
-
-    curl -X POST http://localhost:9091/config/render \
-        -H "Content-Type: application/json" \
-        -d @- << EOF
-{
-  "language": "$language",
-  "content": $(echo "$content" | jq -Rs .),
-  "name": "$name"
-}
-EOF
-}
-
-# Usage
-render_config "nickel" "{name = \"my-server\"}"  "server-config"
-
-

Troubleshooting

-

Daemon Won’t Start

-

Check log level:

-
PROVISIONING_LOG_LEVEL=debug ./target/release/cli-daemon
-
-

Verify Nushell binary:

-
which nu
-# or set explicit path
-NUSHELL_PATH=/usr/local/bin/nu ./target/release/cli-daemon
-
-

Very Slow Rendering

-

Check cache hit rate:

-
curl http://localhost:9091/config/stats | jq '.nickel_cache_hits / .nickel_renders'
-
-

If low cache hit rate: Rendering same configs repeatedly?

-

Monitor execution time:

-
curl http://localhost:9091/config/render ... | jq '.execution_time_ms'
-
-

Rendering Hangs

-

Set timeout (depends on client):

-
curl --max-time 10 -X POST http://localhost:9091/config/render ...
-
-

Check daemon logs for stuck processes.

-

Out of Memory

-

Reduce cache size (rebuild with modified config) or restart daemon.

-

Best Practices

-
    -
  1. -

    Choose right language for task:

    -
      -
    • Nickel: Large configs with lazy evaluation, type-safe infrastructure definitions
    • -
    • Tera: Simple templating, fastest for rendering
    • -
    -
  2. -
  3. -

    Use context variables instead of hardcoding values:

    -
    "context": {
    -  "environment": "production",
    -  "replica_count": 3
    -}
    -
    -
  4. -
  5. -

    Monitor statistics to understand performance:

    -
    watch -n 1 'curl -s http://localhost:9091/config/stats | jq'
    -
    -
  6. -
  7. -

    Cache warming: Pre-render common configs on startup

    -
  8. -
  9. -

    Error handling: Always check error field in response

    -
  10. -
-

See Also

- -
-

Quick Reference

-

API Endpoint

-
POST http://localhost:9091/config/render
-
-

Request Template

-
curl -X POST http://localhost:9091/config/render \
-  -H "Content-Type: application/json" \
-  -d '{
-    "language": "nickel|tera",
-    "content": "...",
-    "context": {...},
-    "name": "optional-name"
-  }'
-
-

Quick Examples

-

Nickel - Simple Config

-
curl -X POST http://localhost:9091/config/render \
-  -H "Content-Type: application/json" \
-  -d '{
-    "language": "nickel",
-    "content": "{name = \"server\", cpu = 4, memory = 8192}"
-  }'
-
-

Tera - Template with Loops

-
curl -X POST http://localhost:9091/config/render \
-  -H "Content-Type: application/json" \
-  -d '{
-    "language": "tera",
-    "content": "{% for task in tasks %}{{ task }}\n{% endfor %}",
-    "context": {"tasks": ["kubernetes", "postgres", "redis"]}
-  }'
-
-

Statistics

-
# Get stats
-curl http://localhost:9091/config/stats
-
-# Reset stats
-curl -X POST http://localhost:9091/config/stats/reset
-
-# Watch stats in real-time
-watch -n 1 'curl -s http://localhost:9091/config/stats | jq'
-
-

Performance Guide

-
- - -
LanguageColdCachedUse Case
Nickel30-60 ms1-5 msType-safe configs, lazy evaluation
Tera5-20 ms1-5 msSimple templating
-
-

Status Codes

-
- - - -
CodeMeaning
200Success (check error field for evaluation errors)
400Invalid request
500Daemon error
-
-

Response Fields

-
{
-  "rendered": "...output or null on error",
-  "error": "...error message or null on success",
-  "language": "nickel|tera",
-  "execution_time_ms": 23
-}
-
-

Languages Comparison

-

Nickel

-
{
-  name = "server",
-  type = "web",
-  cpu = 4,
-  memory = 8192,
-  tags = {
-    env = "prod",
-    team = "platform"
-  }
-}
-
-

Pros: Lazy evaluation, functional style, compact -Cons: Different paradigm, smaller ecosystem

-

Tera

-
Server: {{ name }}
-Type: {{ type | upper }}
-{% for tag_name, tag_value in tags %}
-- {{ tag_name }}: {{ tag_value }}
-{% endfor %}
-
-

Pros: Fast, simple, familiar template syntax -Cons: No validation, template-only

-

Caching

-

How it works: SHA256(content + context) → cached result

-

Cache hit: < 5 ms -Cache miss: 20-60 ms (language dependent) -Cache size: 100 entries per language

-

Cache stats:

-
curl -s http://localhost:9091/config/stats | jq '{
-  nickel_cache_hits: .nickel_cache_hits,
-  nickel_renders: .nickel_renders,
-  nickel_hit_ratio: (.nickel_cache_hits / .nickel_renders * 100)
-}'
-
-

Common Tasks

-

Batch Rendering

-
#!/bin/bash
-for config in configs/*.ncl; do
-  curl -X POST http://localhost:9091/config/render \
-    -H "Content-Type: application/json" \
-    -d "$(jq -n --arg content \"$(cat $config)\" \
-      '{language: "nickel", content: $content}')"
-done
-
-

Validate Before Rendering

-
# Nickel validation
-nickel typecheck my-config.ncl
-
-# Daemon validation (via first render)
-curl ... # catches errors in response
-
-

Monitor Cache Performance

-
#!/bin/bash
-while true; do
-  STATS=$(curl -s http://localhost:9091/config/stats)
-  HIT_RATIO=$( echo "$STATS" | jq '.nickel_cache_hits / .nickel_renders * 100')
-  echo "Cache hit ratio: ${HIT_RATIO}%"
-  sleep 5
-done
-
-

Error Examples

-

Missing Binary

-
{
-  "error": "Nickel binary not found. Install Nickel or set NICKEL_PATH",
-  "rendered": null
-}
-
-

Fix: export NICKEL_PATH=/path/to/nickel or install Nickel

-

Syntax Error

-
{
-  "error": "Nickel type checking failed: Type mismatch at line 3",
-  "rendered": null
-}
-
-

Fix: Check Nickel syntax, run nickel typecheck file.ncl directly

-

Integration Quick Start

-

Nushell

-
use lib_provisioning
-
-let config = open server.ncl | into string
-let result = (curl -X POST http://localhost:9091/config/render \
-  -H "Content-Type: application/json" \
-  -d {language: "nickel", content: $config} | from json)
-
-if ($result.error != null) {
-  error $result.error
-} else {
-  print $result.rendered
-}
-
-

Python

-
import requests
-
-resp = requests.post("http://localhost:9091/config/render", json={
-    "language": "nickel",
-    "content": '{name = "server"}',
-    "context": {}
-})
-result = resp.json()
-print(result["rendered"] if not result["error"] else f"Error: {result['error']}")
-
-

Bash

-
render() {
-  curl -s -X POST http://localhost:9091/config/render \
-    -H "Content-Type: application/json" \
-    -d "$1" | jq '.'
-}
-
-# Usage
-render '{"language":"nickel","content":"{name = \"server\"}"}'
-
-

Environment Variables

-
# Daemon configuration
-PROVISIONING_LOG_LEVEL=debug        # Log level
-DAEMON_BIND=127.0.0.1:9091         # Bind address
-NUSHELL_PATH=/usr/local/bin/nu      # Nushell binary
-NICKEL_PATH=/usr/local/bin/nickel   # Nickel binary
-
-

Useful Commands

-
# Health check
-curl http://localhost:9091/health
-
-# Daemon info
-curl http://localhost:9091/info
-
-# View stats
-curl http://localhost:9091/config/stats | jq '.'
-
-# Pretty print stats
-curl -s http://localhost:9091/config/stats | jq '{
-  total: .total_renders,
-  success_rate: (.successful_renders / .total_renders * 100),
-  avg_time: .average_time_ms,
-  cache_hit_rate: ((.nickel_cache_hits + .tera_cache_hits) / (.nickel_renders + .tera_renders) * 100)
-}'
-
-

Troubleshooting Checklist

-
    -
  • -Daemon running? curl http://localhost:9091/health
  • -
  • -Correct content for language?
  • -
  • -Valid JSON in context?
  • -
  • -Nickel or Tera binary available?
  • -
  • -Check log level? PROVISIONING_LOG_LEVEL=debug
  • -
  • -Cache hit rate? /config/stats
  • -
  • -Error in response? Check error field
  • -
-

Configuration Guide

-

This comprehensive guide explains the configuration system of the Infrastructure Automation platform, helping you understand, customize, and manage -all configuration aspects.

-

What You’ll Learn

-
    -
  • Understanding the configuration hierarchy and precedence
  • -
  • Working with different configuration file types
  • -
  • Configuration interpolation and templating
  • -
  • Environment-specific configurations
  • -
  • User customization and overrides
  • -
  • Validation and troubleshooting
  • -
  • Advanced configuration patterns
  • -
-

Configuration Architecture

-

Configuration Hierarchy

-

The system uses a layered configuration approach with clear precedence rules:

-
Runtime CLI arguments (highest precedence)
-    ↓ (overrides)
-Environment Variables
-    ↓ (overrides)
-Infrastructure Config (./.provisioning.toml)
-    ↓ (overrides)
-Project Config (./provisioning.toml)
-    ↓ (overrides)
-User Config (~/.config/provisioning/config.toml)
-    ↓ (overrides)
-System Defaults (config.defaults.toml) (lowest precedence)
-
-

Configuration File Types

-
- - - - - - -
File TypePurposeLocationFormat
System DefaultsBase system configurationconfig.defaults.tomlTOML
User ConfigPersonal preferences~/.config/provisioning/config.tomlTOML
Project ConfigProject-wide settings./provisioning.tomlTOML
Infrastructure ConfigInfra-specific settings./.provisioning.tomlTOML
Environment ConfigEnvironment overridesconfig.{env}.tomlTOML
Infrastructure DefinitionsInfrastructure as Codemain.ncl, *.nclNickel
-
-

Understanding Configuration Sections

-

Core System Configuration

-
[core]
-version = "1.0.0"           # System version
-name = "provisioning"       # System identifier
-
-

Path Configuration

-

The most critical configuration section that defines where everything is located:

-
[paths]
-# Base directory - all other paths derive from this
-base = "/usr/local/provisioning"
-
-# Derived paths (usually don't need to change these)
-kloud = "{{paths.base}}/infra"
-providers = "{{paths.base}}/providers"
-taskservs = "{{paths.base}}/taskservs"
-clusters = "{{paths.base}}/cluster"
-resources = "{{paths.base}}/resources"
-templates = "{{paths.base}}/templates"
-tools = "{{paths.base}}/tools"
-core = "{{paths.base}}/core"
-
-[paths.files]
-# Important file locations
-settings_file = "settings.ncl"
-keys = "{{paths.base}}/keys.yaml"
-requirements = "{{paths.base}}/requirements.yaml"
-
-

Debug and Logging

-
[debug]
-enabled = false             # Enable debug mode
-metadata = false           # Show internal metadata
-check = false              # Default to check mode (dry run)
-remote = false            # Enable remote debugging
-log_level = "info"        # Logging verbosity
-no_terminal = false       # Disable terminal features
-
-

Output Configuration

-
[output]
-file_viewer = "less"       # File viewer command
-format = "yaml"           # Default output format (json, yaml, toml, text)
-
-

Provider Configuration

-
[providers]
-default = "local"         # Default provider
-
-[providers.aws]
-api_url = ""              # AWS API endpoint (blank = default)
-auth = ""                 # Authentication method
-interface = "CLI"         # Interface type (CLI or API)
-
-[providers.upcloud]
-api_url = "https://api.upcloud.com/1.3"
-auth = ""
-interface = "CLI"
-
-[providers.local]
-api_url = ""
-auth = ""
-interface = "CLI"
-
-

Encryption (SOPS) Configuration

-
[sops]
-use_sops = true           # Enable SOPS encryption
-config_path = "{{paths.base}}/.sops.yaml"
-
-# Search paths for Age encryption keys
-key_search_paths = [
-    "{{paths.base}}/keys/age.txt",
-    "~/.config/sops/age/keys.txt"
-]
-
-

Configuration Interpolation

-

The system supports powerful interpolation patterns for dynamic configuration values.

-

Basic Interpolation Patterns

-

Path Interpolation

-
# Reference other path values
-templates = "{{paths.base}}/my-templates"
-custom_path = "{{paths.providers}}/custom"
-
-

Environment Variable Interpolation

-
# Access environment variables
-user_home = "{{env.HOME}}"
-current_user = "{{env.USER}}"
-custom_path = "{{env.CUSTOM_PATH || /default/path}}"  # With fallback
-
-

Date/Time Interpolation

-
# Dynamic date/time values
-log_file = "{{paths.base}}/logs/app-{{now.date}}.log"
-backup_dir = "{{paths.base}}/backups/{{now.timestamp}}"
-
-

Git Information Interpolation

-
# Git repository information
-deployment_branch = "{{git.branch}}"
-version_tag = "{{git.tag}}"
-commit_hash = "{{git.commit}}"
-
-

Cross-Section References

-
# Reference values from other sections
-database_host = "{{providers.aws.database_endpoint}}"
-api_key = "{{sops.decrypted_key}}"
-
-

Advanced Interpolation

-

Function Calls

-
# Built-in functions
-config_path = "{{path.join(env.HOME, .config, provisioning)}}"
-safe_name = "{{str.lower(str.replace(project.name, ' ', '-'))}}"
-
-

Conditional Expressions

-
# Conditional logic
-debug_level = "{{debug.enabled && 'debug' || 'info'}}"
-storage_path = "{{env.STORAGE_PATH || path.join(paths.base, 'storage')}}"
-
-

Interpolation Examples

-
[paths]
-base = "/opt/provisioning"
-workspace = "{{env.HOME}}/provisioning-workspace"
-current_project = "{{paths.workspace}}/{{env.PROJECT_NAME || 'default'}}"
-
-[deployment]
-environment = "{{env.DEPLOY_ENV || 'development'}}"
-timestamp = "{{now.iso8601}}"
-version = "{{git.tag || git.commit}}"
-
-[database]
-connection_string = "postgresql://{{env.DB_USER}}:{{env.DB_PASS}}@{{env.DB_HOST || 'localhost'}}/{{env.DB_NAME}}"
-
-[notifications]
-slack_channel = "#{{env.TEAM_NAME || 'general'}}-notifications"
-email_subject = "Deployment {{deployment.environment}} - {{deployment.timestamp}}"
-
-

Environment-Specific Configuration

-

Environment Detection

-

The system automatically detects the environment using:

-
    -
  1. PROVISIONING_ENV environment variable
  2. -
  3. Git branch patterns (dev, staging, main/master)
  4. -
  5. Directory patterns (development, staging, production)
  6. -
  7. Explicit configuration
  8. -
-

Environment Configuration Files

-

Create environment-specific configurations:

-

Development Environment (config.dev.toml)

-
[core]
-name = "provisioning-dev"
-
-[debug]
-enabled = true
-log_level = "debug"
-metadata = true
-
-[providers]
-default = "local"
-
-[cache]
-enabled = false  # Disable caching for development
-
-[notifications]
-enabled = false  # No notifications in dev
-
-

Testing Environment (config.test.toml)

-
[core]
-name = "provisioning-test"
-
-[debug]
-enabled = true
-check = true     # Default to check mode in testing
-log_level = "info"
-
-[providers]
-default = "local"
-
-[infrastructure]
-auto_cleanup = true  # Clean up test resources
-resource_prefix = "test-{{git.branch}}-"
-
-

Production Environment (config.prod.toml)

-
[core]
-name = "provisioning-prod"
-
-[debug]
-enabled = false
-log_level = "warn"
-
-[providers]
-default = "aws"
-
-[security]
-require_approval = true
-audit_logging = true
-encrypt_backups = true
-
-[notifications]
-enabled = true
-critical_only = true
-
-

Environment Switching

-
# Set environment for session
-export PROVISIONING_ENV=dev
-provisioning env
-
-# Use environment for single command
-provisioning --environment prod server create
-
-# Switch environment permanently
-provisioning env set prod
-
-

User Configuration Customization

-

Creating Your User Configuration

-
# Initialize user configuration from template
-provisioning init config
-
-# Or copy and customize
-cp config-examples/config.user.toml ~/.config/provisioning/config.toml
-
-

Common User Customizations

-

Developer Setup

-
[paths]
-base = "/Users/alice/dev/provisioning"
-
-[debug]
-enabled = true
-log_level = "debug"
-
-[providers]
-default = "local"
-
-[output]
-format = "json"
-file_viewer = "code"
-
-[sops]
-key_search_paths = [
-    "/Users/alice/.config/sops/age/keys.txt"
-]
-
-

Operations Engineer Setup

-
[paths]
-base = "/opt/provisioning"
-
-[debug]
-enabled = false
-log_level = "info"
-
-[providers]
-default = "aws"
-
-[output]
-format = "yaml"
-
-[notifications]
-enabled = true
-email = "ops-team@company.com"
-
-

Team Lead Setup

-
[paths]
-base = "/home/teamlead/provisioning"
-
-[debug]
-enabled = true
-metadata = true
-log_level = "info"
-
-[providers]
-default = "upcloud"
-
-[security]
-require_confirmation = true
-audit_logging = true
-
-[sops]
-key_search_paths = [
-    "/secure/keys/team-lead.txt",
-    "~/.config/sops/age/keys.txt"
-]
-
-

Project-Specific Configuration

-

Project Configuration File (provisioning.toml)

-
[project]
-name = "web-application"
-description = "Main web application infrastructure"
-version = "2.1.0"
-team = "platform-team"
-
-[paths]
-# Project-specific path overrides
-infra = "./infrastructure"
-templates = "./custom-templates"
-
-[defaults]
-# Project defaults
-provider = "aws"
-region = "us-west-2"
-environment = "development"
-
-[cost_controls]
-max_monthly_budget = 5000.00
-alert_threshold = 0.8
-
-[compliance]
-required_tags = ["team", "environment", "cost-center"]
-encryption_required = true
-backup_required = true
-
-[notifications]
-slack_webhook = "https://hooks.slack.com/services/..."
-team_email = "platform-team@company.com"
-
-

Infrastructure-Specific Configuration (.provisioning.toml)

-
[infrastructure]
-name = "production-web-app"
-environment = "production"
-region = "us-west-2"
-
-[overrides]
-# Infrastructure-specific overrides
-debug.enabled = false
-debug.log_level = "error"
-cache.enabled = true
-
-[scaling]
-auto_scaling_enabled = true
-min_instances = 3
-max_instances = 20
+# Verify Nickel
+nickel --version  # 1.15.1+
 
-[security]
-vpc_id = "vpc-12345678"
-subnet_ids = ["subnet-12345678", "subnet-87654321"]
-security_group_id = "sg-12345678"
+# Check SOPS and Age
+sops --version  # 3.10.2+
+age --version   # 1.2.1+
 
-[monitoring]
-enabled = true
-retention_days = 90
-alerting_enabled = true
+# Verify K9s
+k9s version  # 0.50.6+
 
-

Configuration Validation

-

Built-in Validation

-
# Validate current configuration
+

Configuration Validation

+
# Validate all configuration files
 provisioning validate config
 
-# Detailed validation with warnings
-provisioning validate config --detailed
+# Check environment
+provisioning env
 
-# Strict validation mode
-provisioning validate config strict
-
-# Validate specific environment
-provisioning validate config --environment prod
+# Show all configuration
+provisioning allenv
 
-

Custom Validation Rules

-

Create custom validation in your configuration:

-
[validation]
-# Custom validation rules
-required_sections = ["paths", "providers", "debug"]
-required_env_vars = ["AWS_REGION", "PROJECT_NAME"]
-forbidden_values = ["password123", "admin"]
-
-[validation.paths]
-# Path validation rules
-base_must_exist = true
-writable_required = ["paths.base", "paths.cache"]
-
-[validation.security]
-# Security validation
-require_encryption = true
-min_key_length = 32
+

Expected output:

+
Configuration validation: PASSED
+  - User config: ~/.config/provisioning/user_config.yaml ✓
+  - System defaults: provisioning/config/config.defaults.toml ✓
+  - Provider credentials: configured ✓
 
-

Troubleshooting Configuration

-

Common Configuration Issues

-

Issue 1: Path Not Found Errors

-
# Problem: Base path doesn't exist
-# Check current configuration
-provisioning env | grep paths.base
+

Provider Connectivity

+
# List available providers
+provisioning providers
 
-# Verify path exists
-ls -la /path/shown/above
+# Test provider connection (UpCloud example)
+provisioning provider test upcloud
 
-# Fix: Update user config
-nano ~/.config/provisioning/config.toml
-# Set correct paths.base = "/correct/path"
-
-

Issue 2: Interpolation Failures

-
# Problem: {{env.VARIABLE}} not resolving
-# Check environment variables
-env | grep VARIABLE
-
-# Check interpolation
-provisioning validate interpolation test
-
-# Debug interpolation
-provisioning --debug validate interpolation validate
-
-

Issue 3: SOPS Encryption Errors

-
# Problem: Cannot decrypt SOPS files
-# Check SOPS configuration
-provisioning sops config
-
-# Verify key files
-ls -la ~/.config/sops/age/keys.txt
-
-# Test decryption
-sops -d encrypted-file.ncl
-
-

Issue 4: Provider Authentication

-
# Problem: Provider authentication failed
-# Check provider configuration
-provisioning show providers
-
-# Test provider connection
+# Test provider connection (AWS example)
 provisioning provider test aws
+
+

Workspace Verification

+

Workspace Structure

+
# List workspaces
+provisioning workspace list
+
+# Show current workspace
+provisioning workspace current
+
+# Verify workspace structure
+ls -la <workspace-name>/
+
+

Expected structure:

+
workspace-name/
+├── infra/          # Infrastructure Nickel schemas
+├── config/         # Workspace configuration
+├── extensions/     # Custom extensions
+└── runtime/        # State and logs
+
+

Workspace Configuration

+
# Show workspace configuration
+provisioning config show
+
+# Validate workspace-specific config
+provisioning validate config --workspace <name>
+
+

Infrastructure Verification

+

Server Health

+
# List all servers
+provisioning server list
+
+# Check server status
+provisioning server status <hostname>
+
+# Test SSH connectivity
+provisioning server ssh <hostname> -- echo "Connection successful"
+
+

Task Service Health

+
# List installed task services
+provisioning taskserv list
+
+# Check service status
+provisioning taskserv status <service-name>
+
+# Verify service health
+provisioning taskserv health <service-name>
+
+

Cluster Health

+

For Kubernetes clusters:

+
# SSH to control plane
+provisioning server ssh <control-hostname>
+
+# Check cluster nodes
+kubectl get nodes
+
+# Check system pods
+kubectl get pods -n kube-system
+
+# Check cluster info
+kubectl cluster-info
+
+

Platform Services Verification

+

Orchestrator Service

+
# Check orchestrator status
+curl  [http://localhost:5000/health](http://localhost:5000/health)
+
+# View orchestrator version
+curl  [http://localhost:5000/version](http://localhost:5000/version)
+
+# List active workflows
+provisioning workflow list
+
+

Expected response:

+
{
+  "status": "healthy",
+  "version": "x.x.x",
+  "uptime": "2h 15m"
+}
+
+

Control Center

+
# Check control center
+curl  [http://localhost:8080/health](http://localhost:8080/health)
+
+# Access web UI
+open  [http://localhost:8080](http://localhost:8080)  # macOS
+xdg-open  [http://localhost:8080](http://localhost:8080)  # Linux
+
+

Native Plugins

+
# List registered plugins
+nu -c "plugin list"
+
+# Verify plugins loaded
+nu -c "plugin use nu_plugin_auth; plugin use nu_plugin_kms; plugin use nu_plugin_orchestrator"
+
+

Security Verification

+

Secrets Management

+
# Verify SOPS configuration
+cat ~/.config/provisioning/.sops.yaml
+
+# Test encryption/decryption
+echo "test secret" > /tmp/test-secret.txt
+sops -e /tmp/test-secret.txt > /tmp/test-secret.enc
+sops -d /tmp/test-secret.enc
+rm /tmp/test-secret.*
+
+

SSH Keys

+
# Verify SSH keys exist
+ls -la ~/.ssh/provisioning_*
+
+# Test SSH key permissions
+ls -l ~/.ssh/provisioning_* | awk '{print $1}'
+# Should show: -rw------- (600)
+
+

Encrypted Configuration

+
# Verify user config encryption
+file ~/.config/provisioning/user_config.yaml
+
+# Should show: SOPS encrypted data or YAML
+
+

Troubleshooting Common Issues

+

CLI Not Found

+
# Check PATH
+echo $PATH | tr ':' '
+' | grep provisioning
+
+# Verify symlink
+ls -l /usr/local/bin/provisioning
+
+# Try direct execution
+/path/to/project-provisioning/provisioning/core/cli/provisioning version
+
+

Provider Authentication Fails

+
# Verify credentials are set
+provisioning config show | grep -A5 providers
+
+# Test with debug mode
+provisioning --debug provider test <provider-name>
+
+# Check network connectivity
+ping -c 3 api.upcloud.com  # UpCloud
+ping -c 3 ec2.amazonaws.com  # AWS
+
+

Nickel Schema Errors

+
# Type-check schema
+nickel typecheck <schema-file>.ncl
+
+# Validate with verbose output
+provisioning validate config --verbose
+
+# Format Nickel file
+nickel fmt <schema-file>.ncl
+
+

Server SSH Fails

+
# Verify SSH key
+ssh-add -l | grep provisioning
+
+# Test direct SSH
+ssh -i ~/.ssh/provisioning_rsa root@<server-ip>
+
+# Check server status
+provisioning server status <hostname>
+
+

Task Service Installation Fails

+
# Check dependencies
+provisioning taskserv dependencies <service>
+
+# Verify server has resources
+provisioning server ssh <hostname> -- df -h
+provisioning server ssh <hostname> -- free -h
+
+# Enable debug mode
+provisioning --debug taskserv create <service>
+
+

Health Check Checklist

+

Complete verification checklist:

+
# Core tools
+[x] Nushell 0.109.1+
+[x] Nickel 1.15.1+
+[x] SOPS 3.10.2+
+[x] Age 1.2.1+
+[x] K9s 0.50.6+
+
+# Configuration
+[x] User config valid
+[x] Provider credentials configured
+[x] Workspace initialized
+
+# Provider connectivity
+[x] Provider API accessible
+[x] Authentication successful
+
+# Infrastructure (if deployed)
+[x] Servers running
+[x] SSH connectivity working
+[x] Task services installed
+[x] Cluster healthy
+
+# Platform services (if running)
+[x] Orchestrator responsive
+[x] Control center accessible
+[x] Plugins registered
+
+# Security
+[x] Secrets encrypted
+[x] SSH keys secured
+[x] Configuration protected
+
+

Performance Verification

+

Response Times

+
# CLI response time
+time provisioning version
+
+# Provider API response time
+time provisioning provider test <provider>
+
+# Orchestrator response time
+time curl  [http://localhost:5000/health](http://localhost:5000/health)
+
+

Acceptable ranges:

+
    +
  • CLI commands: <1 second
  • +
  • Provider API: <3 seconds
  • +
  • Orchestrator API: <100ms
  • +
+

Resource Usage

+
# Check system resources
+htop  # Interactive process viewer
+
+# Check disk usage
+df -h
+
+# Check memory usage
+free -h
+
+

Next Steps

+

Once verification is complete:

+ +

Setup & Configuration

+

Post-installation configuration and system setup for the Provisioning platform.

+

Overview

+

After installation, setup configures your system and prepares workspaces for infrastructure deployment.

+

Setup encompasses three critical phases:

+
    +
  1. Initial Setup - Environment detection, dependency verification, directory creation
  2. +
  3. Workspace Setup - Create workspaces, configure providers, initialize schemas
  4. +
  5. Configuration - Provider credentials, system settings, profiles, validation
  6. +
+

This process validates prerequisites, detects your environment, and bootstraps your first workspace.

+

Quick Setup

+

Get up and running in 3 commands:

+
# 1. Complete initial setup (detects system, creates dirs, validates dependencies)
+provisioning setup initial
+
+# 2. Create first workspace (for your infrastructure)
+provisioning workspace create --name production
+
+# 3. Add cloud provider credentials (AWS, UpCloud, Hetzner, etc.)
+provisioning config set --workspace production \
+  extensions.providers.aws.enabled true \
+  extensions.providers.aws.config.region us-east-1
+
+# 4. Verify configuration is valid
+provisioning validate config
+
+

Setup Process Explained

+

The setup system automatically:

+
    +
  1. System Detection - Detects OS (Linux, macOS, Windows), CPU architecture, RAM, disk space
  2. +
  3. Dependency Verification - Validates Nushell, Nickel, SOPS, Age, K9s installation
  4. +
  5. Directory Structure - Creates ~/.provisioning/, ~/.config/provisioning/, workspace directories
  6. +
  7. Configuration Creation - Initializes default configuration, security settings, profiles
  8. +
  9. Workspace Bootstrap - Creates default workspace with basic configuration
  10. +
  11. Health Checks - Validates installation, runs diagnostic tests
  12. +
+

All steps are logged and can be verified with provisioning status.

+

Setup Configuration Guides

+

Starting Fresh

+
    +
  • +

    Initial Setup - First-time system setup: detection, validation, directory +creation, default configuration, health checks.

    +
  • +
  • +

    Workspace Setup - Create and initialize workspaces: creation, +provider configuration, schema management, local customization.

    +
  • +
  • +

    Configuration Management - Configure system: providers, credentials, +profiles, environment variables, validation rules.

    +
  • +
+

Setup Profiles

+

Pre-configured setup profiles for different use cases:

+

Developer Profile

+
provisioning setup profile --profile developer
+# Configures for local development with demo provider
+
+

Production Profile

+
provisioning setup profile --profile production
+# Configures for production with security hardening
+
+

Custom Profile

+
provisioning setup profile --custom
+# Interactive setup with customization
+
+

Directory Structure Created

+

Setup creates this directory structure:

+
~/.provisioning/
+├── workspaces/           # Workspace data
+├── cache/                # Build and dependency cache
+├── plugins/              # Installed Nushell plugins
+└── detectors/            # Custom detectors
+
+~/.config/provisioning/
+├── config.toml          # Main configuration
+├── providers/           # Provider credentials
+├── secrets/             # Encrypted secrets (via SOPS)
+└── profiles/            # Setup profiles
+
+

Quick Setup Verification

+
# Check system status
+provisioning status
+
+# Verify all dependencies
+provisioning setup verify-dependencies
+
+# Test cloud provider connection
+provisioning provider test --name aws
+
+# Validate configuration
+provisioning validate config
+
+# Run health checks
+provisioning health check
+
+

Environment-Specific Setup

+

For Single Workspace (Simple)

+
    +
  1. Run Initial Setup
  2. +
  3. Create one workspace
  4. +
  5. Configure provider
  6. +
  7. Done!
  8. +
+

For Multiple Workspaces (Team)

+
    +
  1. Run Initial Setup
  2. +
  3. Create multiple workspaces per team
  4. +
  5. Configure shared providers
  6. +
  7. Set up workspace-specific schemas
  8. +
+

For Multi-Cloud (Enterprise)

+
    +
  1. Run Initial Setup with production profile
  2. +
  3. Create workspace per environment (dev, staging, prod)
  4. +
  5. Configure multiple cloud providers
  6. +
  7. Enable audit logging and security features
  8. +
+

Configuration Hierarchy

+

Configurations load in priority order:

+
1. Command-line arguments       (highest)
+2. Environment variables        (PROVISIONING_*)
+3. User profile config         (~/.config/provisioning/)
+4. Workspace config            (workspace/config/)
+5. System defaults             (provisioning/config/)
+                               (lowest)
+
+

Common Setup Tasks

+

Add a Cloud Provider

+
provisioning config set --workspace production \
+  extensions.providers.aws.config.region us-east-1 \
+  extensions.providers.aws.config.credentials_source aws_iam
+
+

Configure Secrets Storage

+
provisioning config set \
+  security.secrets.backend secretumvault \
+  security.secrets.url  [http://localhost:8200](http://localhost:8200)
+
+

Enable Audit Logging

+
provisioning config set \
+  security.audit.enabled true \
+  security.audit.retention_days 2555
+
+

Set Up Multi-Tenancy

+
# Create separate workspaces per tenant
+provisioning workspace create --name tenant-1
+provisioning workspace create --name tenant-2
+
+# Each workspace has isolated configuration
+
+

Setup Validation

+

After setup, validate everything works:

+
# Run complete validation suite
+provisioning setup validate-all
+
+# Or check specific components
+provisioning setup validate-system       # OS, dependencies
+provisioning setup validate-directories  # Directory structure
+provisioning setup validate-config       # Configuration syntax
+provisioning setup validate-providers    # Cloud provider connectivity
+provisioning setup validate-security     # Security settings
+
+

Troubleshooting Setup

+

If setup fails:

+
    +
  1. Check logs - provisioning setup logs --tail 20
  2. +
  3. Verify dependencies - provisioning setup verify-dependencies
  4. +
  5. Reset configuration - provisioning setup reset --workspace <name>
  6. +
  7. Run diagnostics - provisioning diagnose setup
  8. +
  9. Check documentation - See Troubleshooting
  10. +
+

Next Steps After Setup

+

After initial setup completes:

+
    +
  1. Create workspaces - See Workspace Setup
  2. +
  3. Configure providers - See Configuration Management
  4. +
  5. Deploy infrastructure - See Getting Started
  6. +
  7. Learn features - See Features
  8. +
  9. Explore examples - See Examples
  10. +
+ +
    +
  • Getting Started → See provisioning/docs/src/getting-started/
  • +
  • Features → See provisioning/docs/src/features/
  • +
  • Configuration Guide → See provisioning/docs/src/infrastructure/
  • +
  • Troubleshooting → See provisioning/docs/src/troubleshooting/
  • +
+

Initial Setup

+

Configure Provisioning after installation.

+

Overview

+

Initial setup validates your environment and prepares Provisioning for workspace +creation. The setup process performs system detection, dependency verification, and +configuration initialization.

+

Prerequisites

+

Before initial setup, ensure:

+
    +
  1. Provisioning CLI installed and in PATH
  2. +
  3. Nushell 0.109.0+ installed
  4. +
  5. Nickel installed
  6. +
  7. SOPS 3.10.2+ installed
  8. +
  9. Age 1.2.1+ installed
  10. +
  11. K9s 0.50.6+ installed (for Kubernetes)
  12. +
+

Verify installation:

+
provisioning version
+nu --version
+nickel --version
+sops --version
+age --version
+
+

Setup Profiles

+

Provisioning provides configuration profiles for different use cases:

+

1. Developer Profile

+

For local development and testing:

+
provisioning setup profile --profile developer
+
+

Includes:

+
    +
  • Local provider (simulation environment)
  • +
  • Development workspace
  • +
  • Test environment configuration
  • +
  • Debug logging enabled
  • +
  • No MFA required
  • +
  • Workspace directory: ~/.provisioning-dev/
  • +
+

2. Production Profile

+

For production deployments:

+
provisioning setup profile --profile production
+
+

Includes:

+
    +
  • Encrypted configuration
  • +
  • Strict validation rules
  • +
  • MFA enabled
  • +
  • Audit logging enabled
  • +
  • Workspace directory: /opt/provisioning/
  • +
+

3. CI/CD Profile

+

For unattended automation:

+
provisioning setup profile --profile cicd
+
+

Includes:

+
    +
  • Headless mode (no TUI prompts)
  • +
  • Service account authentication
  • +
  • Automated backups
  • +
  • Policy enforcement
  • +
  • Unattended upgrade support
  • +
+

Configuration Detection

+

The setup system automatically detects:

+
# System detection
+OS:            $(uname -s)
+CPU:           $(lscpu | grep 'CPU(s)' | awk '{print $NF}')
+RAM:           $(free -h | grep Mem | awk '{print $2}')
+Architecture:  $(uname -m)
+
+

The system adapts configuration based on detected resources:

+
+ + + + + + +
Detected ResourceConfiguration
2-4 CPU coresSolo (single-instance) mode
4-8 CPU coresMultiUser mode (small cluster)
8+ CPU coresCICD or Enterprise mode
4GB RAMMinimal services only
8GB RAMStandard setup
16GB+ RAMFull feature set
+
+

Setup Steps

+

Step 1: Validate Environment

+
provisioning setup validate
+
+

Checks:

+
    +
  • ✅ All dependencies installed
  • +
  • ✅ Permission levels
  • +
  • ✅ Network connectivity
  • +
  • ✅ Disk space (minimum 20GB recommended)
  • +
+

Step 2: Initialize Configuration

+
provisioning setup init
+
+

Creates:

+
    +
  • ~/.config/provisioning/ - User configuration directory
  • +
  • ~/.config/provisioning/user_config.yaml - User settings
  • +
  • ~/.provisioning/workspaces/ - Workspace registry
  • +
+

Step 3: Configure Providers

+
provisioning setup providers
+
+

Interactive configuration for:

+
    +
  • UpCloud (API key, endpoint)
  • +
  • AWS (Access key, secret, region)
  • +
  • Hetzner (API token)
  • +
  • Local (No configuration required)
  • +
+

Store credentials securely:

+
# Credentials are encrypted with SOPS + Age
+~/.config/provisioning/.secrets/providers.enc.yaml
+
+

Step 4: Configure Security

+
provisioning setup security
+
+

Sets up:

+
    +
  • JWT secret for authentication
  • +
  • KMS backend (local, Cosmian, AWS KMS)
  • +
  • Encryption keys
  • +
  • Certificate authorities
  • +
+

Step 5: Verify Installation

+
provisioning verify
+
+

Checks:

+
    +
  • ✅ All components running
  • +
  • ✅ Provider connectivity
  • +
  • ✅ Configuration validity
  • +
  • ✅ Security systems operational
  • +
+

User Configuration

+

User configuration is stored in ~/.config/provisioning/user_config.yaml:

+
# User preferences
+user:
+  name: "Your Name"
+  email: "[your@email.com](mailto:your@email.com)"
+  default_region: "us-east-1"
+
+# Workspace settings
+workspaces:
+  active: "my-project"
+  directory: "~/.provisioning/workspaces/"
+  registry:
+    my-project:
+      path: "/home/user/.provisioning/workspaces/workspace_my_project"
+      created: "2026-01-16T10:30:00Z"
+      template: "default"
+
+# Provider defaults
+providers:
+  default: "upcloud"
+  upcloud:
+    endpoint: " [https://api.upcloud.com"](https://api.upcloud.com")
+  aws:
+    region: "us-east-1"
+
+# Security settings
+security:
+  mfa_enabled: false
+  kms_backend: "local"
+  encryption: "aes-256-gcm"
+
+# Display options
+ui:
+  theme: "dark"
+  table_format: "compact"
+  colors: true
+
+# Logging
+logging:
+  level: "info"
+  output: "console"
+  file: "~/.provisioning/logs/provisioning.log"
+
+

Environment Variables

+

Override settings with environment variables:

+
# Provider selection
+export PROVISIONING_PROVIDER=aws
+
+# Workspace selection
+export PROVISIONING_WORKSPACE=my-project
+
+# Logging
+export PROVISIONING_LOG_LEVEL=debug
+
+# Configuration path
+export PROVISIONING_CONFIG=~/.config/provisioning/
+
+# KMS endpoint
+export PROVISIONING_KMS_ENDPOINT= [http://localhost:8080](http://localhost:8080)
+
+

Troubleshooting

+

Missing Dependencies

+
# Install missing tools
+brew install nushell nickel sops age k9s
+
+# Verify
+provisioning setup validate
+
+

Permission Errors

+
# Fix directory permissions
+chmod 700 ~/.config/provisioning/
+chmod 600 ~/.config/provisioning/user_config.yaml
+
+

Provider Connection Failed

+
# Test provider connectivity
+provisioning providers test upcloud --verbose
 
 # Verify credentials
-aws configure list  # For AWS
+cat ~/.config/provisioning/.secrets/providers.enc.yaml
 
-

Configuration Debugging

-
# Show current configuration hierarchy
-provisioning config show --hierarchy
-
-# Show configuration sources
-provisioning config sources
-
-# Show interpolated values
-provisioning config interpolated
-
-# Debug specific section
-provisioning config debug paths
-provisioning config debug providers
+

Next Steps

+

After initial setup:

+
    +
  1. Create workspace
  2. +
  3. Configure infrastructure
  4. +
  5. Deploy first cluster
  6. +
+

Workspace Setup

+

Create and initialize your first Provisioning workspace.

+

Overview

+

A workspace is the default organizational unit for all infrastructure work in Provisioning. +It groups infrastructure definitions, configurations, extensions, and runtime data in an +isolated environment.

+

Workspace Structure

+

Every workspace follows a consistent directory structure:

+
workspace_my_project/
+├── config/                     # Workspace configuration
+│   ├── workspace.ncl           # Workspace definition (Nickel)
+│   ├── provisioning.yaml       # Workspace metadata
+│   ├── dev-defaults.toml       # Development environment settings
+│   ├── test-defaults.toml      # Testing environment settings
+│   └── prod-defaults.toml      # Production environment settings
+│
+├── infra/                      # Infrastructure definitions
+│   ├── servers.ncl             # Server configurations
+│   ├── clusters.ncl            # Cluster definitions
+│   ├── networks.ncl            # Network configurations
+│   └── batch-workflows.ncl     # Batch workflow definitions
+│
+├── extensions/                 # Workspace-specific extensions (optional)
+│   ├── providers/              # Custom providers
+│   ├── taskservs/              # Custom task services
+│   ├── clusters/               # Custom cluster templates
+│   └── workflows/              # Custom workflow definitions
+│
+└── runtime/                    # Runtime data (gitignored)
+    ├── state/                  # Infrastructure state files
+    ├── checkpoints/            # Workflow checkpoints
+    ├── logs/                   # Operation logs
+    └── generated/              # Generated configuration files
 
-

Configuration Reset

+

Creating a Workspace

+

Method 1: From Built-in Template

+
# Create from default template
+provisioning workspace init my-project
+
+# Create from specific template
+provisioning workspace init my-k8s --template kubernetes-ha
+
+# Create with custom path
+provisioning workspace init my-project --path /custom/location
+
+

Method 2: From Git Repository

+
# Clone infrastructure repository
+git clone  [https://github.com/org/infra-repo.git](https://github.com/org/infra-repo.git) my-infra
+cd my-infra
+
+# Import as workspace
+provisioning workspace init . --import
+
+

Available Templates

+

Provisioning includes templates for common use cases:

+
+ + + + + + +
TemplateDescriptionUse Case
defaultMinimal structureGeneral-purpose infrastructure
kubernetes-haHA Kubernetes (3 control planes)Production Kubernetes deployments
developmentDev-optimized with Docker ComposeLocal testing and development
multi-cloudMultiple provider configsMulti-cloud deployments
database-clusterDatabase-focusedDatabase infrastructure
cicdCI/CD pipeline configsAutomated deployment pipelines
+
+

List available templates:

+
provisioning workspace templates
+
+# Show template details
+provisioning workspace template show kubernetes-ha
+
+

Switching Workspaces

+

List All Workspaces

+
provisioning workspace list
+
+# Example output:
+NAME              PATH                           LAST_USED          STATUS
+my-project        ~/.provisioning/workspace_my   2026-01-16 10:30   Active
+dev-env           ~/.provisioning/workspace_dev  2026-01-15 15:45
+production        ~/.provisioning/workspace_prod 2026-01-10 09:00
+
+

Switch to a Workspace

+
# Switch workspace
+provisioning workspace switch my-project
+
+# Verify switch
+provisioning workspace status
+
+# Quick switch (shortcut)
+provisioning ws switch dev-env
+
+

When you switch workspaces:

+
    +
  • Active workspace marker updates in user configuration
  • +
  • Environment variables update for current session
  • +
  • CLI prompt changes (if configured)
  • +
  • Last-used timestamp updates
  • +
+

Workspace Registry

+

The workspace registry is stored in user configuration:

+
# ~/.config/provisioning/user_config.yaml
+workspaces:
+  active: my-project
+  registry:
+    my-project:
+      path: ~/.provisioning/workspaces/workspace_my_project
+      created: 2026-01-16T10:30:00Z
+      last_used: 2026-01-16T14:20:00Z
+      template: default
+
+

Configuring Workspace

+

Workspace Definition (workspace.ncl)

+
# workspace.ncl - Workspace configuration
+
+{
+  # Workspace metadata
+  name = "my-project"
+  description = "My infrastructure project"
+  version = "1.0.0"
+
+  # Environment settings
+  environment = 'production
+
+  # Default provider
+  provider = "upcloud"
+
+  # Region preferences
+  region = "de-fra1"
+
+  # Workspace-specific providers (override defaults)
+  providers = {
+    upcloud = {
+      endpoint = " [https://api.upcloud.com"](https://api.upcloud.com")
+      region = "de-fra1"
+    }
+    aws = {
+      region = "us-east-1"
+    }
+  }
+
+  # Extensions (inherit from provisioning/extensions/)
+  extensions = {
+    providers = ["upcloud", "aws"]
+    taskservs = ["kubernetes", "docker", "postgres"]
+    clusters = ["web", "oci-reg"]
+  }
+}
+
+

Environment-Specific Configuration

+

Create environment-specific configuration files:

+
# Development environment
+config/dev-defaults.toml:
+[server]
+plan = "small"
+backup_enabled = false
+
+# Production environment
+config/prod-defaults.toml:
+[server]
+plan = "large"
+backup_enabled = true
+monitoring_enabled = true
+
+

Use environment selection:

+
# Deploy to development
+PROVISIONING_ENV=dev provisioning server create
+
+# Deploy to production (stricter validation)
+PROVISIONING_ENV=prod provisioning server create --validate
+
+

Workspace Metadata (provisioning.yaml)

+
name: "my-project"
+version: "1.0.0"
+created: "2026-01-16T10:30:00Z"
+owner: "team-infra"
+
+# Provider configuration
+providers:
+  default: "upcloud"
+  upcloud:
+    api_endpoint: " [https://api.upcloud.com"](https://api.upcloud.com")
+    region: "de-fra1"
+  aws:
+    region: "us-east-1"
+
+# Workspace features
+features:
+  workspace_switching: true
+  batch_workflows: true
+  test_environment: true
+  security_system: true
+
+# Validation rules
+validation:
+  strict: true
+  check_dependencies: true
+  validate_certificates: true
+
+# Backup settings
+backup:
+  enabled: true
+  frequency: "daily"
+  retention_days: 30
+
+

Initializing Infrastructure

+

Step 1: Create Infrastructure Definition

+

Create infra/servers.ncl:

+
let defaults = import "defaults.ncl" in
+
+{
+  servers = [
+    defaults.make_server {
+      name = "web-01"
+      plan = "medium"
+      region = "de-fra1"
+    }
+    defaults.make_server {
+      name = "db-01"
+      plan = "large"
+      region = "de-fra1"
+      backup_enabled = true
+    }
+  ]
+}
+
+

Step 2: Validate Configuration

+
# Validate Nickel configuration
+nickel typecheck infra/servers.ncl
+
+# Export and validate
+nickel export infra/servers.ncl | provisioning validate config
+
+# Verbose validation
+provisioning validate config --verbose
+
+

Step 3: Export Configuration

+
# Export Nickel to TOML (generated output)
+nickel export --format toml infra/servers.ncl > infra/servers.toml
+
+# The .toml files are auto-generated, don't edit directly
+
+

Workspace Security

+

Securing Credentials

+

Credentials are encrypted with SOPS + Age:

+
# Initialize secrets
+provisioning sops init
+
+# Create encrypted secrets file
+provisioning sops create .secrets/providers.enc.yaml
+
+# Encrypt existing credentials
+sops -e -i infra/credentials.toml
+
+

Git Workflow

+

Version control best practices:

+
# COMMIT (shared with team)
+infra/**/*.ncl              # Infrastructure definitions
+config/*.toml               # Environment configurations
+config/provisioning.yaml    # Workspace metadata
+extensions/**/*             # Custom extensions
+
+# GITIGNORE (never commit)
+config/local-overrides.toml # Local user settings
+runtime/**/*                # Runtime data and state
+**/*.secret                 # Credential files
+**/*.enc                    # Encrypted files (if not decrypted locally)
+
+

Multi-Workspace Strategies

+

Strategy 1: Separate Workspaces Per Environment

+
# Create dedicated workspaces
+provisioning workspace init myapp-dev
+provisioning workspace init myapp-staging
+provisioning workspace init myapp-prod
+
+# Each workspace is completely isolated
+provisioning ws switch myapp-prod
+provisioning server create  # Creates in prod only
+
+

Pros: Complete isolation, different credentials, independent state +Cons: More workspace management, configuration duplication

+

Strategy 2: Single Workspace, Multiple Environments

+
# Single workspace with environment configs
+provisioning workspace init myapp
+
+# Deploy to different environments
+PROVISIONING_ENV=dev provisioning server create
+PROVISIONING_ENV=staging provisioning server create
+PROVISIONING_ENV=prod provisioning server create
+
+

Pros: Shared configuration, easier maintenance +Cons: Shared credentials, risk of cross-environment mistakes

+

Strategy 3: Hybrid Approach

+
# Dev workspace for experimentation
+provisioning workspace init myapp-dev
+
+# Prod workspace for production only
+provisioning workspace init myapp-prod
+
+# Use environment flags within workspaces
+provisioning ws switch myapp-prod
+PROVISIONING_ENV=prod provisioning cluster deploy
+
+

Pros: Balances isolation and convenience +Cons: More complex to explain to teams

+

Workspace Validation

+

Before deploying infrastructure:

+
# Validate entire workspace
+provisioning validate workspace
+
+# Validate specific configuration
+provisioning validate config --infra servers.ncl
+
+# Validate with strict rules
+provisioning validate config --strict
+
+

Troubleshooting

+

Workspace Not Found

+
# Re-register workspace
+provisioning workspace register /path/to/workspace
+
+# Or create new workspace
+provisioning workspace init my-project
+
+

Permission Errors

+
# Fix workspace permissions
+chmod 755 ~/.provisioning/workspaces/workspace_*
+chmod 644 ~/.provisioning/workspaces/workspace_*/config/*
+
+

Configuration Validation Errors

+
# Check configuration syntax
+nickel typecheck infra/*.ncl
+
+# Inspect generated TOML
+nickel export infra/*.ncl | jq '.'
+
+# Debug configuration loading
+provisioning config validate --verbose
+
+

Next Steps

+
    +
  1. Configure infrastructure
  2. +
  3. Deploy servers
  4. +
  5. Create batch workflows
  6. +
+

Configuration Management

+

Configure Provisioning providers, credentials, and system settings.

+

Overview

+

Provisioning uses a hierarchical configuration system with 5 layers of precedence. +Configuration is type-safe via Nickel schemas and can be overridden at multiple levels.

+

Configuration Hierarchy

+
1. Runtime Arguments        (Highest Priority)
+   ↓ (CLI flags: --provider upcloud)
+2. Environment Variables
+   ↓ (PROVISIONING_PROVIDER=upcloud)
+3. Workspace Configuration
+   ↓ (workspace/config/provisioning.yaml)
+4. Environment Defaults
+   ↓ (workspace/config/prod-defaults.toml)
+5. System Defaults          (Lowest Priority)
+   ├─ User Config (~/.config/provisioning/user_config.yaml)
+   └─ Platform Defaults (provisioning/config/config.defaults.toml)
+
+

Configuration Sources

+

1. System Defaults

+

Built-in defaults for all Provisioning settings:

+

Location: provisioning/config/config.defaults.toml

+
# Default provider
+[providers]
+default = "local"
+
+# Default server configuration
+[server]
+plan = "small"
+region = "us-east-1"
+zone = "a"
+backup_enabled = false
+monitoring = false
+
+# Default workspace
+[workspace]
+directory = "~/.provisioning/workspaces/"
+
+# Logging
+[logging]
+level = "info"
+output = "console"
+
+# Security
+[security]
+mfa_enabled = false
+encryption = "aes-256-gcm"
+
+

2. User Configuration

+

User-level settings in home directory:

+

Location: ~/.config/provisioning/user_config.yaml

+
user:
+  name: "Your Name"
+  email: "[user@example.com](mailto:user@example.com)"
+
+providers:
+  default: "upcloud"
+  upcloud:
+    endpoint: " [https://api.upcloud.com"](https://api.upcloud.com")
+    api_key: "${UPCLOUD_API_KEY}"
+  aws:
+    region: "us-east-1"
+    profile: "default"
+
+workspace:
+  directory: "~/.provisioning/workspaces/"
+  default: "my-project"
+
+logging:
+  level: "info"
+  file: "~/.provisioning/logs/provisioning.log"
+
+

3. Workspace Configuration

+

Workspace-specific settings:

+

Location: workspace/config/provisioning.yaml

+
name: "my-project"
+environment: "production"
+
+providers:
+  default: "upcloud"
+  upcloud:
+    region: "de-fra1"
+    endpoint: " [https://api.upcloud.com"](https://api.upcloud.com")
+
+validation:
+  strict: true
+  require_approval: false
+
+

4. Environment Defaults

+

Environment-specific configuration files:

+

Files:

+
    +
  • workspace/config/dev-defaults.toml - Development
  • +
  • workspace/config/test-defaults.toml - Testing
  • +
  • workspace/config/prod-defaults.toml - Production
  • +
+

Example prod-defaults.toml:

+
# Production environment overrides
+[server]
+plan = "large"
+backup_enabled = true
+monitoring = true
+high_availability = true
+
+[security]
+mfa_enabled = true
+require_approval = true
+
+[workspace]
+require_version_tag = true
+require_changelog = true
+
+

5. Runtime Arguments

+

Command-line flags with highest priority:

+
# Override provider
+provisioning --provider aws server create
+
+# Override configuration
+provisioning --config /custom/config.yaml
+
+# Override environment
+provisioning --env production
+
+# Combined
+provisioning --provider aws --env production --format json server list
+
+

Provider Configuration

+

Supported Providers

+
+ + + + +
ProviderStatusConfiguration
UpCloud✅ ActiveAPI endpoint, credentials
AWS✅ ActiveRegion, access keys, profile
Hetzner✅ ActiveAPI token, datacenter
Local✅ ActiveDirectory path (no credentials)
+
+

Configuring UpCloud

+

Interactive setup:

+
provisioning setup providers
+
+

Or manually in ~/.config/provisioning/user_config.yaml:

+
providers:
+  default: "upcloud"
+  upcloud:
+    endpoint: " [https://api.upcloud.com"](https://api.upcloud.com")
+    api_key: "${UPCLOUD_API_KEY}"
+    api_secret: "${UPCLOUD_API_SECRET}"
+
+

Store credentials securely:

+
# Set environment variables
+export UPCLOUD_API_KEY="your-api-key"
+export UPCLOUD_API_SECRET="your-api-secret"
+
+# Or use SOPS for encrypted storage
+provisioning sops set providers.upcloud.api_key "your-api-key"
+
+

Configuring AWS

+
providers:
+  aws:
+    region: "us-east-1"
+    access_key_id: "${AWS_ACCESS_KEY_ID}"
+    secret_access_key: "${AWS_SECRET_ACCESS_KEY}"
+    profile: "default"
+
+

Set environment variables:

+
export AWS_ACCESS_KEY_ID="your-access-key"
+export AWS_SECRET_ACCESS_KEY="your-secret-key"
+export AWS_REGION="us-east-1"
+
+

Configuring Hetzner

+
providers:
+  hetzner:
+    api_token: "${HETZNER_API_TOKEN}"
+    datacenter: "nbg1-dc3"
+
+

Set environment:

+
export HETZNER_API_TOKEN="your-api-token"
+
+

Testing Provider Connectivity

+
# Test provider connectivity
+provisioning providers test upcloud
+
+# Verbose output
+provisioning providers test aws --verbose
+
+# Test all configured providers
+provisioning providers test --all
+
+

Global Configuration Accessors

+

Provisioning provides 476+ configuration accessors for accessing settings:

+
# Access configuration values
+let config = (provisioning config load)
+
+# Provider settings
+$config.providers.default
+$config.providers.upcloud.endpoint
+$config.providers.aws.region
+
+# Workspace settings
+$config.workspace.directory
+$config.workspace.default
+
+# Server defaults
+$config.server.plan
+$config.server.region
+$config.server.backup_enabled
+
+# Security settings
+$config.security.mfa_enabled
+$config.security.encryption
+
+

Credential Management

+

Encrypted Credentials

+

Use SOPS + Age for encrypted secrets:

+
# Initialize SOPS configuration
+provisioning sops init
+
+# Create encrypted credentials file
+provisioning sops create .secrets/providers.enc.yaml
+
+# Edit encrypted file
+provisioning sops edit .secrets/providers.enc.yaml
+
+# Decrypt for local use
+provisioning sops decrypt .secrets/providers.enc.yaml > .secrets/providers.toml
+
+

Using Environment Variables

+

Override credentials at runtime:

+
# Provider credentials
+export PROVISIONING_PROVIDER=aws
+export AWS_ACCESS_KEY_ID="your-key"
+export AWS_SECRET_ACCESS_KEY="your-secret"
+export AWS_REGION="us-east-1"
+
+# Execute command
+provisioning server create
+
+

KMS Integration

+

For enterprise deployments, use KMS backends:

+
# Configure KMS backend
+provisioning kms init --backend cosmian
+
+# Store credentials in KMS
+provisioning kms set providers.upcloud.api_key "value"
+
+# Decrypt on-demand
+provisioning kms get providers.upcloud.api_key
+
+

Configuration Validation

+

Validate Configuration

+
# Validate all configuration
+provisioning validate config
+
+# Validate specific section
+provisioning validate config --section providers
+
+# Strict validation
+provisioning validate config --strict
+
+# Verbose output
+provisioning validate config --verbose
+
+

Validate Infrastructure

+
# Validate infrastructure schemas
+provisioning validate infra
+
+# Validate specific file
+provisioning validate infra workspace/infra/servers.ncl
+
+# Type-check with Nickel
+nickel typecheck workspace/infra/servers.ncl
+
+

Configuration Merging

+

Configuration is merged from all layers respecting priority:

+
# View final merged configuration
+provisioning config show
+
+# Export merged configuration
+provisioning config export --format yaml
+
+# Show configuration source
+provisioning config debug --keys providers.default
+
+

Working with Configurations

+

Export Configuration

+
# Export as YAML
+provisioning config export --format yaml > config.yaml
+
+# Export as JSON
+provisioning config export --format json | jq '.'
+
+# Export as TOML
+provisioning config export --format toml > config.toml
+
+

Import Configuration

+
# Import from file
+provisioning config import --file config.yaml
+
+# Merge with existing
+provisioning config merge --file config.yaml
+
+

Reset Configuration

# Reset to defaults
 provisioning config reset
 
 # Reset specific section
-provisioning config reset providers
+provisioning config reset --section providers
 
-# Backup current config before reset
+# Backup before reset
 provisioning config backup
 
-

Advanced Configuration Patterns

-

Dynamic Configuration Loading

-
[dynamic]
-# Load configuration from external sources
-config_urls = [
-    "https://config.company.com/provisioning/base.toml",
-    "file:///etc/provisioning/shared.toml"
-]
-
-# Conditional configuration loading
-load_if_exists = [
-    "./local-overrides.toml",
-    "../shared/team-config.toml"
-]
-
-

Configuration Templating

-
[templates]
-# Template-based configuration
-base_template = "aws-web-app"
-template_vars = {
-    region = "us-west-2"
-    instance_type = "t3.medium"
-    team_name = "platform"
-}
-
-# Template inheritance
-extends = ["base-web", "monitoring", "security"]
-
-

Multi-Region Configuration

-
[regions]
-primary = "us-west-2"
-secondary = "us-east-1"
-
-[regions.us-west-2]
-providers.aws.region = "us-west-2"
-availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
-
-[regions.us-east-1]
-providers.aws.region = "us-east-1"
-availability_zones = ["us-east-1a", "us-east-1b", "us-east-1c"]
-
-

Configuration Profiles

-
[profiles]
-active = "development"
-
-[profiles.development]
-debug.enabled = true
-providers.default = "local"
-cost_controls.enabled = false
-
-[profiles.staging]
-debug.enabled = true
-providers.default = "aws"
-cost_controls.max_budget = 1000.00
-
-[profiles.production]
-debug.enabled = false
-providers.default = "aws"
-security.strict_mode = true
-
-

Configuration Management Best Practices

-

1. Version Control

-
# Track configuration changes
-git add provisioning.toml
-git commit -m "feat(config): add production settings"
-
-# Use branches for configuration experiments
-git checkout -b config/new-provider
-
-

2. Documentation

-
# Document your configuration choices
-[paths]
-# Using custom base path for team shared installation
-base = "/opt/team-provisioning"
-
-[debug]
-# Debug enabled for troubleshooting infrastructure issues
-enabled = true
-log_level = "debug"  # Temporary while debugging network problems
-
-

3. Validation

-
# Always validate before committing
-provisioning validate config
-git add . && git commit -m "update config"
-
-

4. Backup

-
# Regular configuration backups
-provisioning config export --format yaml > config-backup-$(date +%Y%m%d).yaml
-
-# Automated backup script
-echo '0 2 * * * provisioning config export > ~/backups/config-$(date +\%Y\%m\%d).yaml' | crontab -
-
-

5. Security

-
    -
  • Never commit sensitive values in plain text
  • -
  • Use SOPS for encrypting secrets
  • -
  • Rotate encryption keys regularly
  • -
  • Audit configuration access
  • -
-
# Encrypt sensitive configuration
-sops -e settings.ncl > settings.encrypted.ncl
-
-# Audit configuration changes
-git log -p -- provisioning.toml
-
-

Configuration Migration

-

Migrating from Environment Variables

-
# Old: Environment variables
-export PROVISIONING_DEBUG=true
-export PROVISIONING_PROVIDER=aws
-
-# New: Configuration file
-[debug]
-enabled = true
-
-[providers]
-default = "aws"
-
-

Upgrading Configuration Format

-
# Check for configuration updates needed
-provisioning config check-version
-
-# Migrate to new format
-provisioning config migrate --from 1.0 --to 2.0
-
-# Validate migrated configuration
-provisioning validate config
-
-

Next Steps

-

Now that you understand the configuration system:

-
    -
  1. Create your user configuration: provisioning init config
  2. -
  3. Set up environment-specific configs for your workflow
  4. -
  5. Learn CLI commands: CLI Reference
  6. -
  7. Practice with examples: Examples and Tutorials
  8. -
  9. Troubleshoot issues: Troubleshooting Guide
  10. -
-

You now have complete control over how provisioning behaves in your environment!

-

Workspace Setup Guide

-

This guide shows you how to set up a new infrastructure workspace with Nickel-based configuration and auto-generated documentation.

-

Quick Start

-

1. Create a New Workspace (Automatic)

-
# Interactive workspace creation with prompts
-provisioning workspace init
-
-# Or non-interactive with explicit path
-provisioning workspace init my_workspace /path/to/my_workspace
-
-

When you run provisioning workspace init, the system automatically:

-
    -
  • ✅ Creates Nickel-based configuration (config/config.ncl)
  • -
  • ✅ Sets up infrastructure directories with Nickel files (infra/default/)
  • -
  • Generates 4 workspace guides (deployment, configuration, troubleshooting, README)
  • -
  • ✅ Configures local provider as default
  • -
  • ✅ Creates .gitignore for workspace
  • -
-

2. Workspace Structure (Auto-Generated)

-

After running workspace init, your workspace has this structure:

-
my_workspace/
-├── config/
-│   ├── config.ncl              # Master Nickel configuration
-│   ├── providers/
-│   └── platform/
-│
-├── infra/
-│   └── default/
-│       ├── main.ncl            # Infrastructure definition
-│       └── servers.ncl         # Server configurations
-│
-├── docs/                       # ✨ AUTO-GENERATED GUIDES
-│   ├── README.md              # Workspace overview & quick start
-│   ├── deployment-guide.md    # Step-by-step deployment
-│   ├── configuration-guide.md # Configuration reference
-│   └── troubleshooting.md     # Common issues & solutions
-│
-├── .providers/                # Provider state & cache
-├── .kms/                      # KMS data
-├── .provisioning/             # Workspace metadata
-└── workspace.nu              # Utility scripts
-
-

3. Understanding Nickel Configuration

-

The config/config.ncl file is the master configuration for your workspace:

-
{
-  workspace = {
-    name = "my_workspace",
-    path = "/path/to/my_workspace",
-    description = "Workspace: my_workspace",
-    metadata = {
-      owner = "your_username",
-      created = "2025-01-07T19:30:00Z",
-      environment = "development",
-    },
-  },
-
-  providers = {
-    local = {
-      name = "local",
-      enabled = true,
-      workspace = "my_workspace",
-      auth = { interface = "local" },
-      paths = {
-        base = ".providers/local",
-        cache = ".providers/local/cache",
-        state = ".providers/local/state",
-      },
-    },
-  },
-}
-
-

4. Auto-Generated Documentation

-

Every workspace gets 4 auto-generated guides tailored to your specific configuration:

-

README.md - Overview with workspace structure and quick start -deployment-guide.md - Step-by-step deployment instructions for your infrastructure -configuration-guide.md - Configuration reference specific to your workspace -troubleshooting.md - Common issues and solutions for your setup

-

These guides are automatically generated based on your workspace’s:

-
    -
  • Configured providers
  • -
  • Infrastructure definitions
  • -
  • Server configurations
  • -
  • Taskservs and services
  • -
-

5. Customize Your Workspace

-

After creation, edit the Nickel configuration files:

-
# Edit master configuration
-vim config/config.ncl
-
-# Edit infrastructure definition
-vim infra/default/main.ncl
-
-# Edit server definitions
-vim infra/default/servers.ncl
-
-# Validate Nickel syntax
-nickel typecheck config/config.ncl
-
-

Next Steps After Workspace Creation

-

1. Read Your Auto-Generated Documentation

-

Each workspace gets 4 auto-generated guides in the docs/ directory:

-
cd my_workspace
-
-# Overview and quick start
-cat docs/README.md
-
-# Step-by-step deployment
-cat docs/deployment-guide.md
-
-# Configuration reference
-cat docs/configuration-guide.md
-
-# Common issues and solutions
-cat docs/troubleshooting.md
-
-

2. Customize Your Configuration

-

Edit the Nickel configuration files to suit your needs:

-
# Master configuration (providers, settings)
-vim config/config.ncl
-
-# Infrastructure definition
-vim infra/default/main.ncl
-
-# Server configurations
-vim infra/default/servers.ncl
-
-

3. Validate Your Configuration

-
# Check Nickel syntax
-nickel typecheck config/config.ncl
-nickel typecheck infra/default/main.ncl
-
-# Validate with provisioning system
-provisioning validate config
-
-

4. Add Multiple Infrastructures

-

To add more infrastructure environments:

-
# Create new infrastructure directory
-mkdir infra/production
-mkdir infra/staging
-
-# Create Nickel files for each infrastructure
-cp infra/default/main.ncl infra/production/main.ncl
-cp infra/default/servers.ncl infra/production/servers.ncl
-
-# Edit them for your specific needs
-vim infra/production/servers.ncl
-
-

5. Configure Providers

-

To use cloud providers (UpCloud, AWS, etc.), update config/config.ncl:

-
providers = {
-  upcloud = {
-    name = "upcloud",
-    enabled = true,              # Set to true to enable
-    workspace = "my_workspace",
-    auth = { interface = "API" },
-    paths = {
-      base = ".providers/upcloud",
-      cache = ".providers/upcloud/cache",
-      state = ".providers/upcloud/state",
-    },
-    api = {
-      url = "https://api.upcloud.com/1.3",
-      timeout = 30,
-    },
-  },
-}
-
-

Workspace Management Commands

-

List Workspaces

-
provisioning workspace list
-
-

Activate a Workspace

-
provisioning workspace activate my_workspace
-
-

Show Active Workspace

-
provisioning workspace active
-
-

Deploy Infrastructure

-
# Dry-run first (check mode)
-provisioning -c server create
-
-# Actually create servers
-provisioning server create
-
-# List created servers
-provisioning server list
-
-

Troubleshooting

-

Invalid Nickel Syntax

-
# Check syntax
-nickel typecheck config/config.ncl
-
-# Example error and solution
-Error: Type checking failed
-Solution: Fix the syntax error shown and retry
-
-

Configuration Issues

-

Refer to the auto-generated docs/troubleshooting.md in your workspace for:

-
    -
  • Authentication & credentials issues
  • -
  • Server deployment problems
  • -
  • Configuration validation errors
  • -
  • Network connectivity issues
  • -
  • Performance issues
  • -
-

Getting Help

-
    -
  1. Consult workspace guides: Check the docs/ directory
  2. -
  3. Check the docs: provisioning --help, provisioning workspace --help
  4. -
  5. Enable debug mode: provisioning --debug server create
  6. -
  7. Review logs: Check logs for detailed error information
  8. -
-

Next Steps

-
    -
  1. Review auto-generated guides in docs/
  2. -
  3. Customize configuration in Nickel files
  4. -
  5. Test with dry-run before deployment
  6. -
  7. Deploy infrastructure
  8. -
  9. Monitor and maintain your workspace
  10. -
-

For detailed deployment instructions, see docs/deployment-guide.md in your workspace.

-

Workspace Guide

-

Complete guide to workspace management in the provisioning platform.

-

📖 Workspace Switching Guide

-

The comprehensive workspace guide is available here:

-

Workspace Switching Guide - Complete workspace documentation

-

This guide covers:

-
    -
  • Workspace creation and initialization
  • -
  • Switching between multiple workspaces
  • -
  • User preferences and configuration
  • -
  • Workspace registry management
  • -
  • Backup and restore operations
  • -
-

Quick Start

-
# List all workspaces
-provisioning workspace list
-
-# Switch to a workspace
-provisioning workspace switch <name>
-
-# Create new workspace
-provisioning workspace init <name>
-
-# Show active workspace
-provisioning workspace active
-
-

Additional Workspace Resources

- -
-

For complete workspace documentation, see Workspace Switching Guide.

-

Workspace Switching Guide

-

Version: 1.0.0 -Date: 2025-10-06 -Status: ✅ Production Ready

-

Overview

-

The provisioning system now includes a centralized workspace management system that allows you to easily switch between multiple workspaces without -manually editing configuration files.

-

Quick Start

-

List Available Workspaces

-
provisioning workspace list
-```bash
-
-Output:
-
-```plaintext
-Registered Workspaces:
-
-  ● librecloud
-      Path: /Users/Akasha/project-provisioning/workspace_librecloud
-      Last used: 2025-10-06T12:29:43Z
-
-    production
-      Path: /opt/workspaces/production
-      Last used: 2025-10-05T10:15:30Z
-```bash
-
-The green ● indicates the currently active workspace.
-
-### Check Active Workspace
-
-```bash
-provisioning workspace active
-```bash
-
-Output:
-
-```plaintext
-Active Workspace:
-  Name: librecloud
-  Path: /Users/Akasha/project-provisioning/workspace_librecloud
-  Last used: 2025-10-06T12:29:43Z
-```bash
-
-### Switch to Another Workspace
-
-```bash
-# Option 1: Using activate
-provisioning workspace activate production
-
-# Option 2: Using switch (alias)
-provisioning workspace switch production
-```bash
-
-Output:
-
-```plaintext
-✓ Workspace 'production' activated
-
-Current workspace: production
-Path: /opt/workspaces/production
-
-ℹ All provisioning commands will now use this workspace
-```bash
-
-### Register a New Workspace
-
-```bash
-# Register without activating
-provisioning workspace register my-project ~/workspaces/my-project
-
-# Register and activate immediately
-provisioning workspace register my-project ~/workspaces/my-project --activate
-```bash
-
-### Remove Workspace from Registry
-
-```bash
-# With confirmation prompt
-provisioning workspace remove old-workspace
-
-# Skip confirmation
-provisioning workspace remove old-workspace --force
-```bash
-
-**Note**: This only removes the workspace from the registry. The workspace files are NOT deleted.
-
-## Architecture
-
-### Central User Configuration
-
-All workspace information is stored in a central user configuration file:
-
-**Location**: `~/Library/Application Support/provisioning/user_config.yaml`
-
-**Structure**:
-
-```yaml
-# Active workspace (current workspace in use)
-active_workspace: "librecloud"
-
-# Known workspaces (automatically managed)
-workspaces:
-  - name: "librecloud"
-    path: "/Users/Akasha/project-provisioning/workspace_librecloud"
-    last_used: "2025-10-06T12:29:43Z"
-
-  - name: "production"
-    path: "/opt/workspaces/production"
-    last_used: "2025-10-05T10:15:30Z"
-
-# User preferences (global settings)
-preferences:
-  editor: "vim"
-  output_format: "yaml"
-  confirm_delete: true
-  confirm_deploy: true
-  default_log_level: "info"
-  preferred_provider: "upcloud"
-
-# Metadata
-metadata:
-  created: "2025-10-06T12:29:43Z"
-  last_updated: "2025-10-06T13:46:16Z"
-  version: "1.0.0"
-```bash
-
-### How It Works
-
-1. **Workspace Registration**: When you register a workspace, it's added to the `workspaces` list in `user_config.yaml`
-
-2. **Activation**: When you activate a workspace:
-   - `active_workspace` is updated to the workspace name
-   - The workspace's `last_used` timestamp is updated
-   - All provisioning commands now use this workspace's configuration
-
-3. **Configuration Loading**: The config loader reads `active_workspace` from `user_config.yaml` and loads:
-   - `workspace_path/config/provisioning.yaml`
-   - `workspace_path/config/providers/*.toml`
-   - `workspace_path/config/platform/*.toml`
-   - `workspace_path/config/kms.toml`
-
-## Advanced Features
-
-### User Preferences
-
-You can set global user preferences that apply across all workspaces:
-
-```bash
-# Get a preference value
-provisioning workspace get-preference editor
-
-# Set a preference value
-provisioning workspace set-preference editor "code"
-
-# View all preferences
-provisioning workspace preferences
-```bash
-
-**Available Preferences**:
-
-- `editor`: Default editor for config files (vim, code, nano, etc.)
-- `output_format`: Default output format (yaml, json, toml)
-- `confirm_delete`: Require confirmation for deletions (true/false)
-- `confirm_deploy`: Require confirmation for deployments (true/false)
-- `default_log_level`: Default log level (debug, info, warn, error)
-- `preferred_provider`: Preferred cloud provider (aws, upcloud, local)
-
-### Output Formats
-
-List workspaces in different formats:
-
-```bash
-# Table format (default)
-provisioning workspace list
-
-# JSON format
-provisioning workspace list --format json
-
-# YAML format
-provisioning workspace list --format yaml
-```bash
-
-### Quiet Mode
-
-Activate workspace without output messages:
-
-```bash
-provisioning workspace activate production --quiet
-```bash
-
-## Workspace Requirements
-
-For a workspace to be activated, it must have:
-
-1. **Directory exists**: The workspace directory must exist on the filesystem
-
-2. **Config directory**: Must have a `config/` directory
-
-   ```bash
-
-   workspace_name/
-   └── config/
-       ├── provisioning.yaml  # Required
-       ├── providers/         # Optional
-       ├── platform/          # Optional
-       └── kms.toml           # Optional
-
-```bash
-
-3. **Main config file**: Must have `config/provisioning.yaml`
-
-If these requirements are not met, the activation will fail with helpful error messages:
-
-```plaintext
-✗ Workspace 'my-project' not found in registry
-💡 Available workspaces:
-   [list of workspaces]
-💡 Register it first with: provisioning workspace register my-project <path>
-```bash
-
-```plaintext
-✗ Workspace is not migrated to new config system
-💡 Missing: /path/to/workspace/config
-💡 Run migration: provisioning workspace migrate my-project
-```bash
-
-## Migration from Old System
-
-If you have workspaces using the old context system (`ws_{name}.yaml` files), they still work but you should register them in the new system:
-
-```bash
-# Register existing workspace
-provisioning workspace register old-workspace ~/workspaces/old-workspace
-
-# Activate it
-provisioning workspace activate old-workspace
-```bash
-
-The old `ws_{name}.yaml` files are still supported for backward compatibility, but the new centralized system is recommended.
-
-## Best Practices
-
-### 1. **One Active Workspace at a Time**
-
-Only one workspace can be active at a time. All provisioning commands use the active workspace's configuration.
-
-### 2. **Use Descriptive Names**
-
-Use clear, descriptive names for your workspaces:
-
-```bash
-# ✅ Good
-provisioning workspace register production-us-east ~/workspaces/prod-us-east
-provisioning workspace register dev-local ~/workspaces/dev
-
-# ❌ Avoid
-provisioning workspace register ws1 ~/workspaces/workspace1
-provisioning workspace register temp ~/workspaces/t
-```bash
-
-### 3. **Keep Workspaces Organized**
-
-Store all workspaces in a consistent location:
-
-```bash
-~/workspaces/
-├── production/
-├── staging/
-├── development/
-└── testing/
-```bash
-
-### 4. **Regular Cleanup**
-
-Remove workspaces you no longer use:
-
-```bash
-# List workspaces to see which ones are unused
-provisioning workspace list
-
-# Remove old workspace
-provisioning workspace remove old-workspace
-```bash
-
-### 5. **Backup User Config**
-
-Periodically backup your user configuration:
-
-```bash
-cp "~/Library/Application Support/provisioning/user_config.yaml" \
-   "~/Library/Application Support/provisioning/user_config.yaml.backup"
-```bash
-
-## Troubleshooting
-
-### Workspace Not Found
-
-**Problem**: `✗ Workspace 'name' not found in registry`
-
-**Solution**: Register the workspace first:
-
-```bash
-provisioning workspace register name /path/to/workspace
-```bash
-
-### Missing Configuration
-
-**Problem**: `✗ Missing workspace configuration`
-
-**Solution**: Ensure the workspace has a `config/provisioning.yaml` file. Run migration if needed:
-
-```bash
-provisioning workspace migrate name
-```bash
-
-### Directory Not Found
-
-**Problem**: `✗ Workspace directory not found: /path/to/workspace`
-
-**Solution**:
-
-1. Check if the workspace was moved or deleted
-2. Update the path or remove from registry:
-
-```bash
-provisioning workspace remove name
-provisioning workspace register name /new/path
-```bash
-
-### Corrupted User Config
-
-**Problem**: `Error: Failed to parse user config`
-
-**Solution**: The system automatically creates a backup and regenerates the config. Check:
-
-```bash
-ls -la "~/Library/Application Support/provisioning/user_config.yaml"*
-```bash
-
-Restore from backup if needed:
-
-```bash
-cp "~/Library/Application Support/provisioning/user_config.yaml.backup.TIMESTAMP" \
-   "~/Library/Application Support/provisioning/user_config.yaml"
-```bash
-
-## CLI Commands Reference
-
-| Command | Alias | Description |
-| --------- | ------- | ------------- |
-| `provisioning workspace activate <name>` | - | Activate a workspace |
-| `provisioning workspace switch <name>` | - | Alias for activate |
-| `provisioning workspace list` | - | List all registered workspaces |
-| `provisioning workspace active` | - | Show currently active workspace |
-| `provisioning workspace register <name> <path>` | - | Register a new workspace |
-| `provisioning workspace remove <name>` | - | Remove workspace from registry |
-| `provisioning workspace preferences` | - | Show user preferences |
-| `provisioning workspace set-preference <key> <value>` | - | Set a preference |
-| `provisioning workspace get-preference <key>` | - | Get a preference value |
-
-## Integration with Config System
-
-The workspace switching system is fully integrated with the new target-based configuration system:
-
-### Configuration Hierarchy (Priority: Low → High)
-
-```plaintext
-1. Workspace config      workspace/{name}/config/provisioning.yaml
-2. Provider configs      workspace/{name}/config/providers/*.toml
-3. Platform configs      workspace/{name}/config/platform/*.toml
-4. User context          ~/Library/Application Support/provisioning/ws_{name}.yaml (legacy)
-5. User config           ~/Library/Application Support/provisioning/user_config.yaml (new)
-6. Environment variables PROVISIONING_*
-```bash
-
-### Example Workflow
-
-```bash
-# 1. Create and activate development workspace
-provisioning workspace register dev ~/workspaces/dev --activate
-
-# 2. Work on development
-provisioning server create web-dev-01
-provisioning taskserv create kubernetes
-
-# 3. Switch to production
-provisioning workspace switch production
-
-# 4. Deploy to production
-provisioning server create web-prod-01
-provisioning taskserv create kubernetes
-
-# 5. Switch back to development
-provisioning workspace switch dev
-
-# All commands now use dev workspace config
-```bash
-
-## Nickel Workspace Configuration
-
-Starting with v3.7.0, workspaces use **Nickel** for type-safe, schema-validated configurations.
-
-### Nickel Configuration Features
-
-**Nickel Configuration** (Type-Safe):
-
-```nickel
-{
-  workspace = {
-    name = "myworkspace",
-    version = "1.0.0",
-  },
-  paths = {
-    base = "/path/to/workspace",
-    infra = "/path/to/workspace/infra",
-    config = "/path/to/workspace/config",
-  },
-}
-```bash
-
-### Benefits of Nickel Configuration
-
-- ✅ **Type Safety**: Catch configuration errors at load time, not runtime
-- ✅ **Schema Validation**: Required fields, value constraints, format checking
-- ✅ **Lazy Evaluation**: Only computes what's needed
-- ✅ **Self-Documenting**: Records provide instant documentation
-- ✅ **Merging**: Powerful record merging for composition
-
-### Viewing Workspace Configuration
-
-```bash
-# View your Nickel workspace configuration
-provisioning workspace config show
-
-# View in different formats
-provisioning workspace config show --format=yaml    # YAML output
-provisioning workspace config show --format=json    # JSON output
-provisioning workspace config show --format=nickel  # Raw Nickel file
-
-# Validate configuration
-provisioning workspace config validate
-# Output: ✅ Validation complete - all configs are valid
-
-# Show configuration hierarchy
-provisioning workspace config hierarchy
-```bash
-
-## See Also
-
-- **Configuration Guide**: `docs/architecture/adr/ADR-010-configuration-format-strategy.md`
-- **Migration Guide**: [Nickel Migration](../architecture/adr/adr-011-nickel-migration.md)
-- **From-Scratch Guide**: [From-Scratch Guide](../guides/from-scratch.md)
-- **Nickel Patterns**: Nickel Language Module System
-
----
-
-**Maintained By**: Infrastructure Team
-**Version**: 2.0.0 (Updated for Nickel)
-**Status**: ✅ Production Ready
-**Last Updated**: 2025-12-03
-
-

Workspace Switching System (v2.0.5)

-

🚀 Workspace Switching Completed (2025-10-02)

-

A centralized workspace management system has been implemented, allowing seamless switching between multiple workspaces without manually editing -configuration files. This builds upon the target-based configuration system.

-

Key Features

-
    -
  • Centralized Configuration: Single user_config.yaml file stores all workspace information
  • -
  • Simple CLI Commands: Switch workspaces with a single command
  • -
  • Active Workspace Tracking: Automatic tracking of currently active workspace
  • -
  • Workspace Registry: Maintain list of all known workspaces
  • -
  • User Preferences: Global user settings that apply across all workspaces
  • -
  • Automatic Updates: Last-used timestamps and metadata automatically managed
  • -
  • Validation: Ensures workspaces have required configuration before activation
  • -
-

Workspace Management Commands

-
# List all registered workspaces
-provisioning workspace list
-
-# Show currently active workspace
-provisioning workspace active
-
-# Switch to another workspace
-provisioning workspace activate <name>
-provisioning workspace switch <name>     # alias
-
-# Register a new workspace
-provisioning workspace register <name> <path> [--activate]
-
-# Remove workspace from registry (does not delete files)
-provisioning workspace remove <name> [--force]
-
-# View user preferences
-provisioning workspace preferences
-
-# Set user preference
-provisioning workspace set-preference <key> <value>
-
-# Get user preference
-provisioning workspace get-preference <key>
-
-

Central User Configuration

-

Location: ~/Library/Application Support/provisioning/user_config.yaml

-

Structure:

-
# Active workspace (current workspace in use)
-active_workspace: "librecloud"
-
-# Known workspaces (automatically managed)
-workspaces:
-  - name: "librecloud"
-    path: "/Users/Akasha/project-provisioning/workspace_librecloud"
-    last_used: "2025-10-06T12:29:43Z"
-
-  - name: "production"
-    path: "/opt/workspaces/production"
-    last_used: "2025-10-05T10:15:30Z"
-
-# User preferences (global settings)
-preferences:
-  editor: "vim"
-  output_format: "yaml"
-  confirm_delete: true
-  confirm_deploy: true
-  default_log_level: "info"
-  preferred_provider: "upcloud"
-
-# Metadata
-metadata:
-  created: "2025-10-06T12:29:43Z"
-  last_updated: "2025-10-06T13:46:16Z"
-  version: "1.0.0"
-
-

Usage Example

-
# Start with workspace librecloud active
-$ provisioning workspace active
-Active Workspace:
-  Name: librecloud
-  Path: /Users/Akasha/project-provisioning/workspace_librecloud
-  Last used: 2025-10-06T13:46:16Z
-
-# List all workspaces (● indicates active)
-$ provisioning workspace list
-
-Registered Workspaces:
-
-  ● librecloud
-      Path: /Users/Akasha/project-provisioning/workspace_librecloud
-      Last used: 2025-10-06T13:46:16Z
-
-    production
-      Path: /opt/workspaces/production
-      Last used: 2025-10-05T10:15:30Z
-
-# Switch to production
-$ provisioning workspace switch production
-✓ Workspace 'production' activated
-
-Current workspace: production
-Path: /opt/workspaces/production
-
-ℹ All provisioning commands will now use this workspace
-
-# All subsequent commands use production workspace
-$ provisioning server list
-$ provisioning taskserv create kubernetes
-
-

Integration with Config System

-

The workspace switching system integrates seamlessly with the configuration system:

-
    -
  1. Active Workspace Detection: Config loader reads active_workspace from user_config.yaml
  2. -
  3. Workspace Validation: Ensures workspace has required config/provisioning.yaml
  4. -
  5. Configuration Loading: Loads workspace-specific configs automatically
  6. -
  7. Automatic Timestamps: Updates last_used on workspace activation
  8. -
-

Configuration Hierarchy (Priority: Low → High):

-
1. Workspace config      workspace/{name}/config/provisioning.yaml
-2. Provider configs      workspace/{name}/config/providers/*.toml
-3. Platform configs      workspace/{name}/config/platform/*.toml
-4. User config           ~/Library/Application Support/provisioning/user_config.yaml
-5. Environment variables PROVISIONING_*
-
-

Benefits

-
    -
  • No Manual Config Editing: Switch workspaces with single command
  • -
  • Multiple Workspaces: Manage dev, staging, production simultaneously
  • -
  • User Preferences: Global settings across all workspaces
  • -
  • Automatic Tracking: Last-used timestamps, active workspace markers
  • -
  • Safe Operations: Validation before activation, confirmation prompts
  • -
  • Backward Compatible: Old ws_{name}.yaml files still supported
  • -
-

For more detailed information, see Workspace Switching Guide.

-

Workspace Configuration Architecture

-

Version: 2.0.0 -Date: 2025-10-06 -Status: Implemented

-

Overview

-

The provisioning system now uses a workspace-based configuration architecture where each workspace has its own complete configuration structure. -This replaces the old ENV-based and template-only system.

-

Critical Design Principle

-

config.defaults.toml is ONLY a template, NEVER loaded at runtime

-

This file exists solely as a reference template for generating workspace configurations. The system does NOT load it during operation.

-

Configuration Hierarchy

-

Configuration is loaded in the following order (lowest to highest priority):

-
    -
  1. Workspace Config (Base): {workspace}/config/provisioning.yaml
  2. -
  3. Provider Configs: {workspace}/config/providers/*.toml
  4. -
  5. Platform Configs: {workspace}/config/platform/*.toml
  6. -
  7. User Context: ~/Library/Application Support/provisioning/ws_{name}.yaml
  8. -
  9. Environment Variables: PROVISIONING_* (highest priority)
  10. -
-

Workspace Structure

-

When a workspace is initialized, the following structure is created:

-
{workspace}/
-├── config/
-│   ├── provisioning.yaml       # Main workspace config (generated from template)
-│   ├── providers/              # Provider-specific configs
-│   │   ├── aws.toml
-│   │   ├── local.toml
-│   │   └── upcloud.toml
-│   ├── platform/               # Platform service configs
-│   │   ├── orchestrator.toml
-│   │   └── mcp.toml
-│   └── kms.toml                # KMS configuration
-├── infra/                      # Infrastructure definitions
-├── .cache/                     # Cache directory
-├── .runtime/                   # Runtime data
-│   ├── taskservs/
-│   └── clusters/
-├── .providers/                 # Provider state
-├── .kms/                       # Key management
-│   └── keys/
-├── generated/                  # Generated files
-└── .gitignore                  # Workspace gitignore
-
-

Template System

-

Templates are located at: /Users/Akasha/project-provisioning/provisioning/config/templates/

-

Available Templates

-
    -
  1. workspace-provisioning.yaml.template - Main workspace configuration
  2. -
  3. provider-aws.toml.template - AWS provider configuration
  4. -
  5. provider-local.toml.template - Local provider configuration
  6. -
  7. provider-upcloud.toml.template - UpCloud provider configuration
  8. -
  9. kms.toml.template - KMS configuration
  10. -
  11. user-context.yaml.template - User context configuration
  12. -
-

Template Variables

-

Templates support the following interpolation variables:

-
    -
  • {{workspace.name}} - Workspace name
  • -
  • {{workspace.path}} - Absolute path to workspace
  • -
  • {{now.iso}} - Current timestamp in ISO format
  • -
  • {{env.HOME}} - User’s home directory
  • -
  • {{env.*}} - Environment variables (safe list only)
  • -
  • {{paths.base}} - Base path (after config load)
  • -
-

Workspace Initialization

-

Command

-
# Using the workspace init function
-nu -c "use provisioning/core/nulib/lib_provisioning/workspace/init.nu *; \
-  workspace-init 'my-workspace' '/path/to/workspace' \
-  --providers ['aws' 'local'] --activate"
-
-

Process

-
    -
  1. Create Directory Structure: All necessary directories
  2. -
  3. Generate Config from Template: Creates config/provisioning.yaml
  4. -
  5. Generate Provider Configs: For each specified provider
  6. -
  7. Generate KMS Config: Security configuration
  8. -
  9. Create User Context (if –activate): User-specific overrides
  10. -
  11. Create .gitignore: Ignore runtime/cache files
  12. -
-

User Context

-

User context files are stored per workspace:

-

Location: ~/Library/Application Support/provisioning/ws_{workspace_name}.yaml

-

Purpose

-
    -
  • Store user-specific overrides (debug settings, output preferences)
  • -
  • Mark active workspace
  • -
  • Override workspace paths if needed
  • -
-

Example

-
workspace:
-  name: "my-workspace"
-  path: "/path/to/my-workspace"
-  active: true
-
-debug:
-  enabled: true
-  log_level: "debug"
-
-output:
-  format: "json"
-
-providers:
-  default: "aws"
-
-

Configuration Loading Process

-

1. Determine Active Workspace

-
# Check user config directory for active workspace
-let user_config_dir = ~/Library/Application Support/provisioning/
-let active_workspace = (find workspace with active: true in ws_*.yaml files)
-
-

2. Load Workspace Config

-
# Load main workspace config
-let workspace_config = {workspace.path}/config/provisioning.yaml
-
-

3. Load Provider Configs

-
# Merge all provider configs
-for provider in {workspace.path}/config/providers/*.toml {
-  merge provider config
-}
-
-

4. Load Platform Configs

-
# Merge all platform configs
-for platform in {workspace.path}/config/platform/*.toml {
-  merge platform config
-}
-
-

5. Apply User Context

-
# Apply user-specific overrides
-let user_context = ~/Library/Application Support/provisioning/ws_{name}.yaml
-merge user_context (highest config priority)
-
-

6. Apply Environment Variables

-
# Final overrides from environment
-PROVISIONING_DEBUG=true
-PROVISIONING_LOG_LEVEL=debug
-PROVISIONING_PROVIDER=aws
-# etc.
-
-

Migration from Old System

-

Before (ENV-based)

-
export PROVISIONING=/usr/local/provisioning
-export PROVISIONING_INFRA_PATH=/path/to/infra
-export PROVISIONING_DEBUG=true
-# ... many ENV variables
-
-

After (Workspace-based)

-
# Initialize workspace
-workspace-init "production" "/workspaces/prod" --providers ["aws"] --activate
-
-# All config is now in workspace
-# No ENV variables needed (except for overrides)
-
-

Breaking Changes

-
    -
  1. config.defaults.toml NOT loaded - Only used as template
  2. -
  3. Workspace required - Must have active workspace or be in workspace directory
  4. -
  5. New config locations - User config in ~/Library/Application Support/provisioning/
  6. -
  7. YAML main config - provisioning.yaml instead of TOML
  8. -
-

Workspace Management Commands

-

Initialize Workspace

-
use provisioning/core/nulib/lib_provisioning/workspace/init.nu *
-workspace-init "my-workspace" "/path/to/workspace" --providers ["aws" "local"] --activate
-
-

List Workspaces

-
workspace-list
-
-

Activate Workspace

-
workspace-activate "my-workspace"
-
-

Get Active Workspace

-
workspace-get-active
-
-

Implementation Files

-

Core Files

-
    -
  1. Template Directory: /Users/Akasha/project-provisioning/provisioning/config/templates/
  2. -
  3. Workspace Init: /Users/Akasha/project-provisioning/provisioning/core/nulib/lib_provisioning/workspace/init.nu
  4. -
  5. Config Loader: /Users/Akasha/project-provisioning/provisioning/core/nulib/lib_provisioning/config/loader.nu
  6. -
-

Key Changes in Config Loader

-

Removed

-
    -
  • get-defaults-config-path() - No longer loads config.defaults.toml
  • -
  • Old hierarchy with user/project/infra TOML files
  • -
-

Added

-
    -
  • get-active-workspace() - Finds active workspace from user config
  • -
  • Support for YAML config files
  • -
  • Provider and platform config merging
  • -
  • User context loading
  • -
-

Configuration Schema

-

Main Workspace Config (provisioning.yaml)

-
workspace:
-  name: string
-  version: string
-  created: timestamp
-
-paths:
-  base: string
-  infra: string
-  cache: string
-  runtime: string
-  # ... all paths
-
-core:
-  version: string
-  name: string
-
-debug:
-  enabled: bool
-  log_level: string
-  # ... debug settings
-
-providers:
-  active: [string]
-  default: string
-
-# ... all other sections
-
-

Provider Config (providers/*.toml)

-
[provider]
-name = "aws"
-enabled = true
-workspace = "workspace-name"
-
-[provider.auth]
-profile = "default"
-region = "us-east-1"
-
-[provider.paths]
-base = "{workspace}/.providers/aws"
-cache = "{workspace}/.providers/aws/cache"
-
-

User Context (ws_{name}.yaml)

-
workspace:
-  name: string
-  path: string
-  active: bool
-
-debug:
-  enabled: bool
-  log_level: string
-
-output:
-  format: string
-
-

Benefits

-
    -
  1. No Template Loading: config.defaults.toml is template-only
  2. -
  3. Workspace Isolation: Each workspace is self-contained
  4. -
  5. Explicit Configuration: No hidden defaults from ENV
  6. -
  7. Clear Hierarchy: Predictable override behavior
  8. -
  9. Multi-Workspace Support: Easy switching between workspaces
  10. -
  11. User Overrides: Per-workspace user preferences
  12. -
  13. Version Control: Workspace configs can be committed (except secrets)
  14. -
-

Security Considerations

-

Generated .gitignore

-

The workspace .gitignore excludes:

-
    -
  • .cache/ - Cache files
  • -
  • .runtime/ - Runtime data
  • -
  • .providers/ - Provider state
  • -
  • .kms/keys/ - Secret keys
  • -
  • generated/ - Generated files
  • -
  • *.log - Log files
  • -
-

Secret Management

-
    -
  • KMS keys stored in .kms/keys/ (gitignored)
  • -
  • SOPS config references keys, doesn’t store them
  • -
  • Provider credentials in user-specific locations (not workspace)
  • -
-

Troubleshooting

-

No Active Workspace Error

-
Error: No active workspace found. Please initialize or activate a workspace.
-
-

Solution: Initialize or activate a workspace:

-
workspace-init "my-workspace" "/path/to/workspace" --activate
-
-

Config File Not Found

-
Error: Required configuration file not found: {workspace}/config/provisioning.yaml
-
-

Solution: The workspace config is corrupted or deleted. Re-initialize:

-
workspace-init "workspace-name" "/existing/path" --providers ["aws"]
-
-

Provider Not Configured

-

Solution: Add provider config to workspace:

-
# Generate provider config manually
-generate-provider-config "/workspace/path" "workspace-name" "aws"
-
-

Future Enhancements

-
    -
  1. Workspace Templates: Pre-configured workspace templates (dev, prod, test)
  2. -
  3. Workspace Import/Export: Share workspace configurations
  4. -
  5. Remote Workspace: Load workspace from remote Git repository
  6. -
  7. Workspace Validation: Comprehensive workspace health checks
  8. -
  9. Config Migration Tool: Automated migration from old ENV-based system
  10. -
-

Summary

-
    -
  • config.defaults.toml is ONLY a template - Never loaded at runtime
  • -
  • Workspaces are self-contained - Complete config structure generated from templates
  • -
  • New hierarchy: Workspace → Provider → Platform → User Context → ENV
  • -
  • User context for overrides - Stored in ~/Library/Application Support/provisioning/
  • -
  • Clear, explicit configuration - No hidden defaults
  • -
- -
    -
  • Template files: provisioning/config/templates/
  • -
  • Workspace init: provisioning/core/nulib/lib_provisioning/workspace/init.nu
  • -
  • Config loader: provisioning/core/nulib/lib_provisioning/config/loader.nu
  • -
  • User guide: docs/user/workspace-management.md
  • -
-

Workspace Configuration Management Commands

-

Overview

-

The workspace configuration management commands provide a comprehensive set of tools for viewing, editing, validating, and managing workspace configurations.

-

Command Summary

-
- - - - - - -
CommandDescription
workspace config showDisplay workspace configuration
workspace config validateValidate all configuration files
workspace config generate providerGenerate provider configuration from template
workspace config editEdit configuration files
workspace config hierarchyShow configuration loading hierarchy
workspace config listList all configuration files
-
-

Commands

-

Show Workspace Configuration

-

Display the complete workspace configuration in JSON, YAML, TOML, and other formats.

-
# Show active workspace config (YAML format)
-provisioning workspace config show
-
-# Show specific workspace config
-provisioning workspace config show my-workspace
-
-# Show in JSON format
-provisioning workspace config show --out json
-
-# Show in TOML format
-provisioning workspace config show --out toml
-
-# Show specific workspace in JSON
-provisioning workspace config show my-workspace --out json
-
-

Output: Complete workspace configuration in the specified format

-

Validate Workspace Configuration

-

Validate all configuration files for syntax and required sections.

-
# Validate active workspace
-provisioning workspace config validate
-
-# Validate specific workspace
-provisioning workspace config validate my-workspace
-
-

Checks performed:

-
    -
  • Main config (provisioning.yaml) - YAML syntax and required sections
  • -
  • Provider configs (providers/*.toml) - TOML syntax
  • -
  • Platform service configs (platform/*.toml) - TOML syntax
  • -
  • KMS config (kms.toml) - TOML syntax
  • -
-

Output: Validation report with success/error indicators

-

Generate Provider Configuration

-

Generate a provider configuration file from a template.

-
# Generate AWS provider config for active workspace
-provisioning workspace config generate provider aws
-
-# Generate UpCloud provider config for specific workspace
-provisioning workspace config generate provider upcloud --infra my-workspace
-
-# Generate local provider config
-provisioning workspace config generate provider local
-
-

What it does:

-
    -
  1. Locates provider template in extensions/providers/{name}/config.defaults.toml
  2. -
  3. Interpolates workspace-specific values ({{workspace.name}}, {{workspace.path}})
  4. -
  5. Saves to {workspace}/config/providers/{name}.toml
  6. -
-

Output: Generated configuration file ready for customization

-

Edit Configuration Files

-

Open configuration files in your editor for modification.

-
# Edit main workspace config
-provisioning workspace config edit main
-
-# Edit specific provider config
-provisioning workspace config edit provider aws
-
-# Edit platform service config
-provisioning workspace config edit platform orchestrator
-
-# Edit KMS config
-provisioning workspace config edit kms
-
-# Edit for specific workspace
-provisioning workspace config edit provider upcloud --infra my-workspace
-
-

Editor used: Value of $EDITOR environment variable (defaults to vi)

-

Config types:

-
    -
  • main - Main workspace configuration (provisioning.yaml)
  • -
  • provider <name> - Provider configuration (providers/{name}.toml)
  • -
  • platform <name> - Platform service configuration (platform/{name}.toml)
  • -
  • kms - KMS configuration (kms.toml)
  • -
-

Show Configuration Hierarchy

-

Display the configuration loading hierarchy and precedence.

-
# Show hierarchy for active workspace
-provisioning workspace config hierarchy
-
-# Show hierarchy for specific workspace
-provisioning workspace config hierarchy my-workspace
-
-

Output: Visual hierarchy showing:

-
    -
  1. Environment Variables (highest priority)
  2. -
  3. User Context
  4. -
  5. Platform Services
  6. -
  7. Provider Configs
  8. -
  9. Workspace Config (lowest priority)
  10. -
-

List Configuration Files

-

List all configuration files for a workspace.

-
# List all configs
-provisioning workspace config list
-
-# List only provider configs
-provisioning workspace config list --type provider
-
-# List only platform configs
-provisioning workspace config list --type platform
-
-# List only KMS config
-provisioning workspace config list --type kms
-
-# List for specific workspace
-provisioning workspace config list my-workspace --type all
-
-

Output: Table of configuration files with type, name, and path

-

Workspace Selection

-

All config commands support two ways to specify the workspace:

-
    -
  1. -

    Active Workspace (default):

    -
    provisioning workspace config show
    -
    -
  2. -
  3. -

    Specific Workspace (using --infra flag):

    -
    provisioning workspace config show --infra my-workspace
    -
    -
  4. -
-

Configuration File Locations

-

Workspace configurations are organized in a standard structure:

-
{workspace}/
-├── config/
-│   ├── provisioning.yaml       # Main workspace config
-│   ├── providers/              # Provider configurations
-│   │   ├── aws.toml
-│   │   ├── upcloud.toml
-│   │   └── local.toml
-│   ├── platform/               # Platform service configs
-│   │   ├── orchestrator.toml
-│   │   ├── control-center.toml
-│   │   └── mcp.toml
-│   └── kms.toml                # KMS configuration
-
-

Configuration Hierarchy

-

Configuration values are loaded in the following order (highest to lowest priority):

-
    -
  1. Environment Variables - PROVISIONING_* variables
  2. -
  3. User Context - ~/Library/Application Support/provisioning/ws_{name}.yaml
  4. -
  5. Platform Services - {workspace}/config/platform/*.toml
  6. -
  7. Provider Configs - {workspace}/config/providers/*.toml
  8. -
  9. Workspace Config - {workspace}/config/provisioning.yaml
  10. -
-

Higher priority values override lower priority values.

-

Examples

-

Complete Workflow

-
# 1. Create new workspace with activation
-provisioning workspace init my-project ~/workspaces/my-project --providers [aws,local] --activate
-
-# 2. Validate configuration
-provisioning workspace config validate
-
-# 3. View configuration hierarchy
-provisioning workspace config hierarchy
-
-# 4. Generate additional provider config
-provisioning workspace config generate provider upcloud
-
-# 5. Edit provider settings
-provisioning workspace config edit provider upcloud
-
-# 6. List all configs
-provisioning workspace config list
-
-# 7. Show complete config in JSON
-provisioning workspace config show --out json
-
-# 8. Validate everything
-provisioning workspace config validate
-
-

Multi-Workspace Management

-
# Create multiple workspaces
-provisioning workspace init dev ~/workspaces/dev --activate
-provisioning workspace init staging ~/workspaces/staging
-provisioning workspace init prod ~/workspaces/prod
-
-# Validate specific workspace
-provisioning workspace config validate staging
-
-# Show config for production
-provisioning workspace config show prod --out yaml
-
-# Edit provider for specific workspace
-provisioning workspace config edit provider aws --infra prod
-
-

Configuration Troubleshooting

-
# 1. Validate all configs
-provisioning workspace config validate
-
-# 2. If errors, check hierarchy
-provisioning workspace config hierarchy
-
-# 3. List all config files
-provisioning workspace config list
-
-# 4. Edit problematic config
-provisioning workspace config edit provider aws
-
-# 5. Validate again
-provisioning workspace config validate
-
-

Integration with Other Commands

-

Config commands integrate seamlessly with other workspace operations:

-
# Create workspace with providers
-provisioning workspace init my-app ~/apps/my-app --providers [aws,upcloud] --activate
-
-# Generate additional configs
-provisioning workspace config generate provider local
-
-# Validate before deployment
-provisioning workspace config validate
-
-# Deploy infrastructure
-provisioning server create --infra my-app
-
-

Tips

-
    -
  1. -

    Always validate after editing: Run workspace config validate after manual edits

    -
  2. -
  3. -

    Use hierarchy to understand precedence: Run workspace config hierarchy to see which config files are being used

    -
  4. -
  5. -

    Generate from templates: Use config generate provider rather than creating configs manually

    -
  6. -
  7. -

    Check before activation: Validate a workspace before activating it as default

    -
  8. -
  9. -

    Use –out json for scripting: JSON output is easier to parse in scripts

    -
  10. -
-

See Also

- -

Workspace Enforcement and Version Tracking Guide

-

Version: 1.0.0 -Last Updated: 2025-10-06 -System Version: 2.0.5+

-
-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Workspace Requirement
  4. -
  5. Version Tracking
  6. -
  7. Migration Framework
  8. -
  9. Command Reference
  10. -
  11. Troubleshooting
  12. -
  13. Best Practices
  14. -
-
-

Overview

-

The provisioning system now enforces mandatory workspace requirements for all infrastructure operations. This ensures:

-
    -
  • Consistent Environment: All operations run in a well-defined workspace
  • -
  • Version Compatibility: Workspaces track provisioning and schema versions
  • -
  • Safe Migrations: Automatic migration framework with backup/rollback support
  • -
  • Configuration Isolation: Each workspace has isolated configurations and state
  • -
-

Key Features

-
    -
  • Mandatory Workspace: Most commands require an active workspace
  • -
  • Version Tracking: Workspaces track system, schema, and format versions
  • -
  • Compatibility Checks: Automatic validation before operations
  • -
  • Migration Framework: Safe upgrades with backup/restore
  • -
  • Clear Error Messages: Helpful guidance when workspace is missing or incompatible
  • -
-
-

Workspace Requirement

-

Commands That Require Workspace

-

Almost all provisioning commands now require an active workspace:

-
    -
  • Infrastructure: server, taskserv, cluster, infra
  • -
  • Orchestration: workflow, batch, orchestrator
  • -
  • Development: module, layer, pack
  • -
  • Generation: generate
  • -
  • Configuration: Most config commands
  • -
  • Test: test environment commands
  • -
-

Commands That Don’t Require Workspace

-

Only informational and workspace management commands work without a workspace:

-
    -
  • help - Help system
  • -
  • version - Show version information
  • -
  • workspace - Workspace management commands
  • -
  • guide / sc - Documentation and quick reference
  • -
  • nu - Start Nushell session
  • -
  • nuinfo - Nushell information
  • -
-

What Happens Without a Workspace

-

If you run a command without an active workspace, you’ll see:

-
✗ Workspace Required
-
-No active workspace is configured.
-
-To get started:
-
-  1. Create a new workspace:
-     provisioning workspace init <name>
-
-  2. Or activate an existing workspace:
-     provisioning workspace activate <name>
-
-  3. List available workspaces:
-     provisioning workspace list
-
-
-

Version Tracking

-

Workspace Metadata

-

Each workspace maintains metadata in .provisioning/metadata.yaml:

-
workspace:
-  name: "my-workspace"
-  path: "/path/to/workspace"
-
-version:
-  provisioning: "2.0.5"    # System version when created/updated
-  schema: "1.0.0"          # KCL schema version
-  workspace_format: "2.0.0" # Directory structure version
-
-created: "2025-10-06T12:00:00Z"
-last_updated: "2025-10-06T13:30:00Z"
-
-migration_history: []
-
-compatibility:
-  min_provisioning_version: "2.0.0"
-  min_schema_version: "1.0.0"
-
-

Version Components

-

1. Provisioning Version

-
    -
  • What: Version of the provisioning system (CLI + libraries)
  • -
  • Example: 2.0.5
  • -
  • Purpose: Ensures workspace is compatible with current system
  • -
-

2. Schema Version

-
    -
  • What: Version of KCL schemas used in workspace
  • -
  • Example: 1.0.0
  • -
  • Purpose: Tracks configuration schema compatibility
  • -
-

3. Workspace Format Version

-
    -
  • What: Version of workspace directory structure
  • -
  • Example: 2.0.0
  • -
  • Purpose: Ensures workspace has required directories and files
  • -
-

Checking Workspace Version

-

View workspace version information:

-
# Check active workspace version
-provisioning workspace version
-
-# Check specific workspace version
-provisioning workspace version my-workspace
-
-# JSON output
-provisioning workspace version --format json
-
-

Example Output:

-
Workspace Version Information
-
-System:
-  Version: 2.0.5
-
-Workspace:
-  Name: my-workspace
-  Path: /Users/user/workspaces/my-workspace
-  Version: 2.0.5
-  Schema Version: 1.0.0
-  Format Version: 2.0.0
-  Created: 2025-10-06T12:00:00Z
-  Last Updated: 2025-10-06T13:30:00Z
-
-Compatibility:
-  Compatible: true
-  Reason: version_match
-  Message: Workspace and system versions match
-
-Migrations:
-  Total: 0
-
-
-

Migration Framework

-

When Migration is Needed

-

Migration is required when:

-
    -
  1. No Metadata: Workspace created before version tracking (< 2.0.5)
  2. -
  3. Version Mismatch: System version is newer than workspace version
  4. -
  5. Breaking Changes: Major version update with structural changes
  6. -
-

Compatibility Scenarios

-

Scenario 1: No Metadata (Unknown Version)

-
Workspace version is incompatible:
-  Workspace: my-workspace
-  Path: /path/to/workspace
-
-Workspace metadata not found or corrupted
-
-This workspace needs migration:
-
-  Run workspace migration:
-     provisioning workspace migrate my-workspace
-
-

Scenario 2: Migration Available

-
ℹ Migration available: Workspace can be updated from 2.0.0 to 2.0.5
-  Run: provisioning workspace migrate my-workspace
-
-

Scenario 3: Workspace Too New

-
Workspace version (3.0.0) is newer than system (2.0.5)
-
-Workspace is newer than the system:
-  Workspace version: 3.0.0
-  System version: 2.0.5
-
-  Upgrade the provisioning system to use this workspace.
-
-

Running Migrations

-

Basic Migration

-

Migrate active workspace to current system version:

-
provisioning workspace migrate
-
-

Migrate Specific Workspace

-
provisioning workspace migrate my-workspace
-
-

Migration Options

-
# Skip backup (not recommended)
-provisioning workspace migrate --skip-backup
-
-# Force without confirmation
-provisioning workspace migrate --force
-
-# Migrate to specific version
-provisioning workspace migrate --target-version 2.1.0
-
-

Migration Process

-

When you run a migration:

-
    -
  1. Validation: System validates workspace exists and needs migration
  2. -
  3. Backup: Creates timestamped backup in .workspace_backups/
  4. -
  5. Confirmation: Prompts for confirmation (unless --force)
  6. -
  7. Migration: Applies migration steps sequentially
  8. -
  9. Verification: Validates migration success
  10. -
  11. Metadata Update: Records migration in workspace metadata
  12. -
-

Example Migration Output:

-
Workspace Migration
-
-Workspace: my-workspace
-Path: /path/to/workspace
-
-Current version: unknown
-Target version: 2.0.5
-
-This will migrate the workspace from unknown to 2.0.5
-A backup will be created before migration.
-
-Continue with migration? (y/N): y
-
-Creating backup...
-✓ Backup created: /path/.workspace_backups/my-workspace_backup_20251006_123000
-
-Migration Strategy: Initialize metadata
-Description: Add metadata tracking to existing workspace
-From: unknown → To: 2.0.5
-
-Migrating workspace to version 2.0.5...
-✓ Initialize metadata completed
-
-✓ Migration completed successfully
-
-

Workspace Backups

-

List Backups

-
# List backups for active workspace
-provisioning workspace list-backups
-
-# List backups for specific workspace
-provisioning workspace list-backups my-workspace
-
-

Example Output:

-
Workspace Backups for my-workspace
-
-name                               created                  reason         size
-my-workspace_backup_20251006_1200  2025-10-06T12:00:00Z    pre_migration  2.3 MB
-my-workspace_backup_20251005_1500  2025-10-05T15:00:00Z    pre_migration  2.1 MB
-
-

Restore from Backup

-
# Restore workspace from backup
-provisioning workspace restore-backup /path/to/backup
-
-# Force restore without confirmation
-provisioning workspace restore-backup /path/to/backup --force
-
-

Restore Process:

-
Restore Workspace from Backup
-
-Backup: /path/.workspace_backups/my-workspace_backup_20251006_1200
-Original path: /path/to/workspace
-Created: 2025-10-06T12:00:00Z
-Reason: pre_migration
-
-⚠ This will replace the current workspace at:
-  /path/to/workspace
-
-Continue with restore? (y/N): y
-
-✓ Workspace restored from backup
-
-
-

Command Reference

-

Workspace Version Commands

-
# Show workspace version information
-provisioning workspace version [workspace-name] [--format table|json|yaml]
-
-# Check compatibility
-provisioning workspace check-compatibility [workspace-name]
-
-# Migrate workspace
-provisioning workspace migrate [workspace-name] [--skip-backup] [--force] [--target-version VERSION]
-
-# List backups
-provisioning workspace list-backups [workspace-name]
-
-# Restore from backup
-provisioning workspace restore-backup <backup-path> [--force]
-
-

Workspace Management Commands

-
# List all workspaces
-provisioning workspace list
-
-# Show active workspace
-provisioning workspace active
-
-# Activate workspace
-provisioning workspace activate <name>
-
-# Create new workspace (includes metadata initialization)
-provisioning workspace init <name> [path]
-
-# Register existing workspace
-provisioning workspace register <name> <path>
-
-# Remove workspace from registry
-provisioning workspace remove <name> [--force]
-
-
-

Troubleshooting

-

Problem: “No active workspace”

-

Solution: Activate or create a workspace

-
# List available workspaces
-provisioning workspace list
-
-# Activate existing workspace
-provisioning workspace activate my-workspace
-
-# Or create new workspace
-provisioning workspace init new-workspace
-
-

Problem: “Workspace has invalid structure”

-

Symptoms: Missing directories or configuration files

-

Solution: Run migration to fix structure

-
provisioning workspace migrate my-workspace
-
-

Problem: “Workspace version is incompatible”

-

Solution: Run migration to upgrade workspace

-
provisioning workspace migrate
-
-

Problem: Migration Failed

-

Solution: Restore from automatic backup

-
# List backups
-provisioning workspace list-backups
-
-# Restore from most recent backup
-provisioning workspace restore-backup /path/to/backup
-
-

Problem: Can’t Activate Workspace After Migration

-

Possible Causes:

-
    -
  1. Migration failed partially
  2. -
  3. Workspace path changed
  4. -
  5. Metadata corrupted
  6. -
-

Solutions:

-
# Check workspace compatibility
-provisioning workspace check-compatibility my-workspace
-
-# If corrupted, restore from backup
-provisioning workspace restore-backup /path/to/backup
-
-# If path changed, re-register
-provisioning workspace remove my-workspace
-provisioning workspace register my-workspace /new/path --activate
-
-
-

Best Practices

-

1. Always Use Named Workspaces

-

Create workspaces for different environments:

-
provisioning workspace init dev ~/workspaces/dev --activate
-provisioning workspace init staging ~/workspaces/staging
-provisioning workspace init production ~/workspaces/production
-
-

2. Let System Create Backups

-

Never use --skip-backup for important workspaces. Backups are cheap, data loss is expensive.

-
# Good: Default with backup
-provisioning workspace migrate
-
-# Risky: No backup
-provisioning workspace migrate --skip-backup  # DON'T DO THIS
-
-

3. Check Compatibility Before Operations

-

Before major operations, verify workspace compatibility:

-
provisioning workspace check-compatibility
-
-

4. Migrate After System Upgrades

-

After upgrading the provisioning system:

-
# Check if migration available
-provisioning workspace version
-
-# Migrate if needed
-provisioning workspace migrate
-
-

5. Keep Backups for Safety

-

Don’t immediately delete old backups:

-
# List backups
-provisioning workspace list-backups
-
-# Keep at least 2-3 recent backups
-
-

6. Use Version Control for Workspace Configs

-

Initialize git in workspace directory:

-
cd ~/workspaces/my-workspace
-git init
-git add config/ infra/
-git commit -m "Initial workspace configuration"
-
-

Exclude runtime and cache directories in .gitignore:

-
.cache/
-.runtime/
-.provisioning/
-.workspace_backups/
-
-

7. Document Custom Migrations

-

If you need custom migration steps, document them:

-
# Create migration notes
-echo "Custom steps for v2 to v3 migration" > MIGRATION_NOTES.md
-
-
-

Migration History

-

Each migration is recorded in workspace metadata:

-
migration_history:
-  - from_version: "unknown"
-    to_version: "2.0.5"
-    migration_type: "metadata_initialization"
-    timestamp: "2025-10-06T12:00:00Z"
-    success: true
-    notes: "Initial metadata creation"
-
-  - from_version: "2.0.5"
-    to_version: "2.1.0"
-    migration_type: "version_update"
-    timestamp: "2025-10-15T10:30:00Z"
-    success: true
-    notes: "Updated to workspace switching support"
-
-

View migration history:

-
provisioning workspace version --format yaml | grep -A 10 "migration_history"
-
-
-

Summary

-

The workspace enforcement and version tracking system provides:

-
    -
  • Safety: Mandatory workspace prevents accidental operations outside defined environments
  • -
  • Compatibility: Version tracking ensures workspace works with current system
  • -
  • Upgradability: Migration framework handles version transitions safely
  • -
  • Recoverability: Automatic backups protect against migration failures
  • -
-

Key Commands:

-
# Create workspace
-provisioning workspace init my-workspace --activate
-
-# Check version
-provisioning workspace version
-
-# Migrate if needed
-provisioning workspace migrate
-
-# List backups
-provisioning workspace list-backups
-
-

For more information, see:

-
    -
  • Workspace Switching Guide: docs/user/WORKSPACE_SWITCHING_GUIDE.md
  • -
  • Quick Reference: provisioning sc or provisioning guide quickstart
  • -
  • Help System: provisioning help workspace
  • -
-
-

Questions or Issues?

-

Check the troubleshooting section or run:

-
provisioning workspace check-compatibility
-
-

This will provide specific guidance for your situation.

-

Unified Workspace:Infrastructure Reference System

-

Version: 1.0.0 -Last Updated: 2025-12-04

-

Overview

-

The Workspace:Infrastructure Reference System provides a unified notation for managing workspaces and their associated infrastructure. This system -eliminates the need to specify infrastructure separately and enables convenient defaults.

-

Quick Start

-

Temporal Override (Single Command)

-

Use the -ws flag with workspace:infra notation:

-
# Use production workspace with sgoyol infrastructure for this command only
-provisioning server list -ws production:sgoyol
-
-# Use default infrastructure of active workspace
-provisioning taskserv create kubernetes
-
-

Persistent Activation

-

Activate a workspace with a default infrastructure:

-
# Activate librecloud workspace and set wuji as default infra
-provisioning workspace activate librecloud:wuji
-
-# Now all commands use librecloud:wuji by default
-provisioning server list
-
-

Notation Syntax

-

Basic Format

-
workspace:infra
-
-
- - - -
PartDescriptionExample
workspaceWorkspace namelibrecloud
:Separator-
infraInfrastructure namewuji
-
-

Examples

-
- - - - -
NotationWorkspaceInfrastructure
librecloud:wujilibrecloudwuji
production:sgoyolproductionsgoyol
dev:localdevlocal
librecloudlibrecloud(from default or context)
-
-

Resolution Priority

-

When no infrastructure is explicitly specified, the system uses this priority order:

-
    -
  1. -

    Explicit --infra flag (highest)

    -
    provisioning server list --infra another-infra
    -
    -
  2. -
  3. -

    PWD Detection

    -
    cd workspace_librecloud/infra/wuji
    -provisioning server list  # Auto-detects wuji
    -
    -
  4. -
  5. -

    Default Infrastructure

    -
    # If workspace has default_infra set
    -provisioning server list  # Uses configured default
    -
    -
  6. -
  7. -

    Error (no infra found)

    -
    # Error: No infrastructure specified
    -
    -
  8. -
-

Usage Patterns

-

Pattern 1: Temporal Override for Commands

-

Use -ws to override workspace:infra for a single command:

-
# Currently in librecloud:wuji context
-provisioning server list  # Shows librecloud:wuji
-
-# Temporary override for this command only
-provisioning server list -ws production:sgoyol  # Shows production:sgoyol
-
-# Back to original context
-provisioning server list  # Shows librecloud:wuji again
-
-

Pattern 2: Persistent Workspace Activation

-

Set a workspace as active with a default infrastructure:

-
# List available workspaces
-provisioning workspace list
-
-# Activate with infra notation
-provisioning workspace activate production:sgoyol
-
-# All subsequent commands use production:sgoyol
-provisioning server list
-provisioning taskserv create kubernetes
-
-

Pattern 3: PWD-Based Inference

-

The system auto-detects workspace and infrastructure from your current directory:

-
# Your workspace structure
-workspace_librecloud/
-  infra/
-    wuji/
-      settings.ncl
-    another/
-      settings.ncl
-
-# Navigation auto-detects context
-cd workspace_librecloud/infra/wuji
-provisioning server list  # Uses wuji automatically
-
-cd ../another
-provisioning server list  # Switches to another
-
-

Pattern 4: Default Infrastructure Management

-

Set a workspace-specific default infrastructure:

-
# During activation
-provisioning workspace activate librecloud:wuji
-
-# Or explicitly after activation
-provisioning workspace set-default-infra librecloud another-infra
-
-# View current defaults
-provisioning workspace list
-
-

Command Reference

-

Workspace Commands

-
# Activate workspace with infra
-provisioning workspace activate workspace:infra
-
-# Switch to different workspace
-provisioning workspace switch workspace_name
-
-# List all workspaces
-provisioning workspace list
-
-# Show active workspace
-provisioning workspace active
-
-# Set default infrastructure
-provisioning workspace set-default-infra workspace_name infra_name
-
-# Get default infrastructure
-provisioning workspace get-default-infra workspace_name
-
-

Common Commands with -ws

-
# Server operations
-provisioning server create -ws workspace:infra
-provisioning server list -ws workspace:infra
-provisioning server delete name -ws workspace:infra
-
-# Task service operations
-provisioning taskserv create kubernetes -ws workspace:infra
-provisioning taskserv delete kubernetes -ws workspace:infra
-
-# Infrastructure operations
-provisioning infra validate -ws workspace:infra
-provisioning infra list -ws workspace:infra
-
-

Features

-

✅ Unified Notation

-
    -
  • Single workspace:infra format for all references
  • -
  • Works with all provisioning commands
  • -
  • Backward compatible with existing workflows
  • -
-

✅ Temporal Override

-
    -
  • Use -ws flag for single-command overrides
  • -
  • No permanent state changes
  • -
  • Automatically reverted after command
  • -
-

✅ Persistent Defaults

-
    -
  • Set default infrastructure per workspace
  • -
  • Eliminates repetitive --infra flags
  • -
  • Survives across sessions
  • -
-

✅ Smart Detection

-
    -
  • Auto-detects workspace from directory
  • -
  • Auto-detects infrastructure from PWD
  • -
  • Fallback to configured defaults
  • -
-

✅ Error Handling

-
    -
  • Clear error messages when infra not found
  • -
  • Validation of workspace and infra existence
  • -
  • Helpful hints for missing configurations
  • -
-

Environment Context

-

TEMP_WORKSPACE Variable

-

The system uses $env.TEMP_WORKSPACE for temporal overrides:

-
# Set temporarily (via -ws flag automatically)
-$env.TEMP_WORKSPACE = "production"
-
-# Check current context
-echo $env.TEMP_WORKSPACE
-
-# Clear after use
-hide-env TEMP_WORKSPACE
-
-

Validation

-

Validating Notation

-
# Valid notation formats
-librecloud:wuji           # Standard format
-production:sgoyol.v2      # With dots and hyphens
-dev-01:local-test         # Multiple hyphens
-prod123:infra456          # Numeric names
-
-# Special characters
-lib-cloud_01:wu-ji.v2    # Mix of all allowed chars
-
-

Error Cases

-
# Workspace not found
-provisioning workspace activate unknown:infra
-# Error: Workspace 'unknown' not found in registry
-
-# Infrastructure not found
-provisioning workspace activate librecloud:unknown
-# Error: Infrastructure 'unknown' not found in workspace 'librecloud'
-
-# Empty specification
-provisioning workspace activate ""
-# Error: Workspace '' not found in registry
-
-

Configuration

-

User Configuration

-

Default infrastructure is stored in ~/Library/Application Support/provisioning/user_config.yaml:

-
active_workspace: "librecloud"
-
-workspaces:
-  - name: "librecloud"
-    path: "/Users/you/workspaces/librecloud"
-    last_used: "2025-12-04T12:00:00Z"
-    default_infra: "wuji"  # Default infrastructure
-
-  - name: "production"
-    path: "/opt/workspaces/production"
-    last_used: "2025-12-03T15:30:00Z"
-    default_infra: "sgoyol"
-
-

Workspace Schema

-

In provisioning/schemas/workspace_config.ncl:

-
{
-  InfraConfig = {
-    current | String,  # Infrastructure context settings
-    default | String | optional,  # Default infrastructure for workspace
-  },
-}
-
-

Best Practices

-

1. Use Persistent Activation for Long Sessions

-
# Good: Activate at start of session
-provisioning workspace activate production:sgoyol
-
-# Then use simple commands
-provisioning server list
-provisioning taskserv create kubernetes
-
-

2. Use Temporal Override for Ad-Hoc Operations

-
# Good: Quick one-off operation
-provisioning server list -ws production:other-infra
-
-# Avoid: Repeated -ws flags
-provisioning server list -ws prod:infra1
-provisioning taskserv list -ws prod:infra1  # Better to activate once
-
-

3. Navigate with PWD for Context Awareness

-
# Good: Navigate to infrastructure directory
-cd workspace_librecloud/infra/wuji
-provisioning server list  # Auto-detects context
-
-# Works well with: cd - history, terminal multiplexer panes
-
-

4. Set Meaningful Defaults

-
# Good: Default to production infrastructure
-provisioning workspace activate production:main-infra
-
-# Avoid: Default to dev infrastructure in production workspace
-
-

Troubleshooting

-

Issue: “Workspace not found in registry”

-

Solution: Register the workspace first

-
provisioning workspace register librecloud /path/to/workspace_librecloud
-
-

Issue: “Infrastructure not found”

-

Solution: Verify infrastructure directory exists

-
ls workspace_librecloud/infra/  # Check available infras
-provisioning workspace activate librecloud:wuji  # Use correct name
-
-

Issue: Temporal override not working

-

Solution: Ensure you’re using -ws flag correctly

-
# Correct
-provisioning server list -ws production:sgoyol
-
-# Incorrect (missing space)
-provisioning server list-wsproduction:sgoyol
-
-# Incorrect (ws is not a command)
-provisioning -ws production:sgoyol server list
-
-

Issue: PWD detection not working

-

Solution: Navigate to proper infrastructure directory

-
# Must be in workspace structure
-cd workspace_name/infra/infra_name
-
-# Then run command
-provisioning server list
-
-

Migration from Old System

-

Old Way

-
provisioning workspace activate librecloud
-provisioning --infra wuji server list
-provisioning --infra wuji taskserv create kubernetes
-
-

New Way

-
provisioning workspace activate librecloud:wuji
-provisioning server list
-provisioning taskserv create kubernetes
-
-

Performance Notes

-
    -
  • Notation parsing: <1 ms per command
  • -
  • Workspace detection: <5 ms from PWD
  • -
  • Workspace switching: ~100 ms (includes platform activation)
  • -
  • Temporal override: No additional overhead
  • -
-

Backward Compatibility

-

All existing commands and flags continue to work:

-
# Old syntax still works
-provisioning --infra wuji server list
-
-# New syntax also works
-provisioning server list -ws librecloud:wuji
-
-# Mix and match
-provisioning --infra other-infra server list -ws librecloud:wuji
-# Uses other-infra (explicit flag takes priority)
-
-

See Also

-
    -
  • provisioning help workspace - Workspace commands
  • -
  • provisioning help infra - Infrastructure commands
  • -
  • docs/architecture/ARCHITECTURE_OVERVIEW.md - Overall architecture
  • -
  • docs/user/WORKSPACE_SWITCHING_GUIDE.md - Workspace switching details
  • -
-

Authentication Layer Implementation Guide

-

Version: 1.0.0 -Date: 2025-10-09 -Status: Production Ready

-
-

Overview

-

A comprehensive authentication layer has been integrated into the provisioning system to -secure sensitive operations. The system uses nu_plugin_auth for JWT authentication with -MFA support, providing enterprise-grade security with graceful user experience.

-
-

Key Features

-

JWT Authentication

-
    -
  • RS256 asymmetric signing
  • -
  • Access tokens (15 min) + refresh tokens (7 d)
  • -
  • OS keyring storage (macOS Keychain, Windows Credential Manager, Linux Secret Service)
  • -
-

MFA Support

-
    -
  • TOTP (Google Authenticator, Authy)
  • -
  • WebAuthn/FIDO2 (YubiKey, Touch ID)
  • -
  • Required for production and destructive operations
  • -
-

Security Policies

-
    -
  • Production environment: Requires authentication + MFA
  • -
  • Destructive operations: Requires authentication + MFA (delete, destroy)
  • -
  • Development/test: Requires authentication, allows skip with flag
  • -
  • Check mode: Always bypasses authentication (dry-run operations)
  • -
-

Audit Logging

-
    -
  • All authenticated operations logged
  • -
  • User, timestamp, operation details
  • -
  • MFA verification status
  • -
  • JSON format for easy parsing
  • -
-

User-Friendly Error Messages

-
    -
  • Clear instructions for login/MFA
  • -
  • Distinct error types (platform auth vs provider auth)
  • -
  • Helpful guidance for setup
  • -
-
-

Quick Start

-

1. Login to Platform

-
# Interactive login (password prompt)
-provisioning auth login <username>
-
-# Save credentials to keyring
-provisioning auth login <username> --save
-
-# Custom control center URL
-provisioning auth login admin --url http://control.example.com:9080
-
-

2. Enroll MFA (First Time)

-
# Enroll TOTP (Google Authenticator)
-provisioning auth mfa enroll totp
-
-# Scan QR code with authenticator app
-# Or enter secret manually
-
-

3. Verify MFA (For Sensitive Operations)

-
# Get 6-digit code from authenticator app
-provisioning auth mfa verify --code 123456
-
-

4. Check Authentication Status

-
# View current authentication status
-provisioning auth status
-
-# Verify token is valid
-provisioning auth verify
-
-
-

Protected Operations

-

Server Operations

-
# ✅ CREATE - Requires auth (prod: +MFA)
-provisioning server create web-01                    # Auth required
-provisioning server create web-01 --check            # Auth skipped (check mode)
-
-# ❌ DELETE - Requires auth + MFA
-provisioning server delete web-01                    # Auth + MFA required
-provisioning server delete web-01 --check            # Auth skipped (check mode)
-
-# 📖 READ - No auth required
-provisioning server list                             # No auth required
-provisioning server ssh web-01                       # No auth required
-
-

Task Service Operations

-
# ✅ CREATE - Requires auth (prod: +MFA)
-provisioning taskserv create kubernetes              # Auth required
-provisioning taskserv create kubernetes --check      # Auth skipped
-
-# ❌ DELETE - Requires auth + MFA
-provisioning taskserv delete kubernetes              # Auth + MFA required
-
-# 📖 READ - No auth required
-provisioning taskserv list                           # No auth required
-
-

Cluster Operations

-
# ✅ CREATE - Requires auth (prod: +MFA)
-provisioning cluster create buildkit                 # Auth required
-provisioning cluster create buildkit --check         # Auth skipped
-
-# ❌ DELETE - Requires auth + MFA
-provisioning cluster delete buildkit                 # Auth + MFA required
-
-

Batch Workflows

-
# ✅ SUBMIT - Requires auth (prod: +MFA)
-provisioning batch submit workflow.ncl               # Auth required
-provisioning batch submit workflow.ncl --skip-auth   # Auth skipped (if allowed)
-
-# 📖 READ - No auth required
-provisioning batch list                              # No auth required
-provisioning batch status <task-id>                  # No auth required
-
-
-

Configuration

-

Security Settings (config.defaults.toml)

-
[security]
-require_auth = true  # Enable authentication system
-require_mfa_for_production = true  # MFA for prod environment
-require_mfa_for_destructive = true  # MFA for delete operations
-auth_timeout = 3600  # Token timeout (1 hour)
-audit_log_path = "{{paths.base}}/logs/audit.log"
-
-[security.bypass]
-allow_skip_auth = false  # Allow PROVISIONING_SKIP_AUTH env var
-
-[plugins]
-auth_enabled = true  # Enable nu_plugin_auth
-
-[platform.control_center]
-url = "http://localhost:9080"  # Control center URL
-
-

Environment-Specific Configuration

-
# Development
-[environments.dev]
-security.bypass.allow_skip_auth = true  # Allow auth bypass in dev
-
-# Production
-[environments.prod]
-security.bypass.allow_skip_auth = false  # Never allow bypass
-security.require_mfa_for_production = true
-
-
-

Authentication Bypass (Dev/Test Only)

-

Environment Variable Method

-
# Export environment variable (dev/test only)
-export PROVISIONING_SKIP_AUTH=true
-
-# Run operations without authentication
-provisioning server create web-01
-
-# Unset when done
-unset PROVISIONING_SKIP_AUTH
-
-

Per-Command Flag

-
# Some commands support --skip-auth flag
-provisioning batch submit workflow.ncl --skip-auth
-
-

Check Mode (Always Bypasses Auth)

-
# Check mode is always allowed without auth
-provisioning server create web-01 --check
-provisioning taskserv create kubernetes --check
-
-

⚠️ WARNING: Auth bypass is ONLY for development/testing. Production systems must have -security.bypass.allow_skip_auth = false.

-
-

Error Messages

-

Not Authenticated

-
❌ Authentication Required
-
-Operation: server create web-01
-You must be logged in to perform this operation.
-
-To login:
-   provisioning auth login <username>
-
-Note: Your credentials will be securely stored in the system keyring.
-
-

Solution: Run provisioning auth login <username>

-
-

MFA Required

-
❌ MFA Verification Required
-
-Operation: server delete web-01
-Reason: destructive operation (delete/destroy)
-
-To verify MFA:
-   1. Get code from your authenticator app
-   2. Run: provisioning auth mfa verify --code <6-digit-code>
-
-Don't have MFA set up?
-   Run: provisioning auth mfa enroll totp
-
-

Solution: Run provisioning auth mfa verify --code 123456

-
-

Token Expired

-
❌ Authentication Required
-
-Operation: server create web-02
-You must be logged in to perform this operation.
-
-Error: Token verification failed
-
-

Solution: Token expired, re-login with provisioning auth login <username>

-
-

Audit Logging

-

All authenticated operations are logged to the audit log file with the following information:

-
{
-  "timestamp": "2025-10-09 14:32:15",
-  "user": "admin",
-  "operation": "server_create",
-  "details": {
-    "hostname": "web-01",
-    "infra": "production",
-    "environment": "prod",
-    "orchestrated": false
-  },
-  "mfa_verified": true
-}
-
-

Viewing Audit Logs

-
# View raw audit log
-cat provisioning/logs/audit.log
-
-# Filter by user
-cat provisioning/logs/audit.log | jq '. | select(.user == "admin")'
-
-# Filter by operation type
-cat provisioning/logs/audit.log | jq '. | select(.operation == "server_create")'
-
-# Filter by date
-cat provisioning/logs/audit.log | jq '. | select(.timestamp | startswith("2025-10-09"))'
-
-
-

Integration with Control Center

-

The authentication system integrates with the provisioning platform’s control center REST API:

-
    -
  • POST /api/auth/login - Login with credentials
  • -
  • POST /api/auth/logout - Revoke tokens
  • -
  • POST /api/auth/verify - Verify token validity
  • -
  • GET /api/auth/sessions - List active sessions
  • -
  • POST /api/mfa/enroll - Enroll MFA device
  • -
  • POST /api/mfa/verify - Verify MFA code
  • -
-

Starting Control Center

-
# Start control center (required for authentication)
-cd provisioning/platform/control-center
-cargo run --release
-
-

Or use the orchestrator which includes control center:

-
cd provisioning/platform/orchestrator
-./scripts/start-orchestrator.nu --background
-
-
-

Testing Authentication

-

Manual Testing

-
# 1. Start control center
-cd provisioning/platform/control-center
-cargo run --release &
-
-# 2. Login
-provisioning auth login admin
-
-# 3. Try creating server (should succeed if authenticated)
-provisioning server create test-server --check
-
-# 4. Logout
-provisioning auth logout
-
-# 5. Try creating server (should fail - not authenticated)
-provisioning server create test-server --check
-
-

Automated Testing

-
# Run authentication tests
-nu provisioning/core/nulib/lib_provisioning/plugins/auth_test.nu
-
-
-

Troubleshooting

-

Plugin Not Available

-

Error: Authentication plugin not available

-

Solution:

-
    -
  1. Check plugin is built: ls provisioning/core/plugins/nushell-plugins/nu_plugin_auth/target/release/
  2. -
  3. Register plugin: plugin add target/release/nu_plugin_auth
  4. -
  5. Use plugin: plugin use auth
  6. -
  7. Verify: which auth
  8. -
-
-

Control Center Not Running

-

Error: Cannot connect to control center

-

Solution:

-
    -
  1. Start control center: cd provisioning/platform/control-center && cargo run --release
  2. -
  3. Or use orchestrator: cd provisioning/platform/orchestrator && ./scripts/start-orchestrator.nu --background
  4. -
  5. Check URL is correct in config: provisioning config get platform.control_center.url
  6. -
-
-

MFA Not Working

-

Error: Invalid MFA code

-

Solutions:

-
    -
  • Ensure time is synchronized (TOTP codes are time-based)
  • -
  • Code expires every 30 seconds, get fresh code
  • -
  • Verify you’re using the correct authenticator app entry
  • -
  • Re-enroll if needed: provisioning auth mfa enroll totp
  • -
-
-

Keyring Access Issues

-

Error: Keyring storage unavailable

-

macOS: Grant Keychain access to Terminal/iTerm2 in System Preferences → Security & Privacy

-

Linux: Ensure gnome-keyring or kwallet is running

-

Windows: Check Windows Credential Manager is accessible

-
-

Architecture

-

Authentication Flow

-
┌─────────────┐
-│ User Command│
-└──────┬──────┘
-       │
-       ▼
-┌─────────────────────────────────┐
-│ Infrastructure Command Handler  │
-│ (infrastructure.nu)             │
-└──────┬──────────────────────────┘
-       │
-       ▼
-┌─────────────────────────────────┐
-│ Auth Check                       │
-│ - Determine operation type       │
-│ - Check if auth required         │
-│ - Check environment (prod/dev)   │
-└──────┬──────────────────────────┘
-       │
-       ▼
-┌─────────────────────────────────┐
-│ Auth Plugin Wrapper              │
-│ (auth.nu)                        │
-│ - Call plugin or HTTP fallback   │
-│ - Verify token validity          │
-│ - Check MFA if required          │
-└──────┬──────────────────────────┘
-       │
-       ▼
-┌─────────────────────────────────┐
-│ nu_plugin_auth                   │
-│ - JWT verification (RS256)       │
-│ - Keyring token storage          │
-│ - MFA verification               │
-└──────┬──────────────────────────┘
-       │
-       ▼
-┌─────────────────────────────────┐
-│ Control Center API               │
-│ - /api/auth/verify               │
-│ - /api/mfa/verify                │
-└──────┬──────────────────────────┘
-       │
-       ▼
-┌─────────────────────────────────┐
-│ Operation Execution              │
-│ (servers/create.nu, etc.)        │
-└──────┬──────────────────────────┘
-       │
-       ▼
-┌─────────────────────────────────┐
-│ Audit Logging                    │
-│ - Log to audit.log               │
-│ - Include user, timestamp, MFA   │
-└─────────────────────────────────┘
-
-

File Structure

-
provisioning/
-├── config/
-│   └── config.defaults.toml           # Security configuration
-├── core/nulib/
-│   ├── lib_provisioning/plugins/
-│   │   └── auth.nu                    # Auth wrapper (550 lines)
-│   ├── servers/
-│   │   └── create.nu                  # Server ops with auth
-│   ├── workflows/
-│   │   └── batch.nu                   # Batch workflows with auth
-│   └── main_provisioning/commands/
-│       └── infrastructure.nu          # Infrastructure commands with auth
-├── core/plugins/nushell-plugins/
-│   └── nu_plugin_auth/                # Native Rust plugin
-│       ├── src/
-│       │   ├── main.rs                # Plugin implementation
-│       │   └── helpers.rs             # Helper functions
-│       └── README.md                  # Plugin documentation
-├── platform/control-center/           # Control Center (Rust)
-│   └── src/auth/                      # JWT auth implementation
-└── logs/
-    └── audit.log                       # Audit trail
-
-
- -
    -
  • Security System Overview: docs/architecture/adr-009-security-system-complete.md
  • -
  • JWT Authentication: docs/architecture/JWT_AUTH_IMPLEMENTATION.md
  • -
  • MFA Implementation: docs/architecture/MFA_IMPLEMENTATION_SUMMARY.md
  • -
  • Plugin README: provisioning/core/plugins/nushell-plugins/nu_plugin_auth/README.md
  • -
  • Control Center: provisioning/platform/control-center/README.md
  • -
-
-

Summary of Changes

-
- - - - - - - -
FileChangesLines Added
lib_provisioning/plugins/auth.nuAdded security policy enforcement functions+260
config/config.defaults.tomlAdded security configuration section+19
servers/create.nuAdded auth check for server creation+25
workflows/batch.nuAdded auth check for batch workflow submission+43
main_provisioning/commands/infrastructure.nuAdded auth checks for all infrastructure commands+90
lib_provisioning/providers/interface.nuAdded authentication guidelines for providers+65
Total6 files modified~500 lines
-
-
-

Best Practices

-

For Users

-
    -
  1. Always login: Keep your session active to avoid interruptions
  2. -
  3. Use keyring: Save credentials with --save flag for persistence
  4. -
  5. Enable MFA: Use MFA for production operations
  6. -
  7. Check mode first: Always test with --check before actual operations
  8. -
  9. Monitor audit logs: Review audit logs regularly for security
  10. -
-

For Developers

-
    -
  1. Check auth early: Verify authentication before expensive operations
  2. -
  3. Log operations: Always log authenticated operations for audit
  4. -
  5. Clear error messages: Provide helpful guidance for auth failures
  6. -
  7. Respect check mode: Always skip auth in check/dry-run mode
  8. -
  9. Test both paths: Test with and without authentication
  10. -
-

For Operators

-
    -
  1. Production hardening: Set allow_skip_auth = false in production
  2. -
  3. MFA enforcement: Require MFA for all production environments
  4. -
  5. Monitor audit logs: Set up log monitoring and alerts
  6. -
  7. Token rotation: Configure short token timeouts (15 min default)
  8. -
  9. Backup authentication: Ensure multiple admins have MFA enrolled
  10. -
-
-

License

-

MIT License - See LICENSE file for details

-
-

Quick Reference

-

Version: 1.0.0 -Last Updated: 2025-10-09

-
-

Quick Commands

-

Login

-
provisioning auth login <username>              # Interactive password
-provisioning auth login <username> --save       # Save to keyring
-
-

MFA

-
provisioning auth mfa enroll totp               # Enroll TOTP
-provisioning auth mfa verify --code 123456      # Verify code
-
-

Status

-
provisioning auth status                        # Show auth status
-provisioning auth verify                        # Verify token
-
-

Logout

-
provisioning auth logout                        # Logout current session
-provisioning auth logout --all                  # Logout all sessions
-
-
-

Protected Operations

-
- - - - - - - - -
OperationAuthMFA (Prod)MFA (Delete)Check Mode
server createSkip
server deleteSkip
server list-
taskserv createSkip
taskserv deleteSkip
cluster createSkip
cluster deleteSkip
batch submit-
-
-
-

Bypass Authentication (Dev/Test Only)

-

Environment Variable

-
export PROVISIONING_SKIP_AUTH=true
-provisioning server create test
-unset PROVISIONING_SKIP_AUTH
-
-

Check Mode (Always Allowed)

-
provisioning server create prod --check
-provisioning taskserv delete k8s --check
-
-

Config Flag

-
[security.bypass]
-allow_skip_auth = true  # Only in dev/test
-
-
-

Configuration

-

Security Settings

-
[security]
-require_auth = true
-require_mfa_for_production = true
-require_mfa_for_destructive = true
-auth_timeout = 3600
-
-[security.bypass]
-allow_skip_auth = false  # true in dev only
-
-[plugins]
-auth_enabled = true
-
-[platform.control_center]
-url = "http://localhost:3000"
-
-
-

Error Messages

-

Not Authenticated

-
❌ Authentication Required
-Operation: server create web-01
-To login: provisioning auth login <username>
-
-

Fix: provisioning auth login <username>

-

MFA Required

-
❌ MFA Verification Required
-Operation: server delete web-01
-Reason: destructive operation
-
-

Fix: provisioning auth mfa verify --code <code>

-

Token Expired

-
Error: Token verification failed
-
-

Fix: Re-login: provisioning auth login <username>

-
-

Troubleshooting

-
- - - - - -
ErrorSolution
Plugin not availableplugin add target/release/nu_plugin_auth
Control center offlineStart: cd provisioning/platform/control-center && cargo run
Invalid MFA codeGet fresh code (expires in 30s)
Token expiredRe-login: provisioning auth login <username>
Keyring access deniedGrant app access in system settings
-
-
-

Audit Logs

-
# View audit log
-cat provisioning/logs/audit.log
-
-# Filter by user
-cat provisioning/logs/audit.log | jq '. | select(.user == "admin")'
-
-# Filter by operation
-cat provisioning/logs/audit.log | jq '. | select(.operation == "server_create")'
-
-
-

CI/CD Integration

-

Option 1: Skip Auth (Dev/Test Only)

-
export PROVISIONING_SKIP_AUTH=true
-provisioning server create ci-server
-
-

Option 2: Check Mode

-
provisioning server create ci-server --check
-
-

Option 3: Service Account (Future)

-
export PROVISIONING_AUTH_TOKEN="<token>"
-provisioning server create ci-server
-
-
-

Performance

-
- - - - -
OperationAuth Overhead
Server create~20 ms
Taskserv create~20 ms
Batch submit~20 ms
Check mode0 ms (skipped)
-
-
- -
    -
  • Full Guide: docs/user/AUTHENTICATION_LAYER_GUIDE.md
  • -
  • Implementation: AUTHENTICATION_LAYER_IMPLEMENTATION_SUMMARY.md
  • -
  • Security ADR: docs/architecture/adr-009-security-system-complete.md
  • -
-
-

Quick Help: provisioning help auth or provisioning auth --help

-
-

Last Updated: 2025-10-09 -Maintained By: Security Team

-
-

Setup Guide

-

Complete Authentication Setup Guide

-

Current Settings (from your config)

-
[security]
-require_auth = true                    # ✅ Auth is REQUIRED
-allow_skip_auth = false                # ❌ Cannot skip with env var
-auth_timeout = 3600                    # Token valid for 1 hour
-
-[platform.control_center]
-url = "http://localhost:3000"          # Control Center endpoint
-
-

STEP 1: Start Control Center

-

The Control Center is the authentication backend:

-
# Check if it's already running
-curl http://localhost:3000/health
-
-# If not running, start it
-cd /Users/Akasha/project-provisioning/provisioning/platform/control-center
-cargo run --release &
-
-# Wait for it to start (may take 30-60 seconds)
-sleep 30
-curl http://localhost:3000/health
-
-

Expected Output:

-
{"status": "healthy"}
-
-

STEP 2: Find Default Credentials

-

Check for default user setup:

-
# Look for initialization scripts
-ls -la /Users/Akasha/project-provisioning/provisioning/platform/control-center/
-
-# Check for README or setup instructions
-cat /Users/Akasha/project-provisioning/provisioning/platform/control-center/README.md
-
-# Or check for default config
-cat /Users/Akasha/project-provisioning/provisioning/platform/control-center/config.toml 2>/dev/null || echo "Config not found"
-
-

STEP 3: Log In

-

Once you have credentials (usually admin / password from setup):

-
# Interactive login - will prompt for password
-provisioning auth login
-
-# Or with username
-provisioning auth login admin
-
-# Verify you're logged in
-provisioning auth status
-
-

Expected Success Output:

-
✓ Login successful!
-
-User:       admin
-Role:       admin
-Expires:    2025-10-22T14:30:00Z
-MFA:        false
-
-Session active and ready
-
-

STEP 4: Now Create Your Server

-

Once authenticated:

-
# Try server creation again
-provisioning server create sgoyol --check
-
-# Or with full details
-provisioning server create sgoyol --infra workspace_librecloud --check
-
-

🛠️ Alternative: Skip Auth for Development

-

If you want to bypass authentication temporarily for testing:

-

Option A: Edit config to allow skip

-
# You would need to parse and modify TOML - easier to do next option
-
-

Option B: Use environment variable (if allowed by config)

-
export PROVISIONING_SKIP_AUTH=true
-provisioning server create sgoyol
-unset PROVISIONING_SKIP_AUTH
-
-

Option C: Use check mode (always works, no auth needed)

-
provisioning server create sgoyol --check
-
-

Option D: Modify config.defaults.toml (permanent for dev)

-

Edit: provisioning/config/config.defaults.toml

-

Change line 193 to:

-
allow_skip_auth = true
-
-

🔍 Troubleshooting

-
- - - - - -
ProblemSolution
Control Center won’t startCheck port 3000 not in use: lsof -i :3000
“No token found” errorLogin with: provisioning auth login
Login failsVerify Control Center is running: curl http://localhost:3000/health
Token expiredRe-login: provisioning auth login
Plugin not availableUsing HTTP fallback - this is OK, works without plugin
-

Configuration Encryption Guide

-

Version: 1.0.0 -Last Updated: 2025-10-08 -Status: Production Ready

-

Overview

-

The Provisioning Platform includes a comprehensive configuration encryption system that provides:

-
    -
  • Transparent Encryption/Decryption: Configs are automatically decrypted on load
  • -
  • Multiple KMS Backends: Age, AWS KMS, HashiCorp Vault, Cosmian KMS
  • -
  • Memory-Only Decryption: Secrets never written to disk in plaintext
  • -
  • SOPS Integration: Industry-standard encryption with SOPS
  • -
  • Sensitive Data Detection: Automatic scanning for unencrypted sensitive data
  • -
-

Table of Contents

-
    -
  1. Prerequisites
  2. -
  3. Quick Start
  4. -
  5. Configuration Encryption
  6. -
  7. KMS Backends
  8. -
  9. CLI Commands
  10. -
  11. Integration with Config Loader
  12. -
  13. Best Practices
  14. -
  15. Troubleshooting
  16. -
-
-

Prerequisites

-

Required Tools

-
    -
  1. -

    SOPS (v3.10.2+)

    -
    # macOS
    -brew install sops
    -
    -# Linux
    -wget https://github.com/mozilla/sops/releases/download/v3.10.2/sops-v3.10.2.linux.amd64
    -sudo mv sops-v3.10.2.linux.amd64 /usr/local/bin/sops
    -sudo chmod +x /usr/local/bin/sops
    -
    -
  2. -
  3. -

    Age (for Age backend - recommended)

    -
    # macOS
    -brew install age
    -
    -# Linux
    -apt install age
    -
    -
  4. -
  5. -

    AWS CLI (for AWS KMS backend - optional)

    -
    brew install awscli
    -
    -
  6. -
-

Verify Installation

-
# Check SOPS
-sops --version
-
-# Check Age
-age --version
-
-# Check AWS CLI (optional)
-aws --version
-
-
-

Quick Start

-

1. Initialize Encryption

-

Generate Age keys and create SOPS configuration:

-
provisioning config init-encryption --kms age
-
-

This will:

-
    -
  • Generate Age key pair in ~/.config/sops/age/keys.txt
  • -
  • Display your public key (recipient)
  • -
  • Create .sops.yaml in your project
  • -
-

2. Set Environment Variables

-

Add to your shell profile (~/.zshrc or ~/.bashrc):

-
# Age encryption
-export SOPS_AGE_RECIPIENTS="age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p"
-export PROVISIONING_KAGE="$HOME/.config/sops/age/keys.txt"
-
-

Replace the recipient with your actual public key.

-

3. Validate Setup

-
provisioning config validate-encryption
-
-

Expected output:

-
✅ Encryption configuration is valid
-   SOPS installed: true
-   Age backend: true
-   KMS enabled: false
-   Errors: 0
-   Warnings: 0
-
-

4. Encrypt Your First Config

-
# Create a config with sensitive data
-cat > workspace/config/secure.yaml <<EOF
-database:
-  host: localhost
-  password: supersecret123
-  api_key: key_abc123
-EOF
-
-# Encrypt it
-provisioning config encrypt workspace/config/secure.yaml --in-place
-
-# Verify it's encrypted
-provisioning config is-encrypted workspace/config/secure.yaml
-
-
-

Configuration Encryption

-

File Naming Conventions

-

Encrypted files should follow these patterns:

-
    -
  • *.enc.yaml - Encrypted YAML files
  • -
  • *.enc.yml - Encrypted YAML files (alternative)
  • -
  • *.enc.toml - Encrypted TOML files
  • -
  • secure.yaml - Files in workspace/config/
  • -
-

The .sops.yaml configuration automatically applies encryption rules based on file paths.

-

Encrypt a Configuration File

-

Basic Encryption

-
# Encrypt and create new file
-provisioning config encrypt secrets.yaml
-
-# Output: secrets.yaml.enc
-
-

In-Place Encryption

-
# Encrypt and replace original
-provisioning config encrypt secrets.yaml --in-place
-
-

Specify Output Path

-
# Encrypt to specific location
-provisioning config encrypt secrets.yaml --output workspace/config/secure.enc.yaml
-
-

Choose KMS Backend

-
# Use Age (default)
-provisioning config encrypt secrets.yaml --kms age
-
-# Use AWS KMS
-provisioning config encrypt secrets.yaml --kms aws-kms
-
-# Use Vault
-provisioning config encrypt secrets.yaml --kms vault
-
-

Decrypt a Configuration File

-
# Decrypt to new file
-provisioning config decrypt secrets.enc.yaml
-
-# Decrypt in-place
-provisioning config decrypt secrets.enc.yaml --in-place
-
-# Decrypt to specific location
-provisioning config decrypt secrets.enc.yaml --output plaintext.yaml
-
-

Edit Encrypted Files

-

The system provides a secure editing workflow:

-
# Edit encrypted file (auto decrypt -> edit -> re-encrypt)
-provisioning config edit-secure workspace/config/secure.enc.yaml
-
-

This will:

-
    -
  1. Decrypt the file temporarily
  2. -
  3. Open in your $EDITOR (vim/nano/etc)
  4. -
  5. Re-encrypt when you save and close
  6. -
  7. Remove temporary decrypted file
  8. -
-

Check Encryption Status

-
# Check if file is encrypted
-provisioning config is-encrypted workspace/config/secure.yaml
-
-# Get detailed encryption info
-provisioning config encryption-info workspace/config/secure.yaml
-
-
-

KMS Backends

- -

Pros:

-
    -
  • Simple file-based keys
  • -
  • No external dependencies
  • -
  • Fast and secure
  • -
  • Works offline
  • -
-

Setup:

-
# Initialize
-provisioning config init-encryption --kms age
-
-# Set environment variables
-export SOPS_AGE_RECIPIENTS="age1..."  # Your public key
-export PROVISIONING_KAGE="$HOME/.config/sops/age/keys.txt"
-
-

Encrypt/Decrypt:

-
provisioning config encrypt secrets.yaml --kms age
-provisioning config decrypt secrets.enc.yaml
-
-

AWS KMS (Production)

-

Pros:

-
    -
  • Centralized key management
  • -
  • Audit logging
  • -
  • IAM integration
  • -
  • Key rotation
  • -
-

Setup:

-
    -
  1. -

    Create KMS key in AWS Console

    -
  2. -
  3. -

    Configure AWS credentials:

    -
    aws configure
    -
    -
  4. -
  5. -

    Update .sops.yaml:

    -
    creation_rules:
    -  - path_regex: .*\.enc\.yaml$
    -    kms: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
    -
    -
  6. -
-

Encrypt/Decrypt:

-
provisioning config encrypt secrets.yaml --kms aws-kms
-provisioning config decrypt secrets.enc.yaml
-
-

HashiCorp Vault (Enterprise)

-

Pros:

-
    -
  • Dynamic secrets
  • -
  • Centralized secret management
  • -
  • Audit logging
  • -
  • Policy-based access
  • -
-

Setup:

-
    -
  1. -

    Configure Vault address and token:

    -
    export VAULT_ADDR="https://vault.example.com:8200"
    -export VAULT_TOKEN="s.xxxxxxxxxxxxxx"
    -
    -
  2. -
  3. -

    Update configuration:

    -
    # workspace/config/provisioning.yaml
    -kms:
    -  enabled: true
    -  mode: "remote"
    -  vault:
    -    address: "https://vault.example.com:8200"
    -    transit_key: "provisioning"
    -
    -
  4. -
-

Encrypt/Decrypt:

-
provisioning config encrypt secrets.yaml --kms vault
-provisioning config decrypt secrets.enc.yaml
-
-

Cosmian KMS (Confidential Computing)

-

Pros:

-
    -
  • Confidential computing support
  • -
  • Zero-knowledge architecture
  • -
  • Post-quantum ready
  • -
  • Cloud-agnostic
  • -
-

Setup:

-
    -
  1. -

    Deploy Cosmian KMS server

    -
  2. -
  3. -

    Update configuration:

    -
    kms:
    -  enabled: true
    -  mode: "remote"
    -  remote:
    -    endpoint: "https://kms.example.com:9998"
    -    auth_method: "certificate"
    -    client_cert: "/path/to/client.crt"
    -    client_key: "/path/to/client.key"
    -
    -
  4. -
-

Encrypt/Decrypt:

-
provisioning config encrypt secrets.yaml --kms cosmian
-provisioning config decrypt secrets.enc.yaml
-
-
-

CLI Commands

-

Configuration Encryption Commands

-
- - - - - - - - - - -
CommandDescription
config encrypt <file>Encrypt configuration file
config decrypt <file>Decrypt configuration file
config edit-secure <file>Edit encrypted file securely
config rotate-keys <file> <key>Rotate encryption keys
config is-encrypted <file>Check if file is encrypted
config encryption-info <file>Show encryption details
config validate-encryptionValidate encryption setup
config scan-sensitive <dir>Find unencrypted sensitive configs
config encrypt-all <dir>Encrypt all sensitive configs
config init-encryptionInitialize encryption (generate keys)
-
-

Examples

-
# Encrypt workspace config
-provisioning config encrypt workspace/config/secure.yaml --in-place
-
-# Edit encrypted file
-provisioning config edit-secure workspace/config/secure.yaml
-
-# Scan for unencrypted sensitive configs
-provisioning config scan-sensitive workspace/config --recursive
-
-# Encrypt all sensitive configs in workspace
-provisioning config encrypt-all workspace/config --kms age --recursive
-
-# Check encryption status
-provisioning config is-encrypted workspace/config/secure.yaml
-
-# Get detailed info
-provisioning config encryption-info workspace/config/secure.yaml
-
-# Validate setup
-provisioning config validate-encryption
-
-
-

Integration with Config Loader

-

Automatic Decryption

-

The config loader automatically detects and decrypts encrypted files:

-
# Load encrypted config (automatically decrypted in memory)
-use lib_provisioning/config/loader.nu
-
-let config = (load-provisioning-config --debug)
-
-

Key Features:

-
    -
  • Transparent: No code changes needed
  • -
  • Memory-Only: Decrypted content never written to disk
  • -
  • Fallback: If decryption fails, attempts to load as plain file
  • -
  • Debug Support: Shows decryption status with --debug flag
  • -
-

Manual Loading

-
use lib_provisioning/config/encryption.nu
-
-# Load encrypted config
-let secure_config = (load-encrypted-config "workspace/config/secure.enc.yaml")
-
-# Memory-only decryption (no file created)
-let decrypted_content = (decrypt-config-memory "workspace/config/secure.enc.yaml")
-
-

Configuration Hierarchy with Encryption

-

The system supports encrypted files at any level:

-
1. workspace/{name}/config/provisioning.yaml        ← Can be encrypted
-2. workspace/{name}/config/providers/*.toml         ← Can be encrypted
-3. workspace/{name}/config/platform/*.toml          ← Can be encrypted
-4. ~/.../provisioning/ws_{name}.yaml                ← Can be encrypted
-5. Environment variables (PROVISIONING_*)           ← Plain text
-
-
-

Best Practices

-

1. Encrypt All Sensitive Data

-

Always encrypt configs containing:

-
    -
  • Passwords
  • -
  • API keys
  • -
  • Secret keys
  • -
  • Private keys
  • -
  • Tokens
  • -
  • Credentials
  • -
-

Scan for unencrypted sensitive data:

-
provisioning config scan-sensitive workspace --recursive
-
-

2. Use Appropriate KMS Backend

-
- - - - -
EnvironmentRecommended Backend
DevelopmentAge (file-based)
StagingAWS KMS or Vault
ProductionAWS KMS or Vault
CI/CDAWS KMS with IAM roles
-
-

3. Key Management

-

Age Keys:

-
    -
  • Store private keys securely: ~/.config/sops/age/keys.txt
  • -
  • Set file permissions: chmod 600 ~/.config/sops/age/keys.txt
  • -
  • Backup keys securely (encrypted backup)
  • -
  • Never commit private keys to git
  • -
-

AWS KMS:

-
    -
  • Use separate keys per environment
  • -
  • Enable key rotation
  • -
  • Use IAM policies for access control
  • -
  • Monitor usage with CloudTrail
  • -
-

Vault:

-
    -
  • Use transit engine for encryption
  • -
  • Enable audit logging
  • -
  • Implement least-privilege policies
  • -
  • Regular policy reviews
  • -
-

4. File Organization

-
workspace/
-└── config/
-    ├── provisioning.yaml         # Plain (no secrets)
-    ├── secure.yaml                # Encrypted (SOPS auto-detects)
-    ├── providers/
-    │   ├── aws.toml               # Plain (no secrets)
-    │   └── aws-credentials.enc.toml  # Encrypted
-    └── platform/
-        └── database.enc.yaml      # Encrypted
-
-

5. Git Integration

-

Add to .gitignore:

-
# Unencrypted sensitive files
-**/secrets.yaml
-**/credentials.yaml
-**/*.dec.yaml
-**/*.dec.toml
-
-# Temporary decrypted files
-*.tmp.yaml
-*.tmp.toml
-
-

Commit encrypted files:

-
# Encrypted files are safe to commit
-git add workspace/config/secure.enc.yaml
-git commit -m "Add encrypted configuration"
-
-

6. Rotation Strategy

-

Regular Key Rotation:

-
# Generate new Age key
-age-keygen -o ~/.config/sops/age/keys-new.txt
-
-# Update .sops.yaml with new recipient
-
-# Rotate keys for file
-provisioning config rotate-keys workspace/config/secure.yaml <new-key-id>
-
-

Frequency:

-
    -
  • Development: Annually
  • -
  • Production: Quarterly
  • -
  • After team member departure: Immediately
  • -
-

7. Audit and Monitoring

-

Track encryption status:

-
# Regular scans
-provisioning config scan-sensitive workspace --recursive
-
-# Validate encryption setup
-provisioning config validate-encryption
-
-

Monitor access (with Vault/AWS KMS):

-
    -
  • Enable audit logging
  • -
  • Review access patterns
  • -
  • Alert on anomalies
  • -
-
-

Troubleshooting

-

SOPS Not Found

-

Error:

-
SOPS binary not found
-
-

Solution:

-
# Install SOPS
-brew install sops
-
-# Verify
-sops --version
-
-

Age Key Not Found

-

Error:

-
Age key file not found: ~/.config/sops/age/keys.txt
-
-

Solution:

-
# Generate new key
-mkdir -p ~/.config/sops/age
-age-keygen -o ~/.config/sops/age/keys.txt
-
-# Set environment variable
-export PROVISIONING_KAGE="$HOME/.config/sops/age/keys.txt"
-
-

SOPS_AGE_RECIPIENTS Not Set

-

Error:

-
no AGE_RECIPIENTS for file.yaml
-
-

Solution:

-
# Extract public key from private key
-grep "public key:" ~/.config/sops/age/keys.txt
-
-# Set environment variable
-export SOPS_AGE_RECIPIENTS="age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p"
-
-

Decryption Failed

-

Error:

-
Failed to decrypt configuration file
-
-

Solutions:

-
    -
  1. -

    Wrong key:

    -
    # Verify you have the correct private key
    -provisioning config validate-encryption
    -
    -
  2. -
  3. -

    File corrupted:

    -
    # Check file integrity
    -sops --decrypt workspace/config/secure.yaml
    -
    -
  4. -
  5. -

    Wrong backend:

    -
    # Check SOPS metadata in file
    -head -20 workspace/config/secure.yaml
    -
    -
  6. -
-

AWS KMS Access Denied

-

Error:

-
AccessDeniedException: User is not authorized to perform: kms:Decrypt
-
-

Solution:

-
# Check AWS credentials
-aws sts get-caller-identity
-
-# Verify KMS key policy allows your IAM user/role
-aws kms describe-key --key-id <key-arn>
-
-

Vault Connection Failed

-

Error:

-
Vault encryption failed: connection refused
-
-

Solution:

-
# Verify Vault address
-echo $VAULT_ADDR
-
-# Check connectivity
-curl -k $VAULT_ADDR/v1/sys/health
-
-# Verify token
-vault token lookup
-
-
-

Security Considerations

-

Threat Model

-

Protected Against:

-
    -
  • ✅ Plaintext secrets in git
  • -
  • ✅ Accidental secret exposure
  • -
  • ✅ Unauthorized file access
  • -
  • ✅ Key compromise (with rotation)
  • -
-

Not Protected Against:

-
    -
  • ❌ Memory dumps during decryption
  • -
  • ❌ Root/admin access to running process
  • -
  • ❌ Compromised Age/KMS keys
  • -
  • ❌ Social engineering
  • -
-

Security Best Practices

-
    -
  1. Principle of Least Privilege: Only grant decryption access to those who need it
  2. -
  3. Key Separation: Use different keys for different environments
  4. -
  5. Regular Audits: Review who has access to keys
  6. -
  7. Secure Key Storage: Never store private keys in git
  8. -
  9. Rotation: Regularly rotate encryption keys
  10. -
  11. Monitoring: Monitor decryption operations (with AWS KMS/Vault)
  12. -
-
-

Additional Resources

- -
-

Support

-

For issues or questions:

-
    -
  • Check troubleshooting section above
  • -
  • Run: provisioning config validate-encryption
  • -
  • Review logs with --debug flag
  • -
-
-

Quick Reference

-

Setup (One-time)

-
# 1. Initialize encryption
-provisioning config init-encryption --kms age
-
-# 2. Set environment variables (add to ~/.zshrc or ~/.bashrc)
-export SOPS_AGE_RECIPIENTS="age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p"
-export PROVISIONING_KAGE="$HOME/.config/sops/age/keys.txt"
-
-# 3. Validate setup
-provisioning config validate-encryption
-
-

Common Commands

-
- - - - - - - - -
TaskCommand
Encrypt fileprovisioning config encrypt secrets.yaml --in-place
Decrypt fileprovisioning config decrypt secrets.enc.yaml
Edit encryptedprovisioning config edit-secure secrets.enc.yaml
Check if encryptedprovisioning config is-encrypted secrets.yaml
Scan for unencryptedprovisioning config scan-sensitive workspace --recursive
Encrypt all sensitiveprovisioning config encrypt-all workspace/config --kms age
Validate setupprovisioning config validate-encryption
Show encryption infoprovisioning config encryption-info secrets.yaml
-
-

File Naming Conventions

-

Automatically encrypted by SOPS:

-
    -
  • workspace/*/config/secure.yaml ← Auto-encrypted
  • -
  • *.enc.yaml ← Auto-encrypted
  • -
  • *.enc.yml ← Auto-encrypted
  • -
  • *.enc.toml ← Auto-encrypted
  • -
  • workspace/*/config/providers/*credentials*.toml ← Auto-encrypted
  • -
-

Quick Workflow

-
# Create config with secrets
-cat > workspace/config/secure.yaml <<EOF
-database:
-  password: supersecret
-api_key: secret_key_123
-EOF
-
-# Encrypt in-place
-provisioning config encrypt workspace/config/secure.yaml --in-place
-
-# Verify encrypted
-provisioning config is-encrypted workspace/config/secure.yaml
-
-# Edit securely (decrypt -> edit -> re-encrypt)
-provisioning config edit-secure workspace/config/secure.yaml
-
-# Configs are auto-decrypted when loaded
-provisioning env  # Automatically decrypts secure.yaml
-
-

KMS Backends

-
- - - - -
BackendUse CaseSetup Command
AgeDevelopment, simple setupprovisioning config init-encryption --kms age
AWS KMSProduction, AWS environmentsConfigure in .sops.yaml
VaultEnterprise, dynamic secretsSet VAULT_ADDR and VAULT_TOKEN
CosmianConfidential computingConfigure in config.toml
-
-

Security Checklist

-
    -
  • ✅ Encrypt all files with passwords, API keys, secrets
  • -
  • ✅ Never commit unencrypted secrets to git
  • -
  • ✅ Set file permissions: chmod 600 ~/.config/sops/age/keys.txt
  • -
  • ✅ Add plaintext files to .gitignore: *.dec.yaml, secrets.yaml
  • -
  • ✅ Regular key rotation (quarterly for production)
  • -
  • ✅ Separate keys per environment (dev/staging/prod)
  • -
  • ✅ Backup Age keys securely (encrypted backup)
  • -
-

Troubleshooting

-
- - - - - -
ProblemSolution
SOPS binary not foundbrew install sops
Age key file not foundprovisioning config init-encryption --kms age
SOPS_AGE_RECIPIENTS not setexport SOPS_AGE_RECIPIENTS="age1..."
Decryption failedCheck key file: provisioning config validate-encryption
AWS KMS Access DeniedVerify IAM permissions: aws sts get-caller-identity
-
-

Testing

-
# Run all encryption tests
-nu provisioning/core/nulib/lib_provisioning/config/encryption_tests.nu
-
-# Run specific test
-nu provisioning/core/nulib/lib_provisioning/config/encryption_tests.nu --test roundtrip
-
-# Test full workflow
-nu provisioning/core/nulib/lib_provisioning/config/encryption_tests.nu test-full-encryption-workflow
-
-# Test KMS backend
-use lib_provisioning/kms/client.nu
-kms-test --backend age
-
-

Integration

-

Configs are automatically decrypted when loaded:

-
# Nushell code - encryption is transparent
-use lib_provisioning/config/loader.nu
-
-# Auto-decrypts encrypted files in memory
-let config = (load-provisioning-config)
-
-# Access secrets normally
-let db_password = ($config | get database.password)
-
-

Emergency Key Recovery

-

If you lose your Age key:

-
    -
  1. Check backups: ~/.config/sops/age/keys.txt.backup
  2. -
  3. Check other systems: Keys might be on other dev machines
  4. -
  5. Contact team: Team members with access can re-encrypt for you
  6. -
  7. Rotate secrets: If keys are lost, rotate all secrets
  8. -
-

Advanced

-

Multiple Recipients (Team Access)

-
# .sops.yaml
-creation_rules:
-  - path_regex: .*\.enc\.yaml$
-    age: >-
-      age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p,
-      age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8q
-
-

Key Rotation

-
# Generate new key
-age-keygen -o ~/.config/sops/age/keys-new.txt
-
-# Update .sops.yaml with new recipient
-
-# Rotate keys for file
-provisioning config rotate-keys workspace/config/secure.yaml <new-key-id>
-
-

Scan and Encrypt All

-
# Find all unencrypted sensitive configs
-provisioning config scan-sensitive workspace --recursive
-
-# Encrypt them all
-provisioning config encrypt-all workspace --kms age --recursive
-
-# Verify
-provisioning config scan-sensitive workspace --recursive
-
-

Documentation

- -
-

Last Updated: 2025-10-08 -Version: 1.0.0

-

Complete Security System (v4.0.0)

-

🔐 Enterprise-Grade Security Implementation

-

A comprehensive security system with 39,699 lines across 12 components providing enterprise-grade protection for infrastructure automation.

-

Core Security Components

-

1. Authentication (JWT)

-
    -
  • -

    Type: RS256 token-based authentication

    -
  • -
  • -

    Features: Argon2id hashing, token rotation, session management

    -
  • -
  • -

    Roles: 5 distinct role levels with inheritance

    -
  • -
  • -

    Commands:

    -
    provisioning login
    -provisioning mfa totp verify
    -
    -
  • -
-

2. Authorization (Cedar)

-
    -
  • Type: Policy-as-code using Cedar authorization engine
  • -
  • Features: Context-aware policies, hot reload, fine-grained control
  • -
  • Updates: Dynamic policy reloading without service restart
  • -
-

3. Multi-Factor Authentication (MFA)

-
    -
  • -

    Methods: TOTP (Time-based OTP) + WebAuthn/FIDO2

    -
  • -
  • -

    Features: Backup codes, rate limiting, device binding

    -
  • -
  • -

    Commands:

    -
    provisioning mfa totp enroll
    -provisioning mfa webauthn enroll
    -
    -
  • -
-

4. Secrets Management

-
    -
  • -

    Dynamic Secrets: AWS STS, SSH keys, UpCloud credentials

    -
  • -
  • -

    KMS Integration: Vault + AWS KMS + Age + Cosmian

    -
  • -
  • -

    Features: Auto-cleanup, TTL management, rotation policies

    -
  • -
  • -

    Commands:

    -
    provisioning secrets generate aws --ttl 1hr
    -provisioning ssh connect server01
    -
    -
  • -
-

5. Key Management System (KMS)

-
    -
  • -

    Backends: RustyVault, Age, AWS KMS, HashiCorp Vault, Cosmian

    -
  • -
  • -

    Features: Envelope encryption, key rotation, secure storage

    -
  • -
  • -

    Commands:

    -
    provisioning kms encrypt
    -provisioning config encrypt secure.yaml
    -
    -
  • -
-

6. Audit Logging

-
    -
  • Format: Structured JSON logs with full context
  • -
  • Compliance: GDPR-compliant with PII filtering
  • -
  • Retention: 7-year data retention policy
  • -
  • Exports: 5 export formats (JSON, CSV, SYSLOG, Splunk, CloudWatch)
  • -
-

7. Break-Glass Emergency Access

-
    -
  • -

    Approval: Multi-party approval workflow

    -
  • -
  • -

    Features: Temporary elevated privileges, auto-revocation, audit trail

    -
  • -
  • -

    Commands:

    -
    provisioning break-glass request "reason"
    -provisioning break-glass approve <id>
    -
    -
  • -
-

8. Compliance Management

-
    -
  • -

    Standards: GDPR, SOC2, ISO 27001, incident response procedures

    -
  • -
  • -

    Features: Compliance reporting, audit trails, policy enforcement

    -
  • -
  • -

    Commands:

    -
    provisioning compliance report
    -provisioning compliance gdpr export <user>
    -
    -
  • -
-

9. Audit Query System

-
    -
  • -

    Filtering: By user, action, time range, resource

    -
  • -
  • -

    Features: Structured query language, real-time search

    -
  • -
  • -

    Commands:

    -
    provisioning audit query --user alice --action deploy --from 24h
    -
    -
  • -
-

10. Token Management

-
    -
  • Features: Rotation policies, expiration tracking, revocation
  • -
  • Integration: Seamless with auth system
  • -
-

11. Access Control

-
    -
  • Model: Role-based access control (RBAC)
  • -
  • Features: Resource-level permissions, delegation, audit
  • -
-

12. Encryption

-
    -
  • Standards: AES-256, TLS 1.3, envelope encryption
  • -
  • Coverage: At-rest and in-transit encryption
  • -
-

Performance Characteristics

-
    -
  • Overhead: <20 ms per secure operation
  • -
  • Tests: 350+ comprehensive test cases
  • -
  • Endpoints: 83+ REST API endpoints
  • -
  • CLI Commands: 111+ security-related commands
  • -
-

Quick Reference

-
- - - - - - - - - - -
ComponentCommandPurpose
Loginprovisioning loginUser authentication
MFA TOTPprovisioning mfa totp enrollSetup time-based MFA
MFA WebAuthnprovisioning mfa webauthn enrollSetup hardware security key
Secretsprovisioning secrets generate aws --ttl 1hrGenerate temporary credentials
SSHprovisioning ssh connect server01Secure SSH session
KMS Encryptprovisioning kms encrypt <file>Encrypt configuration
Break-Glassprovisioning break-glass request "reason"Request emergency access
Complianceprovisioning compliance reportGenerate compliance report
GDPR Exportprovisioning compliance gdpr export <user>Export user data
Auditprovisioning audit query --user alice --action deploy --from 24hSearch audit logs
-
-

Architecture

-

Security system is integrated throughout provisioning platform:

-
    -
  • Embedded: All authentication/authorization checks
  • -
  • Non-blocking: <20 ms overhead on operations
  • -
  • Graceful degradation: Fallback mechanisms for partial failures
  • -
  • Hot reload: Policies update without service restart
  • -
-

Configuration

-

Security policies and settings are defined in:

-
    -
  • provisioning/kcl/security.k - KCL security schema definitions
  • -
  • provisioning/config/security/*.toml - Security policy configurations
  • -
  • Environment-specific overrides in workspace/config/
  • -
-

Documentation

- -

Help Commands

-
# Show security help
-provisioning help security
-
-# Show specific security command help
-provisioning login --help
-provisioning mfa --help
-provisioning secrets --help
-
-

RustyVault KMS Backend Guide

-

Version: 1.0.0 -Date: 2025-10-08 -Status: Production-ready

-
-

Overview

-

RustyVault is a self-hosted, Rust-based secrets management system that provides a Vault-compatible API. The provisioning platform now supports -RustyVault as a KMS backend alongside Age, Cosmian, AWS KMS, and HashiCorp Vault.

-

Why RustyVault

-
    -
  • Self-hosted: Full control over your key management infrastructure
  • -
  • Pure Rust: Better performance and memory safety
  • -
  • Vault-compatible: Drop-in replacement for HashiCorp Vault Transit engine
  • -
  • OSI-approved License: Apache 2.0 (vs HashiCorp’s BSL)
  • -
  • Embeddable: Can run as standalone service or embedded library
  • -
  • No Vendor Lock-in: Open-source alternative to proprietary KMS solutions
  • -
-
-

Architecture Position

-
KMS Service Backends:
-├── Age (local development, file-based)
-├── Cosmian (privacy-preserving, production)
-├── AWS KMS (cloud-native AWS)
-├── HashiCorp Vault (enterprise, external)
-└── RustyVault (self-hosted, embedded) ✨ NEW
-
-
-

Installation

-

Option 1: Standalone RustyVault Server

-
# Install RustyVault binary
-cargo install rusty_vault
-
-# Start RustyVault server
-rustyvault server -config=/path/to/config.hcl
-
-

Option 2: Docker Deployment

-
# Pull RustyVault image (if available)
-docker pull tongsuo/rustyvault:latest
-
-# Run RustyVault container
-docker run -d \
-  --name rustyvault \
-  -p 8200:8200 \
-  -v $(pwd)/config:/vault/config \
-  -v $(pwd)/data:/vault/data \
-  tongsuo/rustyvault:latest
-
-

Option 3: From Source

-
# Clone repository
-git clone https://github.com/Tongsuo-Project/RustyVault.git
-cd RustyVault
-
-# Build and run
-cargo build --release
-./target/release/rustyvault server -config=config.hcl
-
-
-

Configuration

-

RustyVault Server Configuration

-

Create rustyvault-config.hcl:

-
# RustyVault Server Configuration
-
-storage "file" {
-  path = "/vault/data"
-}
-
-listener "tcp" {
-  address     = "0.0.0.0:8200"
-  tls_disable = true  # Enable TLS in production
-}
-
-api_addr = "http://127.0.0.1:8200"
-cluster_addr = "https://127.0.0.1:8201"
-
-# Enable Transit secrets engine
-default_lease_ttl = "168h"
-max_lease_ttl = "720h"
-
-

Initialize RustyVault

-
# Initialize (first time only)
-export VAULT_ADDR='http://127.0.0.1:8200'
-rustyvault operator init
-
-# Unseal (after every restart)
-rustyvault operator unseal <unseal_key_1>
-rustyvault operator unseal <unseal_key_2>
-rustyvault operator unseal <unseal_key_3>
-
-# Save root token
-export RUSTYVAULT_TOKEN='<root_token>'
-
-

Enable Transit Engine

-
# Enable transit secrets engine
-rustyvault secrets enable transit
-
-# Create encryption key
-rustyvault write -f transit/keys/provisioning-main
-
-# Verify key creation
-rustyvault read transit/keys/provisioning-main
-
-
-

KMS Service Configuration

-

Update provisioning/config/kms.toml

-
[kms]
-type = "rustyvault"
-server_url = "http://localhost:8200"
-token = "${RUSTYVAULT_TOKEN}"
-mount_point = "transit"
-key_name = "provisioning-main"
-tls_verify = true
-
-[service]
-bind_addr = "0.0.0.0:8081"
-log_level = "info"
-audit_logging = true
-
-[tls]
-enabled = false  # Set true with HTTPS
-
-

Environment Variables

-
# RustyVault connection
-export RUSTYVAULT_ADDR="http://localhost:8200"
-export RUSTYVAULT_TOKEN="s.xxxxxxxxxxxxxxxxxxxxxx"
-export RUSTYVAULT_MOUNT_POINT="transit"
-export RUSTYVAULT_KEY_NAME="provisioning-main"
-export RUSTYVAULT_TLS_VERIFY="true"
-
-# KMS service
-export KMS_BACKEND="rustyvault"
-export KMS_BIND_ADDR="0.0.0.0:8081"
-
-
-

Usage

-

Start KMS Service

-
# With RustyVault backend
-cd provisioning/platform/kms-service
-cargo run
-
-# With custom config
-cargo run -- --config=/path/to/kms.toml
-
-

CLI Operations

-
# Encrypt configuration file
-provisioning kms encrypt provisioning/config/secrets.yaml
-
-# Decrypt configuration
-provisioning kms decrypt provisioning/config/secrets.yaml.enc
-
-# Generate data key (envelope encryption)
-provisioning kms generate-key --spec AES256
-
-# Health check
-provisioning kms health
-
-

REST API Usage

-
# Health check
-curl http://localhost:8081/health
-
-# Encrypt data
-curl -X POST http://localhost:8081/encrypt \
-  -H "Content-Type: application/json" \
-  -d '{
-    "plaintext": "SGVsbG8sIFdvcmxkIQ==",
-    "context": "environment=production"
-  }'
-
-# Decrypt data
-curl -X POST http://localhost:8081/decrypt \
-  -H "Content-Type: application/json" \
-  -d '{
-    "ciphertext": "vault:v1:...",
-    "context": "environment=production"
-  }'
-
-# Generate data key
-curl -X POST http://localhost:8081/datakey/generate \
-  -H "Content-Type: application/json" \
-  -d '{"key_spec": "AES_256"}'
-
-
-

Advanced Features

-

Context-based Encryption (AAD)

-

Additional authenticated data binds encrypted data to specific contexts:

-
# Encrypt with context
-curl -X POST http://localhost:8081/encrypt \
-  -d '{
-    "plaintext": "c2VjcmV0",
-    "context": "environment=prod,service=api"
-  }'
-
-# Decrypt requires same context
-curl -X POST http://localhost:8081/decrypt \
-  -d '{
-    "ciphertext": "vault:v1:...",
-    "context": "environment=prod,service=api"
-  }'
-
-

Envelope Encryption

-

For large files, use envelope encryption:

-
# 1. Generate data key
-DATA_KEY=$(curl -X POST http://localhost:8081/datakey/generate \
-  -d '{"key_spec": "AES_256"}' | jq -r '.plaintext')
-
-# 2. Encrypt large file with data key (locally)
-openssl enc -aes-256-cbc -in large-file.bin -out encrypted.bin -K $DATA_KEY
-
-# 3. Store encrypted data key (from response)
-echo "vault:v1:..." > encrypted-data-key.txt
-
-

Key Rotation

-
# Rotate encryption key in RustyVault
-rustyvault write -f transit/keys/provisioning-main/rotate
-
-# Verify new version
-rustyvault read transit/keys/provisioning-main
-
-# Rewrap existing ciphertext with new key version
-curl -X POST http://localhost:8081/rewrap \
-  -d '{"ciphertext": "vault:v1:..."}'
-
-
-

Production Deployment

-

High Availability Setup

-

Deploy multiple RustyVault instances behind a load balancer:

-
# docker-compose.yml
-version: '3.8'
-
-services:
-  rustyvault-1:
-    image: tongsuo/rustyvault:latest
-    ports:
-      - "8200:8200"
-    volumes:
-      - ./config:/vault/config
-      - vault-data-1:/vault/data
-
-  rustyvault-2:
-    image: tongsuo/rustyvault:latest
-    ports:
-      - "8201:8200"
-    volumes:
-      - ./config:/vault/config
-      - vault-data-2:/vault/data
-
-  lb:
-    image: nginx:alpine
-    ports:
-      - "80:80"
-    volumes:
-      - ./nginx.conf:/etc/nginx/nginx.conf
-    depends_on:
-      - rustyvault-1
-      - rustyvault-2
-
-volumes:
-  vault-data-1:
-  vault-data-2:
-
-

TLS Configuration

-
# kms.toml
-[kms]
-type = "rustyvault"
-server_url = "https://vault.example.com:8200"
-token = "${RUSTYVAULT_TOKEN}"
-tls_verify = true
-
-[tls]
-enabled = true
-cert_path = "/etc/kms/certs/server.crt"
-key_path = "/etc/kms/certs/server.key"
-ca_path = "/etc/kms/certs/ca.crt"
-
-

Auto-Unseal (AWS KMS)

-
# rustyvault-config.hcl
-seal "awskms" {
-  region     = "us-east-1"
-  kms_key_id = "arn:aws:kms:us-east-1:123456789012:key/..."
-}
-
-
-

Monitoring

-

Health Checks

-
# RustyVault health
-curl http://localhost:8200/v1/sys/health
-
-# KMS service health
-curl http://localhost:8081/health
-
-# Metrics (if enabled)
-curl http://localhost:8081/metrics
-
-

Audit Logging

-

Enable audit logging in RustyVault:

-
# rustyvault-config.hcl
-audit {
-  path = "/vault/logs/audit.log"
-  format = "json"
-}
-
-
-

Troubleshooting

-

Common Issues

-

1. Connection Refused

-
# Check RustyVault is running
-curl http://localhost:8200/v1/sys/health
-
-# Check token is valid
-export VAULT_ADDR='http://localhost:8200'
-rustyvault token lookup
-
-

2. Authentication Failed

-
# Verify token in environment
-echo $RUSTYVAULT_TOKEN
-
-# Renew token if needed
-rustyvault token renew
-
-

3. Key Not Found

-
# List available keys
-rustyvault list transit/keys
-
-# Create missing key
-rustyvault write -f transit/keys/provisioning-main
-
-

4. TLS Verification Failed

-
# Disable TLS verification (dev only)
-export RUSTYVAULT_TLS_VERIFY=false
-
-# Or add CA certificate
-export RUSTYVAULT_CACERT=/path/to/ca.crt
-
-
-

Migration from Other Backends

-

From HashiCorp Vault

-

RustyVault is API-compatible, minimal changes required:

-
# Old config (Vault)
-[kms]
-type = "vault"
-address = "https://vault.example.com:8200"
-token = "${VAULT_TOKEN}"
-
-# New config (RustyVault)
-[kms]
-type = "rustyvault"
-server_url = "http://rustyvault.example.com:8200"
-token = "${RUSTYVAULT_TOKEN}"
-
-

From Age

-

Re-encrypt existing encrypted files:

-
# 1. Decrypt with Age
-provisioning kms decrypt --backend age secrets.enc > secrets.plain
-
-# 2. Encrypt with RustyVault
-provisioning kms encrypt --backend rustyvault secrets.plain > secrets.rustyvault.enc
-
-
-

Security Considerations

-

Best Practices

-
    -
  1. Enable TLS: Always use HTTPS in production
  2. -
  3. Rotate Tokens: Regularly rotate RustyVault tokens
  4. -
  5. Least Privilege: Use policies to restrict token permissions
  6. -
  7. Audit Logging: Enable and monitor audit logs
  8. -
  9. Backup Keys: Secure backup of unseal keys and root token
  10. -
  11. Network Isolation: Run RustyVault in isolated network segment
  12. -
-

Token Policies

-

Create restricted policy for KMS service:

-
# kms-policy.hcl
-path "transit/encrypt/provisioning-main" {
-  capabilities = ["update"]
-}
-
-path "transit/decrypt/provisioning-main" {
-  capabilities = ["update"]
-}
-
-path "transit/datakey/plaintext/provisioning-main" {
-  capabilities = ["update"]
-}
-
-

Apply policy:

-
rustyvault policy write kms-service kms-policy.hcl
-rustyvault token create -policy=kms-service
-
-
-

Performance

-

Benchmarks (Estimated)

-
- - - -
OperationLatencyThroughput
Encrypt5-15 ms2,000-5,000 ops/sec
Decrypt5-15 ms2,000-5,000 ops/sec
Generate Key10-20 ms1,000-2,000 ops/sec
-
-

Actual performance depends on hardware, network, and RustyVault configuration

-

Optimization Tips

-
    -
  1. Connection Pooling: Reuse HTTP connections
  2. -
  3. Batching: Batch multiple operations when possible
  4. -
  5. Caching: Cache data keys for envelope encryption
  6. -
  7. Local Unseal: Use auto-unseal for faster restarts
  8. -
-
- -
    -
  • KMS Service: docs/user/CONFIG_ENCRYPTION_GUIDE.md
  • -
  • Dynamic Secrets: docs/user/DYNAMIC_SECRETS_QUICK_REFERENCE.md
  • -
  • Security System: docs/architecture/adr-009-security-system-complete.md
  • -
  • RustyVault GitHub: https://github.com/Tongsuo-Project/RustyVault
  • -
-
-

Support

- -
-

Last Updated: 2025-10-08 -Maintained By: Architecture Team

-

SecretumVault KMS Backend Guide

-

SecretumVault is an enterprise-grade, post-quantum ready secrets management system integrated as the fourth KMS backend in the provisioning platform, -alongside Age (dev), Cosmian (prod), and RustyVault (self-hosted).

-

Overview

-

What is SecretumVault

-

SecretumVault provides:

-
    -
  • Post-Quantum Cryptography: Ready for quantum-resistant algorithms
  • -
  • Enterprise Features: Policy-as-code (Cedar), audit logging, compliance tracking
  • -
  • Multiple Storage Backends: Filesystem (dev), SurrealDB (staging), etcd (prod), PostgreSQL
  • -
  • Transit Engine: Encryption-as-a-service for data protection
  • -
  • KV Engine: Versioned secret storage with rotation policies
  • -
  • High Availability: Seamless transition from embedded to distributed modes
  • -
-

When to Use SecretumVault

-
- - - - -
ScenarioBackendReason
Local developmentAgeSimple, no dependencies
Testing/StagingSecretumVaultEnterprise features, production-like
ProductionCosmian or SecretumVaultEnterprise security, compliance
Self-Hosted EnterpriseSecretumVault + etcdFull control, HA support
-
-

Deployment Modes

-

Development Mode (Embedded)

-

Storage: Filesystem (~/.config/provisioning/secretumvault/data) -Performance: <3 ms encryption/decryption -Setup: No separate service required -Best For: Local development and testing

-
export PROVISIONING_ENV=dev
-export KMS_DEV_BACKEND=secretumvault
-provisioning kms encrypt config.yaml
-
-

Staging Mode (Service + SurrealDB)

-

Storage: SurrealDB (document database) -Performance: <10 ms operations -Setup: Start SecretumVault service separately -Best For: Team testing, staging environments

-
# Start SecretumVault service
-secretumvault server --storage-backend surrealdb
-
-# Configure provisioning
-export PROVISIONING_ENV=staging
-export SECRETUMVAULT_URL=http://localhost:8200
-export SECRETUMVAULT_TOKEN=your-auth-token
-
-provisioning kms encrypt config.yaml
-
-

Production Mode (Service + etcd)

-

Storage: etcd cluster (3+ nodes) -Performance: <10 ms operations (ninety-ninth percentile) -Setup: etcd cluster + SecretumVault service -Best For: Production deployments with HA requirements

-
# Setup etcd cluster (3 nodes minimum)
-etcd --name etcd1 --data-dir etcd1-data \
-     --advertise-client-urls http://localhost:2379 \
-     --listen-client-urls http://localhost:2379
-
-# Start SecretumVault with etcd
-secretumvault server \
-  --storage-backend etcd \
-  --etcd-endpoints http://etcd1:2379,http://etcd2:2379,http://etcd3:2379
-
-# Configure provisioning
-export PROVISIONING_ENV=prod
-export SECRETUMVAULT_URL=https://your-secretumvault:8200
-export SECRETUMVAULT_TOKEN=your-auth-token
-export SECRETUMVAULT_STORAGE=etcd
-
-provisioning kms encrypt config.yaml
-
-

Configuration

-

Environment Variables

-
- - - - - - - - -
VariablePurposeDefaultExample
PROVISIONING_ENVDeployment environmentdevstaging, prod
KMS_DEV_BACKENDDevelopment KMS backendagesecretumvault
KMS_STAGING_BACKENDStaging KMS backendsecretumvaultcosmian
KMS_PROD_BACKENDProduction KMS backendcosmiansecretumvault
SECRETUMVAULT_URLServer URLhttp://localhost:8200https://kms.example.com
SECRETUMVAULT_TOKENAuthentication token(none)(Bearer token)
SECRETUMVAULT_STORAGEStorage backendfilesystemsurrealdb, etcd
SECRETUMVAULT_TLS_VERIFYVerify TLS certificatesfalsetrue
-
-

Configuration Files

-

System Defaults: provisioning/config/secretumvault.toml -KMS Config: provisioning/config/kms.toml

-

Edit these files to customize:

-
    -
  • Engine mount points
  • -
  • Key names
  • -
  • Storage backend settings
  • -
  • Performance tuning
  • -
  • Audit logging
  • -
  • Key rotation policies
  • -
-

Operations

-

Encrypt Data

-
# Encrypt a file
-provisioning kms encrypt config.yaml
-# Output: config.yaml.enc
-
-# Encrypt with specific key
-provisioning kms encrypt --key-id my-key config.yaml
-
-# Encrypt and sign
-provisioning kms encrypt --sign config.yaml
-
-

Decrypt Data

-
# Decrypt a file
-provisioning kms decrypt config.yaml.enc
-# Output: config.yaml
-
-# Decrypt with specific key
-provisioning kms decrypt --key-id my-key config.yaml.enc
-
-# Verify and decrypt
-provisioning kms decrypt --verify config.yaml.enc
-
-

Generate Data Keys

-
# Generate AES-256 data key
-provisioning kms generate-key --spec AES256
-
-# Generate AES-128 data key
-provisioning kms generate-key --spec AES128
-
-# Generate RSA-4096 key
-provisioning kms generate-key --spec RSA4096
-
-

Health and Status

-
# Check KMS health
-provisioning kms health
-
-# Get KMS version
-provisioning kms version
-
-# Detailed KMS status
-provisioning kms status
-
-

Key Rotation

-
# Rotate encryption key
-provisioning kms rotate-key provisioning-master
-
-# Check rotation policy
-provisioning kms rotation-policy provisioning-master
-
-# Update rotation interval
-provisioning kms update-rotation 90  # Rotate every 90 days
-
-

Storage Backends

-

Filesystem (Development)

-

Local file-based storage with no external dependencies.

-

Pros:

-
    -
  • Zero external dependencies
  • -
  • Fast (local disk access)
  • -
  • Easy to inspect/backup
  • -
-

Cons:

-
    -
  • Single-node only
  • -
  • No HA
  • -
  • Manual backup required
  • -
-

Configuration:

-
[secretumvault.storage.filesystem]
-data_dir = "~/.config/provisioning/secretumvault/data"
-permissions = "0700"
-
-

SurrealDB (Staging)

-

Embedded or standalone document database.

-

Pros:

-
    -
  • Embedded or distributed
  • -
  • Flexible schema
  • -
  • Real-time syncing
  • -
-

Cons:

-
    -
  • More complex than filesystem
  • -
  • New technology (less tested than etcd)
  • -
-

Configuration:

-
[secretumvault.storage.surrealdb]
-connection_url = "ws://localhost:8000"
-namespace = "provisioning"
-database = "secrets"
-username = "${SECRETUMVAULT_SURREALDB_USER:-admin}"
-password = "${SECRETUMVAULT_SURREALDB_PASS:-password}"
-
-

etcd (Production)

-

Distributed key-value store for high availability.

-

Pros:

-
    -
  • Proven in production
  • -
  • HA and disaster recovery
  • -
  • Consistent consensus protocol
  • -
  • Multi-site replication
  • -
-

Cons:

-
    -
  • Operational complexity
  • -
  • Requires 3+ nodes
  • -
  • More infrastructure
  • -
-

Configuration:

-
[secretumvault.storage.etcd]
-endpoints = ["http://etcd1:2379", "http://etcd2:2379", "http://etcd3:2379"]
-tls_enabled = true
-tls_cert_file = "/path/to/client.crt"
-tls_key_file = "/path/to/client.key"
-
-

PostgreSQL (Enterprise)

-

Relational database backend.

-

Pros:

-
    -
  • Mature and reliable
  • -
  • Advanced querying
  • -
  • Full ACID transactions
  • -
-

Cons:

-
    -
  • Schema requirements
  • -
  • External database dependency
  • -
  • More operational overhead
  • -
-

Configuration:

-
[secretumvault.storage.postgresql]
-connection_url = "postgresql://user:pass@localhost:5432/secretumvault"
-max_connections = 10
-ssl_mode = "require"
-
-

Troubleshooting

-

Connection Errors

-

Error: “Failed to connect to SecretumVault service”

-

Solutions:

-
    -
  1. -

    Verify SecretumVault is running:

    -
    curl http://localhost:8200/v1/sys/health
    -
    -
  2. -
  3. -

    Check server URL configuration:

    -
    provisioning config show secretumvault.server_url
    -
    -
  4. -
  5. -

    Verify network connectivity:

    -
    nc -zv localhost 8200
    -
    -
  6. -
-

Authentication Failures

-

Error: “Authentication failed: X-Vault-Token missing or invalid”

-

Solutions:

-
    -
  1. -

    Set authentication token:

    -
    export SECRETUMVAULT_TOKEN=your-token
    -
    -
  2. -
  3. -

    Verify token is still valid:

    -
    provisioning secrets verify-token
    -
    -
  4. -
  5. -

    Get new token from SecretumVault:

    -
    secretumvault auth login
    -
    -
  6. -
-

Storage Backend Errors

-

Filesystem Backend

-

Error: “Permission denied: ~/.config/provisioning/secretumvault/data”

-

Solution: Check directory permissions:

-
ls -la ~/.config/provisioning/secretumvault/
-# Should be: drwx------ (0700)
-chmod 700 ~/.config/provisioning/secretumvault/data
-
-

SurrealDB Backend

-

Error: “Failed to connect to SurrealDB at ws://localhost:8000”

-

Solution: Start SurrealDB first:

-
surreal start --bind 0.0.0.0:8000 file://secretum.db
-
-

etcd Backend

-

Error: “etcd cluster unhealthy”

-

Solution: Check etcd cluster status:

-
etcdctl member list
-etcdctl endpoint health
-
-# Verify all nodes are reachable
-curl http://etcd1:2379/health
-curl http://etcd2:2379/health
-curl http://etcd3:2379/health
-
-

Performance Issues

-

Slow encryption/decryption:

-
    -
  1. -

    Check network latency (for service mode):

    -
    ping -c 3 secretumvault-server
    -
    -
  2. -
  3. -

    Monitor SecretumVault performance:

    -
    provisioning kms metrics
    -
    -
  4. -
  5. -

    Check storage backend performance:

    -
      -
    • Filesystem: Check disk I/O
    • -
    • SurrealDB: Monitor database load
    • -
    • etcd: Check cluster consensus state
    • -
    -
  6. -
-

High memory usage:

-
    -
  1. -

    Check cache settings:

    -
    provisioning config show secretumvault.performance.cache_ttl
    -
    -
  2. -
  3. -

    Reduce cache TTL:

    -
    provisioning config set secretumvault.performance.cache_ttl 60
    -
    -
  4. -
  5. -

    Monitor active connections:

    -
    provisioning kms status
    -
    -
  6. -
-

Debugging

-

Enable debug logging:

-
export RUST_LOG=debug
-provisioning kms encrypt config.yaml
-
-

Check configuration:

-
provisioning config show secretumvault
-provisioning config validate
-
-

Test connectivity:

-
provisioning kms health --verbose
-
-

View audit logs:

-
tail -f ~/.config/provisioning/logs/secretumvault-audit.log
-
-

Security Best Practices

-

Token Management

-
    -
  • Never commit tokens to version control
  • -
  • Use environment variables or .env files (gitignored)
  • -
  • Rotate tokens regularly
  • -
  • Use different tokens per environment
  • -
-

TLS/SSL

-
    -
  • -

    Enable TLS verification in production:

    -
    export SECRETUMVAULT_TLS_VERIFY=true
    -
    -
  • -
  • -

    Use proper certificates (not self-signed in production)

    -
  • -
  • -

    Pin certificates to prevent MITM attacks

    -
  • -
-

Access Control

-
    -
  • Restrict who can access SecretumVault admin UI
  • -
  • Use strong authentication (MFA preferred)
  • -
  • Audit all secrets access
  • -
  • Implement least-privilege principle
  • -
-

Key Rotation

-
    -
  • Rotate keys regularly (every 90 days recommended)
  • -
  • Keep old versions for decryption
  • -
  • Test rotation procedures in staging first
  • -
  • Monitor rotation status
  • -
-

Backup and Recovery

-
    -
  • Backup SecretumVault data regularly
  • -
  • Test restore procedures
  • -
  • Store backups securely
  • -
  • Keep backup keys separate from encrypted data
  • -
-

Migration Guide

-

From Age to SecretumVault

-
# Export all secrets encrypted with Age
-provisioning secrets export --backend age --output secrets.json
-
-# Import into SecretumVault
-provisioning secrets import --backend secretumvault secrets.json
-
-# Re-encrypt all configurations
-find workspace/infra -name "*.enc" -exec provisioning kms reencrypt {} \;
-
-

From RustyVault to SecretumVault

-
# Both use Vault-compatible APIs, so migration is simpler:
-# 1. Ensure SecretumVault keys are available
-# 2. Update KMS_PROD_BACKEND=secretumvault
-# 3. Test with staging first
-# 4. Monitor during transition
-
-

From Cosmian to SecretumVault

-
# For production migration:
-# 1. Set up SecretumVault with etcd backend
-# 2. Verify high availability is working
-# 3. Run parallel encryption with both systems
-# 4. Validate all decryptions work
-# 5. Update KMS_PROD_BACKEND=secretumvault
-# 6. Monitor closely for 24 hours
-# 7. Keep Cosmian as fallback for 7 days
-
-

Performance Tuning

-

Development (Filesystem)

-
[secretumvault.performance]
-max_connections = 5
-connection_timeout = 5
-request_timeout = 30
-cache_ttl = 60
-
-

Staging (SurrealDB)

-
[secretumvault.performance]
-max_connections = 20
-connection_timeout = 5
-request_timeout = 30
-cache_ttl = 300
-
-

Production (etcd)

-
[secretumvault.performance]
-max_connections = 50
-connection_timeout = 10
-request_timeout = 30
-cache_ttl = 600
-
-

Compliance and Audit

-

Audit Logging

-

All operations are logged:

-
# View recent audit events
-provisioning kms audit --limit 100
-
-# Export audit logs
-provisioning kms audit export --output audit.json
-
-# Audit specific operations
-provisioning kms audit --action encrypt --from 24h
-
-

Compliance Reports

-
# Generate compliance report
-provisioning compliance report --backend secretumvault
-
-# GDPR data export
-provisioning compliance gdpr-export user@example.com
-
-# SOC2 audit trail
-provisioning compliance soc2-export --output soc2-audit.json
-
-

Advanced Topics

-

Cedar Authorization Policies

-

Enable fine-grained access control:

-
# Enable Cedar integration
-provisioning config set secretumvault.authorization.cedar_enabled true
-
-# Define access policies
-provisioning policy define-kms-access user@example.com admin
-provisioning policy define-kms-access deployer@example.com deploy-only
-
-

Key Encryption Keys (KEK)

-

Configure master key settings:

-
# Set KEK rotation interval
-provisioning config set secretumvault.rotation.rotation_interval_days 90
-
-# Enable automatic rotation
-provisioning config set secretumvault.rotation.auto_rotate true
-
-# Retain old versions for decryption
-provisioning config set secretumvault.rotation.retain_old_versions true
-
-

Multi-Region Setup

-

For production deployments across regions:

-
# Region 1
-export SECRETUMVAULT_URL=https://kms-us-east.example.com
-export SECRETUMVAULT_STORAGE=etcd
-
-# Region 2 (for failover)
-export SECRETUMVAULT_URL_FALLBACK=https://kms-us-west.example.com
-
-

Support and Resources

-
    -
  • Documentation: docs/user/SECRETUMVAULT_KMS_GUIDE.md (this file)
  • -
  • Configuration Template: provisioning/config/secretumvault.toml
  • -
  • KMS Configuration: provisioning/config/kms.toml
  • -
  • Issues: Report issues with provisioning kms debug
  • -
  • Logs: Check ~/.config/provisioning/logs/secretumvault-*.log
  • -
-

See Also

- -

SSH Temporal Keys - User Guide

-

Quick Start

-

Generate and Connect with Temporary Key

-

The fastest way to use temporal SSH keys:

-
# Auto-generate, deploy, and connect (key auto-revoked after disconnect)
-ssh connect server.example.com
-
-# Connect with custom user and TTL
-ssh connect server.example.com --user deploy --ttl 30 min
-
-# Keep key active after disconnect
-ssh connect server.example.com --keep
-
-

Manual Key Management

-

For more control over the key lifecycle:

-
# 1. Generate key
-ssh generate-key server.example.com --user root --ttl 1hr
-
-# Output:
-# ✓ SSH key generated successfully
-#   Key ID: abc-123-def-456
-#   Type: dynamickeypair
-#   User: root
-#   Server: server.example.com
-#   Expires: 2024-01-01T13:00:00Z
-#   Fingerprint: SHA256:...
-#
-# Private Key (save securely):
-# -----BEGIN OPENSSH PRIVATE KEY-----
-# ...
-# -----END OPENSSH PRIVATE KEY-----
-
-# 2. Deploy key to server
-ssh deploy-key abc-123-def-456
-
-# 3. Use the private key to connect
-ssh -i /path/to/private/key root@server.example.com
-
-# 4. Revoke when done
-ssh revoke-key abc-123-def-456
-
-

Key Features

-

Automatic Expiration

-

All keys expire automatically after their TTL:

-
    -
  • Default TTL: 1 hour
  • -
  • Configurable: From 5 minutes to 24 hours
  • -
  • Background Cleanup: Automatic removal from servers every 5 minutes
  • -
-

Multiple Key Types

-

Choose the right key type for your use case:

-
- - - -
TypeDescriptionUse Case
dynamic (default)Generated Ed25519 keysQuick SSH access
caVault CA-signed certificateEnterprise with SSH CA
otpVault one-time passwordSingle-use access
-
-

Security Benefits

-

✅ No static SSH keys to manage -✅ Short-lived credentials (1 hour default) -✅ Automatic cleanup on expiration -✅ Audit trail for all operations -✅ Private keys never stored on disk

-

Common Usage Patterns

-

Development Workflow

-
# Quick SSH for debugging
-ssh connect dev-server.local --ttl 30 min
-
-# Execute commands
-ssh root@dev-server.local "systemctl status nginx"
-
-# Connection closes, key auto-revokes
-
-

Production Deployment

-
# Generate key with longer TTL for deployment
-ssh generate-key prod-server.example.com --ttl 2hr
-
-# Deploy to server
-ssh deploy-key <key-id>
-
-# Run deployment script
-ssh -i /tmp/deploy-key root@prod-server.example.com < deploy.sh
-
-# Manual revoke when done
-ssh revoke-key <key-id>
-
-

Multi-Server Access

-
# Generate one key
-ssh generate-key server01.example.com --ttl 1hr
-
-# Use the same private key for multiple servers (if you have provisioning access)
-# Note: Currently each key is server-specific, multi-server support coming soon
-
-

Command Reference

-

ssh generate-key

-

Generate a new temporal SSH key.

-

Syntax:

-
ssh generate-key <server> [options]
-
-

Options:

-
    -
  • --user <name>: SSH user (default: root)
  • -
  • --ttl <duration>: Key lifetime (default: 1hr)
  • -
  • --type <ca|otp|dynamic>: Key type (default: dynamic)
  • -
  • --ip <address>: Allowed IP (OTP mode only)
  • -
  • --principal <name>: Principal (CA mode only)
  • -
-

Examples:

-
# Basic usage
-ssh generate-key server.example.com
-
-# Custom user and TTL
-ssh generate-key server.example.com --user deploy --ttl 30 min
-
-# Vault CA mode
-ssh generate-key server.example.com --type ca --principal admin
-
-

ssh deploy-key

-

Deploy a generated key to the target server.

-

Syntax:

-
ssh deploy-key <key-id>
-
-

Example:

-
ssh deploy-key abc-123-def-456
-
-

ssh list-keys

-

List all active SSH keys.

-

Syntax:

-
ssh list-keys [--expired]
-
-

Examples:

-
# List active keys
-ssh list-keys
-
-# Show only deployed keys
-ssh list-keys | where deployed == true
-
-# Include expired keys
-ssh list-keys --expired
-
-

ssh get-key

-

Get detailed information about a specific key.

-

Syntax:

-
ssh get-key <key-id>
-
-

Example:

-
ssh get-key abc-123-def-456
-
-

ssh revoke-key

-

Immediately revoke a key (removes from server and tracking).

-

Syntax:

-
ssh revoke-key <key-id>
-
-

Example:

-
ssh revoke-key abc-123-def-456
-
-

ssh connect

-

Auto-generate, deploy, connect, and revoke (all-in-one).

-

Syntax:

-
ssh connect <server> [options]
-
-

Options:

-
    -
  • --user <name>: SSH user (default: root)
  • -
  • --ttl <duration>: Key lifetime (default: 1hr)
  • -
  • --type <ca|otp|dynamic>: Key type (default: dynamic)
  • -
  • --keep: Don’t revoke after disconnect
  • -
-

Examples:

-
# Quick connection
-ssh connect server.example.com
-
-# Custom user
-ssh connect server.example.com --user deploy
-
-# Keep key active after disconnect
-ssh connect server.example.com --keep
-
-

ssh stats

-

Show SSH key statistics.

-

Syntax:

-
ssh stats
-
-

Example Output:

-
SSH Key Statistics:
-  Total generated: 42
-  Active keys: 10
-  Expired keys: 32
-
-Keys by type:
-  dynamic: 35
-  otp: 5
-  certificate: 2
-
-Last cleanup: 2024-01-01T12:00:00Z
-  Cleaned keys: 5
-
-

ssh cleanup

-

Manually trigger cleanup of expired keys.

-

Syntax:

-
ssh cleanup
-
-

ssh test

-

Run a quick test of the SSH key system.

-

Syntax:

-
ssh test <server> [--user <name>]
-
-

Example:

-
ssh test server.example.com --user root
-
-

ssh help

-

Show help information.

-

Syntax:

-
ssh help
-
-

Duration Formats

-

The --ttl option accepts various duration formats:

-
- - - - -
FormatExampleMeaning
Minutes30 min30 minutes
Hours2hr2 hours
Mixed1hr 30 min1.5 hours
Seconds3600sec1 hour
-
-

Working with Private Keys

-

Saving Private Keys

-

When you generate a key, save the private key immediately:

-
# Generate and save to file
-ssh generate-key server.example.com | get private_key | save -f ~/.ssh/temp_key
-chmod 600 ~/.ssh/temp_key
-
-# Use the key
-ssh -i ~/.ssh/temp_key root@server.example.com
-
-# Cleanup
-rm ~/.ssh/temp_key
-
-

Using SSH Agent

-

Add the temporary key to your SSH agent:

-
# Generate key and extract private key
-ssh generate-key server.example.com | get private_key | save -f /tmp/temp_key
-chmod 600 /tmp/temp_key
-
-# Add to agent
-ssh-add /tmp/temp_key
-
-# Connect (agent provides the key automatically)
-ssh root@server.example.com
-
-# Remove from agent
-ssh-add -d /tmp/temp_key
-rm /tmp/temp_key
-
-

Troubleshooting

-

Key Deployment Fails

-

Problem: ssh deploy-key returns error

-

Solutions:

-
    -
  1. -

    Check SSH connectivity to server:

    -
    ssh root@server.example.com
    -
    -
  2. -
  3. -

    Verify provisioning key is configured:

    -
    echo $PROVISIONING_SSH_KEY
    -
    -
  4. -
  5. -

    Check server SSH daemon:

    -
    ssh root@server.example.com "systemctl status sshd"
    -
    -
  6. -
-

Private Key Not Working

-

Problem: SSH connection fails with “Permission denied (publickey)”

-

Solutions:

-
    -
  1. -

    Verify key was deployed:

    -
    ssh list-keys | where id == "<key-id>"
    -
    -
  2. -
  3. -

    Check key hasn’t expired:

    -
    ssh get-key <key-id> | get expires_at
    -
    -
  4. -
  5. -

    Verify private key permissions:

    -
    chmod 600 /path/to/private/key
    -
    -
  6. -
-

Cleanup Not Running

-

Problem: Expired keys not being removed

-

Solutions:

-
    -
  1. -

    Check orchestrator is running:

    -
    curl http://localhost:9090/health
    -
    -
  2. -
  3. -

    Trigger manual cleanup:

    -
    ssh cleanup
    -
    -
  4. -
  5. -

    Check orchestrator logs:

    -
    tail -f ./data/orchestrator.log | grep SSH
    -
    -
  6. -
-

Best Practices

-

Security

-
    -
  1. -

    Short TTLs: Use the shortest TTL that works for your task

    -
    ssh connect server.example.com --ttl 30 min
    -
    -
  2. -
  3. -

    Immediate Revocation: Revoke keys when you’re done

    -
    ssh revoke-key <key-id>
    -
    -
  4. -
  5. -

    Private Key Handling: Never share or commit private keys

    -
    # Save to temp location, delete after use
    -ssh generate-key server.example.com | get private_key | save -f /tmp/key
    -# ... use key ...
    -rm /tmp/key
    -
    -
  6. -
-

Workflow Integration

-
    -
  1. -

    Automated Deployments: Generate key in CI/CD

    -
    #!/bin/bash
    -KEY_ID=$(ssh generate-key prod.example.com --ttl 1hr | get id)
    -ssh deploy-key $KEY_ID
    -# Run deployment
    -ansible-playbook deploy.yml
    -ssh revoke-key $KEY_ID
    -
    -
  2. -
  3. -

    Interactive Use: Use ssh connect for quick access

    -
    ssh connect dev.example.com
    -
    -
  4. -
  5. -

    Monitoring: Check statistics regularly

    -
    ssh stats
    -
    -
  6. -
-

Advanced Usage

-

Vault Integration

-

If your organization uses HashiCorp Vault:

- -
# Generate CA-signed certificate
-ssh generate-key server.example.com --type ca --principal admin --ttl 1hr
-
-# Vault signs your public key
-# Server must trust Vault CA certificate
-
-

Setup (one-time):

-
# On servers, add to /etc/ssh/sshd_config:
-TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem
-
-# Get Vault CA public key:
-vault read -field=public_key ssh/config/ca | \
-  sudo tee /etc/ssh/trusted-user-ca-keys.pem
-
-# Restart SSH:
-sudo systemctl restart sshd
-
-

OTP Mode

-
# Generate one-time password
-ssh generate-key server.example.com --type otp --ip 192.168.1.100
-
-# Use the OTP to connect (single use only)
-
-

Scripting

-

Use in scripts for automated operations:

-
# deploy.nu
-def deploy [target: string] {
-    let key = (ssh generate-key $target --ttl 1hr)
-    ssh deploy-key $key.id
-
-    # Run deployment
-    try {
-        ssh $"root@($target)" "bash /path/to/deploy.sh"
-    } catch {
-        print "Deployment failed"
-    }
-
-    # Always cleanup
-    ssh revoke-key $key.id
-}
-
-

API Integration

-

For programmatic access, use the REST API:

-
# Generate key
-curl -X POST http://localhost:9090/api/v1/ssh/generate \
-  -H "Content-Type: application/json" \
-  -d '{
-    "key_type": "dynamickeypair",
-    "user": "root",
-    "target_server": "server.example.com",
-    "ttl_seconds": 3600
-  }'
-
-# Deploy key
-curl -X POST http://localhost:9090/api/v1/ssh/{key_id}/deploy
-
-# List keys
-curl http://localhost:9090/api/v1/ssh/keys
-
-# Get stats
-curl http://localhost:9090/api/v1/ssh/stats
-
-

FAQ

-

Q: Can I use the same key for multiple servers? -A: Currently, each key is tied to a specific server. Multi-server support is planned.

-

Q: What happens if the orchestrator crashes? -A: Keys in memory are lost, but keys already deployed to servers remain until their expiration time.

-

Q: Can I extend the TTL of an existing key? -A: No, you must generate a new key. This is by design for security.

-

Q: What’s the maximum TTL? -A: Configurable by admin, default maximum is 24 hours.

-

Q: Are private keys stored anywhere? -A: Private keys exist only in memory during generation and are shown once to the user. They are never written to disk by the system.

-

Q: What happens if cleanup fails? -A: The key remains in authorized_keys until the next cleanup run. You can trigger manual cleanup with ssh cleanup.

-

Q: Can I use this with non-root users? -A: Yes, use --user <username> when generating the key.

-

Q: How do I know when my key will expire? -A: Use ssh get-key <key-id> to see the exact expiration timestamp.

-

Support

-

For issues or questions:

-
    -
  1. Check orchestrator logs: tail -f ./data/orchestrator.log
  2. -
  3. Run diagnostics: ssh stats
  4. -
  5. Test connectivity: ssh test server.example.com
  6. -
  7. Review documentation: SSH_KEY_MANAGEMENT.md
  8. -
-

See Also

-
    -
  • Architecture: SSH_KEY_MANAGEMENT.md
  • -
  • Implementation: SSH_IMPLEMENTATION_SUMMARY.md
  • -
  • Configuration: config/ssh-config.toml.example
  • -
-

Nushell Plugin Integration Guide

-

Version: 1.0.0 -Last Updated: 2025-10-09 -Target Audience: Developers, DevOps Engineers, System Administrators

-
-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Why Native Plugins?
  4. -
  5. Prerequisites
  6. -
  7. Installation
  8. -
  9. Quick Start (5 Minutes)
  10. -
  11. Authentication Plugin (nu_plugin_auth)
  12. -
  13. KMS Plugin (nu_plugin_kms)
  14. -
  15. Orchestrator Plugin (nu_plugin_orchestrator)
  16. -
  17. Integration Examples
  18. -
  19. Best Practices
  20. -
  21. Troubleshooting
  22. -
  23. Migration Guide
  24. -
  25. Advanced Configuration
  26. -
  27. Security Considerations
  28. -
  29. FAQ
  30. -
-
-

Overview

-

The Provisioning Platform provides three native Nushell plugins that dramatically improve performance and user experience compared to traditional HTTP -API calls:

-
- - - -
PluginPurposePerformance Gain
nu_plugin_authJWT authentication, MFA, session management20% faster
nu_plugin_kmsEncryption/decryption with multiple KMS backends10x faster
nu_plugin_orchestratorOrchestrator operations without HTTP overhead50x faster
-
-

Architecture Benefits

-
Traditional HTTP Flow:
-User Command → HTTP Request → Network → Server Processing → Response → Parse JSON
-  Total: ~50-100 ms per operation
-
-Plugin Flow:
-User Command → Direct Rust Function Call → Return Nushell Data Structure
-  Total: ~1-10 ms per operation
-
-

Key Features

-

Performance: 10-50x faster than HTTP API -✅ Type Safety: Full Nushell type system integration -✅ Pipeline Support: Native Nushell data structures -✅ Offline Capability: KMS and orchestrator work without network -✅ OS Integration: Native keyring for secure token storage -✅ Graceful Fallback: HTTP still available if plugins not installed

-
-

Why Native Plugins

-

Performance Comparison

-

Real-world benchmarks from production workload:

-
- - - - - - - - - - -
OperationHTTP APIPluginImprovementSpeedup
KMS Encrypt (RustyVault)~50 ms~5 ms-45 ms10x
KMS Decrypt (RustyVault)~50 ms~5 ms-45 ms10x
KMS Encrypt (Age)~30 ms~3 ms-27 ms10x
KMS Decrypt (Age)~30 ms~3 ms-27 ms10x
Orchestrator Status~30 ms~1 ms-29 ms30x
Orchestrator Tasks List~50 ms~5 ms-45 ms10x
Orchestrator Validate~100 ms~10 ms-90 ms10x
Auth Login~100 ms~80 ms-20 ms1.25x
Auth Verify~50 ms~10 ms-40 ms5x
Auth MFA Verify~80 ms~60 ms-20 ms1.3x
-
-

Use Case: Batch Processing

-

Scenario: Encrypt 100 configuration files

-
# HTTP API approach
-ls configs/*.yaml | each { |file|
-    http post http://localhost:9998/encrypt { data: (open $file) }
-} | save encrypted/
-# Total time: ~5 seconds (50 ms × 100)
-
-# Plugin approach
-ls configs/*.yaml | each { |file|
-    kms encrypt (open $file) --backend rustyvault
-} | save encrypted/
-# Total time: ~0.5 seconds (5 ms × 100)
-# Result: 10x faster
-
-

Developer Experience Benefits

-

1. Native Nushell Integration

-
# HTTP: Parse JSON, check status codes
-let result = http post http://localhost:9998/encrypt { data: "secret" }
-if $result.status == "success" {
-    $result.encrypted
-} else {
-    error make { msg: $result.error }
-}
-
-# Plugin: Direct return values
-kms encrypt "secret"
-# Returns encrypted string directly, errors use Nushell's error system
-
-

2. Pipeline Friendly

-
# HTTP: Requires wrapping, JSON parsing
-["secret1", "secret2"] | each { |s|
-    (http post http://localhost:9998/encrypt { data: $s }).encrypted
-}
-
-# Plugin: Natural pipeline flow
-["secret1", "secret2"] | each { |s| kms encrypt $s }
-
-

3. Tab Completion

-
# All plugin commands have full tab completion
-kms <TAB>
-# → encrypt, decrypt, generate-key, status, backends
-
-kms encrypt --<TAB>
-# → --backend, --key, --context
-
-
-

Prerequisites

-

Required Software

-
- - - -
SoftwareMinimum VersionPurpose
Nushell0.107.1Shell and plugin runtime
Rust1.75+Building plugins from source
Cargo(included with Rust)Build tool
-
-

Optional Dependencies

-
- - - - -
SoftwarePurposePlatform
gnome-keyringSecure token storageLinux
kwalletSecure token storageLinux (KDE)
ageAge encryption backendAll
RustyVaultHigh-performance KMSAll
-
-

Platform Support

-
- - - - -
PlatformStatusNotes
macOS✅ FullKeychain integration
Linux✅ FullRequires keyring service
Windows✅ FullCredential Manager integration
FreeBSD⚠️ PartialNo keyring integration
-
-
-

Installation

-

Step 1: Clone or Navigate to Plugin Directory

-
cd /Users/Akasha/project-provisioning/provisioning/core/plugins/nushell-plugins
-
-

Step 2: Build All Plugins

-
# Build in release mode (optimized for performance)
-cargo build --release --all
-
-# Or build individually
-cargo build --release -p nu_plugin_auth
-cargo build --release -p nu_plugin_kms
-cargo build --release -p nu_plugin_orchestrator
-
-

Expected output:

-
   Compiling nu_plugin_auth v0.1.0
-   Compiling nu_plugin_kms v0.1.0
-   Compiling nu_plugin_orchestrator v0.1.0
-    Finished release [optimized] target(s) in 2m 15s
-
-

Step 3: Register Plugins with Nushell

-
# Register all three plugins
-plugin add target/release/nu_plugin_auth
-plugin add target/release/nu_plugin_kms
-plugin add target/release/nu_plugin_orchestrator
-
-# On macOS, full paths:
-plugin add $PWD/target/release/nu_plugin_auth
-plugin add $PWD/target/release/nu_plugin_kms
-plugin add $PWD/target/release/nu_plugin_orchestrator
-
-

Step 4: Verify Installation

-
# List registered plugins
-plugin list | where name =~ "auth|kms|orch"
-
-# Test each plugin
-auth --help
-kms --help
-orch --help
-
-

Expected output:

-
╭───┬─────────────────────────┬─────────┬───────────────────────────────────╮
-│ # │          name           │ version │           filename                │
-├───┼─────────────────────────┼─────────┼───────────────────────────────────┤
-│ 0 │ nu_plugin_auth          │ 0.1.0   │ .../nu_plugin_auth                │
-│ 1 │ nu_plugin_kms           │ 0.1.0   │ .../nu_plugin_kms                 │
-│ 2 │ nu_plugin_orchestrator  │ 0.1.0   │ .../nu_plugin_orchestrator        │
-╰───┴─────────────────────────┴─────────┴───────────────────────────────────╯
-
-

Step 5: Configure Environment (Optional)

-
# Add to ~/.config/nushell/env.nu
-$env.RUSTYVAULT_ADDR = "http://localhost:8200"
-$env.RUSTYVAULT_TOKEN = "your-vault-token"
-$env.CONTROL_CENTER_URL = "http://localhost:3000"
-$env.ORCHESTRATOR_DATA_DIR = "/opt/orchestrator/data"
-
-
-

Quick Start (5 Minutes)

-

1. Authentication Workflow

-
# Login (password prompted securely)
-auth login admin
-# ✓ Login successful
-# User: admin
-# Role: Admin
-# Expires: 2025-10-09T14:30:00Z
-
-# Verify session
-auth verify
-# {
-#   "active": true,
-#   "user": "admin",
-#   "role": "Admin",
-#   "expires_at": "2025-10-09T14:30:00Z"
-# }
-
-# Enroll in MFA (optional but recommended)
-auth mfa enroll totp
-# QR code displayed, save backup codes
-
-# Verify MFA
-auth mfa verify --code 123456
-# ✓ MFA verification successful
-
-# Logout
-auth logout
-# ✓ Logged out successfully
-
-

2. KMS Operations

-
# Encrypt data
-kms encrypt "my secret data"
-# vault:v1:8GawgGuP...
-
-# Decrypt data
-kms decrypt "vault:v1:8GawgGuP..."
-# my secret data
-
-# Check available backends
-kms status
-# {
-#   "backend": "rustyvault",
-#   "status": "healthy",
-#   "url": "http://localhost:8200"
-# }
-
-# Encrypt with specific backend
-kms encrypt "data" --backend age --key age1xxxxxxx
-
-

3. Orchestrator Operations

-
# Check orchestrator status (no HTTP call)
-orch status
-# {
-#   "active_tasks": 5,
-#   "completed_tasks": 120,
-#   "health": "healthy"
-# }
-
-# Validate workflow
-orch validate workflows/deploy.ncl
-# {
-#   "valid": true,
-#   "workflow": { "name": "deploy_k8s", "operations": 5 }
-# }
-
-# List running tasks
-orch tasks --status running
-# [ { "task_id": "task_123", "name": "deploy_k8s", "progress": 45 } ]
-
-

4. Combined Workflow

-
# Complete authenticated deployment pipeline
-auth login admin
-    | if $in.success { auth verify }
-    | if $in.active {
-        orch validate workflows/production.ncl
-            | if $in.valid {
-                kms encrypt (open secrets.yaml | to json)
-                    | save production-secrets.enc
-              }
-      }
-# ✓ Pipeline completed successfully
-
-
-

Authentication Plugin (nu_plugin_auth)

-

The authentication plugin manages JWT-based authentication, MFA enrollment/verification, and session management with OS-native keyring integration.

-

Available Commands

-
- - - - - - -
CommandPurposeExample
auth loginLogin and store JWTauth login admin
auth logoutLogout and clear tokensauth logout
auth verifyVerify current sessionauth verify
auth sessionsList active sessionsauth sessions
auth mfa enrollEnroll in MFAauth mfa enroll totp
auth mfa verifyVerify MFA codeauth mfa verify --code 123456
-
-

Command Reference

-

auth login <username> [password]

-

Login to provisioning platform and store JWT tokens securely in OS keyring.

-

Arguments:

-
    -
  • username (required): Username for authentication
  • -
  • password (optional): Password (prompted if not provided)
  • -
-

Flags:

-
    -
  • --url <url>: Control center URL (default: http://localhost:3000)
  • -
  • --password <password>: Password (alternative to positional argument)
  • -
-

Examples:

-
# Interactive password prompt (recommended)
-auth login admin
-# Password: ••••••••
-# ✓ Login successful
-# User: admin
-# Role: Admin
-# Expires: 2025-10-09T14:30:00Z
-
-# Password in command (not recommended for production)
-auth login admin mypassword
-
-# Custom control center URL
-auth login admin --url https://control-center.example.com
-
-# Pipeline usage
-let creds = { username: "admin", password: (input --suppress-output "Password: ") }
-auth login $creds.username $creds.password
-
-

Token Storage Locations:

-
    -
  • macOS: Keychain Access (login keychain)
  • -
  • Linux: Secret Service API (gnome-keyring, kwallet)
  • -
  • Windows: Windows Credential Manager
  • -
-

Security Notes:

-
    -
  • Tokens encrypted at rest by OS
  • -
  • Requires user authentication to access (macOS Touch ID, Linux password)
  • -
  • Never stored in plain text files
  • -
-

auth logout

-

Logout from current session and remove stored tokens from keyring.

-

Examples:

-
# Simple logout
-auth logout
-# ✓ Logged out successfully
-
-# Conditional logout
-if (auth verify | get active) {
-    auth logout
-    echo "Session terminated"
-}
-
-# Logout all sessions (requires admin role)
-auth sessions | each { |sess|
-    auth logout --session-id $sess.session_id
-}
-
-

auth verify

-

Verify current session status and check token validity.

-

Returns:

-
    -
  • active (bool): Whether session is active
  • -
  • user (string): Username
  • -
  • role (string): User role
  • -
  • expires_at (datetime): Token expiration
  • -
  • mfa_verified (bool): MFA verification status
  • -
-

Examples:

-
# Check if logged in
-auth verify
-# {
-#   "active": true,
-#   "user": "admin",
-#   "role": "Admin",
-#   "expires_at": "2025-10-09T14:30:00Z",
-#   "mfa_verified": true
-# }
-
-# Pipeline usage
-if (auth verify | get active) {
-    echo "✓ Authenticated"
-} else {
-    auth login admin
-}
-
-# Check expiration
-let session = auth verify
-if ($session.expires_at | into datetime) < (date now) {
-    echo "Session expired, re-authenticating..."
-    auth login $session.user
-}
-
-

auth sessions

-

List all active sessions for current user.

-

Examples:

-
# List all sessions
-auth sessions
-# [
-#   {
-#     "session_id": "sess_abc123",
-#     "created_at": "2025-10-09T12:00:00Z",
-#     "expires_at": "2025-10-09T14:30:00Z",
-#     "ip_address": "192.168.1.100",
-#     "user_agent": "nushell/0.107.1"
-#   }
-# ]
-
-# Filter recent sessions (last hour)
-auth sessions | where created_at > ((date now) - 1hr)
-
-# Find sessions by IP
-auth sessions | where ip_address =~ "192.168"
-
-# Count active sessions
-auth sessions | length
-
-

auth mfa enroll <type>

-

Enroll in Multi-Factor Authentication (TOTP or WebAuthn).

-

Arguments:

-
    -
  • type (required): MFA type (totp or webauthn)
  • -
-

TOTP Enrollment:

-
auth mfa enroll totp
-# ✓ TOTP enrollment initiated
-#
-# Scan this QR code with your authenticator app:
-#
-#   ████ ▄▄▄▄▄ █▀█ █▄▀▀▀▄ ▄▄▄▄▄ ████
-#   ████ █   █ █▀▀▀█▄ ▀▀█ █   █ ████
-#   ████ █▄▄▄█ █ █▀▄ ▀▄▄█ █▄▄▄█ ████
-#   (QR code continues...)
-#
-# Or enter manually:
-# Secret: JBSWY3DPEHPK3PXP
-# URL: otpauth://totp/Provisioning:admin?secret=JBSWY3DPEHPK3PXP&issuer=Provisioning
-#
-# Backup codes (save securely):
-# 1. ABCD-EFGH-IJKL
-# 2. MNOP-QRST-UVWX
-# 3. YZAB-CDEF-GHIJ
-# (8 more codes...)
-
-

WebAuthn Enrollment:

-
auth mfa enroll webauthn
-# ✓ WebAuthn enrollment initiated
-#
-# Insert your security key and touch the button...
-# (waiting for device interaction)
-#
-# ✓ Security key registered successfully
-# Device: YubiKey 5 NFC
-# Created: 2025-10-09T13:00:00Z
-
-

Supported Authenticator Apps:

-
    -
  • Google Authenticator
  • -
  • Microsoft Authenticator
  • -
  • Authy
  • -
  • 1Password
  • -
  • Bitwarden
  • -
-

Supported Hardware Keys:

-
    -
  • YubiKey (all models)
  • -
  • Titan Security Key
  • -
  • Feitian ePass
  • -
  • macOS Touch ID
  • -
  • Windows Hello
  • -
-

auth mfa verify --code <code>

-

Verify MFA code (TOTP or backup code).

-

Flags:

-
    -
  • --code <code> (required): 6-digit TOTP code or backup code
  • -
-

Examples:

-
# Verify TOTP code
-auth mfa verify --code 123456
-# ✓ MFA verification successful
-
-# Verify backup code
-auth mfa verify --code ABCD-EFGH-IJKL
-# ✓ MFA verification successful (backup code used)
-# Warning: This backup code cannot be used again
-
-# Pipeline usage
-let code = input "MFA code: "
-auth mfa verify --code $code
-
-

Error Cases:

-
# Invalid code
-auth mfa verify --code 999999
-# Error: Invalid MFA code
-# → Verify time synchronization on your device
-
-# Rate limited
-auth mfa verify --code 123456
-# Error: Too many failed attempts
-# → Wait 5 minutes before trying again
-
-# No MFA enrolled
-auth mfa verify --code 123456
-# Error: MFA not enrolled for this user
-# → Run: auth mfa enroll totp
-
-

Environment Variables

-
- - - -
VariableDescriptionDefault
USERDefault usernameCurrent OS user
CONTROL_CENTER_URLControl center URLhttp://localhost:3000
AUTH_KEYRING_SERVICEKeyring service nameprovisioning-auth
-
-

Troubleshooting Authentication

-

“No active session”

-
# Solution: Login first
-auth login <username>
-
-

“Keyring error” (macOS)

-
# Check Keychain Access permissions
-# System Preferences → Security & Privacy → Privacy → Full Disk Access
-# Add: /Applications/Nushell.app (or /usr/local/bin/nu)
-
-# Or grant access manually
-security unlock-keychain ~/Library/Keychains/login.keychain-db
-
-

“Keyring error” (Linux)

-
# Install keyring service
-sudo apt install gnome-keyring      # Ubuntu/Debian
-sudo dnf install gnome-keyring      # Fedora
-sudo pacman -S gnome-keyring        # Arch
-
-# Or use KWallet (KDE)
-sudo apt install kwalletmanager
-
-# Start keyring daemon
-eval $(gnome-keyring-daemon --start)
-export $(gnome-keyring-daemon --start --components=secrets)
-
-

“MFA verification failed”

-
# Check time synchronization (TOTP requires accurate time)
-# macOS:
-sudo sntp -sS time.apple.com
-
-# Linux:
-sudo ntpdate pool.ntp.org
-# Or
-sudo systemctl restart systemd-timesyncd
-
-# Use backup code if TOTP not working
-auth mfa verify --code ABCD-EFGH-IJKL
-
-
-

KMS Plugin (nu_plugin_kms)

-

The KMS plugin provides high-performance encryption and decryption using multiple backend providers.

-

Supported Backends

-
- - - - - -
BackendPerformanceUse CaseSetup Complexity
rustyvault⚡ Very Fast (~5 ms)Production KMSMedium
age⚡ Very Fast (~3 ms)Local developmentLow
cosmian🐢 Moderate (~30 ms)Cloud KMSMedium
aws🐢 Moderate (~50 ms)AWS environmentsMedium
vault🐢 Moderate (~40 ms)Enterprise KMSHigh
-
-

Backend Selection Guide

-

Choose rustyvault when:

-
    -
  • ✅ Running in production with high throughput requirements
  • -
  • ✅ Need ~5 ms encryption/decryption latency
  • -
  • ✅ Have RustyVault server deployed
  • -
  • ✅ Require key rotation and versioning
  • -
-

Choose age when:

-
    -
  • ✅ Developing locally without external dependencies
  • -
  • ✅ Need simple file encryption
  • -
  • ✅ Want ~3 ms latency
  • -
  • ❌ Don’t need centralized key management
  • -
-

Choose cosmian when:

-
    -
  • ✅ Using Cosmian KMS service
  • -
  • ✅ Need cloud-based key management
  • -
  • ⚠️ Can accept ~30 ms latency
  • -
-

Choose aws when:

-
    -
  • ✅ Deployed on AWS infrastructure
  • -
  • ✅ Using AWS IAM for access control
  • -
  • ✅ Need AWS KMS integration
  • -
  • ⚠️ Can accept ~50 ms latency
  • -
-

Choose vault when:

-
    -
  • ✅ Using HashiCorp Vault enterprise
  • -
  • ✅ Need advanced policy management
  • -
  • ✅ Require audit trails
  • -
  • ⚠️ Can accept ~40 ms latency
  • -
-

Available Commands

-
- - - - -
CommandPurposeExample
kms encryptEncrypt datakms encrypt "secret"
kms decryptDecrypt datakms decrypt "vault:v1:..."
kms generate-keyGenerate DEKkms generate-key --spec AES256
kms statusBackend statuskms status
-
-

Command Reference

-

kms encrypt <data> [--backend <backend>]

-

Encrypt data using specified KMS backend.

-

Arguments:

-
    -
  • data (required): Data to encrypt (string or binary)
  • -
-

Flags:

-
    -
  • --backend <backend>: KMS backend (rustyvault, age, cosmian, aws, vault)
  • -
  • --key <key>: Key ID or recipient (backend-specific)
  • -
  • --context <context>: Additional authenticated data (AAD)
  • -
-

Examples:

-
# Auto-detect backend from environment
-kms encrypt "secret configuration data"
-# vault:v1:8GawgGuP+emDKX5q...
-
-# RustyVault backend
-kms encrypt "data" --backend rustyvault --key provisioning-main
-# vault:v1:abc123def456...
-
-# Age backend (local encryption)
-kms encrypt "data" --backend age --key age1xxxxxxxxx
-# -----BEGIN AGE ENCRYPTED FILE-----
-# YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+...
-# -----END AGE ENCRYPTED FILE-----
-
-# AWS KMS
-kms encrypt "data" --backend aws --key alias/provisioning
-# AQICAHhwbGF0Zm9ybS1wcm92aXNpb25p...
-
-# With context (AAD for additional security)
-kms encrypt "data" --backend rustyvault --key provisioning-main --context "user=admin,env=production"
-
-# Encrypt file contents
-kms encrypt (open config.yaml) --backend rustyvault | save config.yaml.enc
-
-# Encrypt multiple files
-ls configs/*.yaml | each { |file|
-    kms encrypt (open $file.name) --backend age
-        | save $"encrypted/($file.name).enc"
-}
-
-

Output Formats:

-
    -
  • RustyVault: vault:v1:base64_ciphertext
  • -
  • Age: -----BEGIN AGE ENCRYPTED FILE-----...-----END AGE ENCRYPTED FILE-----
  • -
  • AWS: base64_aws_kms_ciphertext
  • -
  • Cosmian: cosmian:v1:base64_ciphertext
  • -
-

kms decrypt <encrypted> [--backend <backend>]

-

Decrypt KMS-encrypted data.

-

Arguments:

-
    -
  • encrypted (required): Encrypted data (detects format automatically)
  • -
-

Flags:

-
    -
  • --backend <backend>: KMS backend (auto-detected from format if not specified)
  • -
  • --context <context>: Additional authenticated data (must match encryption context)
  • -
-

Examples:

-
# Auto-detect backend from format
-kms decrypt "vault:v1:8GawgGuP..."
-# secret configuration data
-
-# Explicit backend
-kms decrypt "vault:v1:abc123..." --backend rustyvault
-
-# Age decryption
-kms decrypt "-----BEGIN AGE ENCRYPTED FILE-----..."
-# (uses AGE_IDENTITY from environment)
-
-# With context (must match encryption context)
-kms decrypt "vault:v1:abc123..." --context "user=admin,env=production"
-
-# Decrypt file
-kms decrypt (open config.yaml.enc) | save config.yaml
-
-# Decrypt multiple files
-ls encrypted/*.enc | each { |file|
-    kms decrypt (open $file.name)
-        | save $"configs/(($file.name | path basename) | str replace '.enc' '')"
-}
-
-# Pipeline decryption
-open secrets.json
-    | get database_password_enc
-    | kms decrypt
-    | str trim
-    | psql --dbname mydb --password
-
-

Error Cases:

-
# Invalid ciphertext
-kms decrypt "invalid_data"
-# Error: Invalid ciphertext format
-# → Verify data was encrypted with KMS
-
-# Context mismatch
-kms decrypt "vault:v1:abc..." --context "wrong=context"
-# Error: Authentication failed (AAD mismatch)
-# → Verify encryption context matches
-
-# Backend unavailable
-kms decrypt "vault:v1:abc..."
-# Error: Failed to connect to RustyVault at http://localhost:8200
-# → Check RustyVault is running: curl http://localhost:8200/v1/sys/health
-
-

kms generate-key [--spec <spec>]

-

Generate data encryption key (DEK) using KMS envelope encryption.

-

Flags:

-
    -
  • --spec <spec>: Key specification (AES128 or AES256, default: AES256)
  • -
  • --backend <backend>: KMS backend
  • -
-

Examples:

-
# Generate AES-256 key
-kms generate-key
-# {
-#   "plaintext": "rKz3N8xPq...",  # base64-encoded key
-#   "ciphertext": "vault:v1:...",  # encrypted DEK
-#   "spec": "AES256"
-# }
-
-# Generate AES-128 key
-kms generate-key --spec AES128
-
-# Use in envelope encryption pattern
-let dek = kms generate-key
-let encrypted_data = ($data | openssl enc -aes-256-cbc -K $dek.plaintext)
-{
-    data: $encrypted_data,
-    encrypted_key: $dek.ciphertext
-} | save secure_data.json
-
-# Later, decrypt:
-let envelope = open secure_data.json
-let dek = kms decrypt $envelope.encrypted_key
-$envelope.data | openssl enc -d -aes-256-cbc -K $dek
-
-

Use Cases:

-
    -
  • Envelope encryption (encrypt large data locally, protect DEK with KMS)
  • -
  • Database field encryption
  • -
  • File encryption with key wrapping
  • -
-

kms status

-

Show KMS backend status, configuration, and health.

-

Examples:

-
# Show current backend status
-kms status
-# {
-#   "backend": "rustyvault",
-#   "status": "healthy",
-#   "url": "http://localhost:8200",
-#   "mount_point": "transit",
-#   "version": "0.1.0",
-#   "latency_ms": 5
-# }
-
-# Check all configured backends
-kms status --all
-# [
-#   { "backend": "rustyvault", "status": "healthy", ... },
-#   { "backend": "age", "status": "available", ... },
-#   { "backend": "aws", "status": "unavailable", "error": "..." }
-# ]
-
-# Filter to specific backend
-kms status | where backend == "rustyvault"
-
-# Health check in automation
-if (kms status | get status) == "healthy" {
-    echo "✓ KMS operational"
-} else {
-    error make { msg: "KMS unhealthy" }
-}
-
-

Backend Configuration

-

RustyVault Backend

-
# Environment variables
-export RUSTYVAULT_ADDR="http://localhost:8200"
-export RUSTYVAULT_TOKEN="hvs.xxxxxxxxxxxxx"
-export RUSTYVAULT_MOUNT="transit"  # Transit engine mount point
-export RUSTYVAULT_KEY="provisioning-main"  # Default key name
-
-
# Usage
-kms encrypt "data" --backend rustyvault --key provisioning-main
-
-

Setup RustyVault:

-
# Start RustyVault
-rustyvault server -dev
-
-# Enable transit engine
-rustyvault secrets enable transit
-
-# Create encryption key
-rustyvault write -f transit/keys/provisioning-main
-
-

Age Backend

-
# Generate Age keypair
-age-keygen -o ~/.age/key.txt
-
-# Environment variables
-export AGE_IDENTITY="$HOME/.age/key.txt"  # Private key
-export AGE_RECIPIENT="age1xxxxxxxxx"      # Public key (from key.txt)
-
-
# Usage
-kms encrypt "data" --backend age
-kms decrypt (open file.enc) --backend age
-
-

AWS KMS Backend

-
# AWS credentials
-export AWS_REGION="us-east-1"
-export AWS_ACCESS_KEY_ID="AKIAXXXXX"
-export AWS_SECRET_ACCESS_KEY="xxxxx"
-
-# KMS configuration
-export AWS_KMS_KEY_ID="alias/provisioning"
-
-
# Usage
-kms encrypt "data" --backend aws --key alias/provisioning
-
-

Setup AWS KMS:

-
# Create KMS key
-aws kms create-key --description "Provisioning Platform"
-
-# Create alias
-aws kms create-alias --alias-name alias/provisioning --target-key-id <key-id>
-
-# Grant permissions
-aws kms create-grant --key-id <key-id> --grantee-principal <role-arn> \
-    --operations Encrypt Decrypt GenerateDataKey
-
-

Cosmian Backend

-
# Cosmian KMS configuration
-export KMS_HTTP_URL="http://localhost:9998"
-export KMS_HTTP_BACKEND="cosmian"
-export COSMIAN_API_KEY="your-api-key"
-
-
# Usage
-kms encrypt "data" --backend cosmian
-
-

Vault Backend (HashiCorp)

-
# Vault configuration
-export VAULT_ADDR="https://vault.example.com:8200"
-export VAULT_TOKEN="hvs.xxxxxxxxxxxxx"
-export VAULT_MOUNT="transit"
-export VAULT_KEY="provisioning"
-
-
# Usage
-kms encrypt "data" --backend vault --key provisioning
-
-

Performance Benchmarks

-

Test Setup:

-
    -
  • Data size: 1 KB
  • -
  • Iterations: 1000
  • -
  • Hardware: Apple M1, 16 GB RAM
  • -
  • Network: localhost
  • -
-

Results:

-
- - - - - -
BackendEncrypt (avg)Decrypt (avg)Throughput (ops/sec)
RustyVault4.8 ms5.1 ms~200
Age2.9 ms3.2 ms~320
Cosmian HTTP31 ms29 ms~33
AWS KMS52 ms48 ms~20
Vault38 ms41 ms~25
-
-

Scaling Test (1000 operations):

-
# RustyVault: ~5 seconds
-0..1000 | each { |_| kms encrypt "data" --backend rustyvault } | length
-# Age: ~3 seconds
-0..1000 | each { |_| kms encrypt "data" --backend age } | length
-
-

Troubleshooting KMS

-

“RustyVault connection failed”

-
# Check RustyVault is running
-curl http://localhost:8200/v1/sys/health
-# Expected: { "initialized": true, "sealed": false }
-
-# Check environment
-echo $env.RUSTYVAULT_ADDR
-echo $env.RUSTYVAULT_TOKEN
-
-# Test authentication
-curl -H "X-Vault-Token: $RUSTYVAULT_TOKEN" $RUSTYVAULT_ADDR/v1/sys/health
-
-

“Age encryption failed”

-
# Check Age keys exist
-ls -la ~/.age/
-# Expected: key.txt
-
-# Verify key format
-cat ~/.age/key.txt | head -1
-# Expected: # created: <date>
-# Line 2: # public key: age1xxxxx
-# Line 3: AGE-SECRET-KEY-xxxxx
-
-# Extract public key
-export AGE_RECIPIENT=$(grep "public key:" ~/.age/key.txt | cut -d: -f2 | tr -d ' ')
-echo $AGE_RECIPIENT
-
-

“AWS KMS access denied”

-
# Verify AWS credentials
-aws sts get-caller-identity
-# Expected: Account, UserId, Arn
-
-# Check KMS key permissions
-aws kms describe-key --key-id alias/provisioning
-
-# Test encryption
-aws kms encrypt --key-id alias/provisioning --plaintext "test"
-
-
-

Orchestrator Plugin (nu_plugin_orchestrator)

-

The orchestrator plugin provides direct file-based access to orchestrator state, eliminating HTTP overhead for status queries and validation.

-

Available Commands

-
- - - -
CommandPurposeExample
orch statusOrchestrator statusorch status
orch validateValidate workfloworch validate workflow.ncl
orch tasksList tasksorch tasks --status running
-
-

Command Reference

-

orch status [--data-dir <dir>]

-

Get orchestrator status from local files (no HTTP, ~1 ms latency).

-

Flags:

-
    -
  • --data-dir <dir>: Data directory (default from ORCHESTRATOR_DATA_DIR)
  • -
-

Examples:

-
# Default data directory
-orch status
-# {
-#   "active_tasks": 5,
-#   "completed_tasks": 120,
-#   "failed_tasks": 2,
-#   "pending_tasks": 3,
-#   "uptime": "2d 4h 15m",
-#   "health": "healthy"
-# }
-
-# Custom data directory
-orch status --data-dir /opt/orchestrator/data
-
-# Monitor in loop
-while true {
-    clear
-    orch status | table
-    sleep 5sec
-}
-
-# Alert on failures
-if (orch status | get failed_tasks) > 0 {
-    echo "⚠️ Failed tasks detected!"
-}
-
-

orch validate <workflow.ncl> [--strict]

-

Validate workflow Nickel file syntax and structure.

-

Arguments:

-
    -
  • workflow.ncl (required): Path to Nickel workflow file
  • -
-

Flags:

-
    -
  • --strict: Enable strict validation (warnings as errors)
  • -
-

Examples:

-
# Basic validation
-orch validate workflows/deploy.ncl
-# {
-#   "valid": true,
-#   "workflow": {
-#     "name": "deploy_k8s_cluster",
-#     "version": "1.0.0",
-#     "operations": 5
-#   },
-#   "warnings": [],
-#   "errors": []
-# }
-
-# Strict mode (warnings cause failure)
-orch validate workflows/deploy.ncl --strict
-# Error: Validation failed with warnings:
-# - Operation 'create_servers': Missing retry_policy
-# - Operation 'install_k8s': Resource limits not specified
-
-# Validate all workflows
-ls workflows/*.ncl | each { |file|
-    let result = orch validate $file.name
-    if $result.valid {
-        echo $"✓ ($file.name)"
-    } else {
-        echo $"✗ ($file.name): ($result.errors | str join ', ')"
-    }
-}
-
-# CI/CD validation
-try {
-    orch validate workflow.ncl --strict
-    echo "✓ Validation passed"
-} catch {
-    echo "✗ Validation failed"
-    exit 1
-}
-
-

Validation Checks:

-
    -
  • ✅ KCL syntax correctness
  • -
  • ✅ Required fields present (name, version, operations)
  • -
  • ✅ Dependency graph valid (no cycles)
  • -
  • ✅ Resource limits within bounds
  • -
  • ✅ Provider configurations valid
  • -
  • ✅ Operation types supported
  • -
  • ⚠️ Optional: Retry policies defined
  • -
  • ⚠️ Optional: Resource limits specified
  • -
-

orch tasks [--status <status>] [--limit <n>]

-

List orchestrator tasks from local state.

-

Flags:

-
    -
  • --status <status>: Filter by status (pending, running, completed, failed)
  • -
  • --limit <n>: Limit results (default: 100)
  • -
  • --data-dir <dir>: Data directory
  • -
-

Examples:

-
# All tasks (last 100)
-orch tasks
-# [
-#   {
-#     "task_id": "task_abc123",
-#     "name": "deploy_kubernetes",
-#     "status": "running",
-#     "priority": 5,
-#     "created_at": "2025-10-09T12:00:00Z",
-#     "progress": 45
-#   }
-# ]
-
-# Running tasks only
-orch tasks --status running
-
-# Failed tasks (last 10)
-orch tasks --status failed --limit 10
-
-# Pending high-priority tasks
-orch tasks --status pending | where priority > 7
-
-# Monitor active tasks
-watch {
-    orch tasks --status running
-        | select name progress updated_at
-        | table
-}
-
-# Count tasks by status
-orch tasks | group-by status | each { |group|
-    { status: $group.0, count: ($group.1 | length) }
-}
-
-

Environment Variables

-
- -
VariableDescriptionDefault
ORCHESTRATOR_DATA_DIRData directoryprovisioning/platform/orchestrator/data
-
-

Performance Comparison

-
- - - -
OperationHTTP APIPluginLatency Reduction
Status query~30 ms~1 ms97% faster
Validate workflow~100 ms~10 ms90% faster
List tasks~50 ms~5 ms90% faster
-
-

Use Case: CI/CD Pipeline

-
# HTTP approach (slow)
-http get http://localhost:9090/tasks --status running
-    | each { |task| http get $"http://localhost:9090/tasks/($task.id)" }
-# Total: ~500 ms for 10 tasks
-
-# Plugin approach (fast)
-orch tasks --status running
-# Total: ~5 ms for 10 tasks
-# Result: 100x faster
-
-

Troubleshooting Orchestrator

-

“Failed to read status”

-
# Check data directory exists
-ls -la provisioning/platform/orchestrator/data/
-
-# Create if missing
-mkdir -p provisioning/platform/orchestrator/data
-
-# Check permissions (must be readable)
-chmod 755 provisioning/platform/orchestrator/data
-
-

“Workflow validation failed”

-
# Use strict mode for detailed errors
-orch validate workflows/deploy.ncl --strict
-
-# Check Nickel syntax manually
-nickel typecheck workflows/deploy.ncl
-nickel eval workflows/deploy.ncl
-
-

“No tasks found”

-
# Check orchestrator running
-ps aux | grep orchestrator
-
-# Start orchestrator if not running
-cd provisioning/platform/orchestrator
-./scripts/start-orchestrator.nu --background
-
-# Check task files
-ls provisioning/platform/orchestrator/data/tasks/
-
-
-

Integration Examples

-

Example 1: Complete Authenticated Deployment

-

Full workflow with authentication, secrets, and deployment:

-
# Step 1: Login with MFA
-auth login admin
-auth mfa verify --code (input "MFA code: ")
-
-# Step 2: Verify orchestrator health
-if (orch status | get health) != "healthy" {
-    error make { msg: "Orchestrator unhealthy" }
-}
-
-# Step 3: Validate deployment workflow
-let validation = orch validate workflows/production-deploy.ncl --strict
-if not $validation.valid {
-    error make { msg: $"Validation failed: ($validation.errors)" }
-}
-
-# Step 4: Encrypt production secrets
-let secrets = open secrets/production.yaml
-kms encrypt ($secrets | to json) --backend rustyvault --key prod-main
-    | save secrets/production.enc
-
-# Step 5: Submit deployment
-provisioning cluster create production --check
-
-# Step 6: Monitor progress
-while (orch tasks --status running | length) > 0 {
-    orch tasks --status running
-        | select name progress updated_at
-        | table
-    sleep 10sec
-}
-
-echo "✓ Deployment complete"
-
-

Example 2: Batch Secret Rotation

-

Rotate all secrets in multiple environments:

-
# Rotate database passwords
-["dev", "staging", "production"] | each { |env|
-    # Generate new password
-    let new_password = (openssl rand -base64 32)
-
-    # Encrypt with environment-specific key
-    let encrypted = kms encrypt $new_password --backend rustyvault --key $"($env)-main"
-
-    # Save encrypted password
-    {
-        environment: $env,
-        password_enc: $encrypted,
-        rotated_at: (date now | format date "%Y-%m-%d %H:%M:%S")
-    } | save $"secrets/db-password-($env).json"
-
-    echo $"✓ Rotated password for ($env)"
-}
-
-

Example 3: Multi-Environment Deployment

-

Deploy to multiple environments with validation:

-
# Define environments
-let environments = [
-    { name: "dev", validate: "basic" },
-    { name: "staging", validate: "strict" },
-    { name: "production", validate: "strict", mfa_required: true }
-]
-
-# Deploy to each environment
-$environments | each { |env|
-    echo $"Deploying to ($env.name)..."
-
-    # Authenticate if production
-    if $env.mfa_required? {
-        if not (auth verify | get mfa_verified) {
-            auth mfa verify --code (input $"MFA code for ($env.name): ")
-        }
-    }
-
-    # Validate workflow
-    let validation = if $env.validate == "strict" {
-        orch validate $"workflows/($env.name)-deploy.ncl" --strict
-    } else {
-        orch validate $"workflows/($env.name)-deploy.ncl"
-    }
-
-    if not $validation.valid {
-        echo $"✗ Validation failed for ($env.name)"
-        continue
-    }
-
-    # Decrypt secrets
-    let secrets = kms decrypt (open $"secrets/($env.name).enc")
-
-    # Deploy
-    provisioning cluster create $env.name
-
-    echo $"✓ Deployed to ($env.name)"
-}
-
-

Example 4: Automated Backup and Encryption

-

Backup configuration files with encryption:

-
# Backup script
-let backup_dir = $"backups/(date now | format date "%Y%m%d-%H%M%S")"
-mkdir $backup_dir
-
-# Backup and encrypt configs
-ls configs/**/*.yaml | each { |file|
-    let encrypted = kms encrypt (open $file.name) --backend age
-    let backup_path = $"($backup_dir)/($file.name | path basename).enc"
-    $encrypted | save $backup_path
-    echo $"✓ Backed up ($file.name)"
-}
-
-# Create manifest
-{
-    backup_date: (date now),
-    files: (ls $"($backup_dir)/*.enc" | length),
-    backend: "age"
-} | save $"($backup_dir)/manifest.json"
-
-echo $"✓ Backup complete: ($backup_dir)"
-
-

Example 5: Health Monitoring Dashboard

-

Real-time health monitoring:

-
# Health dashboard
-while true {
-    clear
-
-    # Header
-    echo "=== Provisioning Platform Health Dashboard ==="
-    echo $"Updated: (date now | format date "%Y-%m-%d %H:%M:%S")"
-    echo ""
-
-    # Authentication status
-    let auth_status = try { auth verify } catch { { active: false } }
-    echo $"Auth: (if $auth_status.active { '✓ Active' } else { '✗ Inactive' })"
-
-    # KMS status
-    let kms_health = kms status
-    echo $"KMS: (if $kms_health.status == 'healthy' { '✓ Healthy' } else { '✗ Unhealthy' })"
-
-    # Orchestrator status
-    let orch_health = orch status
-    echo $"Orchestrator: (if $orch_health.health == 'healthy' { '✓ Healthy' } else { '✗ Unhealthy' })"
-    echo $"Active Tasks: ($orch_health.active_tasks)"
-    echo $"Failed Tasks: ($orch_health.failed_tasks)"
-
-    # Task summary
-    echo ""
-    echo "=== Running Tasks ==="
-    orch tasks --status running
-        | select name progress updated_at
-        | table
-
-    sleep 10sec
-}
-
-
-

Best Practices

-

When to Use Plugins vs HTTP

-

✅ Use Plugins When:

-
    -
  • Performance is critical (high-frequency operations)
  • -
  • Working in pipelines (Nushell data structures)
  • -
  • Need offline capability (KMS, orchestrator local ops)
  • -
  • Building automation scripts
  • -
  • CI/CD pipelines
  • -
-

Use HTTP When:

-
    -
  • Calling from external systems (not Nushell)
  • -
  • Need consistent REST API interface
  • -
  • Cross-language integration
  • -
  • Web UI backend
  • -
-

Performance Optimization

-

1. Batch Operations

-
# ❌ Slow: Individual HTTP calls in loop
-ls configs/*.yaml | each { |file|
-    http post http://localhost:9998/encrypt { data: (open $file.name) }
-}
-# Total: ~5 seconds (50 ms × 100)
-
-# ✅ Fast: Plugin in pipeline
-ls configs/*.yaml | each { |file|
-    kms encrypt (open $file.name)
-}
-# Total: ~0.5 seconds (5 ms × 100)
-
-

2. Parallel Processing

-
# Process multiple operations in parallel
-ls configs/*.yaml
-    | par-each { |file|
-        kms encrypt (open $file.name) | save $"encrypted/($file.name).enc"
-    }
-
-

3. Caching Session State

-
# Cache auth verification
-let $auth_cache = auth verify
-if $auth_cache.active {
-    # Use cached result instead of repeated calls
-    echo $"Authenticated as ($auth_cache.user)"
-}
-
-

Error Handling

-

Graceful Degradation:

-
# Try plugin, fallback to HTTP if unavailable
-def kms_encrypt [data: string] {
-    try {
-        kms encrypt $data
-    } catch {
-        http post http://localhost:9998/encrypt { data: $data } | get encrypted
-    }
-}
-
-

Comprehensive Error Handling:

-
# Handle all error cases
-def safe_deployment [] {
-    # Check authentication
-    let auth_status = try {
-        auth verify
-    } catch {
-        echo "✗ Authentication failed, logging in..."
-        auth login admin
-        auth verify
-    }
-
-    # Check KMS health
-    let kms_health = try {
-        kms status
-    } catch {
-        error make { msg: "KMS unavailable, cannot proceed" }
-    }
-
-    # Validate workflow
-    let validation = try {
-        orch validate workflow.ncl --strict
-    } catch {
-        error make { msg: "Workflow validation failed" }
-    }
-
-    # Proceed if all checks pass
-    if $auth_status.active and $kms_health.status == "healthy" and $validation.valid {
-        echo "✓ All checks passed, deploying..."
-        provisioning cluster create production
-    }
-}
-
-

Security Best Practices

-

1. Never Log Decrypted Data

-
# ❌ BAD: Logs plaintext password
-let password = kms decrypt $encrypted_password
-echo $"Password: ($password)"  # Visible in logs!
-
-# ✅ GOOD: Use directly without logging
-let password = kms decrypt $encrypted_password
-psql --dbname mydb --password $password  # Not logged
-
-

2. Use Context (AAD) for Critical Data

-
# Encrypt with context
-let context = $"user=(whoami),env=production,date=(date now | format date "%Y-%m-%d")"
-kms encrypt $sensitive_data --context $context
-
-# Decrypt requires same context
-kms decrypt $encrypted --context $context
-
-

3. Rotate Backup Codes

-
# After using backup code, generate new set
-auth mfa verify --code ABCD-EFGH-IJKL
-# Warning: Backup code used
-auth mfa regenerate-backups
-# New backup codes generated
-
-

4. Limit Token Lifetime

-
# Check token expiration before long operations
-let session = auth verify
-let expires_in = (($session.expires_at | into datetime) - (date now))
-if $expires_in < 5 min {
-    echo "⚠️ Token expiring soon, re-authenticating..."
-    auth login $session.user
-}
-
-
-

Troubleshooting

-

Common Issues Across Plugins

-

“Plugin not found”

-
# Check plugin registration
-plugin list | where name =~ "auth|kms|orch"
-
-# Re-register if missing
-cd provisioning/core/plugins/nushell-plugins
-plugin add target/release/nu_plugin_auth
-plugin add target/release/nu_plugin_kms
-plugin add target/release/nu_plugin_orchestrator
-
-# Restart Nushell
-exit
-nu
-
-

“Plugin command failed”

-
# Enable debug mode
-$env.RUST_LOG = "debug"
-
-# Run command again to see detailed errors
-kms encrypt "test"
-
-# Check plugin version compatibility
-plugin list | where name =~ "kms" | select name version
-
-

“Permission denied”

-
# Check plugin executable permissions
-ls -l provisioning/core/plugins/nushell-plugins/target/release/nu_plugin_*
-# Should show: -rwxr-xr-x
-
-# Fix if needed
-chmod +x provisioning/core/plugins/nushell-plugins/target/release/nu_plugin_*
-
-

Platform-Specific Issues

-

macOS Issues:

-
# "cannot be opened because the developer cannot be verified"
-xattr -d com.apple.quarantine target/release/nu_plugin_auth
-xattr -d com.apple.quarantine target/release/nu_plugin_kms
-xattr -d com.apple.quarantine target/release/nu_plugin_orchestrator
-
-# Keychain access denied
-# System Preferences → Security & Privacy → Privacy → Full Disk Access
-# Add: /usr/local/bin/nu
-
-

Linux Issues:

-
# Keyring service not running
-systemctl --user status gnome-keyring-daemon
-systemctl --user start gnome-keyring-daemon
-
-# Missing dependencies
-sudo apt install libssl-dev pkg-config  # Ubuntu/Debian
-sudo dnf install openssl-devel          # Fedora
-
-

Windows Issues:

-
# Credential Manager access denied
-# Control Panel → User Accounts → Credential Manager
-# Ensure Windows Credential Manager service is running
-
-# Missing Visual C++ runtime
-# Download from: https://aka.ms/vs/17/release/vc_redist.x64.exe
-
-

Debugging Techniques

-

Enable Verbose Logging:

-
# Set log level
-$env.RUST_LOG = "debug,nu_plugin_auth=trace"
-
-# Run command
-auth login admin
-
-# Check logs
-
-

Test Plugin Directly:

-
# Test plugin communication (advanced)
-echo '{"Call": [0, {"name": "auth", "call": "login", "args": ["admin", "password"]}]}' \
-    | target/release/nu_plugin_auth
-
-

Check Plugin Health:

-
# Test each plugin
-auth --help       # Should show auth commands
-kms --help        # Should show kms commands
-orch --help       # Should show orch commands
-
-# Test functionality
-auth verify       # Should return session status
-kms status        # Should return backend status
-orch status       # Should return orchestrator status
-
-
-

Migration Guide

-

Migrating from HTTP to Plugin-Based

-

Phase 1: Install Plugins (No Breaking Changes)

-
# Build and register plugins
-cd provisioning/core/plugins/nushell-plugins
-cargo build --release --all
-plugin add target/release/nu_plugin_auth
-plugin add target/release/nu_plugin_kms
-plugin add target/release/nu_plugin_orchestrator
-
-# Verify HTTP still works
-http get http://localhost:9090/health
-
-

Phase 2: Update Scripts Incrementally

-
# Before (HTTP)
-def encrypt_config [file: string] {
-    let data = open $file
-    let result = http post http://localhost:9998/encrypt { data: $data }
-    $result.encrypted | save $"($file).enc"
-}
-
-# After (Plugin with fallback)
-def encrypt_config [file: string] {
-    let data = open $file
-    let encrypted = try {
-        kms encrypt $data --backend rustyvault
-    } catch {
-        # Fallback to HTTP if plugin unavailable
-        (http post http://localhost:9998/encrypt { data: $data }).encrypted
-    }
-    $encrypted | save $"($file).enc"
-}
-
-

Phase 3: Test Migration

-
# Run side-by-side comparison
-def test_migration [] {
-    let test_data = "test secret data"
-
-    # Plugin approach
-    let start_plugin = date now
-    let plugin_result = kms encrypt $test_data
-    let plugin_time = ((date now) - $start_plugin)
-
-    # HTTP approach
-    let start_http = date now
-    let http_result = (http post http://localhost:9998/encrypt { data: $test_data }).encrypted
-    let http_time = ((date now) - $start_http)
-
-    echo $"Plugin: ($plugin_time)ms"
-    echo $"HTTP: ($http_time)ms"
-    echo $"Speedup: (($http_time / $plugin_time))x"
-}
-
-

Phase 4: Gradual Rollout

-
# Use feature flag for controlled rollout
-$env.USE_PLUGINS = true
-
-def encrypt_with_flag [data: string] {
-    if $env.USE_PLUGINS {
-        kms encrypt $data
-    } else {
-        (http post http://localhost:9998/encrypt { data: $data }).encrypted
-    }
-}
-
-

Phase 5: Full Migration

-
# Replace all HTTP calls with plugin calls
-# Remove fallback logic once stable
-def encrypt_config [file: string] {
-    let data = open $file
-    kms encrypt $data --backend rustyvault | save $"($file).enc"
-}
-
-

Rollback Strategy

-
# If issues arise, quickly rollback
-def rollback_to_http [] {
-    # Remove plugin registrations
-    plugin rm nu_plugin_auth
-    plugin rm nu_plugin_kms
-    plugin rm nu_plugin_orchestrator
-
-    # Restart Nushell
-    exec nu
-}
-
-
-

Advanced Configuration

-

Custom Plugin Paths

-
# ~/.config/nushell/config.nu
-$env.PLUGIN_PATH = "/opt/provisioning/plugins"
-
-# Register from custom location
-plugin add $"($env.PLUGIN_PATH)/nu_plugin_auth"
-plugin add $"($env.PLUGIN_PATH)/nu_plugin_kms"
-plugin add $"($env.PLUGIN_PATH)/nu_plugin_orchestrator"
-
-

Environment-Specific Configuration

-
# ~/.config/nushell/env.nu
-
-# Development environment
-if ($env.ENV? == "dev") {
-    $env.RUSTYVAULT_ADDR = "http://localhost:8200"
-    $env.CONTROL_CENTER_URL = "http://localhost:3000"
-}
-
-# Staging environment
-if ($env.ENV? == "staging") {
-    $env.RUSTYVAULT_ADDR = "https://vault-staging.example.com"
-    $env.CONTROL_CENTER_URL = "https://control-staging.example.com"
-}
-
-# Production environment
-if ($env.ENV? == "prod") {
-    $env.RUSTYVAULT_ADDR = "https://vault.example.com"
-    $env.CONTROL_CENTER_URL = "https://control.example.com"
-}
-
-

Plugin Aliases

-
# ~/.config/nushell/config.nu
-
-# Auth shortcuts
-alias login = auth login
-alias logout = auth logout
-alias whoami = auth verify | get user
-
-# KMS shortcuts
-alias encrypt = kms encrypt
-alias decrypt = kms decrypt
-
-# Orchestrator shortcuts
-alias status = orch status
-alias tasks = orch tasks
-alias validate = orch validate
-
-

Custom Commands

-
# ~/.config/nushell/custom_commands.nu
-
-# Encrypt all files in directory
-def encrypt-dir [dir: string] {
-    ls $"($dir)/**/*" | where type == file | each { |file|
-        kms encrypt (open $file.name) | save $"($file.name).enc"
-        echo $"✓ Encrypted ($file.name)"
-    }
-}
-
-# Decrypt all files in directory
-def decrypt-dir [dir: string] {
-    ls $"($dir)/**/*.enc" | each { |file|
-        kms decrypt (open $file.name)
-            | save (echo $file.name | str replace '.enc' '')
-        echo $"✓ Decrypted ($file.name)"
-    }
-}
-
-# Monitor deployments
-def watch-deployments [] {
-    while true {
-        clear
-        echo "=== Active Deployments ==="
-        orch tasks --status running | table
-        sleep 5sec
-    }
-}
-
-
-

Security Considerations

-

Threat Model

-

What Plugins Protect Against:

-
    -
  • ✅ Network eavesdropping (no HTTP for KMS/orch)
  • -
  • ✅ Token theft from files (keyring storage)
  • -
  • ✅ Credential exposure in logs (prompt-based input)
  • -
  • ✅ Man-in-the-middle attacks (local file access)
  • -
-

What Plugins Don’t Protect Against:

-
    -
  • ❌ Memory dumping (decrypted data in RAM)
  • -
  • ❌ Malicious plugins (trust registry only)
  • -
  • ❌ Compromised OS keyring
  • -
  • ❌ Physical access to machine
  • -
-

Secure Deployment

-

1. Verify Plugin Integrity

-
# Check plugin signatures (if available)
-sha256sum target/release/nu_plugin_auth
-# Compare with published checksums
-
-# Build from trusted source
-git clone https://github.com/provisioning-platform/plugins
-cd plugins
-cargo build --release --all
-
-

2. Restrict Plugin Access

-
# Set plugin permissions (only owner can execute)
-chmod 700 target/release/nu_plugin_*
-
-# Store in protected directory
-sudo mkdir -p /opt/provisioning/plugins
-sudo chown $(whoami):$(whoami) /opt/provisioning/plugins
-sudo chmod 755 /opt/provisioning/plugins
-mv target/release/nu_plugin_* /opt/provisioning/plugins/
-
-

3. Audit Plugin Usage

-
# Log plugin calls (for compliance)
-def logged_encrypt [data: string] {
-    let timestamp = date now
-    let result = kms encrypt $data
-    { timestamp: $timestamp, action: "encrypt" } | save --append audit.log
-    $result
-}
-
-

4. Rotate Credentials Regularly

-
# Weekly credential rotation script
-def rotate_credentials [] {
-    # Re-authenticate
-    auth logout
-    auth login admin
-
-    # Rotate KMS keys (if supported)
-    kms rotate-key --key provisioning-main
-
-    # Update encrypted secrets
-    ls secrets/*.enc | each { |file|
-        let plain = kms decrypt (open $file.name)
-        kms encrypt $plain | save $file.name
-    }
-}
-
-
-

FAQ

-

Q: Can I use plugins without RustyVault/Age installed?

-

A: Yes, authentication and orchestrator plugins work independently. KMS plugin requires at least one backend configured (Age is easiest for local -dev).

-

Q: Do plugins work in CI/CD pipelines?

-

A: Yes, plugins work great in CI/CD. For headless environments (no keyring), use environment variables for auth or file-based tokens.

-
# CI/CD example
-export CONTROL_CENTER_TOKEN="jwt-token-here"
-kms encrypt "data" --backend age
-
-

Q: How do I update plugins?

-

A: Rebuild and re-register:

-
cd provisioning/core/plugins/nushell-plugins
-git pull
-cargo build --release --all
-plugin add --force target/release/nu_plugin_auth
-plugin add --force target/release/nu_plugin_kms
-plugin add --force target/release/nu_plugin_orchestrator
-
-

Q: Can I use multiple KMS backends simultaneously?

-

A: Yes, specify --backend for each operation:

-
kms encrypt "data1" --backend rustyvault
-kms encrypt "data2" --backend age
-kms encrypt "data3" --backend aws
-
-

Q: What happens if a plugin crashes?

-

A: Nushell isolates plugin crashes. The command fails with an error, but Nushell continues running. Check logs with $env.RUST_LOG = "debug".

-

Q: Are plugins compatible with older Nushell versions?

-

A: Plugins require Nushell 0.107.1+. For older versions, use HTTP API.

-

Q: How do I backup MFA enrollment?

-

A: Save backup codes securely (password manager, encrypted file). QR code can be re-scanned from the same secret.

-
# Save backup codes
-auth mfa enroll totp | save mfa-backup-codes.txt
-kms encrypt (open mfa-backup-codes.txt) | save mfa-backup-codes.enc
-rm mfa-backup-codes.txt
-
-

Q: Can plugins work offline?

-

A: Partially:

-
    -
  • kms with Age backend (fully offline)
  • -
  • orch status/tasks (reads local files)
  • -
  • auth (requires control center)
  • -
  • kms with RustyVault/AWS/Vault (requires network)
  • -
-

Q: How do I troubleshoot plugin performance?

-

A: Use Nushell’s timing:

-
timeit { kms encrypt "data" }
-# 5 ms 123μs 456 ns
-
-timeit { http post http://localhost:9998/encrypt { data: "data" } }
-# 52 ms 789μs 123 ns
-
-
- -
    -
  • Security System: /Users/Akasha/project-provisioning/docs/architecture/adr-009-security-system-complete.md
  • -
  • JWT Authentication: /Users/Akasha/project-provisioning/docs/architecture/JWT_AUTH_IMPLEMENTATION.md
  • -
  • Config Encryption: /Users/Akasha/project-provisioning/docs/user/CONFIG_ENCRYPTION_GUIDE.md
  • -
  • RustyVault Integration: /Users/Akasha/project-provisioning/RUSTYVAULT_INTEGRATION_SUMMARY.md
  • -
  • MFA Implementation: /Users/Akasha/project-provisioning/docs/architecture/MFA_IMPLEMENTATION_SUMMARY.md
  • -
  • Nushell Plugins Reference: /Users/Akasha/project-provisioning/docs/user/NUSHELL_PLUGINS_GUIDE.md
  • -
-
-

Version: 1.0.0 -Maintained By: Platform Team -Last Updated: 2025-10-09 -Feedback: Open an issue or contact platform-team@example.com

-

Nushell Plugins for Provisioning Platform

-

Complete guide to authentication, KMS, and orchestrator plugins.

-

Overview

-

Three native Nushell plugins provide high-performance integration with the provisioning platform:

-
    -
  1. nu_plugin_auth - JWT authentication and MFA operations
  2. -
  3. nu_plugin_kms - Key management (RustyVault, Age, Cosmian, AWS, Vault)
  4. -
  5. nu_plugin_orchestrator - Orchestrator operations (status, validate, tasks)
  6. -
-

Why Native Plugins

-

Performance Advantages:

-
    -
  • 10x faster than HTTP API calls (KMS operations)
  • -
  • Direct access to Rust libraries (no HTTP overhead)
  • -
  • Native integration with Nushell pipelines
  • -
  • Type safety with Nushell’s type system
  • -
-

Developer Experience:

-
    -
  • Pipeline friendly - Use Nushell pipes naturally
  • -
  • Tab completion - All commands and flags
  • -
  • Consistent interface - Follows Nushell conventions
  • -
  • Error handling - Nushell-native error messages
  • -
-
-

Installation

-

Prerequisites

-
    -
  • Nushell 0.107.1+
  • -
  • Rust toolchain (for building from source)
  • -
  • Access to provisioning platform services
  • -
-

Build from Source

-
cd /Users/Akasha/project-provisioning/provisioning/core/plugins/nushell-plugins
-
-# Build all plugins
-cargo build --release -p nu_plugin_auth
-cargo build --release -p nu_plugin_kms
-cargo build --release -p nu_plugin_orchestrator
-
-# Or build individually
-cargo build --release -p nu_plugin_auth
-cargo build --release -p nu_plugin_kms
-cargo build --release -p nu_plugin_orchestrator
-
-

Register with Nushell

-
# Register all plugins
-plugin add target/release/nu_plugin_auth
-plugin add target/release/nu_plugin_kms
-plugin add target/release/nu_plugin_orchestrator
-
-# Verify registration
-plugin list | where name =~ "provisioning"
-
-

Verify Installation

-
# Test auth commands
-auth --help
-
-# Test KMS commands
-kms --help
-
-# Test orchestrator commands
-orch --help
-
-
-

Plugin: nu_plugin_auth

-

Authentication plugin for JWT login, MFA enrollment, and session management.

-

Commands

-

auth login <username> [password]

-

Login to provisioning platform and store JWT tokens securely.

-

Arguments:

-
    -
  • username (required): Username for authentication
  • -
  • password (optional): Password (prompts interactively if not provided)
  • -
-

Flags:

-
    -
  • --url <url>: Control center URL (default: http://localhost:9080)
  • -
  • --password <password>: Password (alternative to positional argument)
  • -
-

Examples:

-
# Interactive password prompt (recommended)
-auth login admin
-
-# Password in command (not recommended for production)
-auth login admin mypassword
-
-# Custom URL
-auth login admin --url http://control-center:9080
-
-# Pipeline usage
-"admin" | auth login
-
-

Token Storage: -Tokens are stored securely in OS-native keyring:

-
    -
  • macOS: Keychain Access
  • -
  • Linux: Secret Service (gnome-keyring, kwallet)
  • -
  • Windows: Credential Manager
  • -
-

Success Output:

-
✓ Login successful
-User: admin
-Role: Admin
-Expires: 2025-10-09T14:30:00Z
-
-
-

auth logout

-

Logout from current session and remove stored tokens.

-

Examples:

-
# Simple logout
-auth logout
-
-# Pipeline usage (conditional logout)
-if (auth verify | get active) { auth logout }
-
-

Success Output:

-
✓ Logged out successfully
-
-
-

auth verify

-

Verify current session and check token validity.

-

Examples:

-
# Check session status
-auth verify
-
-# Pipeline usage
-auth verify | if $in.active { echo "Session valid" } else { echo "Session expired" }
-
-

Success Output:

-
{
-  "active": true,
-  "user": "admin",
-  "role": "Admin",
-  "expires_at": "2025-10-09T14:30:00Z",
-  "mfa_verified": true
-}
-
-
-

auth sessions

-

List all active sessions for current user.

-

Examples:

-
# List sessions
-auth sessions
-
-# Filter by date
-auth sessions | where created_at > (date now | date to-timezone UTC | into string)
-
-

Output Format:

-
[
-  {
-    "session_id": "sess_abc123",
-    "created_at": "2025-10-09T12:00:00Z",
-    "expires_at": "2025-10-09T14:30:00Z",
-    "ip_address": "192.168.1.100",
-    "user_agent": "nushell/0.107.1"
-  }
-]
-
-
-

auth mfa enroll <type>

-

Enroll in MFA (TOTP or WebAuthn).

-

Arguments:

-
    -
  • type (required): MFA type (totp or webauthn)
  • -
-

Examples:

-
# Enroll TOTP (Google Authenticator, Authy)
-auth mfa enroll totp
-
-# Enroll WebAuthn (YubiKey, Touch ID, Windows Hello)
-auth mfa enroll webauthn
-
-

TOTP Enrollment Output:

-
✓ TOTP enrollment initiated
-
-Scan this QR code with your authenticator app:
-
-  ████ ▄▄▄▄▄ █▀█ █▄▀▀▀▄ ▄▄▄▄▄ ████
-  ████ █   █ █▀▀▀█▄ ▀▀█ █   █ ████
-  ████ █▄▄▄█ █ █▀▄ ▀▄▄█ █▄▄▄█ ████
-  ...
-
-Or enter manually:
-Secret: JBSWY3DPEHPK3PXP
-URL: otpauth://totp/Provisioning:admin?secret=JBSWY3DPEHPK3PXP&issuer=Provisioning
-
-Backup codes (save securely):
-1. ABCD-EFGH-IJKL
-2. MNOP-QRST-UVWX
-...
-
-
-

auth mfa verify --code <code>

-

Verify MFA code (TOTP or backup code).

-

Flags:

-
    -
  • --code <code> (required): 6-digit TOTP code or backup code
  • -
-

Examples:

-
# Verify TOTP code
-auth mfa verify --code 123456
-
-# Verify backup code
-auth mfa verify --code ABCD-EFGH-IJKL
-
-

Success Output:

-
✓ MFA verification successful
-
-
-

Environment Variables

-
- - -
VariableDescriptionDefault
USERDefault usernameCurrent OS user
CONTROL_CENTER_URLControl center URLhttp://localhost:9080
-
-
-

Error Handling

-

Common Errors:

-
# "No active session"
-Error: No active session found
-→ Run: auth login <username>
-
-# "Invalid credentials"
-Error: Authentication failed: Invalid username or password
-→ Check username and password
-
-# "Token expired"
-Error: Token has expired
-→ Run: auth login <username>
-
-# "MFA required"
-Error: MFA verification required
-→ Run: auth mfa verify --code <code>
-
-# "Keyring error" (macOS)
-Error: Failed to access keyring
-→ Check Keychain Access permissions
-
-# "Keyring error" (Linux)
-Error: Failed to access keyring
-→ Install gnome-keyring or kwallet
-
-
-

Plugin: nu_plugin_kms

-

Key Management Service plugin supporting multiple backends.

-

Supported Backends

-
- - - - - -
BackendDescriptionUse Case
rustyvaultRustyVault Transit engineProduction KMS
ageAge encryption (local)Development/testing
cosmianCosmian KMS (HTTP)Cloud KMS
awsAWS KMSAWS environments
vaultHashiCorp VaultEnterprise KMS
-
-

Commands

-

kms encrypt <data> [--backend <backend>]

-

Encrypt data using KMS.

-

Arguments:

-
    -
  • data (required): Data to encrypt (string or binary)
  • -
-

Flags:

-
    -
  • --backend <backend>: KMS backend (rustyvault, age, cosmian, aws, vault)
  • -
  • --key <key>: Key ID or recipient (backend-specific)
  • -
  • --context <context>: Additional authenticated data (AAD)
  • -
-

Examples:

-
# Auto-detect backend from environment
-kms encrypt "secret data"
-
-# RustyVault
-kms encrypt "data" --backend rustyvault --key provisioning-main
-
-# Age (local encryption)
-kms encrypt "data" --backend age --key age1xxxxxxxxx
-
-# AWS KMS
-kms encrypt "data" --backend aws --key alias/provisioning
-
-# With context (AAD)
-kms encrypt "data" --backend rustyvault --key provisioning-main --context "user=admin"
-
-

Output Format:

-
vault:v1:abc123def456...
-
-
-

kms decrypt <encrypted> [--backend <backend>]

-

Decrypt KMS-encrypted data.

-

Arguments:

-
    -
  • encrypted (required): Encrypted data (base64 or KMS format)
  • -
-

Flags:

-
    -
  • --backend <backend>: KMS backend (auto-detected if not specified)
  • -
  • --context <context>: Additional authenticated data (AAD, must match encryption)
  • -
-

Examples:

-
# Auto-detect backend
-kms decrypt "vault:v1:abc123def456..."
-
-# RustyVault explicit
-kms decrypt "vault:v1:abc123..." --backend rustyvault
-
-# Age
-kms decrypt "-----BEGIN AGE ENCRYPTED FILE-----..." --backend age
-
-# With context
-kms decrypt "vault:v1:abc123..." --backend rustyvault --context "user=admin"
-
-

Output:

-
secret data
-
-
-

kms generate-key [--spec <spec>]

-

Generate data encryption key (DEK) using KMS.

-

Flags:

-
    -
  • --spec <spec>: Key specification (AES128 or AES256, default: AES256)
  • -
  • --backend <backend>: KMS backend
  • -
-

Examples:

-
# Generate AES-256 key
-kms generate-key
-
-# Generate AES-128 key
-kms generate-key --spec AES128
-
-# Specific backend
-kms generate-key --backend rustyvault
-
-

Output Format:

-
{
-  "plaintext": "base64-encoded-key",
-  "ciphertext": "vault:v1:encrypted-key",
-  "spec": "AES256"
-}
-
-
-

kms status

-

Show KMS backend status and configuration.

-

Examples:

-
# Show status
-kms status
-
-# Filter to specific backend
-kms status | where backend == "rustyvault"
-
-

Output Format:

-
{
-  "backend": "rustyvault",
-  "status": "healthy",
-  "url": "http://localhost:8200",
-  "mount_point": "transit",
-  "version": "0.1.0"
-}
-
-
-

Environment Variables

-

RustyVault Backend:

-
export RUSTYVAULT_ADDR="http://localhost:8200"
-export RUSTYVAULT_TOKEN="your-token-here"
-export RUSTYVAULT_MOUNT="transit"
-
-

Age Backend:

-
export AGE_RECIPIENT="age1xxxxxxxxx"
-export AGE_IDENTITY="/path/to/key.txt"
-
-

HTTP Backend (Cosmian):

-
export KMS_HTTP_URL="http://localhost:9998"
-export KMS_HTTP_BACKEND="cosmian"
-
-

AWS KMS:

-
export AWS_REGION="us-east-1"
-export AWS_ACCESS_KEY_ID="..."
-export AWS_SECRET_ACCESS_KEY="..."
-
-
-

Performance Comparison

-
- - - - - -
OperationHTTP APIPluginImprovement
Encrypt (RustyVault)~50 ms~5 ms10x faster
Decrypt (RustyVault)~50 ms~5 ms10x faster
Encrypt (Age)~30 ms~3 ms10x faster
Decrypt (Age)~30 ms~3 ms10x faster
Generate Key~60 ms~8 ms7.5x faster
-
-
-

Plugin: nu_plugin_orchestrator

-

Orchestrator operations plugin for status, validation, and task management.

-

Commands

-

orch status [--data-dir <dir>]

-

Get orchestrator status from local files (no HTTP).

-

Flags:

-
    -
  • --data-dir <dir>: Data directory (default: provisioning/platform/orchestrator/data)
  • -
-

Examples:

-
# Default data dir
-orch status
-
-# Custom dir
-orch status --data-dir ./custom/data
-
-# Pipeline usage
-orch status | if $in.active_tasks > 0 { echo "Tasks running" }
-
-

Output Format:

-
{
-  "active_tasks": 5,
-  "completed_tasks": 120,
-  "failed_tasks": 2,
-  "pending_tasks": 3,
-  "uptime": "2d 4h 15m",
-  "health": "healthy"
-}
-
-
-

orch validate <workflow.ncl> [--strict]

-

Validate workflow Nickel file.

-

Arguments:

-
    -
  • workflow.ncl (required): Path to Nickel workflow file
  • -
-

Flags:

-
    -
  • --strict: Enable strict validation (all checks, warnings as errors)
  • -
-

Examples:

-
# Basic validation
-orch validate workflows/deploy.ncl
-
-# Strict mode
-orch validate workflows/deploy.ncl --strict
-
-# Pipeline usage
-ls workflows/*.ncl | each { |file| orch validate $file.name }
-
-

Output Format:

-
{
-  "valid": true,
-  "workflow": {
-    "name": "deploy_k8s_cluster",
-    "version": "1.0.0",
-    "operations": 5
-  },
-  "warnings": [],
-  "errors": []
-}
-
-

Validation Checks:

-
    -
  • KCL syntax errors
  • -
  • Required fields present
  • -
  • Dependency graph valid (no cycles)
  • -
  • Resource limits within bounds
  • -
  • Provider configurations valid
  • -
-
-

orch tasks [--status <status>] [--limit <n>]

-

List orchestrator tasks.

-

Flags:

-
    -
  • --status <status>: Filter by status (pending, running, completed, failed)
  • -
  • --limit <n>: Limit number of results (default: 100)
  • -
  • --data-dir <dir>: Data directory (default from ORCHESTRATOR_DATA_DIR)
  • -
-

Examples:

-
# All tasks
-orch tasks
-
-# Pending tasks only
-orch tasks --status pending
-
-# Running tasks (limit to 10)
-orch tasks --status running --limit 10
-
-# Pipeline usage
-orch tasks --status failed | each { |task| echo $"Failed: ($task.name)" }
-
-

Output Format:

-
[
-  {
-    "task_id": "task_abc123",
-    "name": "deploy_kubernetes",
-    "status": "running",
-    "priority": 5,
-    "created_at": "2025-10-09T12:00:00Z",
-    "updated_at": "2025-10-09T12:05:00Z",
-    "progress": 45
-  }
-]
-
-
-

Environment Variables

-
- -
VariableDescriptionDefault
ORCHESTRATOR_DATA_DIRData directoryprovisioning/platform/orchestrator/data
-
-
-

Performance Comparison

-
- - - -
OperationHTTP APIPluginImprovement
Status~30 ms~3 ms10x faster
Validate~100 ms~10 ms10x faster
Tasks List~50 ms~5 ms10x faster
-
-
-

Pipeline Examples

-

Authentication Flow

-
# Login and verify in one pipeline
-auth login admin
-    | if $in.success { auth verify }
-    | if $in.mfa_required { auth mfa verify --code (input "MFA code: ") }
-
-

KMS Operations

-
# Encrypt multiple secrets
-["secret1", "secret2", "secret3"]
-    | each { |data| kms encrypt $data --backend rustyvault }
-    | save encrypted_secrets.json
-
-# Decrypt and process
-open encrypted_secrets.json
-    | each { |enc| kms decrypt $enc }
-    | each { |plain| echo $"Decrypted: ($plain)" }
-
-

Orchestrator Monitoring

-
# Monitor running tasks
-while true {
-    orch tasks --status running
-        | each { |task| echo $"($task.name): ($task.progress)%" }
-    sleep 5sec
-}
-
-

Combined Workflow

-
# Complete deployment workflow
-auth login admin
-    | auth mfa verify --code (input "MFA: ")
-    | orch validate workflows/deploy.ncl
-    | if $in.valid {
-        orch tasks --status pending
-            | where priority > 5
-            | each { |task| echo $"High priority: ($task.name)" }
-      }
-
-
-

Troubleshooting

-

Auth Plugin

-

“No active session”:

-
auth login <username>
-
-

“Keyring error” (macOS):

-
    -
  • Check Keychain Access permissions
  • -
  • Security & Privacy → Privacy → Full Disk Access → Add Nushell
  • -
-

“Keyring error” (Linux):

-
# Install keyring service
-sudo apt install gnome-keyring  # Ubuntu/Debian
-sudo dnf install gnome-keyring  # Fedora
-
-# Or use KWallet
-sudo apt install kwalletmanager
-
-

“MFA verification failed”:

-
    -
  • Check time synchronization (TOTP requires accurate clocks)
  • -
  • Use backup codes if TOTP not working
  • -
  • Re-enroll MFA if device lost
  • -
-
-

KMS Plugin

-

“RustyVault connection failed”:

-
# Check RustyVault running
-curl http://localhost:8200/v1/sys/health
-
-# Set environment
-export RUSTYVAULT_ADDR="http://localhost:8200"
-export RUSTYVAULT_TOKEN="your-token"
-
-

“Age encryption failed”:

-
# Check Age keys
-ls -la ~/.age/
-
-# Generate new key if needed
-age-keygen -o ~/.age/key.txt
-
-# Set environment
-export AGE_RECIPIENT="age1xxxxxxxxx"
-export AGE_IDENTITY="$HOME/.age/key.txt"
-
-

“AWS KMS access denied”:

-
# Check AWS credentials
-aws sts get-caller-identity
-
-# Check KMS key policy
-aws kms describe-key --key-id alias/provisioning
-
-
-

Orchestrator Plugin

-

“Failed to read status”:

-
# Check data directory exists
-ls provisioning/platform/orchestrator/data/
-
-# Create if missing
-mkdir -p provisioning/platform/orchestrator/data
-
-

“Workflow validation failed”:

-
# Use strict mode for detailed errors
-orch validate workflows/deploy.ncl --strict
-
-

“No tasks found”:

-
# Check orchestrator running
-ps aux | grep orchestrator
-
-# Start orchestrator
-cd provisioning/platform/orchestrator
-./scripts/start-orchestrator.nu --background
-
-
-

Development

-

Building from Source

-
cd provisioning/core/plugins/nushell-plugins
-
-# Clean build
-cargo clean
-
-# Build with debug info
-cargo build -p nu_plugin_auth
-cargo build -p nu_plugin_kms
-cargo build -p nu_plugin_orchestrator
-
-# Run tests
-cargo test -p nu_plugin_auth
-cargo test -p nu_plugin_kms
-cargo test -p nu_plugin_orchestrator
-
-# Run all tests
-cargo test --all
-
-

Adding to CI/CD

-
name: Build Nushell Plugins
-
-on: [push, pull_request]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Install Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-
-      - name: Build Plugins
-        run: |
-          cd provisioning/core/plugins/nushell-plugins
-          cargo build --release --all
-
-      - name: Test Plugins
-        run: |
-          cd provisioning/core/plugins/nushell-plugins
-          cargo test --all
-
-      - name: Upload Artifacts
-        uses: actions/upload-artifact@v3
-        with:
-          name: plugins
-          path: provisioning/core/plugins/nushell-plugins/target/release/nu_plugin_*
-
-
-

Advanced Usage

-

Custom Plugin Configuration

-

Create ~/.config/nushell/plugin_config.nu:

-
# Auth plugin defaults
-$env.CONTROL_CENTER_URL = "https://control-center.example.com"
-
-# KMS plugin defaults
-$env.RUSTYVAULT_ADDR = "https://vault.example.com:8200"
-$env.RUSTYVAULT_MOUNT = "transit"
-
-# Orchestrator plugin defaults
-$env.ORCHESTRATOR_DATA_DIR = "/opt/orchestrator/data"
-
-

Plugin Aliases

-

Add to ~/.config/nushell/config.nu:

-
# Auth shortcuts
-alias login = auth login
-alias logout = auth logout
-
-# KMS shortcuts
-alias encrypt = kms encrypt
-alias decrypt = kms decrypt
-
-# Orchestrator shortcuts
-alias status = orch status
-alias validate = orch validate
-alias tasks = orch tasks
-
-
-

Security Best Practices

-

Authentication

-

DO: Use interactive password prompts -✅ DO: Enable MFA for production environments -✅ DO: Verify session before sensitive operations -❌ DON’T: Pass passwords in command line (visible in history) -❌ DON’T: Store tokens in plain text files

-

KMS Operations

-

DO: Use context (AAD) for encryption when available -✅ DO: Rotate KMS keys regularly -✅ DO: Use hardware-backed keys (WebAuthn, YubiKey) when possible -❌ DON’T: Share Age private keys -❌ DON’T: Log decrypted data

-

Orchestrator

-

DO: Validate workflows in strict mode before production -✅ DO: Monitor task status regularly -✅ DO: Use appropriate data directory permissions (700) -❌ DON’T: Run orchestrator as root -❌ DON’T: Expose data directory over network shares

-
-

FAQ

-

Q: Why use plugins instead of HTTP API? -A: Plugins are 10x faster, have better Nushell integration, and eliminate HTTP overhead.

-

Q: Can I use plugins without orchestrator running? -A: auth and kms work independently. orch requires access to orchestrator data directory.

-

Q: How do I update plugins? -A: Rebuild and re-register: cargo build --release --all && plugin add target/release/nu_plugin_*

-

Q: Are plugins cross-platform? -A: Yes, plugins work on macOS, Linux, and Windows (with appropriate keyring services).

-

Q: Can I use multiple KMS backends simultaneously? -A: Yes, specify --backend flag for each operation.

-

Q: How do I backup MFA enrollment? -A: Save backup codes securely (password manager, encrypted file). QR code can be re-scanned.

-
- -
    -
  • Security System: docs/architecture/adr-009-security-system-complete.md
  • -
  • JWT Auth: docs/architecture/JWT_AUTH_IMPLEMENTATION.md
  • -
  • Config Encryption: docs/user/CONFIG_ENCRYPTION_GUIDE.md
  • -
  • RustyVault Integration: RUSTYVAULT_INTEGRATION_SUMMARY.md
  • -
  • MFA Implementation: docs/architecture/MFA_IMPLEMENTATION_SUMMARY.md
  • -
-
-

Version: 1.0.0 -Last Updated: 2025-10-09 -Maintained By: Platform Team

-

Nushell Plugins Integration (v1.0.0) - See detailed guide for complete reference

-

For complete documentation on Nushell plugins including installation, configuration, and advanced usage, see:

- -

Overview

-

Native Nushell plugins eliminate HTTP overhead and provide direct Rust-to-Nushell integration for critical platform operations.

-

Performance Improvements

-
- - - - -
PluginOperationHTTP LatencyPlugin LatencySpeedup
nu_plugin_kmsEncrypt (RustyVault)~50 ms~5 ms10x
nu_plugin_kmsDecrypt (RustyVault)~50 ms~5 ms10x
nu_plugin_orchestratorStatus query~30 ms~1 ms30x
nu_plugin_authVerify session~50 ms~10 ms5x
-
-

Three Native Plugins

-
    -
  1. -

    Authentication Plugin (nu_plugin_auth)

    -
      -
    • JWT login/logout with password prompts
    • -
    • MFA enrollment (TOTP, WebAuthn)
    • -
    • Session management
    • -
    • OS-native keyring integration
    • -
    -
  2. -
  3. -

    KMS Plugin (nu_plugin_kms)

    -
      -
    • Multiple backend support (RustyVault, Age, Cosmian, AWS KMS, Vault)
    • -
    • 10x faster encryption/decryption
    • -
    • Context-based encryption (AAD support)
    • -
    -
  4. -
  5. -

    Orchestrator Plugin (nu_plugin_orchestrator)

    -
      -
    • Direct file-based operations (no HTTP)
    • -
    • 30-50x faster status queries
    • -
    • KCL workflow validation
    • -
    -
  6. -
-

Quick Commands

-
# Authentication
-auth login admin
-auth verify
-auth mfa enroll totp
-
-# KMS Operations
-kms encrypt "data"
-kms decrypt "vault:v1:abc123..."
-
-# Orchestrator
-orch status
-orch validate workflows/deploy.ncl
-orch tasks --status running
-
-

Installation

-
cd provisioning/core/plugins/nushell-plugins
-cargo build --release --all
-
-# Register with Nushell
-plugin add target/release/nu_plugin_auth
-plugin add target/release/nu_plugin_kms
-plugin add target/release/nu_plugin_orchestrator
-
-

Benefits

-

10x faster KMS operations (5 ms vs 50 ms) -✅ 30-50x faster orchestrator queries (1 ms vs 30-50 ms) -✅ Native Nushell integration with data structures and pipelines -✅ Offline capability (KMS with Age, orchestrator local ops) -✅ OS-native keyring for secure token storage

-

See Plugin Integration Guide for complete information.

-

Provisioning Plugins Usage Guide

-

Overview

-

Three high-performance Nushell plugins have been integrated into the provisioning system to provide 10-50x performance improvements over -HTTP-based operations:

-
    -
  • nu_plugin_auth - JWT authentication with system keyring integration
  • -
  • nu_plugin_kms - Multi-backend KMS encryption
  • -
  • nu_plugin_orchestrator - Local orchestrator operations
  • -
-

Installation

-

Prerequisites

-
    -
  • Nushell 0.107.1 or later
  • -
  • All plugins are pre-compiled in provisioning/core/plugins/nushell-plugins/
  • -
-

Quick Install

-

Run the installation script in a new Nushell session:

-
nu provisioning/core/plugins/install-and-register.nu
-
-

This will:

-
    -
  1. Copy plugins to ~/.local/share/nushell/plugins/
  2. -
  3. Register plugins with Nushell
  4. -
  5. Verify installation
  6. -
-

Manual Installation

-

If the script doesn’t work, run these commands:

-
# Copy plugins
-cp provisioning/core/plugins/nushell-plugins/nu_plugin_auth/target/release/nu_plugin_auth ~/.local/share/nushell/plugins/
-cp provisioning/core/plugins/nushell-plugins/nu_plugin_kms/target/release/nu_plugin_kms ~/.local/share/nushell/plugins/
-cp provisioning/core/plugins/nushell-plugins/nu_plugin_orchestrator/target/release/nu_plugin_orchestrator ~/.local/share/nushell/plugins/
-
-chmod +x ~/.local/share/nushell/plugins/nu_plugin_*
-
-# Register with Nushell (run in a fresh session)
-plugin add ~/.local/share/nushell/plugins/nu_plugin_auth
-plugin add ~/.local/share/nushell/plugins/nu_plugin_kms
-plugin add ~/.local/share/nushell/plugins/nu_plugin_orchestrator
-
-

Usage

-

Authentication Plugin

-

10x faster than HTTP fallback

-

Login

-
provisioning auth login <username> [password]
-
-# Examples
-provisioning auth login admin
-provisioning auth login admin mypassword
-provisioning auth login --url http://localhost:8081 admin
-
-

Verify Token

-
provisioning auth verify [--local]
-
-# Examples
-provisioning auth verify
-provisioning auth verify --local
-
-

Logout

-
provisioning auth logout
-
-# Example
-provisioning auth logout
-
-

List Sessions

-
provisioning auth sessions [--active]
-
-# Examples
-provisioning auth sessions
-provisioning auth sessions --active
-
-

KMS Plugin

-

10x faster than HTTP fallback

-

Supports multiple backends: RustyVault, Age, AWS KMS, HashiCorp Vault, Cosmian

-

Encrypt Data

-
provisioning kms encrypt <data> [--backend <backend>] [--key <key>]
-
-# Examples
-provisioning kms encrypt "secret-data"
-provisioning kms encrypt "secret" --backend age
-provisioning kms encrypt "secret" --backend rustyvault --key my-key
-
-

Decrypt Data

-
provisioning kms decrypt <encrypted_data> [--backend <backend>] [--key <key>]
-
-# Examples
-provisioning kms decrypt $encrypted_data
-provisioning kms decrypt $encrypted --backend age
-
-

KMS Status

-
provisioning kms status
-
-# Output shows current backend and availability
-
-

List Backends

-
provisioning kms list-backends
-
-# Shows all available KMS backends
-
-

Orchestrator Plugin

-

30x faster than HTTP fallback

-

Local file-based orchestration without network overhead.

-

Check Status

-
provisioning orch status [--data-dir <path>]
-
-# Examples
-provisioning orch status
-provisioning orch status --data-dir /custom/data
-
-

List Tasks

-
provisioning orch tasks [--status <status>] [--limit <n>] [--data-dir <path>]
-
-# Examples
-provisioning orch tasks
-provisioning orch tasks --status pending
-provisioning orch tasks --status running --limit 10
-
-

Validate Workflow

-
provisioning orch validate <workflow.ncl> [--strict]
-
-# Examples
-provisioning orch validate workflows/deployment.ncl
-provisioning orch validate workflows/deployment.ncl --strict
-
-

Submit Workflow

-
provisioning orch submit <workflow.ncl> [--priority <0-100>] [--check]
-
-# Examples
-provisioning orch submit workflows/deployment.ncl
-provisioning orch submit workflows/critical.ncl --priority 90
-provisioning orch submit workflows/test.ncl --check
-
-

Monitor Task

-
provisioning orch monitor <task_id> [--once] [--interval <ms>] [--timeout <s>]
-
-# Examples
-provisioning orch monitor task-123
-provisioning orch monitor task-123 --once
-provisioning orch monitor task-456 --interval 5000 --timeout 600
-
-

Plugin Status

-

Check which plugins are installed:

-
provisioning plugin status
-
-# Output:
-# Provisioning Plugins Status
-# ============================
-# [OK]  nu_plugin_auth        - JWT authentication with keyring
-# [OK]  nu_plugin_kms         - Multi-backend encryption
-# [OK]  nu_plugin_orchestrator - Local orchestrator (30x faster)
-#
-# All plugins loaded - using native high-performance mode
-
-

Testing Plugins

-
provisioning plugin test
-
-# Runs quick tests on all installed plugins
-# Output shows which plugins are responding
-
-

List Registered Plugins

-
provisioning plugin list
-
-# Shows all provisioning plugins registered with Nushell
-
-

Performance Comparison

-
- - - - - - -
OperationWith PluginHTTP FallbackSpeedup
Auth verify~10 ms~50 ms5x
Auth login~15 ms~100 ms7x
KMS encrypt~5-8 ms~50 ms10x
KMS decrypt~5-8 ms~50 ms10x
Orch status~1-5 ms~30 ms30x
Orch tasks list~2-10 ms~50 ms25x
-
-

Graceful Fallback

-

If plugins are not installed or fail to load, all commands automatically fall back to HTTP-based operations:

-
# With plugins installed (fast)
-$ provisioning auth verify
-Token is valid
-
-# Without plugins (slower, but functional)
-$ provisioning auth verify
-[HTTP fallback mode]
-Token is valid (slower)
-
-

This ensures the system remains functional even if plugins aren’t available.

-

Troubleshooting

-

Plugins not found after installation

-

Make sure you:

-
    -
  1. Have a fresh Nushell session
  2. -
  3. Ran plugin add for all three plugins
  4. -
  5. The plugin files are executable: chmod +x ~/.local/share/nushell/plugins/nu_plugin_*
  6. -
-

“Command not found” errors

-

If you see “command not found” when running provisioning auth login, the auth plugin is not loaded. Run:

-
plugin list | grep nu_plugin
-
-

If you don’t see the plugins, register them:

-
plugin add ~/.local/share/nushell/plugins/nu_plugin_auth
-plugin add ~/.local/share/nushell/plugins/nu_plugin_kms
-plugin add ~/.local/share/nushell/plugins/nu_plugin_orchestrator
-
-

Plugins crash or are unresponsive

-

Check the plugin logs:

-
provisioning plugin test
-
-

If a plugin fails, the system will automatically fall back to HTTP mode.

-

Integration with Provisioning CLI

-

All plugin commands are integrated into the main provisioning CLI:

-
# Shortcuts available
-provisioning auth login admin        # Full command
-provisioning login admin             # Alias
-
-provisioning kms encrypt secret      # Full command
-provisioning encrypt secret          # Alias
-
-provisioning orch status             # Full command
-provisioning orch-status             # Alias
-
-

Advanced Configuration

-

Custom Data Directory

-

For orchestrator operations, specify custom data directory:

-
provisioning orch status --data-dir /custom/orchestrator/data
-provisioning orch tasks --data-dir /custom/orchestrator/data
-
-

Custom Auth URL

-

For auth operations with custom endpoint:

-
provisioning auth login admin --url http://custom-auth-server:8081
-provisioning auth verify --url http://custom-auth-server:8081
-
-

KMS Backend Selection

-

Specify which KMS backend to use:

-
# Use Age encryption
-provisioning kms encrypt "data" --backend age
-
-# Use RustyVault
-provisioning kms encrypt "data" --backend rustyvault
-
-# Use AWS KMS
-provisioning kms encrypt "data" --backend aws
-
-# Decrypt with same backend
-provisioning kms decrypt $encrypted --backend age
-
-

Building Plugins from Source

-

If you need to rebuild plugins:

-
cd provisioning/core/plugins/nushell-plugins
-
-# Build auth plugin
-cd nu_plugin_auth && cargo build --release && cd ..
-
-# Build KMS plugin
-cd nu_plugin_kms && cargo build --release && cd ..
-
-# Build orchestrator plugin
-cd nu_plugin_orchestrator && cargo build --release && cd ..
-
-# Run install script
-cd ../..
-nu install-and-register.nu
-
-

Architecture

-

The plugins follow Nushell’s plugin protocol:

-
    -
  1. Plugin Binary: Compiled Rust binary in target/release/
  2. -
  3. Registration: Via plugin add command
  4. -
  5. IPC: Communication via Nushell’s JSON protocol
  6. -
  7. Fallback: HTTP API fallback if plugins unavailable
  8. -
-

Security Notes

-
    -
  • Auth tokens are stored in system keyring (Keychain/Credential Manager/Secret Service)
  • -
  • KMS keys are protected by the selected backend’s security
  • -
  • Orchestrator operations are local file-based (no network exposure)
  • -
  • All operations are logged in provisioning audit logs
  • -
-

Support

-

For issues or questions:

-
    -
  1. Check plugin status: provisioning plugin test
  2. -
  3. Review logs: provisioning logs or /var/log/provisioning/
  4. -
  5. Test HTTP fallback by temporarily unregistering plugins
  6. -
  7. Contact the provisioning team with plugin test output
  8. -
-

Secrets Management System - Configuration Guide

-

Status: Production Ready -Date: 2025-11-19 -Version: 1.0.0

-

Overview

-

The provisioning system supports secure SSH key retrieval from multiple secret sources, eliminating hardcoded filesystem dependencies and enabling -enterprise-grade security. SSH keys are retrieved from configured secret sources (SOPS, KMS, RustyVault) with automatic fallback to local-dev mode for -development environments.

-

Secret Sources

-

1. SOPS (Secrets Operations)

-

Age-based encrypted secrets file with YAML structure.

-

Pros:

-
    -
  • ✅ Age encryption (modern, performant)
  • -
  • ✅ Easy to version in Git (encrypted)
  • -
  • ✅ No external services required
  • -
  • ✅ Simple YAML structure
  • -
-

Cons:

-
    -
  • ❌ Requires Age key management
  • -
  • ❌ No key rotation automation
  • -
-

Environment Variables:

-
PROVISIONING_SECRET_SOURCE=sops
-PROVISIONING_SOPS_ENABLED=true
-PROVISIONING_SOPS_SECRETS_FILE=/path/to/secrets.enc.yaml
-PROVISIONING_SOPS_AGE_KEY_FILE=$HOME/.age/provisioning
-
-

Secrets File Structure (provisioning/secrets.enc.yaml):

-
# Encrypted with sops
-ssh:
-  web-01:
-    ubuntu: /path/to/id_rsa
-    root: /path/to/root_id_rsa
-  db-01:
-    postgres: /path/to/postgres_id_rsa
-
-

Setup Instructions:

-
# 1. Install sops and age
-brew install sops age
-
-# 2. Generate Age key (store securely!)
-age-keygen -o $HOME/.age/provisioning
-
-# 3. Create encrypted secrets file
-cat > secrets.yaml << 'EOF'
-ssh:
-  web-01:
-    ubuntu: ~/.ssh/provisioning_web01
-  db-01:
-    postgres: ~/.ssh/provisioning_db01
-EOF
-
-# 4. Encrypt with sops
-sops -e -i secrets.yaml
-
-# 5. Rename to enc version
-mv secrets.yaml provisioning/secrets.enc.yaml
-
-# 6. Configure environment
-export PROVISIONING_SECRET_SOURCE=sops
-export PROVISIONING_SOPS_SECRETS_FILE=$(pwd)/provisioning/secrets.enc.yaml
-export PROVISIONING_SOPS_AGE_KEY_FILE=$HOME/.age/provisioning
-
-

2. KMS (Key Management Service)

-

AWS KMS or compatible key management service.

-

Pros:

-
    -
  • ✅ Cloud-native security
  • -
  • ✅ Automatic key rotation
  • -
  • ✅ Audit logging built-in
  • -
  • ✅ High availability
  • -
-

Cons:

-
    -
  • ❌ Requires AWS account/credentials
  • -
  • ❌ API calls add latency (~50 ms)
  • -
  • ❌ Cost per API call
  • -
-

Environment Variables:

-
PROVISIONING_SECRET_SOURCE=kms
-PROVISIONING_KMS_ENABLED=true
-PROVISIONING_KMS_REGION=us-east-1
-
-

Secret Storage Pattern:

-
provisioning/ssh-keys/{hostname}/{username}
-
-

Setup Instructions:

-
# 1. Create KMS key (one-time)
-aws kms create-key \
-    --description "Provisioning SSH Keys" \
-    --region us-east-1
-
-# 2. Store SSH keys in Secrets Manager
-aws secretsmanager create-secret \
-    --name provisioning/ssh-keys/web-01/ubuntu \
-    --secret-string "$(cat ~/.ssh/provisioning_web01)" \
-    --region us-east-1
-
-# 3. Configure environment
-export PROVISIONING_SECRET_SOURCE=kms
-export PROVISIONING_KMS_REGION=us-east-1
-
-# 4. Ensure AWS credentials available
-export AWS_PROFILE=provisioning
-# or
-export AWS_ACCESS_KEY_ID=...
-export AWS_SECRET_ACCESS_KEY=...
-
-

3. RustyVault (Hashicorp Vault-Compatible)

-

Self-hosted or managed Vault instance for secrets.

-

Pros:

-
    -
  • ✅ Self-hosted option
  • -
  • ✅ Fine-grained access control
  • -
  • ✅ Multiple authentication methods
  • -
  • ✅ Easy key rotation
  • -
-

Cons:

-
    -
  • ❌ Requires Vault instance
  • -
  • ❌ More operational overhead
  • -
  • ❌ Network latency
  • -
-

Environment Variables:

-
PROVISIONING_SECRET_SOURCE=vault
-PROVISIONING_VAULT_ENABLED=true
-PROVISIONING_VAULT_ADDRESS=http://localhost:8200
-PROVISIONING_VAULT_TOKEN=hvs.CAESIAoICQ...
-
-

Secret Storage Pattern:

-
GET /v1/secret/ssh-keys/{hostname}/{username}
-# Returns: {"key_content": "-----BEGIN OPENSSH PRIVATE KEY-----..."}
-
-

Setup Instructions:

-
# 1. Start Vault (if not already running)
-docker run -p 8200:8200 \
-    -e VAULT_DEV_ROOT_TOKEN_ID=provisioning \
-    vault server -dev
-
-# 2. Create KV v2 mount (if not exists)
-vault secrets enable -version=2 -path=secret kv
-
-# 3. Store SSH key
-vault kv put secret/ssh-keys/web-01/ubuntu \
-    key_content=@~/.ssh/provisioning_web01
-
-# 4. Configure environment
-export PROVISIONING_SECRET_SOURCE=vault
-export PROVISIONING_VAULT_ADDRESS=http://localhost:8200
-export PROVISIONING_VAULT_TOKEN=provisioning
-
-# 5. Create AppRole for production
-vault auth enable approle
-vault write auth/approle/role/provisioning \
-    token_ttl=1h \
-    token_max_ttl=4h
-vault read auth/approle/role/provisioning/role-id
-vault write -f auth/approle/role/provisioning/secret-id
-
-

4. Local-Dev (Fallback)

-

Local filesystem SSH keys (development only).

-

Pros:

-
    -
  • ✅ No setup required
  • -
  • ✅ Fast (local filesystem)
  • -
  • ✅ Works offline
  • -
-

Cons:

-
    -
  • ❌ NOT for production
  • -
  • ❌ Hardcoded filesystem dependency
  • -
  • ❌ No key rotation
  • -
-

Environment Variables:

-
PROVISIONING_ENVIRONMENT=local-dev
-
-

Behavior:

-

Standard paths checked (in order):

-
    -
  1. $HOME/.ssh/id_rsa
  2. -
  3. $HOME/.ssh/id_ed25519
  4. -
  5. $HOME/.ssh/provisioning
  6. -
  7. $HOME/.ssh/provisioning_rsa
  8. -
-

Auto-Detection Logic

-

When PROVISIONING_SECRET_SOURCE is not explicitly set, the system auto-detects in this order:

-
1. PROVISIONING_SOPS_ENABLED=true or PROVISIONING_SOPS_SECRETS_FILE set?
-   → Use SOPS
-2. PROVISIONING_KMS_ENABLED=true or PROVISIONING_KMS_REGION set?
-   → Use KMS
-3. PROVISIONING_VAULT_ENABLED=true or both VAULT_ADDRESS and VAULT_TOKEN set?
-   → Use Vault
-4. Otherwise
-   → Use local-dev (with warnings in production environments)
-
-

Configuration Matrix

-
- - - - -
Secret SourceEnv VariablesEnabled in
SOPSPROVISIONING_SOPS_*Development, Staging, Production
KMSPROVISIONING_KMS_*Staging, Production (with AWS)
VaultPROVISIONING_VAULT_*Development, Staging, Production
Local-devPROVISIONING_ENVIRONMENT=local-devDevelopment only
-
- -

Minimal Setup (Single Source)

-
# Using Vault (recommended for self-hosted)
-export PROVISIONING_SECRET_SOURCE=vault
-export PROVISIONING_VAULT_ADDRESS=https://vault.example.com:8200
-export PROVISIONING_VAULT_TOKEN=hvs.CAESIAoICQ...
-export PROVISIONING_ENVIRONMENT=production
-
-

Enhanced Setup (Fallback Chain)

-
# Primary: Vault
-export PROVISIONING_VAULT_ADDRESS=https://vault.primary.com:8200
-export PROVISIONING_VAULT_TOKEN=hvs.CAESIAoICQ...
-
-# Fallback: SOPS
-export PROVISIONING_SOPS_SECRETS_FILE=/etc/provisioning/secrets.enc.yaml
-export PROVISIONING_SOPS_AGE_KEY_FILE=/etc/provisioning/.age/key
+

Environment Variables

+

Common environment variables for overriding configuration:

+
# Provider selection
+export PROVISIONING_PROVIDER=upcloud
+export PROVISIONING_PROVIDER_UPCLOUD_ENDPOINT= [https://api.upcloud.com](https://api.upcloud.com)
+
+# Workspace
+export PROVISIONING_WORKSPACE=my-project
+export PROVISIONING_WORKSPACE_DIRECTORY=~/.provisioning/workspaces/
 
 # Environment
-export PROVISIONING_ENVIRONMENT=production
-export PROVISIONING_SECRET_SOURCE=vault  # Explicit: use Vault first
-
-

High-Availability Setup

-
# Use KMS (managed service)
-export PROVISIONING_SECRET_SOURCE=kms
-export PROVISIONING_KMS_REGION=us-east-1
-export AWS_PROFILE=provisioning-admin
-
-# Or use Vault with HA
-export PROVISIONING_VAULT_ADDRESS=https://vault-ha.example.com:8200
-export PROVISIONING_VAULT_NAMESPACE=provisioning
-export PROVISIONING_ENVIRONMENT=production
-
-

Validation & Testing

-

Check Configuration

-
# Nushell
-provisioning secrets status
-
-# Show secret source and configuration
-provisioning secrets validate
-
-# Detailed diagnostics
-provisioning secrets diagnose
-
-

Test SSH Key Retrieval

-
# Test specific host/user
-provisioning secrets get-key web-01 ubuntu
-
-# Test all configured hosts
-provisioning secrets validate-all
-
-# Dry-run SSH with retrieved key
-provisioning ssh --test-key web-01 ubuntu
-
-

Migration Path

-

From Local-Dev to SOPS

-
# 1. Create SOPS secrets file with existing keys
-cat > secrets.yaml << 'EOF'
-ssh:
-  web-01:
-    ubuntu: ~/.ssh/provisioning_web01
-  db-01:
-    postgres: ~/.ssh/provisioning_db01
-EOF
-
-# 2. Encrypt with Age
-sops -e -i secrets.yaml
-
-# 3. Move to repo
-mv secrets.yaml provisioning/secrets.enc.yaml
-
-# 4. Update environment
-export PROVISIONING_SECRET_SOURCE=sops
-export PROVISIONING_SOPS_SECRETS_FILE=$(pwd)/provisioning/secrets.enc.yaml
-export PROVISIONING_SOPS_AGE_KEY_FILE=$HOME/.age/provisioning
-
-

From SOPS to Vault

-
# 1. Decrypt SOPS file
-sops -d provisioning/secrets.enc.yaml > /tmp/secrets.yaml
-
-# 2. Import to Vault
-vault kv put secret/ssh-keys/web-01/ubuntu key_content=@~/.ssh/provisioning_web01
-
-# 3. Update environment
-export PROVISIONING_SECRET_SOURCE=vault
-export PROVISIONING_VAULT_ADDRESS=http://vault.example.com:8200
-export PROVISIONING_VAULT_TOKEN=hvs.CAESIAoICQ...
-
-# 4. Validate retrieval works
-provisioning secrets validate-all
-
-

Security Best Practices

-

1. Never Commit Secrets

-
# Add to .gitignore
-echo "provisioning/secrets.enc.yaml" >> .gitignore
-echo ".age/provisioning" >> .gitignore
-echo ".vault-token" >> .gitignore
-
-

2. Rotate Keys Regularly

-
# SOPS: Rotate Age key
-age-keygen -o ~/.age/provisioning.new
-# Update all secrets with new key
-
-# KMS: Enable automatic rotation
-aws kms enable-key-rotation --key-id alias/provisioning
-
-# Vault: Set TTL on secrets
-vault write -f secret/metadata/ssh-keys/web-01/ubuntu \
-    delete_version_after=2160h  # 90 days
-
-

3. Restrict Access

-
# SOPS: Protect Age key
-chmod 600 ~/.age/provisioning
-
-# KMS: Restrict IAM permissions
-aws iam put-user-policy --user-name provisioning \
-    --policy-name ProvisioningSecretsAccess \
-    --policy-document file://kms-policy.json
-
-# Vault: Use AppRole for applications
-vault write auth/approle/role/provisioning \
-    token_ttl=1h \
-    secret_id_ttl=30m
-
-

4. Audit Logging

-
# KMS: Enable CloudTrail
-aws cloudtrail put-event-selectors \
-    --trail-name provisioning-trail \
-    --event-selectors ReadWriteType=All
-
-# Vault: Check audit logs
-vault audit list
-
-# SOPS: Version control (encrypted)
-git log -p provisioning/secrets.enc.yaml
-
-

Troubleshooting

-

SOPS Issues

-
# Test Age decryption
-sops -d provisioning/secrets.enc.yaml
-
-# Verify Age key
-age-keygen -l ~/.age/provisioning
-
-# Regenerate if needed
-rm ~/.age/provisioning
-age-keygen -o ~/.age/provisioning
-
-

KMS Issues

-
# Test AWS credentials
-aws sts get-caller-identity
-
-# Check KMS key permissions
-aws kms describe-key --key-id alias/provisioning
-
-# List secrets
-aws secretsmanager list-secrets --filters Name=name,Values=provisioning
-
-

Vault Issues

-
# Check Vault status
-vault status
-
-# Test authentication
-vault token lookup
-
-# List secrets
-vault kv list secret/ssh-keys/
-
-# Check audit logs
-vault audit list
-vault read sys/audit
-
-

FAQ

-

Q: Can I use multiple secret sources simultaneously? -A: Yes, configure multiple sources and set PROVISIONING_SECRET_SOURCE to specify primary. If primary fails, manual fallback to secondary is supported.

-

Q: What happens if secret retrieval fails? -A: System logs the error and fails fast. No automatic fallback to local filesystem (for security).

-

Q: Can I cache SSH keys? -A: Currently not, keys are retrieved fresh for each operation. Use local caching at OS level (ssh-agent) if needed.

-

Q: How do I rotate keys? -A: Update the secret in your configured source (SOPS/KMS/Vault) and retrieve fresh on next operation.

-

Q: Is local-dev mode secure? -A: No - it’s development only. Production requires SOPS/KMS/Vault.

-

Architecture

-
SSH Operation
-    ↓
-SecretsManager (Nushell/Rust)
-    ↓
-[Detect Source]
-    ↓
-┌─────────────────────────────────────┐
-│ SOPS          KMS      Vault   LocalDev
-│ (Encrypted    (AWS KMS (Self-  (Filesystem
-│  Secrets)     Service)  Hosted) Dev Only)
-│
-└─────────────────────────────────────┘
-    ↓
-Return SSH Key Path/Content
-    ↓
-SSH Operation Completes
-
-

Integration with SSH Utilities

-

SSH operations automatically use secrets manager:

-
# Automatic secret retrieval
-ssh-cmd-smart $settings $server false "command" $ip
-# Internally:
-#   1. Determine secret source
-#   2. Retrieve SSH key for server.installer_user@ip
-#   3. Execute SSH with retrieved key
-#   4. Cleanup sensitive data
-
-# Batch operations also integrate
-ssh-batch-execute $servers $settings "command"
-# Per-host: Retrieves key → executes → cleans up
-
-
-

For Support: See docs/user/TROUBLESHOOTING_GUIDE.md -For Integration: See provisioning/core/nulib/lib_provisioning/platform/secrets.nu

-

KMS Service - Key Management Service

-

A unified Key Management Service for the Provisioning platform with support for multiple backends.

-
-

Source: provisioning/platform/kms-service/

-
-

Supported Backends

-
    -
  • Age: Fast, offline encryption (development)
  • -
  • RustyVault: Self-hosted Vault-compatible API
  • -
  • Cosmian KMS: Enterprise-grade with confidential computing
  • -
  • AWS KMS: Cloud-native key management
  • -
  • HashiCorp Vault: Enterprise secrets management
  • -
-

Architecture

-
┌─────────────────────────────────────────────────────────┐
-│                    KMS Service                          │
-├─────────────────────────────────────────────────────────┤
-│  REST API (Axum)                                        │
-│  ├─ /api/v1/kms/encrypt       POST                      │
-│  ├─ /api/v1/kms/decrypt       POST                      │
-│  ├─ /api/v1/kms/generate-key  POST                      │
-│  ├─ /api/v1/kms/status        GET                       │
-│  └─ /api/v1/kms/health        GET                       │
-├─────────────────────────────────────────────────────────┤
-│  Unified KMS Service Interface                          │
-├─────────────────────────────────────────────────────────┤
-│  Backend Implementations                                │
-│  ├─ Age Client (local files)                           │
-│  ├─ RustyVault Client (self-hosted)                    │
-│  └─ Cosmian KMS Client (enterprise)                    │
-└─────────────────────────────────────────────────────────┘
-
-

Quick Start

-

Development Setup (Age)

-
# 1. Generate Age keys
-mkdir -p ~/.config/provisioning/age
-age-keygen -o ~/.config/provisioning/age/private_key.txt
-age-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisioning/age/public_key.txt
-
-# 2. Set environment
-export PROVISIONING_ENV=dev
-
-# 3. Start KMS service
-cd provisioning/platform/kms-service
-cargo run --bin kms-service
-
-

Production Setup (Cosmian)

-
# Set environment variables
-export PROVISIONING_ENV=prod
-export COSMIAN_KMS_URL=https://your-kms.example.com
-export COSMIAN_API_KEY=your-api-key-here
-
-# Start KMS service
-cargo run --bin kms-service
-
-

REST API Examples

-

Encrypt Data

-
curl -X POST http://localhost:8082/api/v1/kms/encrypt \
-  -H "Content-Type: application/json" \
-  -d '{
-    "plaintext": "SGVsbG8sIFdvcmxkIQ==",
-    "context": "env=prod,service=api"
-  }'
-
-

Decrypt Data

-
curl -X POST http://localhost:8082/api/v1/kms/decrypt \
-  -H "Content-Type: application/json" \
-  -d '{
-    "ciphertext": "...",
-    "context": "env=prod,service=api"
-  }'
-
-

Nushell CLI Integration

-
# Encrypt data
-"secret-data" | kms encrypt
-"api-key" | kms encrypt --context "env=prod,service=api"
-
-# Decrypt data
-$ciphertext | kms decrypt
-
-# Generate data key (Cosmian only)
-kms generate-key
-
-# Check service status
-kms status
-kms health
-
-# Encrypt/decrypt files
-kms encrypt-file config.yaml
-kms decrypt-file config.yaml.enc
-
-

Backend Comparison

-
- - - - - - - - - - -
FeatureAgeRustyVaultCosmian KMSAWS KMSVault
SetupSimpleSelf-hostedServer setupAWS accountEnterprise
SpeedVery fastFastFastFastFast
NetworkNoYesYesYesYes
Key RotationManualAutomaticAutomaticAutomaticAutomatic
Data KeysNoYesYesYesYes
Audit LoggingNoYesFullFullFull
ConfidentialNoNoYes (SGX/SEV)NoNo
LicenseMITApache 2.0ProprietaryProprietaryBSL/Enterprise
CostFreeFreePaidPaidPaid
Use CaseDev/TestSelf-hostedPrivacyAWS CloudEnterprise
-
-

Integration Points

-
    -
  1. Config Encryption (SOPS Integration)
  2. -
  3. Dynamic Secrets (Provider API Keys)
  4. -
  5. SSH Key Management
  6. -
  7. Orchestrator (Workflow Data)
  8. -
  9. Control Center (Audit Logs)
  10. -
-

Deployment

-

Docker

-
FROM rust:1.70 as builder
-WORKDIR /app
-COPY . .
-RUN cargo build --release
-
-FROM debian:bookworm-slim
-RUN apt-get update && \
-    apt-get install -y ca-certificates && \
-    rm -rf /var/lib/apt/lists/*
-COPY --from=builder /app/target/release/kms-service /usr/local/bin/
-ENTRYPOINT ["kms-service"]
-
-

Kubernetes

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: kms-service
-spec:
-  replicas: 2
-  template:
-    spec:
-      containers:
-      - name: kms-service
-        image: provisioning/kms-service:latest
-        env:
-        - name: PROVISIONING_ENV
-          value: "prod"
-        - name: COSMIAN_KMS_URL
-          value: "https://kms.example.com"
-        ports:
-        - containerPort: 8082
-
-

Security Best Practices

-
    -
  1. Development: Use Age for dev/test only, never for production secrets
  2. -
  3. Production: Always use Cosmian KMS with TLS verification enabled
  4. -
  5. API Keys: Never hardcode, use environment variables
  6. -
  7. Key Rotation: Enable automatic rotation (90 days recommended)
  8. -
  9. Context Encryption: Always use encryption context (AAD)
  10. -
  11. Network Access: Restrict KMS service access with firewall rules
  12. -
  13. Monitoring: Enable health checks and monitor operation metrics
  14. -
- - -

Gitea Integration Guide

-

Complete guide to using Gitea integration for workspace management, extension distribution, and collaboration.

-

Version: 1.0.0 -Last Updated: 2025-10-06

-
-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Setup
  4. -
  5. Workspace Git Integration
  6. -
  7. Workspace Locking
  8. -
  9. Extension Publishing
  10. -
  11. Service Management
  12. -
  13. API Reference
  14. -
  15. Troubleshooting
  16. -
-
-

Overview

-

The Gitea integration provides:

-
    -
  • Workspace Git Integration: Version control for workspaces
  • -
  • Distributed Locking: Prevent concurrent workspace modifications
  • -
  • Extension Distribution: Publish and download extensions via releases
  • -
  • Collaboration: Share workspaces and extensions across teams
  • -
  • Service Management: Deploy and manage local Gitea instance
  • -
-

Architecture

-
┌─────────────────────────────────────────────────────────┐
-│                 Provisioning System                      │
-├─────────────────────────────────────────────────────────┤
-│                                                          │
-│  ┌────────────┐  ┌──────────────┐  ┌─────────────────┐ │
-│  │ Workspace  │  │   Extension  │  │    Locking      │ │
-│  │   Git      │  │  Publishing  │  │   (Issues)      │ │
-│  └─────┬──────┘  └──────┬───────┘  └────────┬────────┘ │
-│        │                │                     │          │
-│        └────────────────┼─────────────────────┘          │
-│                         │                                │
-│                  ┌──────▼──────┐                         │
-│                  │  Gitea API  │                         │
-│                  │   Client    │                         │
-│                  └──────┬──────┘                         │
-│                         │                                │
-└─────────────────────────┼────────────────────────────────┘
-                          │
-                  ┌───────▼────────┐
-                  │  Gitea Service │
-                  │  (Local/Remote)│
-                  └────────────────┘
-
-
-

Setup

-

Prerequisites

-
    -
  • Nushell 0.107.1+
  • -
  • Git installed and configured
  • -
  • Docker (for local Gitea deployment) or access to remote Gitea instance
  • -
  • SOPS (for encrypted token storage)
  • -
-

Configuration

-

1. Add Gitea Configuration to Nickel

-

Edit your provisioning/schemas/modes.ncl or workspace config:

-
import provisioning.gitea as gitea
-
-# Local Docker deployment
-_gitea_config = gitea.GiteaConfig {
-    mode = "local"
-    local = gitea.LocalGitea {
-        enabled = True
-        deployment = "docker"
-        port = 3000
-        auto_start = True
-        docker = gitea.DockerGitea {
-            image = "gitea/gitea:1.21"
-            container_name = "provisioning-gitea"
-        }
-    }
-    auth = gitea.GiteaAuth {
-        token_path = "~/.provisioning/secrets/gitea-token.enc"
-        username = "provisioning"
-    }
-}
-
-# Or remote Gitea instance
-_gitea_remote = gitea.GiteaConfig {
-    mode = "remote"
-    remote = gitea.RemoteGitea {
-        enabled = True
-        url = "https://gitea.example.com"
-        api_url = "https://gitea.example.com/api/v1"
-    }
-    auth = gitea.GiteaAuth {
-        token_path = "~/.provisioning/secrets/gitea-token.enc"
-        username = "myuser"
-    }
-}
-
-

2. Create Gitea Access Token

-

For local Gitea:

-
    -
  1. Start Gitea: provisioning gitea start
  2. -
  3. Open http://localhost:3000
  4. -
  5. Register admin account
  6. -
  7. Go to Settings → Applications → Generate New Token
  8. -
  9. Save token to encrypted file:
  10. -
-
# Create encrypted token file
-echo "your-gitea-token" | sops --encrypt /dev/stdin > ~/.provisioning/secrets/gitea-token.enc
-
-

For remote Gitea:

-
    -
  1. Login to your Gitea instance
  2. -
  3. Generate personal access token
  4. -
  5. Save encrypted as above
  6. -
-

3. Verify Setup

-
# Check Gitea status
-provisioning gitea status
-
-# Validate token
-provisioning gitea auth validate
-
-# Show current user
-provisioning gitea user
-
-
-

Workspace Git Integration

-

Initialize Workspace with Git

-

When creating a new workspace, enable git integration:

-
# Initialize new workspace with Gitea
-provisioning workspace init my-workspace --git --remote gitea
-
-# Or initialize existing workspace
-cd workspace_my-workspace
-provisioning gitea workspace init . my-workspace --remote gitea
-
-

This will:

-
    -
  1. Initialize git repository in workspace
  2. -
  3. Create repository on Gitea (workspaces/my-workspace)
  4. -
  5. Add remote origin
  6. -
  7. Push initial commit
  8. -
-

Clone Existing Workspace

-
# Clone from Gitea
-provisioning workspace clone workspaces/my-workspace ./workspace_my-workspace
-
-# Or using full identifier
-provisioning workspace clone my-workspace ./workspace_my-workspace
-
-

Push/Pull Changes

-
# Push workspace changes
-cd workspace_my-workspace
-provisioning workspace push --message "Updated infrastructure configs"
-
-# Pull latest changes
-provisioning workspace pull
-
-# Sync (pull + push)
-provisioning workspace sync
-
-

Branch Management

-
# Create branch
-provisioning workspace branch create feature-new-cluster
-
-# Switch branch
-provisioning workspace branch switch feature-new-cluster
-
-# List branches
-provisioning workspace branch list
-
-# Delete branch
-provisioning workspace branch delete feature-new-cluster
-
-

Git Status

-
# Get workspace git status
-provisioning workspace git status
-
-# Show uncommitted changes
-provisioning workspace git diff
-
-# Show staged changes
-provisioning workspace git diff --staged
-
-
-

Workspace Locking

-

Distributed locking prevents concurrent modifications to workspaces using Gitea issues.

-

Lock Types

-
    -
  • read: Multiple readers allowed, blocks writers
  • -
  • write: Exclusive access, blocks all other locks
  • -
  • deploy: Exclusive access for deployments
  • -
-

Acquire Lock

-
# Acquire write lock
-provisioning gitea lock acquire my-workspace write \
-    --operation "Deploying servers" \
-    --expiry "2025-10-06T14:00:00Z"
-
-# Output:
-# ✓ Lock acquired for workspace: my-workspace
-#   Lock ID: 42
-#   Type: write
-#   User: provisioning
-
-

Check Lock Status

-
# List locks for workspace
-provisioning gitea lock list my-workspace
-
-# List all active locks
-provisioning gitea lock list
-
-# Get lock details
-provisioning gitea lock info my-workspace 42
-
-

Release Lock

-
# Release lock
-provisioning gitea lock release my-workspace 42
-
-

Force Release Lock (Admin)

-
# Force release stuck lock
-provisioning gitea lock force-release my-workspace 42 \
-    --reason "Deployment failed, releasing lock"
-
-

Automatic Locking

-

Use with-workspace-lock for automatic lock management:

-
use lib_provisioning/gitea/locking.nu *
-
-with-workspace-lock "my-workspace" "deploy" "Server deployment" {
-    # Your deployment code here
-    # Lock automatically released on completion or error
-}
-
-

Lock Cleanup

-
# Cleanup expired locks
-provisioning gitea lock cleanup
-
-
-

Extension Publishing

-

Publish taskservs, providers, and clusters as versioned releases on Gitea.

-

Publish Extension

-
# Publish taskserv
-provisioning gitea extension publish \
-    ./extensions/taskservs/database/postgres \
-    1.2.0 \
-    --release-notes "Added connection pooling support"
-
-# Publish provider
-provisioning gitea extension publish \
-    ./extensions/providers/aws_prov \
-    2.0.0 \
-    --prerelease
-
-# Publish cluster
-provisioning gitea extension publish \
-    ./extensions/clusters/buildkit \
-    1.0.0
-
-

This will:

-
    -
  1. Validate extension structure
  2. -
  3. Create git tag (if workspace is git repo)
  4. -
  5. Package extension as .tar.gz
  6. -
  7. Create Gitea release
  8. -
  9. Upload package as release asset
  10. -
-

List Published Extensions

-
# List all extensions
-provisioning gitea extension list
-
-# Filter by type
-provisioning gitea extension list --type taskserv
-provisioning gitea extension list --type provider
-provisioning gitea extension list --type cluster
-
-

Download Extension

-
# Download specific version
-provisioning gitea extension download postgres 1.2.0 \
-    --destination ./extensions/taskservs/database
-
-# Extension is downloaded and extracted automatically
-
-

Extension Metadata

-
# Get extension information
-provisioning gitea extension info postgres 1.2.0
-
-

Publishing Workflow

-
# 1. Make changes to extension
-cd extensions/taskservs/database/postgres
-
-# 2. Update version in kcl/kcl.mod
-# 3. Update CHANGELOG.md
-
-# 4. Commit changes
-git add .
-git commit -m "Release v1.2.0"
-
-# 5. Publish to Gitea
-provisioning gitea extension publish . 1.2.0
-
-
-

Service Management

-

Start/Stop Gitea

-
# Start Gitea (local mode)
-provisioning gitea start
-
-# Stop Gitea
-provisioning gitea stop
-
-# Restart Gitea
-provisioning gitea restart
-
-

Check Status

-
# Get service status
-provisioning gitea status
-
-# Output:
-# Gitea Status:
-#   Mode: local
-#   Deployment: docker
-#   Running: true
-#   Port: 3000
-#   URL: http://localhost:3000
-#   Container: provisioning-gitea
-#   Health: ✓ OK
-
-

View Logs

-
# View recent logs
-provisioning gitea logs
-
-# Follow logs
-provisioning gitea logs --follow
-
-# Show specific number of lines
-provisioning gitea logs --lines 200
-
-

Install Gitea Binary

-
# Install latest version
-provisioning gitea install
-
-# Install specific version
-provisioning gitea install 1.21.0
-
-# Custom install directory
-provisioning gitea install --install-dir ~/bin
-
-
-

API Reference

-

Repository Operations

-
use lib_provisioning/gitea/api_client.nu *
-
-# Create repository
-create-repository "my-org" "my-repo" "Description" true
-
-# Get repository
-get-repository "my-org" "my-repo"
-
-# Delete repository
-delete-repository "my-org" "my-repo" --force
-
-# List repositories
-list-repositories "my-org"
-
-

Release Operations

-
# Create release
-create-release "my-org" "my-repo" "v1.0.0" "Release Name" "Notes"
-
-# Upload asset
-upload-release-asset "my-org" "my-repo" 123 "./file.tar.gz"
-
-# Get release
-get-release-by-tag "my-org" "my-repo" "v1.0.0"
-
-# List releases
-list-releases "my-org" "my-repo"
-
-

Workspace Operations

-
use lib_provisioning/gitea/workspace_git.nu *
-
-# Initialize workspace git
-init-workspace-git "./workspace_test" "test" --remote "gitea"
-
-# Clone workspace
-clone-workspace "workspaces/my-workspace" "./workspace_my-workspace"
-
-# Push changes
-push-workspace "./workspace_my-workspace" "Updated configs"
-
-# Pull changes
-pull-workspace "./workspace_my-workspace"
-
-

Locking Operations

-
use lib_provisioning/gitea/locking.nu *
-
-# Acquire lock
-let lock = acquire-workspace-lock "my-workspace" "write" "Deployment"
-
-# Release lock
-release-workspace-lock "my-workspace" $lock.lock_id
-
-# Check if locked
-is-workspace-locked "my-workspace" "write"
-
-# List locks
-list-workspace-locks "my-workspace"
-
-
-

Troubleshooting

-

Gitea Not Starting

-

Problem: provisioning gitea start fails

-

Solutions:

-
# Check Docker status
-docker ps
-
-# Check if port is in use
-lsof -i :3000
-
-# Check Gitea logs
-provisioning gitea logs
-
-# Remove old container
-docker rm -f provisioning-gitea
-provisioning gitea start
-
-

Token Authentication Failed

-

Problem: provisioning gitea auth validate returns false

-

Solutions:

-
# Verify token file exists
-ls ~/.provisioning/secrets/gitea-token.enc
-
-# Test decryption
-sops --decrypt ~/.provisioning/secrets/gitea-token.enc
-
-# Regenerate token in Gitea UI
-# Save new token
-echo "new-token" | sops --encrypt /dev/stdin > ~/.provisioning/secrets/gitea-token.enc
-
-

Cannot Push to Repository

-

Problem: Git push fails with authentication error

-

Solutions:

-
# Check remote URL
-cd workspace_my-workspace
-git remote -v
-
-# Reconfigure remote with token
-git remote set-url origin http://username:token@localhost:3000/org/repo.git
-
-# Or use SSH
-git remote set-url origin git@localhost:workspaces/my-workspace.git
-
-

Lock Already Exists

-

Problem: Cannot acquire lock, workspace already locked

-

Solutions:

-
# Check active locks
-provisioning gitea lock list my-workspace
-
-# Get lock details
-provisioning gitea lock info my-workspace 42
-
-# If lock is stale, force release
-provisioning gitea lock force-release my-workspace 42 --reason "Stale lock"
-
-

Extension Validation Failed

-

Problem: Extension publishing fails validation

-

Solutions:

-
# Check extension structure
-ls -la extensions/taskservs/myservice/
-# Required:
-# - schemas/manifest.toml
-# - schemas/*.ncl (main schema file)
-
-# Verify manifest.toml format
-cat extensions/taskservs/myservice/schemas/manifest.toml
-
-# Should have:
-# [package]
-# name = "myservice"
-# version = "1.0.0"
-
-

Docker Volume Permissions

-

Problem: Gitea Docker container has permission errors

-

Solutions:

-
# Fix data directory permissions
-sudo chown -R 1000:1000 ~/.provisioning/gitea
-
-# Or recreate with correct permissions
-provisioning gitea stop --remove
-rm -rf ~/.provisioning/gitea
-provisioning gitea start
-
-
-

Best Practices

-

Workspace Management

-
    -
  1. Always use locking for concurrent operations
  2. -
  3. Commit frequently with descriptive messages
  4. -
  5. Use branches for experimental changes
  6. -
  7. Sync before operations to get latest changes
  8. -
-

Extension Publishing

-
    -
  1. Follow semantic versioning (MAJOR.MINOR.PATCH)
  2. -
  3. Update CHANGELOG.md for each release
  4. -
  5. Test extensions before publishing
  6. -
  7. Use prerelease flag for beta versions
  8. -
-

Security

-
    -
  1. Encrypt tokens with SOPS
  2. -
  3. Use private repositories for sensitive workspaces
  4. -
  5. Rotate tokens regularly
  6. -
  7. Audit lock history via Gitea issues
  8. -
-

Performance

-
    -
  1. Cleanup expired locks periodically
  2. -
  3. Use shallow clones for large workspaces
  4. -
  5. Archive old releases to reduce storage
  6. -
  7. Monitor Gitea resources for local deployments
  8. -
-
-

Advanced Usage

-

Custom Gitea Deployment

-

Edit docker-compose.yml:

-
services:
-  gitea:
-    image: gitea/gitea:1.21
-    environment:
-      - GITEA__server__DOMAIN=gitea.example.com
-      - GITEA__server__ROOT_URL=https://gitea.example.com
-      # Add custom settings
-    volumes:
-      - /custom/path/gitea:/data
-
-

Webhooks Integration

-

Configure webhooks for automated workflows:

-
import provisioning.gitea as gitea
-
-_webhook = gitea.GiteaWebhook {
-    url = "https://provisioning.example.com/api/webhooks/gitea"
-    events = ["push", "pull_request", "release"]
-    secret = "webhook-secret"
-}
-
-

Batch Extension Publishing

-
# Publish all taskservs with same version
-provisioning gitea extension publish-batch \
-    ./extensions/taskservs \
-    1.0.0 \
-    --extension-type taskserv
-
-
-

References

-
    -
  • Gitea API Documentation: https://docs.gitea.com/api/
  • -
  • Nickel Schema: /Users/Akasha/project-provisioning/provisioning/schemas/gitea.ncl
  • -
  • API Client: /Users/Akasha/project-provisioning/provisioning/core/nulib/lib_provisioning/gitea/api_client.nu
  • -
  • Workspace Git: /Users/Akasha/project-provisioning/provisioning/core/nulib/lib_provisioning/gitea/workspace_git.nu
  • -
  • Locking: /Users/Akasha/project-provisioning/provisioning/core/nulib/lib_provisioning/gitea/locking.nu
  • -
-
-

Version: 1.0.0 -Maintained By: Provisioning Team -Last Updated: 2025-10-06

-

Service Mesh & Ingress Guide

-

Comparison

-

This guide helps you choose between different service mesh and ingress controller options for your Kubernetes deployments.

-

Understanding the Difference

-

Service Mesh

-

Handles East-West traffic (service-to-service communication):

-
    -
  • Automatic mTLS encryption between services
  • -
  • Traffic management and routing
  • -
  • Observability and monitoring
  • -
  • Service discovery
  • -
  • Fault tolerance and resilience
  • -
-

Ingress Controller

-

Handles North-South traffic (external to internal):

-
    -
  • Route external traffic into the cluster
  • -
  • TLS/HTTPS termination
  • -
  • Virtual hosts and path routing
  • -
  • Load balancing
  • -
  • Can work with or without a service mesh
  • -
-

Service Mesh Options

-

Istio

-

Version: 1.24.0

-

Best for: Full-featured service mesh deployments with comprehensive observability

-

Key Features:

-
    -
  • ✅ Comprehensive feature set
  • -
  • ✅ Built-in Istio Gateway ingress controller
  • -
  • ✅ Advanced traffic management
  • -
  • ✅ Strong observability (Kiali, Grafana, Jaeger)
  • -
  • ✅ Virtual services, destination rules, traffic policies
  • -
  • ✅ Mutual TLS (mTLS) with automatic certificate rotation
  • -
  • ✅ Canary deployments and traffic mirroring
  • -
-

Resource Requirements:

-
    -
  • CPU: 500m (Pilot) + 100m per gateway
  • -
  • Memory: 2048Mi (Pilot) + 128Mi per gateway
  • -
  • High overhead
  • -
-

Pros:

-
    -
  • Industry-standard solution with large community
  • -
  • Rich feature set for complex requirements
  • -
  • Built-in ingress gateway (don’t need external ingress)
  • -
  • Strong observability capabilities
  • -
  • Enterprise support available
  • -
-

Cons:

-
    -
  • Significant resource overhead
  • -
  • Complex configuration learning curve
  • -
  • Can be overkill for simple applications
  • -
  • Sidecar injection required for all services
  • -
-

Use when:

-
    -
  • You need comprehensive traffic management
  • -
  • Complex microservice patterns (canary deployments, traffic mirroring)
  • -
  • Enterprise requirements
  • -
  • You already understand service meshes
  • -
  • Your team has Istio expertise
  • -
-

Installation:

-
provisioning taskserv create istio
-
-
-

Linkerd

-

Version: 2.16.0

-

Best for: Lightweight, high-performance service mesh with minimal complexity

-

Key Features:

-
    -
  • ✅ Ultra-lightweight (minimal resource footprint)
  • -
  • ✅ Simple configuration
  • -
  • ✅ Automatic mTLS with certificate rotation
  • -
  • ✅ Fast sidecar startup (built in Rust)
  • -
  • ✅ Live traffic visualization
  • -
  • ✅ Service topology and dependency discovery
  • -
  • ✅ Golden metrics out of the box (latency, success rate, throughput)
  • -
-

Resource Requirements:

-
    -
  • CPU proxy: 100m request, 1000m limit
  • -
  • Memory proxy: 20Mi request, 250Mi limit
  • -
  • Very lightweight compared to Istio
  • -
-

Pros:

-
    -
  • Minimal resource overhead
  • -
  • Simple, intuitive configuration
  • -
  • Fast startup and deployment
  • -
  • Built in Rust for performance
  • -
  • Excellent golden metrics
  • -
  • Good for resource-constrained environments
  • -
  • Can run alongside Istio
  • -
-

Cons:

-
    -
  • Fewer advanced features than Istio
  • -
  • Requires external ingress controller
  • -
  • Smaller ecosystem and fewer integrations
  • -
  • Less feature-rich traffic management
  • -
  • Requires cert-manager for mTLS
  • -
-

Use when:

-
    -
  • You want simplicity and minimal overhead
  • -
  • Running on resource-constrained clusters
  • -
  • You prefer straightforward configuration
  • -
  • You don’t need advanced traffic management
  • -
  • You’re using Kubernetes 1.21+
  • -
-

Installation:

-
# Linkerd requires cert-manager
-provisioning taskserv create cert-manager
-provisioning taskserv create linkerd
-provisioning taskserv create nginx-ingress  # Or traefik/contour
-
-
-

Cilium

-

Version: See existing Cilium taskserv

-

Best for: CNI-based networking with integrated service mesh

-

Key Features:

-
    -
  • ✅ CNI and service mesh in one solution
  • -
  • ✅ eBPF-based for high performance
  • -
  • ✅ Network policy enforcement
  • -
  • ✅ Service mesh mode (optional)
  • -
  • ✅ Hubble for observability
  • -
  • ✅ Cluster mesh for multi-cluster
  • -
-

Pros:

-
    -
  • Replaces CNI plugin entirely
  • -
  • High-performance eBPF kernel networking
  • -
  • Can serve as both CNI and service mesh
  • -
  • No sidecar needed (uses eBPF)
  • -
  • Network policy support
  • -
-

Cons:

-
    -
  • Requires Linux kernel with eBPF support
  • -
  • Service mesh mode is secondary feature
  • -
  • More complex than Linkerd
  • -
  • Not as mature in service mesh role
  • -
-

Use when:

-
    -
  • You need both CNI and service mesh
  • -
  • You’re on modern Linux kernels with eBPF
  • -
  • You want kernel-level networking
  • -
-
-

Ingress Controller Options

-

Nginx Ingress

-

Version: 1.12.0

-

Best for: Most Kubernetes deployments - proven, reliable, widely supported

-

Key Features:

-
    -
  • ✅ Battle-tested and production-proven
  • -
  • ✅ Most popular ingress controller
  • -
  • ✅ Extensive documentation and community
  • -
  • ✅ Rich configuration options
  • -
  • ✅ SSL/TLS termination
  • -
  • ✅ URL rewriting and routing
  • -
  • ✅ Rate limiting and DDoS protection
  • -
-

Pros:

-
    -
  • Proven stability in production
  • -
  • Widest community and ecosystem
  • -
  • Extensive documentation
  • -
  • Multiple commercial support options
  • -
  • Works with any service mesh
  • -
  • Moderate resource footprint
  • -
-

Cons:

-
    -
  • Configuration can be verbose
  • -
  • Limited middleware ecosystem (compared to Traefik)
  • -
  • No automatic TLS with Let’s Encrypt
  • -
  • Configuration via annotations
  • -
-

Use when:

-
    -
  • You want proven stability
  • -
  • Wide community support is important
  • -
  • You need traditional ingress controller
  • -
  • You’re building production systems
  • -
  • You want abundant documentation
  • -
-

Installation:

-
provisioning taskserv create nginx-ingress
-
-

With Linkerd:

-
provisioning taskserv create linkerd
-provisioning taskserv create nginx-ingress
-
-
-

Traefik

-

Version: 3.3.0

-

Best for: Modern cloud-native applications with dynamic service discovery

-

Key Features:

-
    -
  • ✅ Automatic service discovery
  • -
  • ✅ Native Let’s Encrypt support
  • -
  • ✅ Middleware system for advanced routing
  • -
  • ✅ Built-in dashboard and metrics
  • -
  • ✅ API-driven configuration
  • -
  • ✅ Dynamic configuration updates
  • -
  • ✅ Support for multiple protocols (HTTP, TCP, gRPC)
  • -
-

Pros:

-
    -
  • Modern, cloud-native design
  • -
  • Automatic TLS with Let’s Encrypt
  • -
  • Middleware ecosystem for extensibility
  • -
  • Built-in dashboard for monitoring
  • -
  • Dynamic configuration without restart
  • -
  • API-driven approach
  • -
  • Growing community
  • -
-

Cons:

-
    -
  • Different configuration paradigm (IngressRoute CRD)
  • -
  • Smaller community than Nginx
  • -
  • Learning curve for traditional ops
  • -
  • Less mature than Nginx
  • -
-

Use when:

-
    -
  • You want modern cloud-native features
  • -
  • Automatic TLS is important
  • -
  • You like middleware-based routing
  • -
  • You want dynamic configuration
  • -
  • You’re building microservices platforms
  • -
-

Installation:

-
provisioning taskserv create traefik
-
-

With Linkerd:

-
provisioning taskserv create linkerd
-provisioning taskserv create traefik
-
-
-

Contour

-

Version: 1.31.0

-

Best for: Envoy-based ingress with simple CRD configuration

-

Key Features:

-
    -
  • ✅ Envoy proxy backend (same as Istio)
  • -
  • ✅ Simple CRD-based configuration
  • -
  • ✅ HTTPProxy CRD for advanced routing
  • -
  • ✅ Service delegation and composition
  • -
  • ✅ External authorization
  • -
  • ✅ Rate limiting support
  • -
-

Pros:

-
    -
  • Uses same Envoy proxy as Istio
  • -
  • Simple but powerful configuration
  • -
  • Good for multi-tenant clusters
  • -
  • CRD-based (declarative)
  • -
  • Good documentation
  • -
-

Cons:

-
    -
  • Smaller community than Nginx/Traefik
  • -
  • Fewer integrations and plugins
  • -
  • Less feature-rich than Traefik
  • -
  • Fewer real-world examples
  • -
-

Use when:

-
    -
  • You want Envoy proxy for consistency with Istio
  • -
  • You prefer simple configuration
  • -
  • You like CRD-based approach
  • -
  • You need multi-tenant support
  • -
-

Installation:

-
provisioning taskserv create contour
-
-
-

HAProxy Ingress

-

Version: 0.15.0

-

Best for: High-performance environments requiring advanced load balancing

-

Key Features:

-
    -
  • ✅ HAProxy backend for performance
  • -
  • ✅ Advanced load balancing algorithms
  • -
  • ✅ High throughput
  • -
  • ✅ Flexible configuration
  • -
  • ✅ Proven performance
  • -
-

Pros:

-
    -
  • Excellent performance
  • -
  • Advanced load balancing options
  • -
  • Battle-tested HAProxy backend
  • -
  • Good for high-traffic scenarios
  • -
-

Cons:

-
    -
  • Less Kubernetes-native than others
  • -
  • Smaller community
  • -
  • Configuration complexity
  • -
  • Fewer modern features
  • -
-

Use when:

-
    -
  • Performance is critical
  • -
  • High traffic is expected
  • -
  • You need advanced load balancing
  • -
-
- - -

Why: Lightweight mesh + proven ingress = great balance

-
provisioning taskserv create cert-manager
-provisioning taskserv create linkerd
-provisioning taskserv create nginx-ingress
-
-

Pros:

-
    -
  • Minimal overhead
  • -
  • Simple to manage
  • -
  • Proven stability
  • -
  • Good observability
  • -
-

Cons:

-
    -
  • Less advanced features than Istio
  • -
-
-

2. Istio (Standalone)

-

Why: All-in-one service mesh with built-in gateway

-
provisioning taskserv create istio
-
-

Pros:

-
    -
  • Unified traffic management
  • -
  • Powerful observability
  • -
  • No external ingress needed
  • -
  • Rich features
  • -
-

Cons:

-
    -
  • Higher resource usage
  • -
  • More complex
  • -
-
-

3. Linkerd + Traefik

-

Why: Lightweight mesh + modern ingress

-
provisioning taskserv create cert-manager
-provisioning taskserv create linkerd
-provisioning taskserv create traefik
-
-

Pros:

-
    -
  • Minimal overhead
  • -
  • Modern features
  • -
  • Automatic TLS
  • -
-
-

4. No Mesh + Nginx Ingress (Simple deployments)

-

Why: Just get traffic in without service mesh

-
provisioning taskserv create nginx-ingress
-
-

Pros:

-
    -
  • Simplest setup
  • -
  • Minimal overhead
  • -
  • Proven stability
  • -
-
-

Decision Matrix

-
- - - - - - - -
RequirementIstioLinkerdCiliumNginxTraefikContourHAProxy
Lightweight
Simple Config⚠️⚠️
Full Features⚠️⚠️⚠️
Auto TLS
Service Mesh
Performance
Community⚠️⚠️
-
-

Migration Paths

-

From Istio to Linkerd

-
    -
  1. Install Linkerd alongside Istio
  2. -
  3. Gradually migrate services (add Linkerd annotations)
  4. -
  5. Verify Linkerd handles traffic correctly
  6. -
  7. Install external ingress controller (Nginx/Traefik)
  8. -
  9. Update Istio Virtual Services to use new ingress
  10. -
  11. Remove Istio once migration complete
  12. -
-

Between Ingress Controllers

-
    -
  1. Install new ingress controller
  2. -
  3. Create duplicate Ingress resources pointing to new controller
  4. -
  5. Test with new ingress (use IngressClassName)
  6. -
  7. Update DNS/load balancer to point to new ingress
  8. -
  9. Drain connections from old ingress
  10. -
  11. Remove old ingress controller
  12. -
-
-

Examples

-

Complete examples of how to configure service meshes and ingress controllers in your workspace.

-

Example 1: Linkerd + Nginx Ingress Deployment

-

This is the recommended configuration for most deployments - lightweight and proven.

-

Step 1: Create Taskserv Configurations

-

File: workspace/infra/my-cluster/taskservs/cert-manager.ncl

-
import provisioning.extensions.taskservs.infrastructure.cert_manager as cm
-
-# Cert-manager is required for Linkerd's mTLS certificates
-_taskserv = cm.CertManager {
-    version = "v1.15.0"
-    namespace = "cert-manager"
-}
-
-

File: workspace/infra/my-cluster/taskservs/linkerd.ncl

-
import provisioning.extensions.taskservs.networking.linkerd as linkerd
-
-# Lightweight service mesh with minimal overhead
-_taskserv = linkerd.Linkerd {
-    version = "2.16.0"
-    namespace = "linkerd"
-
-    # Enable observability
-    ha_mode = False  # Use True for production HA
-    viz_enabled = True
-    prometheus = True
-    grafana = True
-
-    # Use cert-manager for mTLS certificates
-    cert_manager = True
-    trust_domain = "cluster.local"
-
-    # Resource configuration (very lightweight)
-    resources = {
-        proxy_cpu_request = "100m"
-        proxy_cpu_limit = "1000m"
-        proxy_memory_request = "20Mi"
-        proxy_memory_limit = "250Mi"
-    }
-}
-
-

File: workspace/infra/my-cluster/taskservs/nginx-ingress.ncl

-
import provisioning.extensions.taskservs.networking.nginx_ingress as nginx
-
-# Battle-tested ingress controller
-_taskserv = nginx.NginxIngress {
-    version = "1.12.0"
-    namespace = "ingress-nginx"
-
-    # Deployment configuration
-    deployment_type = "Deployment"  # Or "DaemonSet" for node-local ingress
-    replicas = 2
-
-    # Enable metrics for observability
-    prometheus_metrics = True
-
-    # Resource allocation
-    resources = {
-        cpu_request = "100m"
-        cpu_limit = "1000m"
-        memory_request = "90Mi"
-        memory_limit = "500Mi"
-    }
-}
-
-

Step 2: Deploy Service Mesh Components

-
# Install cert-manager (prerequisite for Linkerd)
-provisioning taskserv create cert-manager
-
-# Install Linkerd service mesh
-provisioning taskserv create linkerd
-
-# Install Nginx ingress controller
-provisioning taskserv create nginx-ingress
-
-# Verify installation
-linkerd check
-kubectl get deploy -n ingress-nginx
-
-

Step 3: Configure Application Deployment

-

File: workspace/infra/my-cluster/clusters/web-api.ncl

-
import provisioning.kcl.k8s_deploy as k8s
-import provisioning.extensions.taskservs.networking.nginx_ingress as nginx
-
-# Define the web API service with Linkerd service mesh and Nginx ingress
-service = k8s.K8sDeploy {
-    # Basic information
-    name = "web-api"
-    namespace = "production"
-    create_ns = True
-
-    # Service mesh configuration - use Linkerd
-    service_mesh = "linkerd"
-    service_mesh_ns = "linkerd"
-    service_mesh_config = {
-        mtls_enabled = True
-        tracing_enabled = False
-    }
-
-    # Ingress configuration - use Nginx
-    ingress_controller = "nginx"
-    ingress_ns = "ingress-nginx"
-    ingress_config = {
-        tls_enabled = True
-        default_backend = "web-api:8080"
-    }
-
-    # Deployment spec
-    spec = {
-        replicas = 3
-        containers = [
-            {
-                name = "api"
-                image = "myregistry.azurecr.io/web-api:v1.0.0"
-                imagePull = "Always"
-                ports = [
-                    {
-                        name = "http"
-                        typ = "TCP"
-                        container = 8080
-                    }
-                ]
-            }
-        ]
-    }
-
-    # Kubernetes service
-    service = {
-        name = "web-api"
-        typ = "ClusterIP"
-        ports = [
-            {
-                name = "http"
-                typ = "TCP"
-                target = 8080
-            }
-        ]
-    }
-}
-
-

Step 4: Create Ingress Resource

-

File: workspace/infra/my-cluster/ingress/web-api-ingress.yaml

-
apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: web-api
-  namespace: production
-  annotations:
-    cert-manager.io/cluster-issuer: letsencrypt-prod
-    nginx.ingress.kubernetes.io/rewrite-target: /
-spec:
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - api.example.com
-      secretName: web-api-tls
-  rules:
-    - host: api.example.com
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: web-api
-                port:
-                  number: 8080
-
-
-

Example 2: Istio (Standalone) Deployment

-

Complete service mesh with built-in ingress gateway.

-

Step 1: Install Istio

-

File: workspace/infra/my-cluster/taskservs/istio.ncl

-
import provisioning.extensions.taskservs.networking.istio as istio
-
-# Full-featured service mesh
-_taskserv = istio.Istio {
-    version = "1.24.0"
-    profile = "default"  # Options: default, demo, minimal, remote
-    namespace = "istio-system"
-
-    # Core features
-    mtls_enabled = True
-    mtls_mode = "PERMISSIVE"  # Start with PERMISSIVE, switch to STRICT when ready
-
-    # Traffic management
-    ingress_gateway = True
-    egress_gateway = False
-
-    # Observability
-    tracing = {
-        enabled = True
-        provider = "jaeger"
-        sampling_rate = 0.1  # Sample 10% for production
-    }
-
-    prometheus = True
-    grafana = True
-    kiali = True
-
-    # Resource configuration
-    resources = {
-        pilot_cpu = "500m"
-        pilot_memory = "2048Mi"
-        gateway_cpu = "100m"
-        gateway_memory = "128Mi"
-    }
-}
-
-

Step 2: Deploy Istio

-
# Install Istio
-provisioning taskserv create istio
-
-# Verify installation
-istioctl verify-install
-
-

Step 3: Configure Application with Istio

-

File: workspace/infra/my-cluster/clusters/api-service.ncl

-
import provisioning.kcl.k8s_deploy as k8s
-
-service = k8s.K8sDeploy {
-    name = "api-service"
-    namespace = "production"
-    create_ns = True
-
-    # Use Istio for both service mesh AND ingress
-    service_mesh = "istio"
-    service_mesh_ns = "istio-system"
-    ingress_controller = "istio-gateway"  # Istio's built-in gateway
-
-    spec = {
-        replicas = 3
-        containers = [
-            {
-                name = "api"
-                image = "myregistry.azurecr.io/api:v1.0.0"
-                ports = [
-                    { name = "http", typ = "TCP", container = 8080 }
-                ]
-            }
-        ]
-    }
-
-    service = {
-        name = "api-service"
-        typ = "ClusterIP"
-        ports = [
-            { name = "http", typ = "TCP", target = 8080 }
-        ]
-    }
-
-    # Istio-specific proxy configuration
-    prxyGatewayServers = [
-        {
-            port = { number = 80, protocol = "HTTP", name = "http" }
-            hosts = ["api.example.com"]
-        },
-        {
-            port = { number = 443, protocol = "HTTPS", name = "https" }
-            hosts = ["api.example.com"]
-            tls = {
-                mode = "SIMPLE"
-                credentialName = "api-tls-cert"
-            }
-        }
-    ]
-
-    # Virtual service routing configuration
-    prxyVirtualService = {
-        hosts = ["api.example.com"]
-        gateways = ["api-gateway"]
-        matches = [
-            {
-                typ = "http"
-                location = [
-                    { port = 80 }
-                ]
-                route_destination = [
-                    { port_number = 8080, host = "api-service" }
-                ]
-            }
-        ]
-    }
-}
-
-
-

Example 3: Linkerd + Traefik (Modern Cloud-Native)

-

Lightweight mesh with modern ingress controller and automatic TLS.

-

Step 1: Create Configurations

-

File: workspace/infra/my-cluster/taskservs/linkerd.ncl

-
import provisioning.extensions.taskservs.networking.linkerd as linkerd
-
-_taskserv = linkerd.Linkerd {
-    version = "2.16.0"
-    namespace = "linkerd"
-    viz_enabled = True
-    prometheus = True
-}
-
-

File: workspace/infra/my-cluster/taskservs/traefik.ncl

-
import provisioning.extensions.taskservs.networking.traefik as traefik
-
-# Modern ingress with middleware and auto-TLS
-_taskserv = traefik.Traefik {
-    version = "3.3.0"
-    namespace = "traefik"
-    replicas = 2
-
-    dashboard = True
-    metrics = True
-    access_logs = True
-
-    # Enable Let's Encrypt for automatic TLS
-    lets_encrypt = True
-    lets_encrypt_email = "admin@example.com"
-
-    resources = {
-        cpu_request = "100m"
-        cpu_limit = "1000m"
-        memory_request = "128Mi"
-        memory_limit = "512Mi"
-    }
-}
-
-

Step 2: Deploy

-
provisioning taskserv create cert-manager
-provisioning taskserv create linkerd
-provisioning taskserv create traefik
-
-

Step 3: Create Traefik IngressRoute

-

File: workspace/infra/my-cluster/ingress/api-route.yaml

-
apiVersion: traefik.io/v1alpha1
-kind: IngressRoute
-metadata:
-  name: api
-  namespace: production
-spec:
-  entryPoints:
-    - websecure
-  routes:
-    - match: Host(`api.example.com`)
-      kind: Rule
-      services:
-        - name: api-service
-          port: 8080
-  tls:
-    certResolver: letsencrypt
-    domains:
-      - main: api.example.com
-
-
-

Example 4: Minimal Setup (Just Nginx, No Service Mesh)

-

For simple deployments that don’t need service mesh.

-

Step 1: Install Nginx

-

File: workspace/infra/my-cluster/taskservs/nginx-ingress.ncl

-
import provisioning.extensions.taskservs.networking.nginx_ingress as nginx
-
-_taskserv = nginx.NginxIngress {
-    version = "1.12.0"
-    replicas = 2
-    prometheus_metrics = True
-}
-
-

Step 2: Deploy

-
provisioning taskserv create nginx-ingress
-
-

Step 3: Application Configuration

-

File: workspace/infra/my-cluster/clusters/simple-app.ncl

-
import provisioning.kcl.k8s_deploy as k8s
-
-service = k8s.K8sDeploy {
-    name = "simple-app"
-    namespace = "default"
-
-    # No service mesh - just ingress
-    ingress_controller = "nginx"
-    ingress_ns = "ingress-nginx"
-
-    spec = {
-        replicas = 2
-        containers = [
-            {
-                name = "app"
-                image = "nginx:latest"
-                ports = [{ name = "http", typ = "TCP", container = 80 }]
-            }
-        ]
-    }
-
-    service = {
-        name = "simple-app"
-        typ = "ClusterIP"
-        ports = [{ name = "http", typ = "TCP", target = 80 }]
-    }
-}
-
-

Step 4: Create Ingress

-

File: workspace/infra/my-cluster/ingress/simple-app-ingress.yaml

-
apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: simple-app
-  namespace: default
-spec:
-  ingressClassName: nginx
-  rules:
-    - host: app.example.com
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: simple-app
-                port:
-                  number: 80
-
-
-

Enable Sidecar Injection for Services

-

For Linkerd

-
# Label namespace for automatic sidecar injection
-kubectl annotate namespace production linkerd.io/inject=enabled
-
-# Or add annotation to specific deployment
-kubectl annotate pod my-pod linkerd.io/inject=enabled
-
-

For Istio

-
# Label namespace for automatic sidecar injection
-kubectl label namespace production istio-injection=enabled
-
-# Verify injection
-kubectl describe pod -n production | grep istio-proxy
-
-
-

Monitoring and Observability

-

Linkerd Dashboard

-
# Open Linkerd Viz dashboard
-linkerd viz dashboard
-
-# View service topology
-linkerd viz stat ns
-linkerd viz tap -n production
-
-

Istio Dashboards

-
# Kiali (service mesh visualization)
-kubectl port-forward -n istio-system svc/kiali 20000:20000
-# http://localhost:20000
-
-# Grafana (metrics)
-kubectl port-forward -n istio-system svc/grafana 3000:3000
-# http://localhost:3000 (default: admin/admin)
-
-# Jaeger (distributed tracing)
-kubectl port-forward -n istio-system svc/jaeger-query 16686:16686
-# http://localhost:16686
-
-

Traefik Dashboard

-
# Forward Traefik dashboard
-kubectl port-forward -n traefik svc/traefik 8080:8080
-# http://localhost:8080/dashboard/
-
-
-

Quick Reference

-

Installation Commands

-

Service Mesh - Istio

-
# Install Istio (includes built-in ingress gateway)
-provisioning taskserv create istio
-
-# Verify installation
-istioctl verify-install
-
-# Enable sidecar injection on namespace
-kubectl label namespace default istio-injection=enabled
-
-# View Kiali dashboard
-kubectl port-forward -n istio-system svc/kiali 20000:20000
-# Open: http://localhost:20000
-
-

Service Mesh - Linkerd

-
# Install cert-manager first (Linkerd requirement)
-provisioning taskserv create cert-manager
-
-# Install Linkerd
-provisioning taskserv create linkerd
-
-# Verify installation
-linkerd check
-
-# Enable automatic sidecar injection
-kubectl annotate namespace default linkerd.io/inject=enabled
-
-# View live dashboard
-linkerd viz dashboard
-
-

Ingress Controllers

-
# Install Nginx Ingress (most popular)
-provisioning taskserv create nginx-ingress
-
-# Install Traefik (modern cloud-native)
-provisioning taskserv create traefik
-
-# Install Contour (Envoy-based)
-provisioning taskserv create contour
-
-# Install HAProxy Ingress (high-performance)
-provisioning taskserv create haproxy-ingress
-
-

Common Installation Combinations

- -

Lightweight mesh + proven ingress

-
# Step 1: Install cert-manager
-provisioning taskserv create cert-manager
-
-# Step 2: Install Linkerd
-provisioning taskserv create linkerd
-
-# Step 3: Install Nginx Ingress
-provisioning taskserv create nginx-ingress
-
-# Step 4: Verify installation
-linkerd check
-kubectl get deploy -n ingress-nginx
-
-# Step 5: Create sample application with Linkerd
-kubectl annotate namespace default linkerd.io/inject=enabled
-kubectl apply -f my-app.yaml
-
-

Option 2: Istio (Standalone)

-

Full-featured service mesh with built-in gateway

-
# Install Istio
-provisioning taskserv create istio
-
-# Verify
-istioctl verify-install
-
-# Enable sidecar injection
-kubectl label namespace default istio-injection=enabled
-
-# Deploy applications
-kubectl apply -f my-app.yaml
-
-

Option 3: Linkerd + Traefik

-

Lightweight mesh + modern ingress with auto TLS

-
# Install prerequisites
-provisioning taskserv create cert-manager
-
-# Install service mesh
-provisioning taskserv create linkerd
-
-# Install modern ingress with Let's Encrypt
-provisioning taskserv create traefik
-
-# Enable sidecar injection
-kubectl annotate namespace default linkerd.io/inject=enabled
-
-

Option 4: Just Nginx Ingress (No Mesh)

-

Simple deployments without service mesh

-
# Install ingress controller
-provisioning taskserv create nginx-ingress
-
-# Deploy applications
-kubectl apply -f ingress.yaml
-
-

Verification Commands

-

Check Linkerd

-
# Full system check
-linkerd check
-
-# Specific component checks
-linkerd check --pre              # Pre-install checks
-linkerd check -n linkerd         # Linkerd namespace
-linkerd check -n default         # Custom namespace
-
-# View version
-linkerd version --client
-linkerd version --server
-
-

Check Istio

-
# Full system analysis
-istioctl analyze
-
-# By namespace
-istioctl analyze -n default
-
-# Verify configuration
-istioctl verify-install
-
-# Check version
-istioctl version
-
-

Check Ingress Controllers

-
# List ingress resources
-kubectl get ingress -A
-
-# Get ingress details
-kubectl describe ingress -n default
-
-# Nginx specific
-kubectl get deploy -n ingress-nginx
-kubectl logs -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx
-
-# Traefik specific
-kubectl get deploy -n traefik
-kubectl logs -n traefik deployment/traefik
-
-

Troubleshooting

-

Service Mesh Issues

-
# Linkerd - Check proxy status
-linkerd check -n <namespace>
-
-# Linkerd - View service topology
-linkerd tap -n <namespace> deployment/<name>
-
-# Istio - Check sidecar injection
-kubectl describe pod -n <namespace>  # Look for istio-proxy container
-
-# Istio - View traffic policies
-istioctl analyze
-
-

Ingress Controller Issues

-
# Check ingress controller logs
-kubectl logs -n ingress-nginx deployment/ingress-nginx-controller
-kubectl logs -n traefik deployment/traefik
-
-# Describe ingress resource
-kubectl describe ingress <name> -n <namespace>
-
-# Check ingress controller service
-kubectl get svc -n ingress-nginx
-kubectl get svc -n traefik
-
-

Uninstallation

-

Remove Linkerd

-
# Remove annotations from namespaces
-kubectl annotate namespace <namespace> linkerd.io/inject- --all
-
-# Uninstall Linkerd
-linkerd uninstall | kubectl delete -f -
-
-# Remove Linkerd namespace
-kubectl delete namespace linkerd
-
-

Remove Istio

-
# Remove labels from namespaces
-kubectl label namespace <namespace> istio-injection- --all
-
-# Uninstall Istio
-istioctl uninstall --purge
-
-# Remove Istio namespace
-kubectl delete namespace istio-system
-
-

Remove Ingress Controllers

-
# Nginx
-helm uninstall ingress-nginx -n ingress-nginx
-kubectl delete namespace ingress-nginx
-
-# Traefik
-helm uninstall traefik -n traefik
-kubectl delete namespace traefik
-
-

Performance Tuning

-

Linkerd Resource Limits

-
# Adjust proxy resource limits in linkerd.ncl
-_taskserv = linkerd.Linkerd {
-    resources: {
-        proxy_cpu_limit = "2000m"      # Increase if needed
-        proxy_memory_limit = "512Mi"   # Increase if needed
-    }
-}
-
-

Istio Profile Selection

-
# Different resource profiles available
-profile = "default"   # Full features (default)
-profile = "demo"      # Demo mode (more resources)
-profile = "minimal"   # Minimal (lower resources)
-profile = "remote"    # Control plane only (advanced)
-
-
-

Complete Workspace Directory Structure

-

After implementing these examples, your workspace should look like:

-
workspace/infra/my-cluster/
-├── taskservs/
-│   ├── cert-manager.ncl          # For Linkerd mTLS
-│   ├── linkerd.ncl             # Service mesh option
-│   ├── istio.ncl               # OR Istio option
-│   ├── nginx-ingress.ncl       # Ingress controller
-│   └── traefik.ncl             # Alternative ingress
-├── clusters/
-│   ├── web-api.ncl             # Application with Linkerd + Nginx
-│   ├── api-service.ncl         # Application with Istio
-│   └── simple-app.ncl          # App without service mesh
-├── ingress/
-│   ├── web-api-ingress.yaml    # Nginx Ingress resource
-│   ├── api-route.yaml          # Traefik IngressRoute
-│   └── simple-app-ingress.yaml # Simple Ingress
-└── config.toml                 # Infrastructure-specific config
-
-
-

Next Steps

-
    -
  1. Choose your deployment model (Linkerd+Nginx, Istio, or plain Nginx)
  2. -
  3. Create taskserv KCL files in workspace/infra/<cluster>/taskservs/
  4. -
  5. Install components using provisioning taskserv create
  6. -
  7. Create application deployments with appropriate mesh/ingress configuration
  8. -
  9. Monitor and observe using the appropriate dashboard
  10. -
-
-

Additional Resources

- -

OCI Registry User Guide

-

Version: 1.0.0 -Date: 2025-10-06 -Audience: Users and Developers

-

Table of Contents

-
    -
  1. Overview
  2. -
  3. Quick Start
  4. -
  5. OCI Commands Reference
  6. -
  7. Dependency Management
  8. -
  9. Extension Development
  10. -
  11. Registry Setup
  12. -
  13. Troubleshooting
  14. -
-
-

Overview

-

The OCI registry integration enables distribution and management of provisioning extensions as OCI artifacts. This provides:

-
    -
  • Standard Distribution: Use industry-standard OCI registries
  • -
  • Version Management: Proper semantic versioning for all extensions
  • -
  • Dependency Resolution: Automatic dependency management
  • -
  • Caching: Efficient caching to reduce downloads
  • -
  • Security: TLS, authentication, and vulnerability scanning support
  • -
-

What are OCI Artifacts

-

OCI (Open Container Initiative) artifacts are packaged files distributed through container registries. Unlike Docker images which contain -applications, OCI artifacts can contain any type of content - in our case, provisioning extensions (KCL schemas, Nushell scripts, templates, etc.).

-
-

Quick Start

-

Prerequisites

-

Install one of the following OCI tools:

-
# ORAS (recommended)
-brew install oras
-
-# Crane (Google's tool)
-go install github.com/google/go-containerregistry/cmd/crane@latest
-
-# Skopeo (RedHat's tool)
-brew install skopeo
-
-

1. Start Local OCI Registry (Development)

-
# Start lightweight OCI registry (Zot)
-provisioning oci-registry start
-
-# Verify registry is running
-curl http://localhost:5000/v2/_catalog
-
-

2. Pull an Extension

-
# Pull Kubernetes extension from registry
-provisioning oci pull kubernetes:1.28.0
-
-# Pull with specific registry
-provisioning oci pull kubernetes:1.28.0 \
-  --registry harbor.company.com \
-  --namespace provisioning-extensions
-
-

3. List Available Extensions

-
# List all extensions
-provisioning oci list
-
-# Search for specific extension
-provisioning oci search kubernetes
-
-# Show available versions
-provisioning oci tags kubernetes
-
-

4. Configure Workspace to Use OCI

-

Edit workspace/config/provisioning.yaml:

-
dependencies:
-  extensions:
-    source_type: "oci"
-
-    oci:
-      registry: "localhost:5000"
-      namespace: "provisioning-extensions"
-      tls_enabled: false
-
-    modules:
-      taskservs:
-        - "oci://localhost:5000/provisioning-extensions/kubernetes:1.28.0"
-        - "oci://localhost:5000/provisioning-extensions/containerd:1.7.0"
-
-

5. Resolve Dependencies

-
# Resolve and install all dependencies
-provisioning dep resolve
-
-# Check what will be installed
-provisioning dep resolve --dry-run
-
-# Show dependency tree
-provisioning dep tree kubernetes
-
-
-

OCI Commands Reference

-

Pull Extension

-

Download extension from OCI registry

-
provisioning oci pull <artifact>:<version> [OPTIONS]
-
-# Examples:
-provisioning oci pull kubernetes:1.28.0
-provisioning oci pull redis:7.0.0 --registry harbor.company.com
-provisioning oci pull postgres:15.0 --insecure  # Skip TLS verification
-
-

Options:

-
    -
  • --registry <endpoint>: Override registry (default: from config)
  • -
  • --namespace <name>: Override namespace (default: provisioning-extensions)
  • -
  • --destination <path>: Local installation path
  • -
  • --insecure: Skip TLS certificate verification
  • -
-
-

Push Extension

-

Publish extension to OCI registry

-
provisioning oci push <source-path> <name> <version> [OPTIONS]
-
-# Examples:
-provisioning oci push ./extensions/taskservs/redis redis 1.0.0
-provisioning oci push ./my-provider aws 2.1.0 --registry localhost:5000
-
-

Options:

-
    -
  • --registry <endpoint>: Target registry
  • -
  • --namespace <name>: Target namespace
  • -
  • --insecure: Skip TLS verification
  • -
-

Prerequisites:

-
    -
  • Extension must have valid manifest.yaml
  • -
  • Must be logged in to registry (see oci login)
  • -
-
-

List Extensions

-

Show available extensions in registry

-
provisioning oci list [OPTIONS]
-
-# Examples:
-provisioning oci list
-provisioning oci list --namespace provisioning-platform
-provisioning oci list --registry harbor.company.com
-
-

Output:

-
┬───────────────┬──────────────────┬─────────────────────────┬─────────────────────────────────────────────┐
-│ name          │ registry         │ namespace               │ reference                                   │
-├───────────────┼──────────────────┼─────────────────────────┼─────────────────────────────────────────────┤
-│ kubernetes    │ localhost:5000   │ provisioning-extensions │ localhost:5000/provisioning-extensions/...  │
-│ containerd    │ localhost:5000   │ provisioning-extensions │ localhost:5000/provisioning-extensions/...  │
-│ cilium        │ localhost:5000   │ provisioning-extensions │ localhost:5000/provisioning-extensions/...  │
-└───────────────┴──────────────────┴─────────────────────────┴─────────────────────────────────────────────┘
-
-
-

Search Extensions

-

Search for extensions matching query

-
provisioning oci search <query> [OPTIONS]
-
-# Examples:
-provisioning oci search kube
-provisioning oci search postgres
-provisioning oci search "container-*"
-
-
-

Show Tags (Versions)

-

Display all available versions of an extension

-
provisioning oci tags <artifact-name> [OPTIONS]
-
-# Examples:
-provisioning oci tags kubernetes
-provisioning oci tags redis --registry harbor.company.com
-
-

Output:

-
┬────────────┬─────────┬──────────────────────────────────────────────────────┐
-│ artifact   │ version │ reference                                            │
-├────────────┼─────────┼──────────────────────────────────────────────────────┤
-│ kubernetes │ 1.29.0  │ localhost:5000/provisioning-extensions/kubernetes... │
-│ kubernetes │ 1.28.0  │ localhost:5000/provisioning-extensions/kubernetes... │
-│ kubernetes │ 1.27.0  │ localhost:5000/provisioning-extensions/kubernetes... │
-└────────────┴─────────┴──────────────────────────────────────────────────────┘
-
-
-

Inspect Extension

-

Show detailed manifest and metadata

-
provisioning oci inspect <artifact>:<version> [OPTIONS]
-
-# Examples:
-provisioning oci inspect kubernetes:1.28.0
-provisioning oci inspect redis:7.0.0 --format json
-
-

Output:

-
name: kubernetes
-type: taskserv
-version: 1.28.0
-description: Kubernetes container orchestration platform
-author: Provisioning Team
-license: MIT
-dependencies:
-  containerd: ">=1.7.0"
-  etcd: ">=3.5.0"
-platforms:
-  - linux/amd64
-  - linux/arm64
-
-
-

Login to Registry

-

Authenticate with OCI registry

-
provisioning oci login <registry> [OPTIONS]
-
-# Examples:
-provisioning oci login localhost:5000
-provisioning oci login harbor.company.com --username admin
-provisioning oci login registry.io --password-stdin < token.txt
-provisioning oci login registry.io --token-file ~/.provisioning/tokens/registry
-
-

Options:

-
    -
  • --username <user>: Username (default: _token)
  • -
  • --password-stdin: Read password from stdin
  • -
  • --token-file <path>: Read token from file
  • -
-

Note: Credentials are stored in Docker config (~/.docker/config.json)

-
-

Logout from Registry

-

Remove stored credentials

-
provisioning oci logout <registry>
-
-# Example:
-provisioning oci logout harbor.company.com
-
-
-

Delete Extension

-

Remove extension from registry

-
provisioning oci delete <artifact>:<version> [OPTIONS]
-
-# Examples:
-provisioning oci delete kubernetes:1.27.0
-provisioning oci delete redis:6.0.0 --force  # Skip confirmation
-
-

Options:

-
    -
  • --force: Skip confirmation prompt
  • -
  • --registry <endpoint>: Target registry
  • -
  • --namespace <name>: Target namespace
  • -
-

Warning: This operation is irreversible. Use with caution.

-
-

Copy Extension

-

Copy extension between registries

-
provisioning oci copy <source> <destination> [OPTIONS]
-
-# Examples:
-# Copy between namespaces in same registry
-provisioning oci copy \
-  localhost:5000/test/kubernetes:1.28.0 \
-  localhost:5000/production/kubernetes:1.28.0
-
-# Copy between different registries
-provisioning oci copy \
-  localhost:5000/provisioning-extensions/kubernetes:1.28.0 \
-  harbor.company.com/provisioning/kubernetes:1.28.0
-
-
-

Show OCI Configuration

-

Display current OCI settings

-
provisioning oci config
-
-# Output:
-{
-  tool: "oras"
-  registry: "localhost:5000"
-  namespace: {
-    extensions: "provisioning-extensions"
-    platform: "provisioning-platform"
-  }
-  cache_dir: "~/.provisioning/oci-cache"
-  tls_enabled: false
-}
-
-
-

Dependency Management

-

Dependency Configuration

-

Dependencies are configured in workspace/config/provisioning.yaml:

-
dependencies:
-  # Core provisioning system
-  core:
-    source: "oci://harbor.company.com/provisioning-core:v3.5.0"
-
-  # Extensions (providers, taskservs, clusters)
-  extensions:
-    source_type: "oci"
-
-    oci:
-      registry: "localhost:5000"
-      namespace: "provisioning-extensions"
-      tls_enabled: false
-      auth_token_path: "~/.provisioning/tokens/oci"
-
-    modules:
-      providers:
-        - "oci://localhost:5000/provisioning-extensions/aws:2.0.0"
-        - "oci://localhost:5000/provisioning-extensions/upcloud:1.5.0"
-
-      taskservs:
-        - "oci://localhost:5000/provisioning-extensions/kubernetes:1.28.0"
-        - "oci://localhost:5000/provisioning-extensions/containerd:1.7.0"
-        - "oci://localhost:5000/provisioning-extensions/etcd:3.5.0"
-
-      clusters:
-        - "oci://localhost:5000/provisioning-extensions/buildkit:0.12.0"
-
-  # Platform services
-  platform:
-    source_type: "oci"
-    oci:
-      registry: "harbor.company.com"
-      namespace: "provisioning-platform"
-
-

Resolve Dependencies

-
# Resolve and install all configured dependencies
-provisioning dep resolve
-
-# Dry-run (show what would be installed)
-provisioning dep resolve --dry-run
-
-# Resolve with specific version constraints
-provisioning dep resolve --update  # Update to latest versions
-
-

Check for Updates

-
# Check all dependencies for updates
-provisioning dep check-updates
-
-# Output:
-┬─────────────┬─────────┬────────┬──────────────────┐
-│ name        │ current │ latest │ update_available │
-├─────────────┼─────────┼────────┼──────────────────┤
-│ kubernetes  │ 1.28.0  │ 1.29.0 │ true             │
-│ containerd  │ 1.7.0   │ 1.7.0  │ false            │
-│ etcd        │ 3.5.0   │ 3.5.1  │ true             │
-└─────────────┴─────────┴────────┴──────────────────┘
-
-

Update Dependency

-
# Update specific extension to latest version
-provisioning dep update kubernetes
-
-# Update to specific version
-provisioning dep update kubernetes --version 1.29.0
-
-

Dependency Tree

-
# Show dependency tree for extension
-provisioning dep tree kubernetes
-
-# Output:
-kubernetes:1.28.0
-├── containerd:1.7.0
-│   └── runc:1.1.0
-├── etcd:3.5.0
-└── kubectl:1.28.0
-
-

Validate Dependencies

-
# Validate dependency graph (check for cycles, conflicts)
-provisioning dep validate
-
-# Validate specific extension
-provisioning dep validate kubernetes
-
-
-

Extension Development

-

Create New Extension

-
# Generate extension from template
-provisioning generate extension taskserv redis
-
-# Directory structure created:
-# extensions/taskservs/redis/
-# ├── schemas/
-# │   ├── manifest.toml
-# │   ├── main.ncl
-# │   ├── version.ncl
-# │   └── dependencies.ncl
-# ├── scripts/
-# │   ├── install.nu
-# │   ├── check.nu
-# │   └── uninstall.nu
-# ├── templates/
-# ├── docs/
-# │   └── README.md
-# ├── tests/
-# └── manifest.yaml
-
-

Extension Manifest

-

Edit manifest.yaml:

-
name: redis
-type: taskserv
-version: 1.0.0
-description: Redis in-memory data structure store
-author: Your Name
-license: MIT
-homepage: https://redis.io
-repository: https://gitea.example.com/provisioning-extensions/redis
-
-dependencies:
-  os: ">=1.0.0"  # Required OS taskserv
-
-tags:
-  - database
-  - cache
-  - key-value
-
-platforms:
-  - linux/amd64
-  - linux/arm64
-
-min_provisioning_version: "3.0.0"
-
-

Test Extension Locally

-
# Load extension from local path
-provisioning module load taskserv workspace_dev redis --source local
-
-# Test installation
-provisioning taskserv create redis --infra test-env --check
-
-# Run tests
-provisioning test extension redis
-
-

Validate Extension

-
# Validate extension structure
-provisioning oci package validate ./extensions/taskservs/redis
-
-# Output:
-✓ Extension structure valid
-Warnings:
-  - Missing docs/README.md (recommended)
-
-

Package Extension

-
# Package as OCI artifact
-provisioning oci package ./extensions/taskservs/redis
-
-# Output: redis-1.0.0.tar.gz
-
-# Inspect package
-provisioning oci inspect-artifact redis-1.0.0.tar.gz
-
-

Publish Extension

-
# Login to registry (one-time)
-provisioning oci login localhost:5000
-
-# Publish extension
-provisioning oci push ./extensions/taskservs/redis redis 1.0.0
-
-# Verify publication
-provisioning oci tags redis
-
-# Share with team
-echo "Published: oci://localhost:5000/provisioning-extensions/redis:1.0.0"
-
-
-

Registry Setup

-

Local Registry (Development)

-

Using Zot (lightweight):

-
# Start Zot registry
-provisioning oci-registry start
-
-# Configuration:
-# - Endpoint: localhost:5000
-# - Storage: ~/.provisioning/oci-registry/
-# - No authentication
-# - TLS disabled
-
-# Stop registry
-provisioning oci-registry stop
-
-# Check status
-provisioning oci-registry status
-
-

Manual Zot Setup:

-
# Install Zot
-brew install project-zot/tap/zot
-
-# Create config
-cat > zot-config.json <<EOF
-{
-  "storage": {
-    "rootDirectory": "/tmp/zot"
-  },
-  "http": {
-    "address": "0.0.0.0",
-    "port": "5000"
-  },
-  "log": {
-    "level": "info"
-  }
-}
-EOF
-
-# Run Zot
-zot serve zot-config.json
-
-
-

Remote Registry (Production)

-

Using Harbor:

-
    -
  1. -

    Deploy Harbor:

    -
    # Using Docker Compose
    -wget https://github.com/goharbor/harbor/releases/download/v2.9.0/harbor-offline-installer-v2.9.0.tgz
    -tar xvf harbor-offline-installer-v2.9.0.tgz
    -cd harbor
    -./install.sh
    -
    -
  2. -
  3. -

    Configure Workspace:

    -
    # workspace/config/provisioning.yaml
    -dependencies:
    -  registry:
    -    type: "oci"
    -    oci:
    -      endpoint: "https://harbor.company.com"
    -      namespaces:
    -        extensions: "provisioning/extensions"
    -        platform: "provisioning/platform"
    -      tls_enabled: true
    -      auth_token_path: "~/.provisioning/tokens/harbor"
    -
    -
  4. -
  5. -

    Login:

    -
    provisioning oci login harbor.company.com --username admin
    -
    -
  6. -
-
-

Troubleshooting

-

No OCI Tool Found

-

Error: “No OCI tool found. Install oras, crane, or skopeo”

-

Solution:

-
# Install ORAS (recommended)
-brew install oras
-
-# Or install Crane
-go install github.com/google/go-containerregistry/cmd/crane@latest
-
-# Or install Skopeo
-brew install skopeo
-
-
-

Connection Refused

-

Error: “Connection refused to localhost:5000”

-

Solution:

-
# Check if registry is running
-curl http://localhost:5000/v2/_catalog
-
-# Start local registry if not running
-provisioning oci-registry start
-
-
-

TLS Certificate Error

-

Error: “x509: certificate signed by unknown authority”

-

Solution:

-
# For development, use --insecure flag
-provisioning oci pull kubernetes:1.28.0 --insecure
-
-# For production, configure TLS properly in workspace config:
-# dependencies:
-#   extensions:
-#     oci:
-#       tls_enabled: true
-#       # Add CA certificate to system trust store
-
-
-

Authentication Failed

-

Error: “unauthorized: authentication required”

-

Solution:

-
# Login to registry
-provisioning oci login localhost:5000
-
-# Or provide auth token in config:
-# dependencies:
-#   extensions:
-#     oci:
-#       auth_token_path: "~/.provisioning/tokens/oci"
-
-
-

Extension Not Found

-

Error: “Dependency not found: kubernetes”

-

Solutions:

-
    -
  1. -

    Check registry endpoint:

    -
    provisioning oci config
    -
    -
  2. -
  3. -

    List available extensions:

    -
    provisioning oci list
    -
    -
  4. -
  5. -

    Check namespace:

    -
    provisioning oci list --namespace provisioning-extensions
    -
    -
  6. -
  7. -

    Verify extension exists:

    -
    provisioning oci tags kubernetes
    -
    -
  8. -
-
-

Dependency Resolution Failed

-

Error: “Circular dependency detected”

-

Solution:

-
# Validate dependency graph
-provisioning dep validate kubernetes
-
-# Check dependency tree
-provisioning dep tree kubernetes
-
-# Fix circular dependencies in extension manifests
-
-
-

Best Practices

-

Version Pinning

-

DO: Pin to specific versions in production

-
modules:
-  taskservs:
-    - "oci://registry/kubernetes:1.28.0"  # Specific version
-
-

DON’T: Use latest tag in production

-
modules:
-  taskservs:
-    - "oci://registry/kubernetes:latest"  # Unpredictable
-
-
-

Semantic Versioning

-

DO: Follow semver (MAJOR.MINOR.PATCH)

-
    -
  • 1.0.01.0.1: Backward-compatible bug fix
  • -
  • 1.0.01.1.0: Backward-compatible new feature
  • -
  • 1.0.02.0.0: Breaking change
  • -
-

DON’T: Use arbitrary version numbers

-
    -
  • v1, version-2, latest-stable
  • -
-
-

Dependency Management

-

DO: Specify version constraints

-
dependencies:
-  containerd: ">=1.7.0"
-  etcd: "^3.5.0"  # 3.5.x compatible
-
-

DON’T: Leave dependencies unversioned

-
dependencies:
-  containerd: "*"  # Too permissive
-
-
-

Security

-

DO:

-
    -
  • Use TLS for remote registries
  • -
  • Rotate authentication tokens regularly
  • -
  • Scan images for vulnerabilities (Harbor)
  • -
  • Sign artifacts (cosign)
  • -
-

DON’T:

-
    -
  • Use --insecure in production
  • -
  • Store passwords in config files
  • -
  • Skip certificate verification
  • -
-
- - -
-

Maintained By: Documentation Team -Last Updated: 2025-10-06 -Next Review: 2026-01-06

-

Prov-Ecosystem & Provctl Integrations - Quick Start Guide

-

Date: 2025-11-23 -Version: 1.0.0 -For: provisioning v3.6.0+

-
-

Access powerful functionality from prov-ecosystem and provctl directly through provisioning CLI.

-
-
-

Overview

-

Four integrated feature sets:

-
- - - - - -
FeaturePurposeBest For
Runtime AbstractionUnified Docker/Podman/OrbStack/Colima/nerdctlMulti-platform deployments
SSH AdvancedPooling, circuit breaker, retry strategiesLarge-scale distributed operations
Backup SystemMulti-backend backups (Restic, Borg, Tar, Rsync)Data protection & disaster recovery
GitOps EventsEvent-driven deployments from GitContinuous deployment automation
Service ManagementCross-platform services (systemd, launchd, runit)Infrastructure service orchestration
-
-
-

Quick Start Commands

-

🏃 30-Second Test

-
# 1. Check what runtimes you have available
-provisioning runtime list
-
-# 2. Detect which runtime provisioning will use
-provisioning runtime detect
-
-# 3. Verify runtime works
-provisioning runtime info
-
-

Expected Output:

-
Available runtimes:
-  • docker
-  • podman
-
-
-

1️⃣ Runtime Abstraction

-

What It Does

-

Automatically detects and uses Docker, Podman, OrbStack, Colima, or nerdctl - whichever is available on your system. Eliminates hardcoding “docker” commands.

-

Commands

-
# Detect available runtime
-provisioning runtime detect
-# Output: "Detected runtime: docker"
-
-# Execute command in runtime
-provisioning runtime exec "docker images"
-# Runs: docker images
-
-# Get runtime info
-provisioning runtime info
-# Shows: name, command, version
-
-# List all available runtimes
-provisioning runtime list
-# Shows: docker, podman, orbstack...
-
-# Adapt docker-compose for detected runtime
-provisioning runtime compose ./docker-compose.yml
-# Output: docker compose -f ./docker-compose.yml
-
-

Examples

-

Use Case 1: Works on macOS with OrbStack, Linux with Docker

-
# User on macOS with OrbStack
-$ provisioning runtime exec "docker run -it ubuntu bash"
-# Automatically uses orbctl (OrbStack)
-
-# User on Linux with Docker
-$ provisioning runtime exec "docker run -it ubuntu bash"
-# Automatically uses docker
-
-

Use Case 2: Run docker-compose with detected runtime

-
# Detect and run compose
-$ compose_cmd=$(provisioning runtime compose ./docker-compose.yml)
-$ eval $compose_cmd up -d
-# Works with docker, podman, nerdctl automatically
-
-

Configuration

-

No configuration needed! Runtime is auto-detected in order:

-
    -
  1. Docker (macOS: OrbStack first; Linux: Docker first)
  2. -
  3. Podman
  4. -
  5. OrbStack (macOS)
  6. -
  7. Colima (macOS)
  8. -
  9. nerdctl
  10. -
-
-

2️⃣ SSH Advanced Operations

-

What It Does

-

Advanced SSH with connection pooling (90% faster), circuit breaker for fault isolation, and deployment strategies (rolling, blue-green, canary).

-

Commands

-
# Create SSH pool connection to host
-provisioning ssh pool connect server.example.com root --port 22 --timeout 30
-
-# Check pool status
-provisioning ssh pool status
-
-# List available deployment strategies
-provisioning ssh strategies
-# Output: rolling, blue-green, canary
-
-# Configure retry strategy
-provisioning ssh retry-config exponential --max-retries 3
-
-# Check circuit breaker status
-provisioning ssh circuit-breaker
-# Output: state=closed, failures=0/5
-
-

Deployment Strategies

-
- - - -
StrategyUse CaseRisk
RollingGradual rollout across hostsLow (but slower)
Blue-GreenZero-downtime, instant rollbackVery low
CanaryTest on small % before full rolloutVery low (5% at risk)
-
-

Example: Multi-Host Deployment

-
# Set up SSH pool
-provisioning ssh pool connect srv01.example.com root
-provisioning ssh pool connect srv02.example.com root
-provisioning ssh pool connect srv03.example.com root
-
-# Execute on pool (all 3 hosts in parallel)
-provisioning ssh pool exec [srv01, srv02, srv03] "systemctl restart myapp" --strategy rolling
-
-# Check status
-provisioning ssh pool status
-# Output: connections=3, active=0, idle=3, circuit_breaker=green
-
-

Retry Strategies

-
# Exponential backoff: 100 ms, 200 ms, 400 ms, 800 ms...
-provisioning ssh retry-config exponential --max-retries 5
-
-# Linear backoff: 100 ms, 200 ms, 300 ms, 400 ms...
-provisioning ssh retry-config linear --max-retries 3
-
-# Fibonacci backoff: 100 ms, 100 ms, 200 ms, 300 ms, 500 ms...
-provisioning ssh retry-config fibonacci --max-retries 4
-
-
-

3️⃣ Backup System

-

What It Does

-

Multi-backend backup management with Restic, BorgBackup, Tar, or Rsync. Supports local, S3, SFTP, REST API, and Backblaze B2 repositories.

-

Commands

-
# Create backup job
-provisioning backup create daily-backup /data /var/lib \
-  --backend restic \
-  --repository s3://my-bucket/backups
-
-# Restore from snapshot
-provisioning backup restore snapshot-001 --restore_path /data
-
-# List available snapshots
-provisioning backup list
-
-# Schedule regular backups
-provisioning backup schedule daily-backup "0 2 * * *" \
-  --paths ["/data" "/var/lib"] \
-  --backend restic
-
-# Show retention policy
-provisioning backup retention
-# Output: daily=7, weekly=4, monthly=12, yearly=5
-
-# Check backup job status
-provisioning backup status backup-job-001
-
-

Backend Comparison

-
- - - - -
BackendSpeedCompressionBest For
Restic⚡⚡⚡ExcellentCloud backups
BorgBackup⚡⚡ExcellentLarge archives
Tar⚡⚡⚡GoodSimple backups
Rsync⚡⚡⚡NoneIncremental syncs
-
-

Example: Automated Daily Backups to S3

-
# Create backup configuration
-provisioning backup create app-backup /opt/myapp /var/lib/myapp \
-  --backend restic \
-  --repository s3://prod-backups/myapp
-
-# Schedule daily at 2 AM
-provisioning backup schedule app-backup "0 2 * * *"
-
-# Set retention: keep 7 days, 4 weeks, 12 months, 5 years
-provisioning backup retention \
-  --daily 7 \
-  --weekly 4 \
-  --monthly 12 \
-  --yearly 5
-
-# Verify backup was created
-provisioning backup list
-
-

Dry-Run (Test First)

-
# Test backup without actually creating it
-provisioning backup create test-backup /data --check
-
-# Test restore without actually restoring
-provisioning backup restore snapshot-001 --check
-
-
-

4️⃣ GitOps Event-Driven Deployments

-

What It Does

-

Automatically trigger deployments from Git events (push, PR, webhook, scheduled). Supports GitHub, GitLab, Gitea.

-

Commands

-
# Load GitOps rules from configuration file
-provisioning gitops rules ./gitops-rules.yaml
-
-# Watch for Git events (starts webhook listener)
-provisioning gitops watch --provider github --webhook-port 8080
-
-# List supported events
-provisioning gitops events
-# Output: push, pull-request, webhook, scheduled, health-check, manual
-
-# Manually trigger deployment
-provisioning gitops trigger deploy-prod --environment prod
-
-# List active deployments
-provisioning gitops deployments --status running
-
-# Show GitOps status
-provisioning gitops status
-# Output: active_rules=5, total=42, successful=40, failed=2
-
-

Example: GitOps Configuration

-

File: gitops-rules.yaml

-
rules:
-  - name: deploy-prod
-    provider: github
-    repository: https://github.com/myorg/myrepo
-    branch: main
-    events:
-      - push
-    targets:
-      - prod
-    command: "provisioning deploy"
-    require_approval: true
-
-  - name: deploy-staging
-    provider: github
-    repository: https://github.com/myorg/myrepo
-    branch: develop
-    events:
-      - push
-      - pull-request
-    targets:
-      - staging
-    command: "provisioning deploy"
-    require_approval: false
-
-

Then:

-
# Load rules
-provisioning gitops rules ./gitops-rules.yaml
-
-# Watch for events
-provisioning gitops watch --provider github
-
-# When you push to main, deployment auto-triggers!
-# git push origin main → provisioning deploy runs automatically
-
-
-

5️⃣ Service Management

-

What It Does

-

Install, start, stop, and manage services across systemd (Linux), launchd (macOS), runit, and OpenRC.

-

Commands

-
# Install service
-provisioning service install myapp /usr/local/bin/myapp \
-  --user myapp \
-  --working-dir /opt/myapp
-
-# Start service
-provisioning service start myapp
-
-# Stop service
-provisioning service stop myapp
-
-# Restart service
-provisioning service restart myapp
-
-# Check service status
-provisioning service status myapp
-# Output: running=true, uptime=86400s, restarts=2
-
-# List all services
-provisioning service list
-
-# Detect init system
-provisioning service detect-init
-# Output: systemd (Linux), launchd (macOS), etc.
-
-

Example: Install Custom Service

-
# On Linux (systemd)
-provisioning service install provisioning-worker \
-  /usr/local/bin/provisioning-worker \
-  --user provisioning \
-  --working-dir /opt/provisioning
-
-# On macOS (launchd) - works the same!
-provisioning service install provisioning-worker \
-  /usr/local/bin/provisioning-worker \
-  --user provisioning \
-  --working-dir /opt/provisioning
-
-# Service file is generated automatically for your platform
-provisioning service start provisioning-worker
-provisioning service status provisioning-worker
-
-
-

🎯 Common Workflows

-

Workflow 1: Multi-Platform Deployment

-
# Works on macOS with OrbStack, Linux with Docker, etc.
-provisioning runtime detect          # Detects your platform
-provisioning runtime exec "docker ps" # Uses your runtime
-
-

Workflow 2: Large-Scale SSH Operations

-
# Connect to multiple servers
-for host in srv01 srv02 srv03; do
-  provisioning ssh pool connect $host.example.com root
-done
-
-# Execute in parallel with 3x retry
-provisioning ssh pool exec [srv01, srv02, srv03] \
-  "systemctl restart app" \
-  --strategy rolling \
-  --retry exponential
-
-

Workflow 3: Automated Backups

-
# Create backup job
-provisioning backup create daily /opt/app /data \
-  --backend restic \
-  --repository s3://backups
-
-# Schedule for 2 AM every day
-provisioning backup schedule daily "0 2 * * *"
-
-# Verify it works
-provisioning backup list
-
-

Workflow 4: Continuous Deployment from Git

-
# Define rules in YAML
-cat > gitops-rules.yaml << 'EOF'
-rules:
-  - name: deploy-prod
-    provider: github
-    repository: https://github.com/myorg/repo
-    branch: main
-    events: [push]
-    targets: [prod]
-    command: "provisioning deploy"
-EOF
-
-# Load and activate
-provisioning gitops rules ./gitops-rules.yaml
-provisioning gitops watch --provider github
-
-# Now pushing to main auto-deploys!
-
-
-

🔧 Advanced Configuration

-

Using with Nickel Configuration

-

All integrations support Nickel schemas for advanced configuration:

-
let { IntegrationConfig } = import "provisioning/integrations.ncl" in
-{
-  integrations = {
-    # Runtime configuration
-    runtime = {
-      preferred = "podman",
-      check_order = ["podman", "docker", "nerdctl"],
-      timeout_secs = 5,
-      enable_cache = true,
-    },
-
-    # Backup with retention policy
-    backup = {
-      default_backend = "restic",
-      default_repository = {
-        type = "s3",
-        bucket = "prod-backups",
-        prefix = "daily",
-      },
-      jobs = [],
-      verify_after_backup = true,
-    },
-
-    # GitOps rules with approval
-    gitops = {
-      rules = [],
-      default_strategy = "blue-green",
-      dry_run_by_default = false,
-      enable_audit_log = true,
-    },
-  }
-}
-
-
-

💡 Tips & Tricks

-

Tip 1: Dry-Run Mode

-

All major operations support --check for testing:

-
provisioning runtime exec "systemctl restart app" --check
-# Output: Would execute: [docker exec ...]
-
-provisioning backup create test /data --check
-# Output: Backup would be created: [test]
-
-provisioning gitops trigger deploy-test --check
-# Output: Deployment would trigger
-
-

Tip 2: Output Formats

-

Some commands support JSON output:

-
provisioning runtime list --out json
-provisioning backup list --out json
-provisioning gitops deployments --out json
-
-

Tip 3: Integration with Scripts

-

Chain commands in shell scripts:

-
#!/bin/bash
-
-# Detect runtime and use it
-RUNTIME=$(provisioning runtime detect | grep -oP 'docker|podman|nerdctl')
-
-# Execute using detected runtime
-provisioning runtime exec "docker ps"
-
-# Create backup before deploy
-provisioning backup create pre-deploy-$(date +%s) /opt/app
-
-# Deploy
-provisioning deploy
-
-# Verify with GitOps
-provisioning gitops status
-
-
-

🐛 Troubleshooting

-

Problem: “No container runtime detected”

-

Solution: Install Docker, Podman, or OrbStack:

-
# macOS
-brew install orbstack
-
-# Linux
-sudo apt-get install docker.io
-
-# Then verify
-provisioning runtime detect
-
-

Problem: SSH connection timeout

-

Solution: Check port and timeout settings:

-
# Use different port
-provisioning ssh pool connect server.example.com root --port 2222
-
-# Increase timeout
-provisioning ssh pool connect server.example.com root --timeout 60
-
-

Problem: Backup fails with “Permission denied”

-

Solution: Check permissions on backup path:

-
# Check if user can read target paths
-ls -l /data  # Should be readable
-
-# Run with elevated privileges if needed
-sudo provisioning backup create mybak /data --backend restic
-
-
-

📚 Learn More

-
- - - - - -
TopicLocation
Architecturedocs/architecture/ECOSYSTEM_INTEGRATION.md
CLI Helpprovisioning help integrations
Rust Bridgeprovisioning/platform/integrations/provisioning-bridge/
Nushell Modulesprovisioning/core/nulib/lib_provisioning/integrations/
Nickel Schemasprovisioning/schemas/integrations/
-
-
-

🆘 Need Help

-
# General help
-provisioning help integrations
-
-# Specific command help
-provisioning runtime --help
-provisioning backup --help
-provisioning gitops --help
-
-# System diagnostics
-provisioning status
-provisioning health
-
-
-

Last Updated: 2025-11-23 -Version: 1.0.0

-

Secrets Service Layer (SST) - Complete User Guide

-
-

Status: ✅ COMPLETED - All phases (1-6) implemented and tested -Date: December 2025 -Tests: 25/25 passing (100%)

-
-

📋 Executive Summary

-

The Secrets Service Layer (SST) is an enterprise-grade unified solution for managing all types of secrets (database credentials, SSH keys, API -tokens, provider credentials) through a REST API controlled by Cedar policies with workspace isolation and real-time monitoring.

-

✨ Key Features

-
- - - - - - - - - -
FeatureDescriptionStatus
Centralized ManagementUnified API for all secrets✅ Complete
Cedar AuthorizationMandatory configurable policies✅ Complete
Workspace IsolationSecrets isolated by workspace and domain✅ Complete
Auto RotationAutomatic scheduling and rotation✅ Complete
Secret SharingCross-workspace sharing with access control✅ Complete
Real-time MonitoringDashboard, expiration alerts✅ Complete
Complete AuditFull operation logging✅ Complete
KMS EncryptionEnvelope-based key encryption✅ Complete
Temporal + PermanentSupport for SSH and provider credentials✅ Complete
-
-
-

🚀 Quick Start (5 minutes)

-

1. Register the workspace librecloud

-
# Register workspace
-provisioning workspace register librecloud /Users/Akasha/project-provisioning/workspace_librecloud
-
-# Verify
-provisioning workspace list
-provisioning workspace active
-
-

2. Create your first database secret

-
# Create PostgreSQL credential
-provisioning secrets create database postgres \
-  --workspace librecloud \
-  --infra wuji \
-  --user admin \
-  --password "secure_password" \
-  --host db.local \
-  --port 5432 \
-  --database myapp
-
-

3. Retrieve the secret

-
# Get credential (requires Cedar authorization)
-provisioning secrets get librecloud/wuji/postgres/admin_password
-
-

4. List secrets by domain

-
# List all PostgreSQL secrets
-provisioning secrets list --workspace librecloud --domain postgres
-
-# List all infrastructure secrets
-provisioning secrets list --workspace librecloud --infra wuji
-
-
-

📚 Complete Guide by Phases

-

Phase 1: Database and Application Secrets

-

1.1 Create Database Credentials

-

REST Endpoint:

-
POST /api/v1/secrets/database
-Content-Type: application/json
-
-{
-  "workspace_id": "librecloud",
-  "infra_id": "wuji",
-  "db_type": "postgresql",
-  "host": "db.librecloud.internal",
-  "port": 5432,
-  "database": "production_db",
-  "username": "admin",
-  "password": "encrypted_password"
-}
-
-

CLI Command:

-
provisioning secrets create database postgres \
-  --workspace librecloud \
-  --infra wuji \
-  --user admin \
-  --password "password" \
-  --host db.librecloud.internal \
-  --port 5432 \
-  --database production_db
-
-

Result: Secret stored in SurrealDB with KMS encryption

-
✓ Secret created: librecloud/wuji/postgres/admin_password
-  Workspace: librecloud
-  Infrastructure: wuji
-  Domain: postgres
-  Type: Database
-  Encrypted: Yes (KMS)
-
-

1.2 Create Application Secrets

-

REST API:

-
POST /api/v1/secrets/application
-{
-  "workspace_id": "librecloud",
-  "app_name": "myapp-web",
-  "key_type": "api_token",
-  "value": "sk_live_abc123xyz"
-}
-
-

CLI:

-
provisioning secrets create app myapp-web \
-  --workspace librecloud \
-  --domain web \
-  --type api_token \
-  --value "sk_live_abc123xyz"
-
-

1.3 List Secrets

-

REST API:

-
GET /api/v1/secrets/list?workspace=librecloud&domain=postgres
-
-Response:
-{
-  "secrets": [
-    {
-      "path": "librecloud/wuji/postgres/admin_password",
-      "workspace_id": "librecloud",
-      "domain": "postgres",
-      "secret_type": "Database",
-      "created_at": "2025-12-06T10:00:00Z",
-      "created_by": "admin"
-    }
-  ]
-}
-
-

CLI:

-
# All workspace secrets
-provisioning secrets list --workspace librecloud
-
-# Filter by domain
-provisioning secrets list --workspace librecloud --domain postgres
-
-# Filter by infrastructure
-provisioning secrets list --workspace librecloud --infra wuji
-
-

1.4 Retrieve a Secret

-

REST API:

-
GET /api/v1/secrets/librecloud/wuji/postgres/admin_password
-
-Requires:
-- Header: Authorization: Bearer <jwt_token>
-- Cedar verification: [user has read permission]
-- If MFA required: mfa_verified=true in JWT
-
-

CLI:

-
# Get full secret
-provisioning secrets get librecloud/wuji/postgres/admin_password
-
-# Output:
-# Host: db.librecloud.internal
-# Port: 5432
-# User: admin
-# Database: production_db
-# Password: [encrypted in transit]
-
-
-

Phase 2: SSH Keys and Provider Credentials

-

2.1 Temporal SSH Keys (Auto-expiring)

-

Use Case: Temporary server access (max 24 hours)

-
# Generate temporary SSH key (TTL 2 hours)
-provisioning secrets create ssh \
-  --workspace librecloud \
-  --infra wuji \
-  --server web01 \
-  --ttl 2h
-
-# Result:
-# ✓ SSH key generated
-#   Server: web01
-#   TTL: 2 hours
-#   Expires at: 2025-12-06T12:00:00Z
-#   Private Key: [encrypted]
-
-

Technical Details:

-
    -
  • Generated in real-time by Orchestrator
  • -
  • Stored in memory (TTL-based)
  • -
  • Automatic revocation on expiry
  • -
  • Complete audit trail in vault_audit
  • -
-

2.2 Permanent SSH Keys (Stored)

-

Use Case: Long-duration infrastructure keys

-
# Create permanent SSH key (stored in DB)
-provisioning secrets create ssh \
-  --workspace librecloud \
-  --infra wuji \
-  --server web01 \
-  --permanent
-
-# Result:
-# ✓ Permanent SSH key created
-#   Storage: SurrealDB (encrypted)
-#   Rotation: Manual (or automatic if configured)
-#   Access: Cedar controlled
-
-

2.3 Provider Credentials

-

UpCloud API (Temporal):

-
provisioning secrets create provider upcloud \
-  --workspace librecloud \
-  --roles "server,network,storage" \
-  --ttl 4h
-
-# Result:
-# ✓ UpCloud credential generated
-#   Token: tmp_upcloud_abc123
-#   Roles: server, network, storage
-#   TTL: 4 hours
-
-

UpCloud API (Permanent):

-
provisioning secrets create provider upcloud \
-  --workspace librecloud \
-  --roles "server,network" \
-  --permanent
-
-# Result:
-# ✓ Permanent UpCloud credential created
-#   Token: upcloud_live_xyz789
-#   Storage: SurrealDB
-#   Rotation: Manual
-
-
-

Phase 3: Auto Rotation

-

3.1 Plan Automatic Rotation

-

Predefined Rotation Policies:

-
- - - - -
TypeProdDev
DatabaseEvery 30dEvery 90d
ApplicationEvery 60dEvery 14d
SSHEvery 365dEvery 90d
ProviderEvery 180dEvery 30d
-
-

Force Immediate Rotation:

-
# Force rotation now
-provisioning secrets rotate librecloud/wuji/postgres/admin_password
-
-# Result:
-# ✓ Rotation initiated
-#   Status: In Progress
-#   New password: [generated]
-#   Old password: [archived]
-#   Next rotation: 2025-01-05
-
-

Check Rotation Status:

-
GET /api/v1/secrets/{path}/rotation-status
-
-Response:
-{
-  "path": "librecloud/wuji/postgres/admin_password",
-  "status": "pending",
-  "next_rotation": "2025-01-05T10:00:00Z",
-  "last_rotation": "2025-12-05T10:00:00Z",
-  "days_remaining": 30,
-  "failure_count": 0
-}
-
-

3.2 Rotation Job Scheduler (Background)

-

System automatically runs rotations every hour:

-
┌─────────────────────────────────┐
-│  Rotation Job Scheduler         │
-│  - Interval: 1 hour             │
-│  - Max concurrency: 5 rotations │
-│  - Auto retry                   │
-└─────────────────────────────────┘
-        ↓
-    Get due secrets
-        ↓
-    Generate new credentials
-        ↓
-    Validate functionality
-        ↓
-    Update SurrealDB
-        ↓
-    Log to audit trail
-
-

Check Scheduler Status:

-
provisioning secrets scheduler status
-
-# Result:
-# Status: Running
-# Last check: 2025-12-06T11:00:00Z
-# Completed rotations: 24
-# Failed rotations: 0
-
-
-

Phase 3.2: Share Secrets Across Workspaces

-

Create a Grant (Access Authorization)

-

Scenario: Share DB credential between librecloud and staging

-
# REST API
-POST /api/v1/secrets/{path}/grant
-
-{
-  "source_workspace": "librecloud",
-  "target_workspace": "staging",
-  "permission": "read",  # read, write, rotate
-  "require_approval": false
-}
-
-# Response:
-{
-  "grant_id": "grant-12345",
-  "secret_path": "librecloud/wuji/postgres/admin_password",
-  "source_workspace": "librecloud",
-  "target_workspace": "staging",
-  "permission": "read",
-  "status": "active",
-  "granted_at": "2025-12-06T10:00:00Z",
-  "access_count": 0
-}
-
-

CLI:

-
provisioning secrets grant \
-  --secret librecloud/wuji/postgres/admin_password \
-  --target-workspace staging \
-  --permission read
-
-# ✓ Grant created: grant-12345
-#   Source workspace: librecloud
-#   Target workspace: staging
-#   Permission: Read
-#   Approval required: No
-
-

Revoke a Grant

-
# Revoke access immediately
-POST /api/v1/secrets/grant/{grant_id}/revoke
-{
-  "reason": "User left the team"
-}
-
-# CLI
-provisioning secrets revoke-grant grant-12345 \
-  --reason "User left the team"
-
-# ✓ Grant revoked
-#   Status: Revoked
-#   Access records: 42
-
-

List Grants

-
# All workspace grants
-GET /api/v1/secrets/grants?workspace=librecloud
-
-# Response:
-{
-  "grants": [
-    {
-      "grant_id": "grant-12345",
-      "secret_path": "librecloud/wuji/postgres/admin_password",
-      "target_workspace": "staging",
-      "permission": "read",
-      "status": "active",
-      "access_count": 42,
-      "last_accessed": "2025-12-06T10:30:00Z"
-    }
-  ]
-}
-
-
-

Phase 3.4: Monitoring and Alerts

-

Dashboard Metrics

-
GET /api/v1/secrets/monitoring/dashboard
-
-Response:
-{
-  "total_secrets": 45,
-  "temporal_secrets": 12,
-  "permanent_secrets": 33,
-  "expiring_secrets": [
-    {
-      "path": "librecloud/wuji/postgres/admin_password",
-      "domain": "postgres",
-      "days_remaining": 5,
-      "severity": "critical"
-    }
-  ],
-  "failed_access_attempts": [
-    {
-      "user": "alice",
-      "secret_path": "librecloud/wuji/postgres/admin_password",
-      "reason": "insufficient_permissions",
-      "timestamp": "2025-12-06T10:00:00Z"
-    }
-  ],
-  "rotation_metrics": {
-    "total": 45,
-    "completed": 40,
-    "pending": 3,
-    "failed": 2
-  }
-}
-
-

CLI:

-
provisioning secrets monitoring dashboard
-
-# ✓ Secrets Dashboard - Librecloud
-#
-#  Total secrets: 45
-#  Temporal secrets: 12
-#  Permanent secrets: 33
-#
-#  ⚠️  CRITICAL (next 3 days): 2
-#      - librecloud/wuji/postgres/admin_password (5 days)
-#      - librecloud/wuji/redis/password (1 day)
-#
-#  ⚡ WARNING (next 7 days): 3
-#      - librecloud/app/api_token (7 days)
-#
-#  📊 Rotations completed: 40/45 (89%)
-
-

Expiring Secrets Alerts

-
GET /api/v1/secrets/monitoring/expiring?days=7
-
-Response:
-{
-  "expiring_secrets": [
-    {
-      "path": "librecloud/wuji/postgres/admin_password",
-      "domain": "postgres",
-      "expires_in_days": 5,
-      "type": "database",
-      "last_rotation": "2025-11-05T10:00:00Z"
-    }
-  ]
-}
-
-
-

🔐 Cedar Authorization

-

All operations are protected by Cedar policies:

-

Example Policy: Production Secret Access

-
// Requires MFA for production secrets
-@id("prod-secret-access-mfa")
-permit (
-  principal,
-  action == Provisioning::Action::"access",
-  resource is Provisioning::Secret in Provisioning::Environment::"production"
-) when {
-  context.mfa_verified == true &&
-  resource.is_expired == false
-};
-
-// Only admins can create permanent secrets
-@id("permanent-secret-admin-only")
-permit (
-  principal in Provisioning::Role::"security_admin",
-  action == Provisioning::Action::"create",
-  resource is Provisioning::Secret
-) when {
-  resource.lifecycle == "permanent"
-};
-
-

Verify Authorization

-
# Test Cedar decision
-provisioning policies check alice can access secret:librecloud/postgres/password
-
-# Result:
-# User: alice
-# Resource: secret:librecloud/postgres/password
-# Decision: ✅ ALLOWED
-#   - Role: database_admin
-#   - MFA verified: Yes
-#   - Workspace: librecloud
-
-
-

🏗️ Data Structure

-

Secret in Database

-
-- Table vault_secrets (SurrealDB)
-{
-  id: "secret:uuid123",
-  path: "librecloud/wuji/postgres/admin_password",
-  workspace_id: "librecloud",
-  infra_id: "wuji",
-  domain: "postgres",
-  secret_type: "Database",
-  encrypted_value: "U2FsdGVkX1...", -- AES-256-GCM encrypted
-  version: 1,
-  created_at: "2025-12-05T10:00:00Z",
-  created_by: "admin",
-  updated_at: "2025-12-05T10:00:00Z",
-  updated_by: "admin",
-  tags: ["production", "critical"],
-  auto_rotate: true,
-  rotation_interval_days: 30,
-  ttl_seconds: null,  -- null = no auto expiry
-  deleted: false,
-  metadata: {
-    db_host: "db.librecloud.internal",
-    db_port: 5432,
-    db_name: "production_db",
-    username: "admin"
-  }
-}
-
-

Secret Hierarchy

-
librecloud (Workspace)
-  ├── wuji (Infrastructure)
-  │   ├── postgres (Domain)
-  │   │   ├── admin_password
-  │   │   ├── readonly_user
-  │   │   └── replication_user
-  │   ├── redis (Domain)
-  │   │   └── master_password
-  │   └── ssh (Domain)
-  │       ├── web01_key
-  │       └── db01_key
-  └── web (Infrastructure)
-      ├── api (Domain)
-      │   ├── stripe_token
-      │   ├── github_token
-      │   └── sendgrid_key
-      └── auth (Domain)
-          ├── jwt_secret
-          └── oauth_client_secret
-
-
-

🔄 Complete Workflows

-

Workflow 1: Create and Rotate Database Credential

-
1. Admin creates credential
-   POST /api/v1/secrets/database
-
-2. System encrypts with KMS
-   ├─ Generates data key
-   ├─ Encrypts secret with data key
-   └─ Encrypts data key with KMS master key
-
-3. Stores in SurrealDB
-   ├─ vault_secrets (encrypted value)
-   ├─ vault_versions (history)
-   └─ vault_audit (audit record)
-
-4. System schedules auto rotation
-   ├─ Calculates next date (30 days)
-   └─ Creates rotation_scheduler entry
-
-5. Every hour, background job checks
-   ├─ Any secrets due for rotation?
-   ├─ Yes → Generate new password
-   ├─ Validate functionality (connect to DB)
-   ├─ Update SurrealDB
-   └─ Log to audit
-
-6. Monitoring alerts
-   ├─ If 7 days remaining → WARNING alert
-   ├─ If 3 days remaining → CRITICAL alert
-   └─ If expired → EXPIRED alert
-
-

Workflow 2: Share Secret Between Workspaces

-
1. Admin of librecloud creates grant
-   POST /api/v1/secrets/{path}/grant
-
-2. Cedar verifies authorization
-   ├─ Is user admin of source workspace?
-   └─ Is target workspace valid?
-
-3. Grant created and recorded
-   ├─ Unique ID: grant-xxxxx
-   ├─ Status: active
-   └─ Audit: who, when, why
-
-4. Staging workspace user accesses secret
-   GET /api/v1/secrets/{path}
-
-5. System verifies access
-   ├─ Cedar: Is grant active?
-   ├─ Cedar: Sufficient permission?
-   ├─ Cedar: MFA if required?
-   └─ Yes → Return decrypted secret
-
-6. Audit records access
-   ├─ User who accessed
-   ├─ Source IP
-   ├─ Exact timestamp
-   ├─ Success/failure
-   └─ Increment access count in grant
-
-

Workflow 3: Access Temporal SSH Secret

-
1. User requests temporary SSH key
-   POST /api/v1/secrets/ssh
-   {ttl: "2h"}
-
-2. Cedar authorizes (requires MFA)
-   ├─ User has role?
-   ├─ MFA verified?
-   └─ TTL within limit (max 24h)?
-
-3. Orchestrator generates key
-   ├─ Generates SSH key pair (RSA 4096)
-   ├─ Stores in memory (TTL-based)
-   ├─ Logs to audit
-   └─ Returns private key
-
-4. User downloads key
-   └─ Valid for 2 hours
-
-5. Automatic expiration
-   ├─ 2-hour timer starts
-   ├─ TTL expires → Auto revokes
-   ├─ Later attempts → Access denied
-   └─ Audit: automatic revocation
-
-
-

📝 Practical Examples

-

Example 1: Manage PostgreSQL Secrets

-
# 1. Create credential
-provisioning secrets create database postgres \
-  --workspace librecloud \
-  --infra wuji \
-  --user admin \
-  --password "P@ssw0rd123!" \
-  --host db.librecloud.internal \
-  --port 5432 \
-  --database myapp_prod
-
-# 2. List PostgreSQL secrets
-provisioning secrets list --workspace librecloud --domain postgres
-
-# 3. Get for connection
-provisioning secrets get librecloud/wuji/postgres/admin_password
-
-# 4. Share with staging team
-provisioning secrets grant \
-  --secret librecloud/wuji/postgres/admin_password \
-  --target-workspace staging \
-  --permission read
-
-# 5. Force rotation
-provisioning secrets rotate librecloud/wuji/postgres/admin_password
-
-# 6. Check status
-provisioning secrets monitoring dashboard | grep postgres
-
-

Example 2: Temporary SSH Access

-
# 1. Generate temporary SSH key (4 hours)
-provisioning secrets create ssh \
-  --workspace librecloud \
-  --infra wuji \
-  --server web01 \
-  --ttl 4h
-
-# 2. Download private key
-provisioning secrets get librecloud/wuji/ssh/web01_key > ~/.ssh/web01_temp
-
-# 3. Connect to server
-chmod 600 ~/.ssh/web01_temp
-ssh -i ~/.ssh/web01_temp ubuntu@web01.librecloud.internal
-
-# 4. After 4 hours
-# → Key revoked automatically
-# → New SSH attempts fail
-# → Access logged in audit
-
-

Example 3: CI/CD Integration

-
# GitLab CI / GitHub Actions
-jobs:
-  deploy:
-    script:
-      # 1. Get DB credential
-      - export DB_PASSWORD=$(provisioning secrets get librecloud/prod/postgres/admin_password)
-
-      # 2. Get API token
-      - export API_TOKEN=$(provisioning secrets get librecloud/app/api_token)
-
-      # 3. Deploy application
-      - docker run -e DB_PASSWORD=$DB_PASSWORD -e API_TOKEN=$API_TOKEN myapp:latest
-
-      # 4. System logs access in audit
-      #    → User: ci-deploy
-      #    → Workspace: librecloud
-      #    → Secrets accessed: 2
-      #    → Status: success
-
-
-

🛡️ Security

-

Encryption

-
    -
  • At Rest: AES-256-GCM with KMS key rotation
  • -
  • In Transit: TLS 1.3
  • -
  • In Memory: Automatic cleanup of sensitive variables
  • -
-

Access Control

-
    -
  • Cedar: All operations evaluated against policies
  • -
  • MFA: Required for production secrets
  • -
  • Workspace Isolation: Data separation at DB level
  • -
-

Audit

-
{
-  "timestamp": "2025-12-06T10:30:45Z",
-  "user_id": "alice",
-  "workspace": "librecloud",
-  "action": "secrets:get",
-  "resource": "librecloud/wuji/postgres/admin_password",
-  "result": "success",
-  "ip_address": "192.168.1.100",
-  "mfa_verified": true,
-  "cedar_policy": "prod-secret-access-mfa"
-}
-
-
-

📊 Test Results

-

All 25 Integration Tests Passing

-
✅ Phase 3.1: Rotation Scheduler (9 tests)
-   - Schedule creation
-   - Status transitions
-   - Failure tracking
-
-✅ Phase 3.2: Secret Sharing (8 tests)
-   - Grant creation with permissions
-   - Permission hierarchy
-   - Access logging
-
-✅ Phase 3.4: Monitoring (4 tests)
-   - Dashboard metrics
-   - Expiring alerts
-   - Failed access recording
-
-✅ Phase 5: Rotation Job Scheduler (4 tests)
-   - Background job lifecycle
-   - Configuration management
-
-✅ Integration Tests (3 tests)
-   - Multi-service workflows
-   - End-to-end scenarios
-
-

Execution:

-
cargo test --test secrets_phases_integration_test
-
-test result: ok. 25 passed; 0 failed
-
-
-

🆘 Troubleshooting

-

Problem: “Authorization denied by Cedar policy”

-

Cause: User lacks permissions in policy -Solution:

-
# Check user and permission
-provisioning policies check $USER can access secret:librecloud/postgres/admin_password
-
-# Check roles
-provisioning auth whoami
-
-# Request access from admin
-provisioning secrets grant \
-  --secret librecloud/wuji/postgres/admin_password \
-  --target-workspace $WORKSPACE \
-  --permission read
-
-

Problem: “Secret not found”

-

Cause: Typo in path or workspace doesn’t exist -Solution:

-
# List available secrets
-provisioning secrets list --workspace librecloud
-
-# Check active workspace
-provisioning workspace active
-
-# Switch workspace if needed
-provisioning workspace switch librecloud
-
-

Problem: “MFA required”

-

Cause: Operation requires MFA but not verified -Solution:

-
# Check MFA status
-provisioning auth status
-
-# Enroll if not configured
-provisioning mfa totp enroll
-
-# Use MFA token on next access
-provisioning secrets get librecloud/wuji/postgres/admin_password --mfa-code 123456
-
-
-

📚 Complete Documentation

-
    -
  • REST API: /docs/api/secrets-api.md
  • -
  • CLI Reference: provisioning secrets --help
  • -
  • Cedar Policies: provisioning/config/cedar-policies/secrets.cedar
  • -
  • Architecture: /docs/architecture/SECRETS_SERVICE_LAYER.md
  • -
  • Security: /docs/user/SECRETS_SECURITY_GUIDE.md
  • -
-
-

🎯 Next Steps (Future)

-
    -
  1. Phase 7: Web UI Dashboard for visual management
  2. -
  3. Phase 8: HashiCorp Vault integration
  4. -
  5. Phase 9: Multi-datacenter secret replication
  6. -
-
-

Status: ✅ Secrets Service Layer - COMPLETED AND TESTED

-

OCI Registry Service

-

Comprehensive OCI (Open Container Initiative) registry deployment and management for the provisioning system.

-
-

Source: provisioning/platform/oci-registry/

-
-

Supported Registries

-
    -
  • Zot (Recommended for Development): Lightweight, fast, OCI-native with UI
  • -
  • Harbor (Recommended for Production): Full-featured enterprise registry
  • -
  • Distribution (OCI Reference): Official OCI reference implementation
  • -
-

Features

-
    -
  • Multi-Registry Support: Zot, Harbor, Distribution
  • -
  • Namespace Organization: Logical separation of artifacts
  • -
  • Access Control: RBAC, policies, authentication
  • -
  • Monitoring: Prometheus metrics, health checks
  • -
  • Garbage Collection: Automatic cleanup of unused artifacts
  • -
  • High Availability: Optional HA configurations
  • -
  • TLS/SSL: Secure communication
  • -
  • UI Interface: Web-based management (Zot, Harbor)
  • -
-

Quick Start

-

Start Zot Registry (Default)

-
cd provisioning/platform/oci-registry/zot
-docker-compose up -d
-
-# Initialize with namespaces and policies
-nu ../scripts/init-registry.nu --registry-type zot
-
-# Access UI
-open http://localhost:5000
-
-

Start Harbor Registry

-
cd provisioning/platform/oci-registry/harbor
-docker-compose up -d
-sleep 120  # Wait for services
-
-# Initialize
-nu ../scripts/init-registry.nu --registry-type harbor --admin-password Harbor12345
-
-# Access UI
-open http://localhost
-# Login: admin / Harbor12345
-
-

Default Namespaces

-
- - - - -
NamespaceDescriptionPublicRetention
provisioning-extensionsExtension packagesNo10 tags, 90 days
provisioning-kclKCL schemasNo20 tags, 180 days
provisioning-platformPlatform imagesNo5 tags, 30 days
provisioning-testTest artifactsYes3 tags, 7 days
-
-

Management

-

Nushell Commands

-
# Start registry
-nu -c "use provisioning/core/nulib/lib_provisioning/oci_registry; oci-registry start --type zot"
-
-# Check status
-nu -c "use provisioning/core/nulib/lib_provisioning/oci_registry; oci-registry status --type zot"
-
-# View logs
-nu -c "use provisioning/core/nulib/lib_provisioning/oci_registry; oci-registry logs --type zot --follow"
-
-# Health check
-nu -c "use provisioning/core/nulib/lib_provisioning/oci_registry; oci-registry health --type zot"
-
-# List namespaces
-nu -c "use provisioning/core/nulib/lib_provisioning/oci_registry; oci-registry namespaces"
-
-

Docker Compose

-
# Start
-docker-compose up -d
-
-# Stop
-docker-compose down
-
-# View logs
-docker-compose logs -f
-
-# Remove (including volumes)
-docker-compose down -v
-
-

Registry Comparison

-
- - - - - - - -
FeatureZotHarborDistribution
SetupSimpleComplexSimple
UIBuilt-inFull-featuredNone
SearchYesYesNo
ScanningNoTrivyNo
ReplicationNoYesNo
RBACBasicAdvancedBasic
Best ForDev/CIProductionCompliance
-
-

Security

-

Authentication

-

Zot/Distribution (htpasswd):

-
htpasswd -Bc htpasswd provisioning
-docker login localhost:5000
-
-

Harbor (Database):

-
docker login localhost
-# Username: admin / Password: Harbor12345
-
-

Monitoring

-

Health Checks

-
# API check
-curl http://localhost:5000/v2/
-
-# Catalog check
-curl http://localhost:5000/v2/_catalog
-
-

Metrics

-

Zot:

-
curl http://localhost:5000/metrics
-
-

Harbor:

-
curl http://localhost:9090/metrics
-
- - -

Test Environment Guide

-

Version: 1.0.0 -Date: 2025-10-06 -Status: Production Ready

-
-

Overview

-

The Test Environment Service provides automated containerized testing for taskservs, servers, and multi-node clusters. Built into the orchestrator, it -eliminates manual Docker management and provides realistic test scenarios.

-

Architecture

-
┌─────────────────────────────────────────────────┐
-│         Orchestrator (port 8080)                │
-│  ┌──────────────────────────────────────────┐  │
-│  │  Test Orchestrator                       │  │
-│  │  • Container Manager (Docker API)        │  │
-│  │  • Network Isolation                     │  │
-│  │  • Multi-node Topologies                 │  │
-│  │  • Test Execution                        │  │
-│  └──────────────────────────────────────────┘  │
-└─────────────────────────────────────────────────┘
-                      ↓
-         ┌────────────────────────┐
-         │   Docker Containers    │
-         │  • Isolated Networks   │
-         │  • Resource Limits     │
-         │  • Volume Mounts       │
-         └────────────────────────┘
-
-

Test Environment Types

-

1. Single Taskserv Test

-

Test individual taskserv in isolated container.

-
# Basic test
-provisioning test env single kubernetes
-
-# With resource limits
-provisioning test env single redis --cpu 2000 --memory 4096
-
-# Auto-start and cleanup
-provisioning test quick postgres
-
-

2. Server Simulation

-

Simulate complete server with multiple taskservs.

-
# Server with taskservs
-provisioning test env server web-01 [containerd kubernetes cilium]
-
-# With infrastructure context
-provisioning test env server db-01 [postgres redis] --infra prod-stack
-
-

3. Cluster Topology

-

Multi-node cluster simulation from templates.

-
# 3-node Kubernetes cluster
-provisioning test topology load kubernetes_3node | test env cluster kubernetes --auto-start
-
-# etcd cluster
-provisioning test topology load etcd_cluster | test env cluster etcd
-
-

Quick Start

-

Prerequisites

-
    -
  1. -

    Docker running:

    -
    docker ps  # Should work without errors
    -
    -
  2. -
  3. -

    Orchestrator running:

    -
    cd provisioning/platform/orchestrator
    -./scripts/start-orchestrator.nu --background
    -
    -
  4. -
-

Basic Workflow

-
# 1. Quick test (fastest)
-provisioning test quick kubernetes
-
-# 2. Or step-by-step
-# Create environment
-provisioning test env single kubernetes --auto-start
-
-# List environments
-provisioning test env list
-
-# Check status
-provisioning test env status <env-id>
-
-# View logs
-provisioning test env logs <env-id>
-
-# Cleanup
-provisioning test env cleanup <env-id>
-
-

Topology Templates

-

Available Templates

-
# List templates
-provisioning test topology list
-
-
- - - - - -
TemplateDescriptionNodes
kubernetes_3nodeK8s HA cluster1 CP + 2 workers
kubernetes_singleAll-in-one K8s1 node
etcd_clusteretcd cluster3 members
containerd_testStandalone containerd1 node
postgres_redisDatabase stack2 nodes
-
-

Using Templates

-
# Load and use template
-provisioning test topology load kubernetes_3node | test env cluster kubernetes
-
-# View template
-provisioning test topology load etcd_cluster
-
-

Custom Topology

-

Create my-topology.toml:

-
[my_cluster]
-name = "My Custom Cluster"
-cluster_type = "custom"
-
-[[my_cluster.nodes]]
-name = "node-01"
-role = "primary"
-taskservs = ["postgres", "redis"]
-[my_cluster.nodes.resources]
-cpu_millicores = 2000
-memory_mb = 4096
-
-[[my_cluster.nodes]]
-name = "node-02"
-role = "replica"
-taskservs = ["postgres"]
-[my_cluster.nodes.resources]
-cpu_millicores = 1000
-memory_mb = 2048
-
-[my_cluster.network]
-subnet = "172.30.0.0/16"
-
-

Commands Reference

-

Environment Management

-
# Create from config
-provisioning test env create <config>
-
-# Single taskserv
-provisioning test env single <taskserv> [--cpu N] [--memory MB]
-
-# Server simulation
-provisioning test env server <name> <taskservs> [--infra NAME]
-
-# Cluster topology
-provisioning test env cluster <type> <topology>
-
-# List environments
-provisioning test env list
-
-# Get details
-provisioning test env get <env-id>
-
-# Show status
-provisioning test env status <env-id>
-
-

Test Execution

-
# Run tests
-provisioning test env run <env-id> [--tests [test1, test2]]
-
-# View logs
-provisioning test env logs <env-id>
-
-# Cleanup
-provisioning test env cleanup <env-id>
-
-

Quick Test

-
# One-command test (create, run, cleanup)
-provisioning test quick <taskserv> [--infra NAME]
-
-

REST API

-

Create Environment

-
curl -X POST http://localhost:9090/test/environments/create \
-  -H "Content-Type: application/json" \
-  -d '{
-    "config": {
-      "type": "single_taskserv",
-      "taskserv": "kubernetes",
-      "base_image": "ubuntu:22.04",
-      "environment": {},
-      "resources": {
-        "cpu_millicores": 2000,
-        "memory_mb": 4096
-      }
-    },
-    "infra": "my-project",
-    "auto_start": true,
-    "auto_cleanup": false
-  }'
-
-

List Environments

-
curl http://localhost:9090/test/environments
-
-

Run Tests

-
curl -X POST http://localhost:9090/test/environments/{id}/run \
-  -H "Content-Type: application/json" \
-  -d '{
-    "tests": [],
-    "timeout_seconds": 300
-  }'
-
-

Cleanup

-
curl -X DELETE http://localhost:9090/test/environments/{id}
-
-

Use Cases

-

1. Taskserv Development

-

Test taskserv before deployment:

-
# Test new taskserv version
-provisioning test env single my-taskserv --auto-start
-
-# Check logs
-provisioning test env logs <env-id>
-
-

2. Multi-Taskserv Integration

-

Test taskserv combinations:

-
# Test kubernetes + cilium + containerd
-provisioning test env server k8s-test [kubernetes cilium containerd] --auto-start
-
-

3. Cluster Validation

-

Test cluster configurations:

-
# Test 3-node etcd cluster
-provisioning test topology load etcd_cluster | test env cluster etcd --auto-start
-
-

4. CI/CD Integration

-
# .gitlab-ci.yml
-test-taskserv:
-  stage: test
-  script:
-    - provisioning test quick kubernetes
-    - provisioning test quick redis
-    - provisioning test quick postgres
-
-

Advanced Features

-

Resource Limits

-
# Custom CPU and memory
-provisioning test env single postgres \
-  --cpu 4000 \
-  --memory 8192
-
-

Network Isolation

-

Each environment gets isolated network:

-
    -
  • Subnet: 172.20.0.0/16 (default)
  • -
  • DNS enabled
  • -
  • Container-to-container communication
  • -
-

Auto-Cleanup

-
# Auto-cleanup after tests
-provisioning test env single redis --auto-start --auto-cleanup
-
-

Multiple Environments

-

Run tests in parallel:

-
# Create multiple environments
-provisioning test env single kubernetes --auto-start &
-provisioning test env single postgres --auto-start &
-provisioning test env single redis --auto-start &
-
-wait
-
-# List all
-provisioning test env list
-
-

Troubleshooting

-

Docker not running

-
Error: Failed to connect to Docker
-
-

Solution:

-
# Check Docker
-docker ps
-
-# Start Docker daemon
-sudo systemctl start docker  # Linux
-open -a Docker  # macOS
-
-

Orchestrator not running

-
Error: Connection refused (port 8080)
-
-

Solution:

-
cd provisioning/platform/orchestrator
-./scripts/start-orchestrator.nu --background
-
-

Environment creation fails

-

Check logs:

-
provisioning test env logs <env-id>
-
-

Check Docker:

-
docker ps -a
-docker logs <container-id>
-
-

Out of resources

-
Error: Cannot allocate memory
-
-

Solution:

-
# Cleanup old environments
-provisioning test env list | each {|env| provisioning test env cleanup $env.id }
-
-# Or cleanup Docker
-docker system prune -af
-
-

Best Practices

-

1. Use Templates

-

Reuse topology templates instead of recreating:

-
provisioning test topology load kubernetes_3node | test env cluster kubernetes
-
-

2. Auto-Cleanup

-

Always use auto-cleanup in CI/CD:

-
provisioning test quick <taskserv>  # Includes auto-cleanup
-
-

3. Resource Planning

-

Adjust resources based on needs:

-
    -
  • Development: 1-2 cores, 2 GB RAM
  • -
  • Integration: 2-4 cores, 4-8 GB RAM
  • -
  • Production-like: 4+ cores, 8+ GB RAM
  • -
-

4. Parallel Testing

-

Run independent tests in parallel:

-
for taskserv in [kubernetes postgres redis] {
-    provisioning test quick $taskserv &
-}
-wait
-
-

Configuration

-

Default Settings

-
    -
  • Base image: ubuntu:22.04
  • -
  • CPU: 1000 millicores (1 core)
  • -
  • Memory: 2048 MB (2 GB)
  • -
  • Network: 172.20.0.0/16
  • -
-

Custom Config

-
# Override defaults
-provisioning test env single postgres \
-  --base-image debian:12 \
-  --cpu 2000 \
-  --memory 4096
-
-
- - -
-

Version History

-
- -
VersionDateChanges
1.0.02025-10-06Initial test environment service
-
-
-

Maintained By: Infrastructure Team

-

Test Environment Service (v3.4.0)

-

🚀 Test Environment Service Completed (2025-10-06)

-

A comprehensive containerized test environment service has been integrated into the orchestrator, enabling automated testing of taskservs, complete -servers, and multi-node clusters without manual Docker management.

-

Key Features

-
    -
  • Automated Container Management: No manual Docker operations required
  • -
  • Three Test Environment Types: Single taskserv, server simulation, multi-node clusters
  • -
  • Multi-Node Support: Test complex topologies (Kubernetes HA, etcd clusters)
  • -
  • Network Isolation: Each test environment gets dedicated Docker networks
  • -
  • Resource Management: Configurable CPU, memory, and disk limits
  • -
  • Topology Templates: Predefined cluster configurations for common scenarios
  • -
  • Auto-Cleanup: Optional automatic cleanup after tests complete
  • -
  • CI/CD Integration: Easy integration into automated pipelines
  • -
-

Test Environment Types

-

1. Single Taskserv Testing

-

Test individual taskserv in isolated container:

-
# Quick test (create, run, cleanup)
-provisioning test quick kubernetes
-
-# With custom resources
-provisioning test env single postgres --cpu 2000 --memory 4096 --auto-start --auto-cleanup
-
-# With infrastructure context
-provisioning test env single redis --infra my-project
-
-

2. Server Simulation

-

Test complete server configurations with multiple taskservs:

-
# Simulate web server
-provisioning test env server web-01 [containerd kubernetes cilium] --auto-start
-
-# Simulate database server
-provisioning test env server db-01 [postgres redis] --infra prod-stack --auto-start
-
-

3. Multi-Node Cluster Topology

-

Test complex cluster configurations before deployment:

-
# 3-node Kubernetes HA cluster
-provisioning test topology load kubernetes_3node | test env cluster kubernetes --auto-start
-
-# etcd cluster
-provisioning test topology load etcd_cluster | test env cluster etcd --auto-start
-
-# Single-node Kubernetes
-provisioning test topology load kubernetes_single | test env cluster kubernetes
-
-

Test Environment Management

-
# List all test environments
-provisioning test env list
-
-# Check environment status
-provisioning test env status <env-id>
-
-# View environment logs
-provisioning test env logs <env-id>
+export PROVISIONING_ENV=production
 
-# Run tests in environment
-provisioning test env run <env-id>
+# Logging
+export PROVISIONING_LOG_LEVEL=debug
+export PROVISIONING_LOG_FILE=~/.provisioning/logs/provisioning.log
 
-# Cleanup environment
-provisioning test env cleanup <env-id>
-
-

Available Topology Templates

-

Predefined multi-node cluster templates in provisioning/config/test-topologies.toml:

-
- - - - - -
TemplateDescriptionNodesUse Case
kubernetes_3nodeK8s HA cluster1 CP + 2 workersProduction-like testing
kubernetes_singleAll-in-one K8s1 nodeDevelopment testing
etcd_clusteretcd cluster3 membersDistributed consensus
containerd_testStandalone containerd1 nodeContainer runtime
postgres_redisDatabase stack2 nodesDatabase integration
-
-

REST API Endpoints

-

The orchestrator exposes test environment endpoints:

-
    -
  • Create Environment: POST http://localhost:9090/v1/test/environments/create
  • -
  • List Environments: GET http://localhost:9090/v1/test/environments
  • -
  • Get Environment: GET http://localhost:9090/v1/test/environments/{id}
  • -
  • Run Tests: POST http://localhost:9090/v1/test/environments/{id}/run
  • -
  • Cleanup: DELETE http://localhost:9090/v1/test/environments/{id}
  • -
  • Get Logs: GET http://localhost:9090/v1/test/environments/{id}/logs
  • -
-

Prerequisites

-
    -
  1. -

    Docker Running: Test environments require Docker daemon

    -
    docker ps  # Should work without errors
    -
    -
  2. -
  3. -

    Orchestrator Running: Start the orchestrator to manage test containers

    -
    cd provisioning/platform/orchestrator
    -./scripts/start-orchestrator.nu --background
    -
    -
  4. -
-

Architecture

-
User Command (CLI/API)
-    ↓
-Test Orchestrator (Rust)
-    ↓
-Container Manager (bollard)
-    ↓
-Docker API
-    ↓
-Isolated Test Containers
-    • Dedicated networks
-    • Resource limits
-    • Volume mounts
-    • Multi-node support
-
-

Configuration

-
    -
  • Topology Templates: provisioning/config/test-topologies.toml
  • -
  • Default Resources: 1000 millicores CPU, 2048 MB memory
  • -
  • Network: 172.20.0.0/16 (default subnet)
  • -
  • Base Image: ubuntu:22.04 (configurable)
  • -
-

Use Cases

-
    -
  1. Taskserv Development: Test new taskservs before deployment
  2. -
  3. Integration Testing: Validate taskserv combinations
  4. -
  5. Cluster Validation: Test multi-node configurations
  6. -
  7. CI/CD Integration: Automated infrastructure testing
  8. -
  9. Production Simulation: Test production-like deployments safely
  10. -
-

CI/CD Integration Example

-
# GitLab CI
-test-infrastructure:
-  stage: test
-  script:
-    - ./scripts/start-orchestrator.nu --background
-    - provisioning test quick kubernetes
-    - provisioning test quick postgres
-    - provisioning test quick redis
-    - provisioning test topology load kubernetes_3node |
-        test env cluster kubernetes --auto-start
-  artifacts:
-    when: on_failure
-    paths:
-      - test-logs/
-
-

Documentation

-

Complete documentation available:

- -

Command Shortcuts

-

Test commands are integrated into the CLI with shortcuts:

-
    -
  • test or tst - Test command prefix
  • -
  • test quick <taskserv> - One-command test
  • -
  • test env single/server/cluster - Create test environments
  • -
  • test topology load/list - Manage topology templates
  • -
-

Taskserv Validation and Testing Guide

-

Version: 1.0.0 -Date: 2025-10-06 -Status: Production Ready

-
-

Overview

-

The taskserv validation and testing system provides comprehensive evaluation of infrastructure services before deployment, reducing errors and -increasing confidence in deployments.

-

Validation Levels

-

1. Static Validation

-

Validates configuration files, templates, and scripts without requiring infrastructure access.

-

What it checks:

-
    -
  • KCL schema syntax and semantics
  • -
  • Jinja2 template syntax
  • -
  • Shell script syntax (with shellcheck if available)
  • -
  • File structure and naming conventions
  • -
-

Command:

-
provisioning taskserv validate kubernetes --level static
-
-

2. Dependency Validation

-

Checks taskserv dependencies, conflicts, and requirements.

-

What it checks:

-
    -
  • Required dependencies are available
  • -
  • Optional dependencies status
  • -
  • Conflicting taskservs
  • -
  • Resource requirements (memory, CPU, disk)
  • -
  • Health check configuration
  • -
-

Command:

-
provisioning taskserv validate kubernetes --level dependencies
-
-

Check against infrastructure:

-
provisioning taskserv check-deps kubernetes --infra my-project
-
-

3. Check Mode (Dry-Run)

-

Enhanced check mode that performs validation and previews deployment without making changes.

-

What it does:

-
    -
  • Runs static validation
  • -
  • Validates dependencies
  • -
  • Previews configuration generation
  • -
  • Lists files to be deployed
  • -
  • Checks prerequisites (without SSH in check mode)
  • -
-

Command:

-
provisioning taskserv create kubernetes --check
-
-

4. Sandbox Testing

-

Tests taskserv in isolated container environment before actual deployment.

-

What it tests:

-
    -
  • Package prerequisites
  • -
  • Configuration validity
  • -
  • Script execution
  • -
  • Health check simulation
  • -
-

Command:

-
# Test with Docker
-provisioning taskserv test kubernetes --runtime docker
-
-# Test with Podman
-provisioning taskserv test kubernetes --runtime podman
-
-# Keep container for inspection
-provisioning taskserv test kubernetes --runtime docker --keep
-
-
-

Complete Validation Workflow

- -
# 1. Static validation (fastest, no infrastructure needed)
-provisioning taskserv validate kubernetes --level static -v
-
-# 2. Dependency validation
-provisioning taskserv check-deps kubernetes --infra my-project
-
-# 3. Check mode (dry-run with full validation)
-provisioning taskserv create kubernetes --check -v
-
-# 4. Sandbox testing (optional, requires Docker/Podman)
-provisioning taskserv test kubernetes --runtime docker
-
-# 5. Actual deployment (after all validations pass)
-provisioning taskserv create kubernetes
-
-

Quick Validation (All Levels)

-
# Run all validation levels
-provisioning taskserv validate kubernetes --level all -v
-
-
-

Validation Commands Reference

-

provisioning taskserv validate <taskserv>

-

Multi-level validation framework.

-

Options:

-
    -
  • --level <level> - Validation level: static, dependencies, health, all (default: all)
  • -
  • --infra <name> - Infrastructure context
  • -
  • --settings <path> - Settings file path
  • -
  • --verbose - Verbose output
  • -
  • --out <format> - Output format: json, yaml, text
  • -
-

Examples:

-
# Complete validation
-provisioning taskserv validate kubernetes
-
-# Only static validation
-provisioning taskserv validate kubernetes --level static
-
-# With verbose output
-provisioning taskserv validate kubernetes -v
-
-# JSON output
-provisioning taskserv validate kubernetes --out json
-
-

provisioning taskserv check-deps <taskserv>

-

Check dependencies against infrastructure.

-

Options:

-
    -
  • --infra <name> - Infrastructure context
  • -
  • --settings <path> - Settings file path
  • -
  • --verbose - Verbose output
  • -
-

Examples:

-
# Check dependencies
-provisioning taskserv check-deps kubernetes --infra my-project
-
-# Verbose output
-provisioning taskserv check-deps kubernetes --infra my-project -v
-
-

provisioning taskserv create <taskserv> --check

-

Enhanced check mode with full validation and preview.

-

Options:

-
    -
  • --check - Enable check mode (no actual deployment)
  • -
  • --verbose - Verbose output
  • -
  • All standard create options
  • -
-

Examples:

-
# Check mode with verbose output
-provisioning taskserv create kubernetes --check -v
-
-# Check specific server
-provisioning taskserv create kubernetes server-01 --check
-
-

provisioning taskserv test <taskserv>

-

Sandbox testing in isolated environment.

-

Options:

-
    -
  • --runtime <name> - Runtime: docker, podman, native (default: docker)
  • -
  • --infra <name> - Infrastructure context
  • -
  • --settings <path> - Settings file path
  • -
  • --keep - Keep container after test
  • -
  • --verbose - Verbose output
  • -
-

Examples:

-
# Test with Docker
-provisioning taskserv test kubernetes --runtime docker
-
-# Test with Podman
-provisioning taskserv test kubernetes --runtime podman
-
-# Keep container for debugging
-provisioning taskserv test kubernetes --keep -v
-
-# Connect to kept container
-docker exec -it taskserv-test-kubernetes bash
-
-
-

Validation Output

-

Static Validation

-
Taskserv Validation
-Taskserv: kubernetes
-Level: static
-
-Validating Nickel schemas for kubernetes...
-  Checking main.ncl...
-    ✓ Valid
-  Checking version.ncl...
-    ✓ Valid
-  Checking dependencies.ncl...
-    ✓ Valid
-
-Validating templates for kubernetes...
-  Checking env-kubernetes.j2...
-    ✓ Basic syntax OK
-  Checking install-kubernetes.sh...
-    ✓ Basic syntax OK
+# Configuration path
+export PROVISIONING_CONFIG=~/.config/provisioning/
 
-Validation Summary
-✓ nickel: 0 errors, 0 warnings
-✓ templates: 0 errors, 0 warnings
-✓ scripts: 0 errors, 0 warnings
+# KMS endpoint
+export PROVISIONING_KMS_ENDPOINT= [http://localhost:8080](http://localhost:8080)
 
-Overall Status
-✓ VALID - 0 warnings
+# Feature flags
+export PROVISIONING_FEATURE_BATCH_WORKFLOWS=true
+export PROVISIONING_FEATURE_TEST_ENVIRONMENT=true
 
-

Dependency Validation

-
Dependency Validation Report
-Taskserv: kubernetes
+

Best Practices

+

1. Secure Credentials

+
# NEVER commit credentials
+echo "config/local-overrides.toml" >> .gitignore
+echo ".secrets/" >> .gitignore
 
-Status: VALID
+# Use SOPS for shared secrets
+provisioning sops encrypt config/credentials.toml
+git add config/credentials.enc.toml
 
-Required Dependencies:
-  • containerd
-  • etcd
-  • os
-
-Optional Dependencies:
-  • cilium
-  • helm
-
-Conflicts:
-  • docker
-  • podman
-
-

Check Mode Output

-
Check Mode: kubernetes on server-01
-
-→ Running static validation...
-  ✓ Static validation passed
-
-→ Checking dependencies...
-  ✓ Dependencies OK
-    Required: containerd, etcd, os
-
-→ Previewing configuration generation...
-  ✓ Configuration preview generated
-    Files to process: 15
-
-→ Checking prerequisites...
-  ℹ Prerequisite checks (preview mode):
-    ⊘ Server accessibility: Check mode - SSH not tested
-    ℹ Directory /tmp: Would verify directory exists
-    ℹ Command bash: Would verify command is available
-
-Check Mode Summary
-✓ All validations passed
-
-💡 Taskserv can be deployed with: provisioning taskserv create kubernetes
-
-

Test Output

-
Taskserv Sandbox Testing
-Taskserv: kubernetes
-Runtime: docker
-
-→ Running pre-test validation...
-✓ Validation passed
-
-→ Preparing sandbox environment...
-  Using base image: ubuntu:22.04
-✓ Sandbox prepared: a1b2c3d4e5f6
-
-→ Running tests in sandbox...
-  Test 1: Package prerequisites...
-  Test 2: Configuration validity...
-  Test 3: Script execution...
-  Test 4: Health check simulation...
-
-Test Summary
-Total tests: 4
-Passed: 4
-Failed: 0
-Skipped: 0
-
-Detailed Results:
-  ✓ Package prerequisites: Package manager accessible
-  ✓ Configuration validity: 3 configuration files validated
-  ✓ Script execution: 2 scripts validated
-  ✓ Health check: Health check configuration valid: http://localhost:6443/healthz
-
-✓ All tests passed
+# Use environment variables for local overrides
+export PROVISIONING_PROVIDER_UPCLOUD_API_KEY="your-key"
 
-
-

Integration with CI/CD

-

GitLab CI Example

-
validate-taskservs:
-  stage: validate
-  script:
-    - provisioning taskserv validate kubernetes --level all --out json
-    - provisioning taskserv check-deps kubernetes --infra production
+

2. Environment-Specific Configuration

+
# Development uses different credentials
+PROVISIONING_ENV=dev provisioning workspace switch myapp-dev
 
-test-taskservs:
-  stage: test
-  script:
-    - provisioning taskserv test kubernetes --runtime docker
-  dependencies:
-    - validate-taskservs
-
-deploy-taskservs:
-  stage: deploy
-  script:
-    - provisioning taskserv create kubernetes
-  dependencies:
-    - test-taskservs
-  only:
-    - main
-
-

GitHub Actions Example

-
name: Taskserv Validation
-
-on: [push, pull_request]
-
-jobs:
-  validate:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Validate Taskservs
-        run: |
-          provisioning taskserv validate kubernetes --level all -v
-
-      - name: Check Dependencies
-        run: |
-          provisioning taskserv check-deps kubernetes --infra production
-
-      - name: Test in Sandbox
-        run: |
-          provisioning taskserv test kubernetes --runtime docker
-
-
-

Troubleshooting

-

shellcheck not found

-

If shellcheck is not available, script validation will be skipped with a warning.

-

Install shellcheck:

-
# macOS
-brew install shellcheck
-
-# Ubuntu/Debian
-apt install shellcheck
-
-# Fedora
-dnf install shellcheck
-
-

Docker/Podman not available

-

Sandbox testing requires Docker or Podman.

-

Check runtime:

-
# Docker
-docker ps
-
-# Podman
-podman ps
-
-# Use native mode (limited testing)
-provisioning taskserv test kubernetes --runtime native
+# Production uses restricted credentials
+PROVISIONING_ENV=prod provisioning workspace switch myapp-prod
 
-

Nickel type checking errors

-

Nickel type checking errors indicate syntax or type problems.

-

Common fixes:

-
    -
  • Check schema syntax in .ncl files
  • -
  • Validate imports and dependencies
  • -
  • Run nickel format to format files
  • -
  • Check manifest.toml dependencies
  • -
-

Dependency conflicts

-

If conflicting taskservs are detected:

-
    -
  • Remove conflicting taskserv first
  • -
  • Check infrastructure configuration
  • -
  • Review dependency declarations in dependencies.ncl
  • -
-
-

Advanced Usage

-

Custom Validation Scripts

-

You can create custom validation scripts by extending the validation framework:

-
# custom_validation.nu
-use provisioning/core/nulib/taskservs/validate.nu *
-
-def custom-validate [taskserv: string] {
-    # Custom validation logic
-    let result = (validate-nickel-schemas $taskserv --verbose=true)
+

3. Configuration Documentation

+

Document your configuration choices:

+
# provisioning.yaml
+configuration:
+  provider: "upcloud"
+  reason: "Primary European cloud"
 
-    # Additional custom checks
-    # ...
+  backup_strategy: "daily"
+  reason: "Compliance requirement"
 
-    return $result
-}
+  monitoring: "enabled"
+  reason: "SLA monitoring"
 
-

Batch Validation

-

Validate multiple taskservs:

-
# Validate all taskservs in infrastructure
-for taskserv in (provisioning taskserv list | get name) {
-    provisioning taskserv validate $taskserv
-}
-
-

Automated Testing

-

Create test suite for all taskservs:

-
#!/usr/bin/env nu
+

4. Regular Validation

+
# Validate before deployment
+provisioning validate config --strict
 
-let taskservs = ["kubernetes", "containerd", "cilium", "etcd"]
+# Export and inspect
+provisioning config export --format yaml | less
 
-for ts in $taskservs {
-    print $"Testing ($ts)..."
-    provisioning taskserv test $ts --runtime docker
-}
+# Test provider connectivity
+provisioning providers test --all
 
-
-

Best Practices

-

Before Deployment

-
    -
  1. Always validate before deploying to production
  2. -
  3. Run check mode to preview changes
  4. -
  5. Test in sandbox for critical services
  6. -
  7. Check dependencies in infrastructure context
  8. -
-

During Development

-
    -
  1. Validate frequently during taskserv development
  2. -
  3. Use verbose mode to understand validation details
  4. -
  5. Fix warnings even if validation passes
  6. -
  7. Keep containers for debugging test failures
  8. -
-

In CI/CD

-
    -
  1. Fail fast on validation errors
  2. -
  3. Require all tests pass before merge
  4. -
  5. Generate reports in JSON format for analysis
  6. -
  7. Archive test results for audit trail
  8. -
-
- - -
-

Version History

-
- -
VersionDateChanges
1.0.02025-10-06Initial validation and testing guide
-
-
-

Maintained By: Infrastructure Team -Review Cycle: Quarterly

-

Troubleshooting Guide

-

This comprehensive troubleshooting guide helps you diagnose and resolve common issues with Infrastructure Automation.

-

What You’ll Learn

-
    -
  • Common issues and their solutions
  • -
  • Diagnostic commands and techniques
  • -
  • Error message interpretation
  • -
  • Performance optimization
  • -
  • Recovery procedures
  • -
  • Prevention strategies
  • -
-

General Troubleshooting Approach

-

1. Identify the Problem

-
# Check overall system status
-provisioning env
-provisioning validate config
+

Troubleshooting

+

Configuration Not Loading

+
# Check configuration file
+cat ~/.config/provisioning/user_config.yaml
 
-# Check specific component status
-provisioning show servers --infra my-infra
-provisioning taskserv list --infra my-infra --installed
-
-

2. Gather Information

-
# Enable debug mode for detailed output
-provisioning --debug <command>
+# Validate YAML syntax
+yamllint ~/.config/provisioning/user_config.yaml
 
-# Check logs and errors
-provisioning show logs --infra my-infra
+# Debug configuration loading
+provisioning config show --verbose
 
-

3. Use Diagnostic Commands

-
# Validate configuration
-provisioning validate config --detailed
+

Provider Connection Failed

+
# Check provider configuration
+provisioning config show --section providers
 
 # Test connectivity
-provisioning provider test aws
-provisioning network test --infra my-infra
-
-

Installation and Setup Issues

-

Issue: Installation Fails

-

Symptoms:

-
    -
  • Installation script errors
  • -
  • Missing dependencies
  • -
  • Permission denied errors
  • -
-

Diagnosis:

-
# Check system requirements
-uname -a
-df -h
-whoami
-
-# Check permissions
-ls -la /usr/local/
-sudo -l
-
-

Solutions:

-

Permission Issues

-
# Run installer with sudo
-sudo ./install-provisioning
-
-# Or install to user directory
-./install-provisioning --prefix=$HOME/provisioning
-export PATH="$HOME/provisioning/bin:$PATH"
-
-

Missing Dependencies

-
# Ubuntu/Debian
-sudo apt update
-sudo apt install -y curl wget tar build-essential
-
-# RHEL/CentOS
-sudo dnf install -y curl wget tar gcc make
-
-

Architecture Issues

-
# Check architecture
-uname -m
-
-# Download correct architecture package
-# x86_64: Intel/AMD 64-bit
-# arm64: ARM 64-bit (Apple Silicon)
-wget https://releases.example.com/provisioning-linux-x86_64.tar.gz
-
-

Issue: Command Not Found

-

Symptoms:

-
bash: provisioning: command not found
-
-

Diagnosis:

-
# Check if provisioning is installed
-which provisioning
-ls -la /usr/local/bin/provisioning
-
-# Check PATH
-echo $PATH
-
-

Solutions:

-
# Add to PATH
-export PATH="/usr/local/bin:$PATH"
-
-# Make permanent (add to shell profile)
-echo 'export PATH="/usr/local/bin:$PATH"' >> ~/.bashrc
-source ~/.bashrc
-
-# Create symlink if missing
-sudo ln -sf /usr/local/provisioning/core/nulib/provisioning /usr/local/bin/provisioning
-
-

Issue: Nushell Plugin Errors

-

Symptoms:

-
Plugin not found: nu_plugin_kcl
-Plugin registration failed
-
-

Diagnosis:

-
# Check Nushell version
-nu --version
-
-# Check KCL installation (required for nu_plugin_kcl)
-kcl version
-
-# Check plugin registration
-nu -c "version | get installed_plugins"
-
-

Solutions:

-
# Install KCL CLI (required for nu_plugin_kcl)
-# Download from: https://github.com/kcl-lang/cli/releases
-
-# Re-register plugins
-nu -c "plugin add /usr/local/provisioning/plugins/nu_plugin_kcl"
-nu -c "plugin add /usr/local/provisioning/plugins/nu_plugin_tera"
-
-# Restart Nushell after plugin registration
-
-

Configuration Issues

-

Issue: Configuration Not Found

-

Symptoms:

-
Configuration file not found
-Failed to load configuration
-
-

Diagnosis:

-
# Check configuration file locations
-provisioning env | grep config
-
-# Check if files exist
-ls -la ~/.config/provisioning/
-ls -la /usr/local/provisioning/config.defaults.toml
-
-

Solutions:

-
# Initialize user configuration
-provisioning init config
-
-# Create missing directories
-mkdir -p ~/.config/provisioning
-
-# Copy template
-cp /usr/local/provisioning/config-examples/config.user.toml ~/.config/provisioning/config.toml
-
-# Verify configuration
-provisioning validate config
-
-

Issue: Configuration Validation Errors

-

Symptoms:

-
Configuration validation failed
-Invalid configuration value
-Missing required field
-
-

Diagnosis:

-
# Detailed validation
-provisioning validate config --detailed
-
-# Check specific sections
-provisioning config show --section paths
-provisioning config show --section providers
-
-

Solutions:

-

Path Configuration Issues

-
# Check base path exists
-ls -la /path/to/provisioning
-
-# Update configuration
-nano ~/.config/provisioning/config.toml
-
-# Fix paths section
-[paths]
-base = "/correct/path/to/provisioning"
-
-

Provider Configuration Issues

-
# Test provider connectivity
-provisioning provider test aws
+provisioning providers test upcloud --verbose
 
 # Check credentials
-aws configure list  # For AWS
-upcloud-cli config  # For UpCloud
-
-# Update provider configuration
-[providers.aws]
-interface = "CLI"  # or "API"
+provisioning kms get providers.upcloud.api_key
 
-

Issue: Interpolation Failures

-

Symptoms:

-
Interpolation pattern not resolved: {{env.VARIABLE}}
-Template rendering failed
+

Environment Variable Conflicts

+
# Check environment variables
+env | grep PROVISIONING
+
+# Unset conflicting variables
+unset PROVISIONING_PROVIDER
+
+# Set correct values
+export PROVISIONING_PROVIDER=aws
+export AWS_REGION=us-east-1
 
-

Diagnosis:

-
# Test interpolation
-provisioning validate interpolation test
-
-# Check environment variables
-env | grep VARIABLE
-
-# Debug interpolation
-provisioning --debug validate interpolation validate
-
-

Solutions:

-
# Set missing environment variables
-export MISSING_VARIABLE="value"
-
-# Use fallback values in configuration
-config_value = "{{env.VARIABLE || 'default_value'}}"
-
-# Check interpolation syntax
-# Correct: {{env.HOME}}
-# Incorrect: ${HOME} or $HOME
-
-

Server Management Issues

-

Issue: Server Creation Fails

-

Symptoms:

-
Failed to create server
-Provider API error
-Insufficient quota
-
-

Diagnosis:

-
# Check provider status
-provisioning provider status aws
-
-# Test connectivity
-ping api.provider.com
-curl -I https://api.provider.com
-
-# Check quota
-provisioning provider quota --infra my-infra
-
-# Debug server creation
-provisioning --debug server create web-01 --infra my-infra --check
-
-

Solutions:

-

API Authentication Issues

-
# AWS
-aws configure list
-aws sts get-caller-identity
-
-# UpCloud
-upcloud-cli account show
-
-# Update credentials
-aws configure  # For AWS
-export UPCLOUD_USERNAME="your-username"
-export UPCLOUD_PASSWORD="your-password"
-
-

Quota/Limit Issues

-
# Check current usage
-provisioning show costs --infra my-infra
-
-# Request quota increase from provider
-# Or reduce resource requirements
-
-# Use smaller instance types
-# Reduce number of servers
-
-

Network/Connectivity Issues

-
# Test network connectivity
-curl -v https://api.aws.amazon.com
-curl -v https://api.upcloud.com
-
-# Check DNS resolution
-nslookup api.aws.amazon.com
-
-# Check firewall rules
-# Ensure outbound HTTPS (port 443) is allowed
-
-

Issue: SSH Access Fails

-

Symptoms:

-
Connection refused
-Permission denied
-Host key verification failed
-
-

Diagnosis:

-
# Check server status
-provisioning server list --infra my-infra
-
-# Test SSH manually
-ssh -v user@server-ip
-
-# Check SSH configuration
-provisioning show servers web-01 --infra my-infra
-
-

Solutions:

-

Connection Issues

-
# Wait for server to be fully ready
-provisioning server list --infra my-infra --status
-
-# Check security groups/firewall
-# Ensure SSH (port 22) is allowed
-
-# Use correct IP address
-provisioning show servers web-01 --infra my-infra | grep ip
-
-

Authentication Issues

-
# Check SSH key
-ls -la ~/.ssh/
-ssh-add -l
-
-# Generate new key if needed
-ssh-keygen -t ed25519 -f ~/.ssh/provisioning_key
-
-# Use specific key
-provisioning server ssh web-01 --key ~/.ssh/provisioning_key --infra my-infra
-
-

Host Key Issues

-
# Remove old host key
-ssh-keygen -R server-ip
-
-# Accept new host key
-ssh -o StrictHostKeyChecking=accept-new user@server-ip
-
-

Task Service Issues

-

Issue: Service Installation Fails

-

Symptoms:

-
Service installation failed
-Package not found
-Dependency conflicts
-
-

Diagnosis:

-
# Check service prerequisites
-provisioning taskserv check kubernetes --infra my-infra
-
-# Debug installation
-provisioning --debug taskserv create kubernetes --infra my-infra --check
-
-# Check server resources
-provisioning server ssh web-01 --command "free -h && df -h" --infra my-infra
-
-

Solutions:

-

Resource Issues

-
# Check available resources
-provisioning server ssh web-01 --command "
-    echo 'Memory:' && free -h
-    echo 'Disk:' && df -h
-    echo 'CPU:' && nproc
-" --infra my-infra
-
-# Upgrade server if needed
-provisioning server resize web-01 --plan larger-plan --infra my-infra
-
-

Package Repository Issues

-
# Update package lists
-provisioning server ssh web-01 --command "
-    sudo apt update && sudo apt upgrade -y
-" --infra my-infra
-
-# Check repository connectivity
-provisioning server ssh web-01 --command "
-    curl -I https://download.docker.com/linux/ubuntu/
-" --infra my-infra
-
-

Dependency Issues

-
# Install missing dependencies
-provisioning taskserv create containerd --infra my-infra
-
-# Then install dependent service
-provisioning taskserv create kubernetes --infra my-infra
-
-

Issue: Service Not Running

-

Symptoms:

-
Service status: failed
-Service not responding
-Health check failures
-
-

Diagnosis:

-
# Check service status
-provisioning taskserv status kubernetes --infra my-infra
-
-# Check service logs
-provisioning taskserv logs kubernetes --infra my-infra
-
-# SSH and check manually
-provisioning server ssh web-01 --command "
-    sudo systemctl status kubernetes
-    sudo journalctl -u kubernetes --no-pager -n 50
-" --infra my-infra
-
-

Solutions:

-

Configuration Issues

-
# Reconfigure service
-provisioning taskserv configure kubernetes --infra my-infra
-
-# Reset to defaults
-provisioning taskserv reset kubernetes --infra my-infra
-
-

Port Conflicts

-
# Check port usage
-provisioning server ssh web-01 --command "
-    sudo netstat -tulpn | grep :6443
-    sudo ss -tulpn | grep :6443
-" --infra my-infra
-
-# Change port configuration or stop conflicting service
-
-

Permission Issues

-
# Fix permissions
-provisioning server ssh web-01 --command "
-    sudo chown -R kubernetes:kubernetes /var/lib/kubernetes
-    sudo chmod 600 /etc/kubernetes/admin.conf
-" --infra my-infra
-
-

Cluster Management Issues

-

Issue: Cluster Deployment Fails

-

Symptoms:

-
Cluster deployment failed
-Pod creation errors
-Service unavailable
-
-

Diagnosis:

-
# Check cluster status
-provisioning cluster status web-cluster --infra my-infra
-
-# Check Kubernetes cluster
-provisioning server ssh master-01 --command "
-    kubectl get nodes
-    kubectl get pods --all-namespaces
-" --infra my-infra
-
-# Check cluster logs
-provisioning cluster logs web-cluster --infra my-infra
-
-

Solutions:

-

Node Issues

-
# Check node status
-provisioning server ssh master-01 --command "
-    kubectl describe nodes
-" --infra my-infra
-
-# Drain and rejoin problematic nodes
-provisioning server ssh master-01 --command "
-    kubectl drain worker-01 --ignore-daemonsets
-    kubectl delete node worker-01
-" --infra my-infra
-
-# Rejoin node
-provisioning taskserv configure kubernetes --infra my-infra --servers worker-01
-
-

Resource Constraints

-
# Check resource usage
-provisioning server ssh master-01 --command "
-    kubectl top nodes
-    kubectl top pods --all-namespaces
-" --infra my-infra
-
-# Scale down or add more nodes
-provisioning cluster scale web-cluster --replicas 3 --infra my-infra
-provisioning server create worker-04 --infra my-infra
-
-

Network Issues

-
# Check network plugin
-provisioning server ssh master-01 --command "
-    kubectl get pods -n kube-system | grep cilium
-" --infra my-infra
-
-# Restart network plugin
-provisioning taskserv restart cilium --infra my-infra
-
-

Performance Issues

-

Issue: Slow Operations

-

Symptoms:

-
    -
  • Commands take very long to complete
  • -
  • Timeouts during operations
  • -
  • High CPU/memory usage
  • -
-

Diagnosis:

-
# Check system resources
-top
-htop
-free -h
-df -h
-
-# Check network latency
-ping api.aws.amazon.com
-traceroute api.aws.amazon.com
-
-# Profile command execution
-time provisioning server list --infra my-infra
-
-

Solutions:

-

Local System Issues

-
# Close unnecessary applications
-# Upgrade system resources
-# Use SSD storage if available
-
-# Increase timeout values
-export PROVISIONING_TIMEOUT=600  # 10 minutes
-
-

Network Issues

-
# Use region closer to your location
-[providers.aws]
-region = "us-west-1"  # Closer region
-
-# Enable connection pooling/caching
-[cache]
-enabled = true
-
-

Large Infrastructure Issues

-
# Use parallel operations
-provisioning server create --infra my-infra --parallel 4
-
-# Filter results
-provisioning server list --infra my-infra --filter "status == 'running'"
-
-

Issue: High Memory Usage

-

Symptoms:

-
    -
  • System becomes unresponsive
  • -
  • Out of memory errors
  • -
  • Swap usage high
  • -
-

Diagnosis:

-
# Check memory usage
-free -h
-ps aux --sort=-%mem | head
-
-# Check for memory leaks
-valgrind provisioning server list --infra my-infra
-
-

Solutions:

-
# Increase system memory
-# Close other applications
-# Use streaming operations for large datasets
-
-# Enable garbage collection
-export PROVISIONING_GC_ENABLED=true
-
-# Reduce concurrent operations
-export PROVISIONING_MAX_PARALLEL=2
-
-

Network and Connectivity Issues

-

Issue: API Connectivity Problems

-

Symptoms:

-
Connection timeout
-DNS resolution failed
-SSL certificate errors
-
-

Diagnosis:

-
# Test basic connectivity
-ping 8.8.8.8
-curl -I https://api.aws.amazon.com
-nslookup api.upcloud.com
-
-# Check SSL certificates
-openssl s_client -connect api.aws.amazon.com:443 -servername api.aws.amazon.com
-
-

Solutions:

-

DNS Issues

-
# Use alternative DNS
-echo 'nameserver 8.8.8.8' | sudo tee /etc/resolv.conf
-
-# Clear DNS cache
-sudo systemctl restart systemd-resolved  # Ubuntu
-sudo dscacheutil -flushcache             # macOS
-
-

Proxy/Firewall Issues

-
# Configure proxy if needed
-export HTTP_PROXY=http://proxy.company.com:9090
-export HTTPS_PROXY=http://proxy.company.com:9090
-
-# Check firewall rules
-sudo ufw status  # Ubuntu
-sudo firewall-cmd --list-all  # RHEL/CentOS
-
-

Certificate Issues

-
# Update CA certificates
-sudo apt update && sudo apt install ca-certificates  # Ubuntu
-brew install ca-certificates                         # macOS
-
-# Skip SSL verification (temporary)
-export PROVISIONING_SKIP_SSL_VERIFY=true
-
-

Security and Encryption Issues

-

Issue: SOPS Decryption Fails

-

Symptoms:

-
SOPS decryption failed
-Age key not found
-Invalid key format
-
-

Diagnosis:

-
# Check SOPS configuration
-provisioning sops config
-
-# Test SOPS manually
-sops -d encrypted-file.ncl
-
-# Check Age keys
-ls -la ~/.config/sops/age/keys.txt
-age-keygen -y ~/.config/sops/age/keys.txt
-
-

Solutions:

-

Missing Keys

-
# Generate new Age key
-age-keygen -o ~/.config/sops/age/keys.txt
-
-# Update SOPS configuration
-provisioning sops config --key-file ~/.config/sops/age/keys.txt
-
-

Key Permissions

-
# Fix key file permissions
-chmod 600 ~/.config/sops/age/keys.txt
-chown $(whoami) ~/.config/sops/age/keys.txt
-
-

Configuration Issues

-
# Update SOPS configuration in ~/.config/provisioning/config.toml
-[sops]
-use_sops = true
-key_search_paths = [
-    "~/.config/sops/age/keys.txt",
-    "/path/to/your/key.txt"
-]
-
-

Issue: Access Denied Errors

-

Symptoms:

-
Permission denied
-Access denied
-Insufficient privileges
-
-

Diagnosis:

-
# Check user permissions
-id
-groups
-
-# Check file permissions
-ls -la ~/.config/provisioning/
-ls -la /usr/local/provisioning/
-
-# Test with sudo
-sudo provisioning env
-
-

Solutions:

-
# Fix file ownership
-sudo chown -R $(whoami):$(whoami) ~/.config/provisioning/
-
-# Fix permissions
-chmod -R 755 ~/.config/provisioning/
-chmod 600 ~/.config/provisioning/config.toml
-
-# Add user to required groups
-sudo usermod -a -G docker $(whoami)  # For Docker access
-
-

Data and Storage Issues

-

Issue: Disk Space Problems

-

Symptoms:

-
No space left on device
-Write failed
-Disk full
-
-

Diagnosis:

-
# Check disk usage
-df -h
-du -sh ~/.config/provisioning/
-du -sh /usr/local/provisioning/
-
-# Find large files
-find /usr/local/provisioning -type f -size +100M
-
-

Solutions:

-
# Clean up cache files
-rm -rf ~/.config/provisioning/cache/*
-rm -rf /usr/local/provisioning/.cache/*
-
-# Clean up logs
-find /usr/local/provisioning -name "*.log" -mtime +30 -delete
-
-# Clean up temporary files
-rm -rf /tmp/provisioning-*
-
-# Compress old backups
-gzip ~/.config/provisioning/backups/*.yaml
-
-

Recovery Procedures

-

Configuration Recovery

-
# Restore from backup
-provisioning config restore --backup latest
-
-# Reset to defaults
-provisioning config reset
-
-# Recreate configuration
-provisioning init config --force
-
-

Infrastructure Recovery

-
# Check infrastructure status
-provisioning show servers --infra my-infra
-
-# Recover failed servers
-provisioning server create failed-server --infra my-infra
-
-# Restore from backup
-provisioning restore --backup latest --infra my-infra
-
-

Service Recovery

-
# Restart failed services
-provisioning taskserv restart kubernetes --infra my-infra
-
-# Reinstall corrupted services
-provisioning taskserv delete kubernetes --infra my-infra
-provisioning taskserv create kubernetes --infra my-infra
-
-

Prevention Strategies

-

Regular Maintenance

-
# Weekly maintenance script
-#!/bin/bash
-
-# Update system
-provisioning update --check
-
-# Validate configuration
-provisioning validate config
-
-# Check for service updates
-provisioning taskserv check-updates
-
-# Clean up old files
-provisioning cleanup --older-than 30d
-
-# Create backup
-provisioning backup create --name "weekly-$(date +%Y%m%d)"
-
-

Monitoring Setup

-
# Set up health monitoring
-#!/bin/bash
-
-# Check system health every hour
-0 * * * * /usr/local/bin/provisioning health check || echo "Health check failed" | mail -s "Provisioning Alert" admin@company.com
-
-# Weekly cost reports
-0 9 * * 1 /usr/local/bin/provisioning show costs --all | mail -s "Weekly Cost Report" finance@company.com
-
-

Best Practices

+

Next Steps

    -
  1. -

    Configuration Management

    -
      -
    • Version control all configuration files
    • -
    • Use check mode before applying changes
    • -
    • Regular validation and testing
    • -
    -
  2. -
  3. -

    Security

    -
      -
    • Regular key rotation
    • -
    • Principle of least privilege
    • -
    • Audit logs review
    • -
    -
  4. -
  5. -

    Backup Strategy

    -
      -
    • Automated daily backups
    • -
    • Test restore procedures
    • -
    • Off-site backup storage
    • -
    -
  6. -
  7. -

    Documentation

    -
      -
    • Document custom configurations
    • -
    • Keep troubleshooting logs
    • -
    • Share knowledge with team
    • -
    -
  8. +
  9. Create workspace
  10. +
  11. Deploy infrastructure
  12. +
  13. Configure batch workflows
-

Getting Additional Help

-

Debug Information Collection

-
#!/bin/bash
-# Collect debug information
-
-echo "Collecting provisioning debug information..."
-
-mkdir -p /tmp/provisioning-debug
-cd /tmp/provisioning-debug
-
-# System information
-uname -a > system-info.txt
-free -h >> system-info.txt
-df -h >> system-info.txt
-
-# Provisioning information
-provisioning --version > provisioning-info.txt
-provisioning env >> provisioning-info.txt
-provisioning validate config --detailed > config-validation.txt 2>&1
-
-# Configuration files
-cp ~/.config/provisioning/config.toml user-config.toml 2>/dev/null || echo "No user config" > user-config.toml
-
-# Logs
-provisioning show logs > system-logs.txt 2>&1
-
-# Create archive
-cd /tmp
-tar czf provisioning-debug-$(date +%Y%m%d_%H%M%S).tar.gz provisioning-debug/
-
-echo "Debug information collected in: provisioning-debug-*.tar.gz"
-
-

Support Channels

+

+ Provisioning Logo +

+

+ Provisioning +

+

User Guides

+

Step-by-step guides for common workflows, best practices, and advanced operational +scenarios using the Provisioning platform.

+

Overview

+

This section provides practical guides for:

+
    +
  • Getting started - From-scratch deployment and initial setup
  • +
  • Organization - Workspace management and multi-cloud strategies
  • +
  • Automation - Advanced workflow orchestration and GitOps
  • +
  • Operations - Disaster recovery, secrets rotation, cost governance
  • +
  • Integration - Hybrid cloud setup, zero-trust networks, legacy migration
  • +
  • Scaling - Multi-tenant environments, high availability, performance optimization
  • +
+

Each guide includes step-by-step instructions, configuration examples, troubleshooting, and best practices.

+

Getting Started

+

I’m completely new to Provisioning

+

Start with: From Scratch Guide - Complete walkthrough from installation through first deployment with explanations and examples.

+

I want to organize infrastructure

+

Read: Workspace Management - Best practices for organizing workspaces, isolation, and multi-team setup.

+

Core Workflow Guides

+
    +
  • +

    From Scratch Guide - Installation, workspace creation, +first deployment step-by-step

    +
  • +
  • +

    Workspace Management - Organization best +practices, multi-tenancy, collaboration, customization, schemas

    +
  • +
  • +

    Multi-Cloud Deployment - Deploy across +AWS, UpCloud, Hetzner with abstraction and failover

    +
  • +
+

+ Multi-Cloud Deployment AWS UpCloud Hetzner Provider Abstraction +

+ +

+ Deployment Pipeline Dev Staging Canary Production with Validation Gates +

+

Advanced Operational Guides

+ +

Enterprise Features

+ +

Quick Navigation

+

I need to

+

Deploy infrastructure quicklyFrom Scratch Guide

+

Organize multiple workspacesWorkspace Management

+

Deploy across cloudsMulti-Cloud Deployment

+

Build complex workflowsAdvanced Workflow Orchestration

+

Set up GitOpsGitOps Infrastructure Deployment

+

Handle disastersDisaster Recovery Guide

+

Rotate secrets safelySecrets Rotation Strategy

+

Connect on-premise to cloudHybrid Cloud Deployment

+

Design secure networksAdvanced Networking

+

Build custom extensionsCustom Extensions

+

Migrate legacy systemsLegacy System Migration

+

Guide Structure

+

Each guide follows this pattern:

    -
  1. -

    Built-in Help

    -
    provisioning help
    -provisioning help <command>
    -
    -
  2. -
  3. -

    Documentation

    -
      -
    • User guides in docs/user/
    • -
    • CLI reference: docs/user/cli-reference.md
    • -
    • Configuration guide: docs/user/configuration.md
    • -
    -
  4. -
  5. -

    Community Resources

    -
      -
    • Project repository issues
    • -
    • Community forums
    • -
    • Documentation wiki
    • -
    -
  6. -
  7. -

    Enterprise Support

    -
      -
    • Professional services
    • -
    • Priority support
    • -
    • Custom development
    • -
    -
  8. +
  9. Overview - What you’ll accomplish
  10. +
  11. Prerequisites - What you need before starting
  12. +
  13. Architecture - Visual diagram of the solution
  14. +
  15. Step-by-Step - Detailed instructions with examples
  16. +
  17. Configuration - Full Nickel configuration examples
  18. +
  19. Verification - How to validate the deployment
  20. +
  21. Troubleshooting - Common issues and solutions
  22. +
  23. Next Steps - How to extend or customize
  24. +
  25. Best Practices - Lessons learned and recommendations
-

Remember: When reporting issues, always include the debug information collected above and specific error messages.

-

Complete Deployment Guide: From Scratch to Production

-

Version: 3.5.0 -Last Updated: 2025-10-09 -Estimated Time: 30-60 minutes -Difficulty: Beginner to Intermediate

-
-

Table of Contents

+

Learning Paths

+

Path 1: I’m new to Provisioning (Day 1)

    -
  1. Prerequisites
  2. -
  3. Step 1: Install Nushell
  4. -
  5. Step 2: Install Nushell Plugins (Recommended)
  6. -
  7. Step 3: Install Required Tools
  8. -
  9. Step 4: Clone and Setup Project
  10. -
  11. Step 5: Initialize Workspace
  12. -
  13. Step 6: Configure Environment
  14. -
  15. Step 7: Discover and Load Modules
  16. -
  17. Step 8: Validate Configuration
  18. -
  19. Step 9: Deploy Servers
  20. -
  21. Step 10: Install Task Services
  22. -
  23. Step 11: Create Clusters
  24. -
  25. Step 12: Verify Deployment
  26. -
  27. Step 13: Post-Deployment
  28. -
  29. Troubleshooting
  30. -
  31. Next Steps
  32. +
  33. From Scratch Guide - Basic setup
  34. +
  35. Workspace Management - Organization
  36. +
  37. Multi-Cloud Deployment - Multi-cloud
-
-

Prerequisites

-

Before starting, ensure you have:

+

Path 2: I need production-ready setup (Week 1)

+
    +
  1. Workspace Management - Organization
  2. +
  3. GitOps Infrastructure Deployment - Automation
  4. +
  5. Disaster Recovery Guide - Resilience
  6. +
  7. Secrets Rotation Strategy - Security
  8. +
  9. Advanced Networking - Enterprise networking
  10. +
+

Path 3: I’m migrating from legacy (Month-long project)

+
    +
  1. Legacy System Migration - Migration plan
  2. +
  3. Advanced Workflow Orchestration - Complex deployments
  4. +
  5. Hybrid Cloud Deployment - Coexistence
  6. +
  7. GitOps Infrastructure Deployment - Continuous deployment
  8. +
  9. Disaster Recovery Guide - Failover strategies
  10. +
+

Path 4: I’m building a platform (Team project)

+
    +
  1. Custom Extensions - Build extensions
  2. +
  3. Workspace Management - Multi-tenant setup
  4. +
  5. Advanced Workflow Orchestration - Complex workflows
  6. +
  7. GitOps Infrastructure Deployment - CD/GitOps
  8. +
  9. Secrets Rotation Strategy - Security at scale
  10. +
+
    -
  • Operating System: macOS, Linux, or Windows (WSL2 recommended)
  • -
  • Administrator Access: Ability to install software and configure system
  • -
  • Internet Connection: For downloading dependencies and accessing cloud providers
  • -
  • Cloud Provider Credentials: UpCloud, Hetzner, AWS, or local development environment
  • -
  • Basic Terminal Knowledge: Comfortable running shell commands
  • -
  • Text Editor: vim, nano, Zed, VSCode, or your preferred editor
  • +
  • Getting Started → See provisioning/docs/src/getting-started/
  • +
  • Examples → See provisioning/docs/src/examples/
  • +
  • Features → See provisioning/docs/src/features/
  • +
  • Operations → See provisioning/docs/src/operations/
  • +
  • Development → See provisioning/docs/src/development/
- +

From Scratch Guide

+

Complete walkthrough from zero to production-ready infrastructure deployment using the Provisioning platform. This guide covers installation, configuration, +workspace setup, infrastructure definition, and deployment workflows.

+

Overview

+

This guide walks you through:

    -
  • CPU: 2+ cores
  • -
  • RAM: 8 GB minimum, 16 GB recommended
  • -
  • Disk: 20 GB free space minimum
  • +
  • Installing prerequisites and the Provisioning platform
  • +
  • Configuring cloud provider credentials
  • +
  • Creating your first workspace
  • +
  • Defining infrastructure using Nickel
  • +
  • Deploying servers and task services
  • +
  • Setting up Kubernetes clusters
  • +
  • Implementing security best practices
  • +
  • Monitoring and maintaining infrastructure
-
-

Step 1: Install Nushell

-

Nushell 0.109.1+ is the primary shell and scripting language for the provisioning platform.

-

macOS (via Homebrew)

-
# Install Nushell
+

Time commitment: 2-3 hours for complete setup +Prerequisites: Linux or macOS, terminal access, cloud provider account (optional)

+

Phase 1: Installation

+

System Prerequisites

+

Ensure your system meets minimum requirements:

+
# Check OS (Linux or macOS)
+uname -s
+
+# Verify available disk space (minimum 10GB recommended)
+df -h ~
+
+# Check internet connectivity
+ping -c 3 github.com
+
+

Install Required Tools

+

Nushell (Required)

+
# macOS
 brew install nushell
 
-# Verify installation
-nu --version
-# Expected: 0.109.1 or higher
-
-

Linux (via Package Manager)

-

Ubuntu/Debian:

-
# Add Nushell repository
-curl -fsSL https://starship.rs/install.sh | bash
-
-# Install Nushell
-sudo apt update
-sudo apt install nushell
+# Linux
+cargo install nu
 
 # Verify installation
-nu --version
+nu --version  # Expected: 0.109.1+
 
-

Fedora:

-
sudo dnf install nushell
-nu --version
-
-

Arch Linux:

-
sudo pacman -S nushell
-nu --version
-
-

Linux/macOS (via Cargo)

-
# Install Rust (if not already installed)
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-source $HOME/.cargo/env
-
-# Install Nushell
-cargo install nu --locked
-
-# Verify installation
-nu --version
-
-

Windows (via Winget)

-
# Install Nushell
-winget install nushell
-
-# Verify installation
-nu --version
-
-

Configure Nushell

-
# Start Nushell
-nu
-
-# Configure (creates default config if not exists)
-config nu
-
-
- -

Native plugins provide 10-50x performance improvement for authentication, KMS, and orchestrator operations.

-

Why Install Plugins

-

Performance Gains:

-
    -
  • 🚀 KMS operations: ~5 ms vs ~50 ms (10x faster)
  • -
  • 🚀 Orchestrator queries: ~1 ms vs ~30 ms (30x faster)
  • -
  • 🚀 Batch encryption: 100 files in 0.5s vs 5s (10x faster)
  • -
-

Benefits:

-
    -
  • ✅ Native Nushell integration (pipelines, data structures)
  • -
  • ✅ OS keyring for secure token storage
  • -
  • ✅ Offline capability (Age encryption, local orchestrator)
  • -
  • ✅ Graceful fallback to HTTP if not installed
  • -
-

Prerequisites for Building Plugins

-
# Install Rust toolchain (if not already installed)
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-source $HOME/.cargo/env
-rustc --version
-# Expected: rustc 1.75+ or higher
-
-# Linux only: Install development packages
-sudo apt install libssl-dev pkg-config  # Ubuntu/Debian
-sudo dnf install openssl-devel          # Fedora
-
-# Linux only: Install keyring service (required for auth plugin)
-sudo apt install gnome-keyring          # Ubuntu/Debian (GNOME)
-sudo apt install kwalletmanager         # Ubuntu/Debian (KDE)
-
-

Build Plugins

-
# Navigate to plugins directory
-cd provisioning/core/plugins/nushell-plugins
-
-# Build all three plugins in release mode (optimized)
-cargo build --release --all
-
-# Expected output:
-#    Compiling nu_plugin_auth v0.1.0
-#    Compiling nu_plugin_kms v0.1.0
-#    Compiling nu_plugin_orchestrator v0.1.0
-#     Finished release [optimized] target(s) in 2m 15s
-
-

Build time: ~2-5 minutes depending on hardware

-

Register Plugins with Nushell

-
# Register all three plugins (full paths recommended)
-plugin add $PWD/target/release/nu_plugin_auth
-plugin add $PWD/target/release/nu_plugin_kms
-plugin add $PWD/target/release/nu_plugin_orchestrator
-
-# Alternative (from plugins directory)
-plugin add target/release/nu_plugin_auth
-plugin add target/release/nu_plugin_kms
-plugin add target/release/nu_plugin_orchestrator
-
-

Verify Plugin Installation

-
# List registered plugins
-plugin list | where name =~ "auth|kms|orch"
-
-# Expected output:
-# ╭───┬─────────────────────────┬─────────┬───────────────────────────────────╮
-# │ # │          name           │ version │           filename                │
-# ├───┼─────────────────────────┼─────────┼───────────────────────────────────┤
-# │ 0 │ nu_plugin_auth          │ 0.1.0   │ .../nu_plugin_auth                │
-# │ 1 │ nu_plugin_kms           │ 0.1.0   │ .../nu_plugin_kms                 │
-# │ 2 │ nu_plugin_orchestrator  │ 0.1.0   │ .../nu_plugin_orchestrator        │
-# ╰───┴─────────────────────────┴─────────┴───────────────────────────────────╯
-
-# Test each plugin
-auth --help       # Should show auth commands
-kms --help        # Should show kms commands
-orch --help       # Should show orch commands
-
-

Configure Plugin Environments

-
# Add to ~/.config/nushell/env.nu
-$env.CONTROL_CENTER_URL = "http://localhost:3000"
-$env.RUSTYVAULT_ADDR = "http://localhost:8200"
-$env.RUSTYVAULT_TOKEN = "your-vault-token-here"
-$env.ORCHESTRATOR_DATA_DIR = "provisioning/platform/orchestrator/data"
-
-# For Age encryption (local development)
-$env.AGE_IDENTITY = $"($env.HOME)/.age/key.txt"
-$env.AGE_RECIPIENT = "age1xxxxxxxxx"  # Replace with your public key
-
-

Test Plugins (Quick Smoke Test)

-
# Test KMS plugin (requires backend configured)
-kms status
-# Expected: { backend: "rustyvault", status: "healthy", ... }
-# Or: Error if backend not configured (OK for now)
-
-# Test orchestrator plugin (reads local files)
-orch status
-# Expected: { active_tasks: 0, completed_tasks: 0, health: "healthy" }
-# Or: Error if orchestrator not started yet (OK for now)
-
-# Test auth plugin (requires control center)
-auth verify
-# Expected: { active: false }
-# Or: Error if control center not running (OK for now)
-
-

Note: It’s OK if plugins show errors at this stage. We’ll configure backends and services later.

- -

If you want to skip plugin installation for now:

-
    -
  • ✅ All features work via HTTP API (slower but functional)
  • -
  • ⚠️ You’ll miss 10-50x performance improvements
  • -
  • ⚠️ No offline capability for KMS/orchestrator
  • -
  • ℹ️ You can install plugins later anytime
  • -
-

To use HTTP fallback:

-
# System automatically uses HTTP if plugins not available
-# No configuration changes needed
-
-
-

Step 3: Install Required Tools

-

Essential Tools

-

SOPS (Secrets Management)

-
# macOS
-brew install sops
-
-# Linux
-wget https://github.com/mozilla/sops/releases/download/v3.10.2/sops-v3.10.2.linux.amd64
-sudo mv sops-v3.10.2.linux.amd64 /usr/local/bin/sops
-sudo chmod +x /usr/local/bin/sops
-
-# Verify
-sops --version
-# Expected: 3.10.2 or higher
-
-

Age (Encryption Tool)

-
# macOS
-brew install age
-
-# Linux
-sudo apt install age  # Ubuntu/Debian
-sudo dnf install age  # Fedora
-
-# Or from source
-go install filippo.io/age/cmd/...@latest
-
-# Verify
-age --version
-# Expected: 1.2.1 or higher
-
-# Generate Age key (for local encryption)
-age-keygen -o ~/.age/key.txt
-cat ~/.age/key.txt
-# Save the public key (age1...) for later
-
- -

K9s (Kubernetes Management)

-
# macOS
-brew install k9s
-
-# Linux
-curl -sS https://webinstall.dev/k9s | bash
-
-# Verify
-k9s version
-# Expected: 0.50.6 or higher
-
-

glow (Markdown Renderer)

-
# macOS
-brew install glow
-
-# Linux
-sudo apt install glow  # Ubuntu/Debian
-sudo dnf install glow  # Fedora
-
-# Verify
-glow --version
-
-
-

Step 4: Clone and Setup Project

-

Clone Repository

-
# Clone project
-git clone https://github.com/your-org/project-provisioning.git
-cd project-provisioning
-
-# Or if already cloned, update to latest
-git pull origin main
-
-

Add CLI to PATH (Optional)

-
# Add to ~/.bashrc or ~/.zshrc
-export PATH="$PATH:/Users/Akasha/project-provisioning/provisioning/core/cli"
-
-# Or create symlink
-sudo ln -s /Users/Akasha/project-provisioning/provisioning/core/cli/provisioning /usr/local/bin/provisioning
-
-# Verify
-provisioning version
-# Expected: 3.5.0
-
-
-

Step 5: Initialize Workspace

-

A workspace is a self-contained environment for managing infrastructure.

-

Create New Workspace

-
# Initialize new workspace
-provisioning workspace init --name production
-
-# Or use interactive mode
-provisioning workspace init
-# Name: production
-# Description: Production infrastructure
-# Provider: upcloud
-
-

What this creates:

-

The new workspace initialization now generates Nickel configuration files for type-safe, schema-validated infrastructure definitions:

-
workspace/
-├── config/
-│   ├── config.ncl               # Master Nickel configuration (type-safe)
-│   ├── providers/
-│   │   └── upcloud.toml         # Provider-specific settings
-│   ├── platform/                # Platform service configs
-│   └── kms.toml                 # Key management settings
-├── infra/
-│   └── default/
-│       ├── main.ncl             # Infrastructure entry point
-│       └── servers.ncl          # Server definitions
-├── docs/                        # Auto-generated guides
-└── workspace.nu                 # Workspace utility scripts
-
-

Workspace Configuration Format

-

The workspace configuration uses Nickel (type-safe, validated). This provides:

-
    -
  • Type Safety: Schema validation catches errors at load time
  • -
  • Lazy Evaluation: Only computes what’s needed
  • -
  • Validation: Record merging, required fields, constraints
  • -
  • Documentation: Self-documenting with records
  • -
-

Example Nickel config (config.ncl):

-
{
-  workspace = {
-    name = "production",
-    version = "1.0.0",
-    created = "2025-12-03T14:30:00Z",
-  },
-
-  paths = {
-    base = "/opt/workspaces/production",
-    infra = "/opt/workspaces/production/infra",
-    cache = "/opt/workspaces/production/.cache",
-  },
-
-  providers = {
-    active = ["upcloud"],
-    default = "upcloud",
-  },
-}
-
-

Verify Workspace

-
# Show workspace info
-provisioning workspace info
-
-# List all workspaces
-provisioning workspace list
-
-# Show active workspace
-provisioning workspace active
-# Expected: production
-
-

View and Validate Workspace Configuration

-

Now you can inspect and validate your Nickel workspace configuration:

-
# View complete workspace configuration
-provisioning workspace config show
-
-# Show specific workspace
-provisioning workspace config show production
-
-# View configuration in different formats
-provisioning workspace config show --format=json
-provisioning workspace config show --format=yaml
-provisioning workspace config show --format=nickel  # Raw Nickel file
-
-# Validate workspace configuration
-provisioning workspace config validate
-# Output: ✅ Validation complete - all configs are valid
-
-# Show configuration hierarchy (priority order)
-provisioning workspace config hierarchy
-
-

Configuration Validation: The Nickel schema automatically validates:

-
    -
  • ✅ Semantic versioning format (for example, “1.0.0”)
  • -
  • ✅ Required sections present (workspace, paths, provisioning, etc.)
  • -
  • ✅ Valid file paths and types
  • -
  • ✅ Provider configuration exists for active providers
  • -
  • ✅ KMS and SOPS settings properly configured
  • -
-
-

Step 6: Configure Environment

-

Set Provider Credentials

-

UpCloud Provider:

-
# Create provider config
-vim workspace/config/providers/upcloud.toml
-
-
[upcloud]
-username = "your-upcloud-username"
-password = "your-upcloud-password"  # Will be encrypted
-
-# Default settings
-default_zone = "de-fra1"
-default_plan = "2xCPU-4 GB"
-
-

AWS Provider:

-
# Create AWS config
-vim workspace/config/providers/aws.toml
-
-
[aws]
-region = "us-east-1"
-access_key_id = "AKIAXXXXX"
-secret_access_key = "xxxxx"  # Will be encrypted
-
-# Default settings
-default_instance_type = "t3.medium"
-default_region = "us-east-1"
-
-

Encrypt Sensitive Data

-
# Generate Age key if not done already
-age-keygen -o ~/.age/key.txt
-
-# Encrypt provider configs
-kms encrypt (open workspace/config/providers/upcloud.toml) --backend age \
-    | save workspace/config/providers/upcloud.toml.enc
-
-# Or use SOPS
-sops --encrypt --age $(cat ~/.age/key.txt | grep "public key:" | cut -d: -f2) \
-    workspace/config/providers/upcloud.toml > workspace/config/providers/upcloud.toml.enc
-
-# Remove plaintext
-rm workspace/config/providers/upcloud.toml
-
-

Configure Local Overrides

-
# Edit user-specific settings
-vim workspace/config/local-overrides.toml
-
-
[user]
-name = "admin"
-email = "admin@example.com"
-
-[preferences]
-editor = "vim"
-output_format = "yaml"
-confirm_delete = true
-confirm_deploy = true
-
-[http]
-use_curl = true  # Use curl instead of ureq
-
-[paths]
-ssh_key = "~/.ssh/id_ed25519"
-
-
-

Step 7: Discover and Load Modules

-

Discover Available Modules

-
# Discover task services
-provisioning module discover taskserv
-# Shows: kubernetes, containerd, etcd, cilium, helm, etc.
-
-# Discover providers
-provisioning module discover provider
-# Shows: upcloud, aws, local
-
-# Discover clusters
-provisioning module discover cluster
-# Shows: buildkit, registry, monitoring, etc.
-
-

Load Modules into Workspace

-
# Load Kubernetes taskserv
-provisioning module load taskserv production kubernetes
-
-# Load multiple modules
-provisioning module load taskserv production kubernetes containerd cilium
-
-# Load cluster configuration
-provisioning module load cluster production buildkit
-
-# Verify loaded modules
-provisioning module list taskserv production
-provisioning module list cluster production
-
-
-

Step 8: Validate Configuration

-

Before deploying, validate all configuration:

-
# Validate workspace configuration
-provisioning workspace validate
-
-# Validate infrastructure configuration
-provisioning validate config
-
-# Validate specific infrastructure
-provisioning infra validate --infra production
-
-# Check environment variables
-provisioning env
-
-# Show all configuration and environment
-provisioning allenv
-
-

Expected output:

-
✓ Configuration valid
-✓ Provider credentials configured
-✓ Workspace initialized
-✓ Modules loaded: 3 taskservs, 1 cluster
-✓ SSH key configured
-✓ Age encryption key available
-
-

Fix any errors before proceeding to deployment.

-
-

Step 9: Deploy Servers

-

Preview Server Creation (Dry Run)

-
# Check what would be created (no actual changes)
-provisioning server create --infra production --check
-
-# With debug output for details
-provisioning server create --infra production --check --debug
-
-

Review the output:

-
    -
  • Server names and configurations
  • -
  • Zones and regions
  • -
  • CPU, memory, disk specifications
  • -
  • Estimated costs
  • -
  • Network settings
  • -
-

Create Servers

-
# Create servers (with confirmation prompt)
-provisioning server create --infra production
-
-# Or auto-confirm (skip prompt)
-provisioning server create --infra production --yes
-
-# Wait for completion
-provisioning server create --infra production --wait
-
-

Expected output:

-
Creating servers for infrastructure: production
-
-  ● Creating server: k8s-master-01 (de-fra1, 4xCPU-8 GB)
-  ● Creating server: k8s-worker-01 (de-fra1, 4xCPU-8 GB)
-  ● Creating server: k8s-worker-02 (de-fra1, 4xCPU-8 GB)
-
-✓ Created 3 servers in 120 seconds
-
-Servers:
-  • k8s-master-01: 192.168.1.10 (Running)
-  • k8s-worker-01: 192.168.1.11 (Running)
-  • k8s-worker-02: 192.168.1.12 (Running)
-
-

Verify Server Creation

-
# List all servers
-provisioning server list --infra production
-
-# Show detailed server info
-provisioning server list --infra production --out yaml
-
-# SSH to server (test connectivity)
-provisioning server ssh k8s-master-01
-# Type 'exit' to return
-
-
-

Step 10: Install Task Services

-

Task services are infrastructure components like Kubernetes, databases, monitoring, etc.

-

Install Kubernetes (Check Mode First)

-
# Preview Kubernetes installation
-provisioning taskserv create kubernetes --infra production --check
-
-# Shows:
-# - Dependencies required (containerd, etcd)
-# - Configuration to be applied
-# - Resources needed
-# - Estimated installation time
-
-

Install Kubernetes

-
# Install Kubernetes (with dependencies)
-provisioning taskserv create kubernetes --infra production
-
-# Or install dependencies first
-provisioning taskserv create containerd --infra production
-provisioning taskserv create etcd --infra production
-provisioning taskserv create kubernetes --infra production
-
-# Monitor progress
-provisioning workflow monitor <task_id>
-
-

Expected output:

-
Installing taskserv: kubernetes
-
-  ● Installing containerd on k8s-master-01
-  ● Installing containerd on k8s-worker-01
-  ● Installing containerd on k8s-worker-02
-  ✓ Containerd installed (30s)
-
-  ● Installing etcd on k8s-master-01
-  ✓ etcd installed (20s)
-
-  ● Installing Kubernetes control plane on k8s-master-01
-  ✓ Kubernetes control plane ready (45s)
-
-  ● Joining worker nodes
-  ✓ k8s-worker-01 joined (15s)
-  ✓ k8s-worker-02 joined (15s)
-
-✓ Kubernetes installation complete (125 seconds)
-
-Cluster Info:
-  • Version: 1.28.0
-  • Nodes: 3 (1 control-plane, 2 workers)
-  • API Server: https://192.168.1.10:6443
-
-

Install Additional Services

-
# Install Cilium (CNI)
-provisioning taskserv create cilium --infra production
-
-# Install Helm
-provisioning taskserv create helm --infra production
-
-# Verify all taskservs
-provisioning taskserv list --infra production
-
-
-

Step 11: Create Clusters

-

Clusters are complete application stacks (for example, BuildKit, OCI Registry, Monitoring).

-

Create BuildKit Cluster (Check Mode)

-
# Preview cluster creation
-provisioning cluster create buildkit --infra production --check
-
-# Shows:
-# - Components to be deployed
-# - Dependencies required
-# - Configuration values
-# - Resource requirements
-
-

Create BuildKit Cluster

-
# Create BuildKit cluster
-provisioning cluster create buildkit --infra production
-
-# Monitor deployment
-provisioning workflow monitor <task_id>
-
-# Or use plugin for faster monitoring
-orch tasks --status running
-
-

Expected output:

-
Creating cluster: buildkit
-
-  ● Deploying BuildKit daemon
-  ● Deploying BuildKit worker
-  ● Configuring BuildKit cache
-  ● Setting up BuildKit registry integration
-
-✓ BuildKit cluster ready (60 seconds)
-
-Cluster Info:
-  • BuildKit version: 0.12.0
-  • Workers: 2
-  • Cache: 50 GB
-  • Registry: registry.production.local
-
-

Verify Cluster

-
# List all clusters
-provisioning cluster list --infra production
-
-# Show cluster details
-provisioning cluster list --infra production --out yaml
-
-# Check cluster health
-kubectl get pods -n buildkit
-
-
-

Step 12: Verify Deployment

-

Comprehensive Health Check

-
# Check orchestrator status
-orch status
-# or
-provisioning orchestrator status
-
-# Check all servers
-provisioning server list --infra production
-
-# Check all taskservs
-provisioning taskserv list --infra production
-
-# Check all clusters
-provisioning cluster list --infra production
-
-# Verify Kubernetes cluster
-kubectl get nodes
-kubectl get pods --all-namespaces
-
-

Run Validation Tests

-
# Validate infrastructure
-provisioning infra validate --infra production
-
-# Test connectivity
-provisioning server ssh k8s-master-01 "kubectl get nodes"
-
-# Test BuildKit
-kubectl exec -it -n buildkit buildkit-0 -- buildctl --version
-
-

Expected Results

-

All checks should show:

-
    -
  • ✅ Servers: Running
  • -
  • ✅ Taskservs: Installed and healthy
  • -
  • ✅ Clusters: Deployed and operational
  • -
  • ✅ Kubernetes: 3/3 nodes ready
  • -
  • ✅ BuildKit: 2/2 workers ready
  • -
-
-

Step 13: Post-Deployment

-

Configure kubectl Access

-
# Get kubeconfig from master node
-provisioning server ssh k8s-master-01 "cat ~/.kube/config" > ~/.kube/config-production
-
-# Set KUBECONFIG
-export KUBECONFIG=~/.kube/config-production
-
-# Verify access
-kubectl get nodes
-kubectl get pods --all-namespaces
-
-

Set Up Monitoring (Optional)

-
# Deploy monitoring stack
-provisioning cluster create monitoring --infra production
-
-# Access Grafana
-kubectl port-forward -n monitoring svc/grafana 3000:80
-# Open: http://localhost:3000
-
-

Configure CI/CD Integration (Optional)

-
# Generate CI/CD credentials
-provisioning secrets generate aws --ttl 12h
-
-# Create CI/CD kubeconfig
-kubectl create serviceaccount ci-cd -n default
-kubectl create clusterrolebinding ci-cd --clusterrole=admin --serviceaccount=default:ci-cd
-
-

Backup Configuration

-
# Backup workspace configuration
-tar -czf workspace-production-backup.tar.gz workspace/
-
-# Encrypt backup
-kms encrypt (open workspace-production-backup.tar.gz | encode base64) --backend age \
-    | save workspace-production-backup.tar.gz.enc
-
-# Store securely (S3, Vault, etc.)
-
-
-

Troubleshooting

-

Server Creation Fails

-

Problem: Server creation times out or fails

-
# Check provider credentials
-provisioning validate config
-
-# Check provider API status
-curl -u username:password https://api.upcloud.com/1.3/account
-
-# Try with debug mode
-provisioning server create --infra production --check --debug
-
-

Taskserv Installation Fails

-

Problem: Kubernetes installation fails

-
# Check server connectivity
-provisioning server ssh k8s-master-01
-
-# Check logs
-provisioning orchestrator logs | grep kubernetes
-
-# Check dependencies
-provisioning taskserv list --infra production | where status == "failed"
-
-# Retry installation
-provisioning taskserv delete kubernetes --infra production
-provisioning taskserv create kubernetes --infra production
-
-

Plugin Commands Don’t Work

-

Problem: auth, kms, or orch commands not found

-
# Check plugin registration
-plugin list | where name =~ "auth|kms|orch"
-
-# Re-register if missing
-cd provisioning/core/plugins/nushell-plugins
-plugin add target/release/nu_plugin_auth
-plugin add target/release/nu_plugin_kms
-plugin add target/release/nu_plugin_orchestrator
-
-# Restart Nushell
-exit
-nu
-
-

KMS Encryption Fails

-

Problem: kms encrypt returns error

-
# Check backend status
-kms status
-
-# Check RustyVault running
-curl http://localhost:8200/v1/sys/health
-
-# Use Age backend instead (local)
-kms encrypt "data" --backend age --key age1xxxxxxxxx
-
-# Check Age key
-cat ~/.age/key.txt
-
-

Orchestrator Not Running

-

Problem: orch status returns error

-
# Check orchestrator status
-ps aux | grep orchestrator
-
-# Start orchestrator
-cd provisioning/platform/orchestrator
-./scripts/start-orchestrator.nu --background
-
-# Check logs
-tail -f provisioning/platform/orchestrator/data/orchestrator.log
-
-

Configuration Validation Errors

-

Problem: provisioning validate config shows errors

-
# Show detailed errors
-provisioning validate config --debug
-
-# Check configuration files
-provisioning allenv
-
-# Fix missing settings
-vim workspace/config/local-overrides.toml
-
-
-

Next Steps

-

Explore Advanced Features

-
    -
  1. -

    Multi-Environment Deployment

    -
    # Create dev and staging workspaces
    -provisioning workspace create dev
    -provisioning workspace create staging
    -provisioning workspace switch dev
    -
    -
  2. -
  3. -

    Batch Operations

    -
    # Deploy to multiple clouds
    -provisioning batch submit workflows/multi-cloud-deploy.ncl
    -
    -
  4. -
  5. -

    Security Features

    -
    # Enable MFA
    -auth mfa enroll totp
    -
    -# Set up break-glass
    -provisioning break-glass request "Emergency access"
    -
    -
  6. -
  7. -

    Compliance and Audit

    -
    # Generate compliance report
    -provisioning compliance report --standard soc2
    -
    -
  8. -
-

Learn More

-
    -
  • Quick Reference: provisioning sc or docs/guides/quickstart-cheatsheet.md
  • -
  • Update Guide: docs/guides/update-infrastructure.md
  • -
  • Customize Guide: docs/guides/customize-infrastructure.md
  • -
  • Plugin Guide: docs/user/PLUGIN_INTEGRATION_GUIDE.md
  • -
  • Security System: docs/architecture/adr-009-security-system-complete.md
  • -
-

Get Help

-
# Show help for any command
-provisioning help
-provisioning help server
-provisioning help taskserv
-
-# Check version
-provisioning version
-
-# Start Nushell session with provisioning library
-provisioning nu
-
-
-

Summary

-

You’ve successfully:

-

✅ Installed Nushell and essential tools -✅ Built and registered native plugins (10-50x faster operations) -✅ Cloned and configured the project -✅ Initialized a production workspace -✅ Configured provider credentials -✅ Deployed servers -✅ Installed Kubernetes and task services -✅ Created application clusters -✅ Verified complete deployment

-

Your infrastructure is now ready for production use!

-
-

Estimated Total Time: 30-60 minutes -Next Guide: Update Infrastructure -Questions?: Open an issue or contact platform-team@example.com

-

Last Updated: 2025-10-09 -Version: 3.5.0

-

Update Existing Infrastructure

-

Goal: Safely update running infrastructure with minimal downtime -Time: 15-30 minutes -Difficulty: Intermediate

-

Overview

-

This guide covers:

-
    -
  1. Checking for updates
  2. -
  3. Planning update strategies
  4. -
  5. Updating task services
  6. -
  7. Rolling updates
  8. -
  9. Rollback procedures
  10. -
  11. Verification
  12. -
-

Update Strategies

-

Strategy 1: In-Place Updates (Fastest)

-

Best for: Non-critical environments, development, staging

-
# Direct update without downtime consideration
-provisioning t create <taskserv> --infra <project>
-
- -

Best for: Production environments, high availability

-
# Update servers one by one
-provisioning s update --infra <project> --rolling
-
-

Strategy 3: Blue-Green Deployment (Safest)

-

Best for: Critical production, zero-downtime requirements

-
# Create new infrastructure, switch traffic, remove old
-provisioning ws init <project>-green
-# ... configure and deploy
-# ... switch traffic
-provisioning ws delete <project>-blue
-
-

Step 1: Check for Updates

-

1.1 Check All Task Services

-
# Check all taskservs for updates
-provisioning t check-updates
-
-

Expected Output:

-
📦 Task Service Update Check:
-
-NAME         CURRENT   LATEST    STATUS
-kubernetes   1.29.0    1.30.0    ⬆️  update available
-containerd   1.7.13    1.7.13    ✅ up-to-date
-cilium       1.14.5    1.15.0    ⬆️  update available
-postgres     15.5      16.1      ⬆️  update available
-redis        7.2.3     7.2.3     ✅ up-to-date
-
-Updates available: 3
-
-

1.2 Check Specific Task Service

-
# Check specific taskserv
-provisioning t check-updates kubernetes
-
-

Expected Output:

-
📦 Kubernetes Update Check:
-
-Current:  1.29.0
-Latest:   1.30.0
-Status:   ⬆️  Update available
-
-Changelog:
-  • Enhanced security features
-  • Performance improvements
-  • Bug fixes in kube-apiserver
-  • New workload resource types
-
-Breaking Changes:
-  • None
-
-Recommended: ✅ Safe to update
-
-

1.3 Check Version Status

-
# Show detailed version information
-provisioning version show
-
-

Expected Output:

-
📋 Component Versions:
-
-COMPONENT    CURRENT   LATEST    DAYS OLD  STATUS
-kubernetes   1.29.0    1.30.0    45        ⬆️  update
-containerd   1.7.13    1.7.13    0         ✅ current
-cilium       1.14.5    1.15.0    30        ⬆️  update
-postgres     15.5      16.1      60        ⬆️  update (major)
-redis        7.2.3     7.2.3     0         ✅ current
-
-

1.4 Check for Security Updates

-
# Check for security-related updates
-provisioning version updates --security-only
-
-

Step 2: Plan Your Update

-

2.1 Review Current Configuration

-
# Show current infrastructure
-provisioning show settings --infra my-production
-
-

2.2 Backup Configuration

-
# Create configuration backup
-cp -r workspace/infra/my-production workspace/infra/my-production.backup-$(date +%Y%m%d)
-
-# Or use built-in backup
-provisioning ws backup my-production
-
-

Expected Output:

-
✅ Backup created: workspace/backups/my-production-20250930.tar.gz
-
-

2.3 Create Update Plan

-
# Generate update plan
-provisioning plan update --infra my-production
-
-

Expected Output:

-
📝 Update Plan for my-production:
-
-Phase 1: Minor Updates (Low Risk)
-  • containerd: No update needed
-  • redis: No update needed
-
-Phase 2: Patch Updates (Medium Risk)
-  • cilium: 1.14.5 → 1.15.0 (estimated 5 minutes)
-
-Phase 3: Major Updates (High Risk - Requires Testing)
-  • kubernetes: 1.29.0 → 1.30.0 (estimated 15 minutes)
-  • postgres: 15.5 → 16.1 (estimated 10 minutes, may require data migration)
-
-Recommended Order:
-  1. Update cilium (low risk)
-  2. Update kubernetes (test in staging first)
-  3. Update postgres (requires maintenance window)
-
-Total Estimated Time: 30 minutes
-Recommended: Test in staging environment first
-
-

Step 3: Update Task Services

-

3.1 Update Non-Critical Service (Cilium Example)

-

Dry-Run Update

-
# Test update without applying
-provisioning t create cilium --infra my-production --check
-
-

Expected Output:

-
🔍 CHECK MODE: Simulating Cilium update
-
-Current: 1.14.5
-Target:  1.15.0
-
-Would perform:
-  1. Download Cilium 1.15.0
-  2. Update configuration
-  3. Rolling restart of Cilium pods
-  4. Verify connectivity
-
-Estimated downtime: <1 minute per node
-No errors detected. Ready to update.
-
-

Generate Updated Configuration

-
# Generate new configuration
-provisioning t generate cilium --infra my-production
-
-

Expected Output:

-
✅ Generated Cilium configuration (version 1.15.0)
-   Saved to: workspace/infra/my-production/taskservs/cilium.ncl
-
-

Apply Update

-
# Apply update
-provisioning t create cilium --infra my-production
-
-

Expected Output:

-
🚀 Updating Cilium on my-production...
-
-Downloading Cilium 1.15.0... ⏳
-✅ Downloaded
-
-Updating configuration... ⏳
-✅ Configuration updated
-
-Rolling restart: web-01... ⏳
-✅ web-01 updated (Cilium 1.15.0)
-
-Rolling restart: web-02... ⏳
-✅ web-02 updated (Cilium 1.15.0)
-
-Verifying connectivity... ⏳
-✅ All nodes connected
-
-🎉 Cilium update complete!
-   Version: 1.14.5 → 1.15.0
-   Downtime: 0 minutes
-
-

Verify Update

-
# Verify updated version
-provisioning version taskserv cilium
-
-

Expected Output:

-
📦 Cilium Version Info:
-
-Installed: 1.15.0
-Latest:    1.15.0
-Status:    ✅ Up-to-date
-
-Nodes:
-  ✅ web-01: 1.15.0 (running)
-  ✅ web-02: 1.15.0 (running)
-
-

3.2 Update Critical Service (Kubernetes Example)

-

Test in Staging First

-
# If you have staging environment
-provisioning t create kubernetes --infra my-staging --check
-provisioning t create kubernetes --infra my-staging
-
-# Run integration tests
-provisioning test kubernetes --infra my-staging
-
-

Backup Current State

-
# Backup Kubernetes state
-kubectl get all -A -o yaml > k8s-backup-$(date +%Y%m%d).yaml
-
-# Backup etcd (if using external etcd)
-provisioning t backup kubernetes --infra my-production
-
-

Schedule Maintenance Window

-
# Set maintenance mode (optional, if supported)
-provisioning maintenance enable --infra my-production --duration 30m
-
-

Update Kubernetes

-
# Update control plane first
-provisioning t create kubernetes --infra my-production --control-plane-only
-
-

Expected Output:

-
🚀 Updating Kubernetes control plane on my-production...
-
-Draining control plane: web-01... ⏳
-✅ web-01 drained
-
-Updating control plane: web-01... ⏳
-✅ web-01 updated (Kubernetes 1.30.0)
-
-Uncordoning: web-01... ⏳
-✅ web-01 ready
-
-Verifying control plane... ⏳
-✅ Control plane healthy
-
-🎉 Control plane update complete!
-
-
# Update worker nodes one by one
-provisioning t create kubernetes --infra my-production --workers-only --rolling
-
-

Expected Output:

-
🚀 Updating Kubernetes workers on my-production...
-
-Rolling update: web-02...
-  Draining... ⏳
-  ✅ Drained (pods rescheduled)
-
-  Updating... ⏳
-  ✅ Updated (Kubernetes 1.30.0)
-
-  Uncordoning... ⏳
-  ✅ Ready
-
-  Waiting for pods to stabilize... ⏳
-  ✅ All pods running
-
-🎉 Worker update complete!
-   Updated: web-02
-   Version: 1.30.0
-
-

Verify Update

-
# Verify Kubernetes cluster
-kubectl get nodes
-provisioning version taskserv kubernetes
-
-

Expected Output:

-
NAME     STATUS   ROLES           AGE   VERSION
-web-01   Ready    control-plane   30d   v1.30.0
-web-02   Ready    <none>          30d   v1.30.0
-
-
# Run smoke tests
-provisioning test kubernetes --infra my-production
-
-

3.3 Update Database (PostgreSQL Example)

-

⚠️ WARNING: Database updates may require data migration. Always backup first!

-

Backup Database

-
# Backup PostgreSQL database
-provisioning t backup postgres --infra my-production
-
-

Expected Output:

-
🗄️  Backing up PostgreSQL...
-
-Creating dump: my-production-postgres-20250930.sql... ⏳
-✅ Dump created (2.3 GB)
-
-Compressing... ⏳
-✅ Compressed (450 MB)
-
-Saved to: workspace/backups/postgres/my-production-20250930.sql.gz
-
-

Check Compatibility

-
# Check if data migration is needed
-provisioning t check-migration postgres --from 15.5 --to 16.1
-
-

Expected Output:

-
🔍 PostgreSQL Migration Check:
-
-From: 15.5
-To:   16.1
-
-Migration Required: ✅ Yes (major version change)
-
-Steps Required:
-  1. Dump database with pg_dump
-  2. Stop PostgreSQL 15.5
-  3. Install PostgreSQL 16.1
-  4. Initialize new data directory
-  5. Restore from dump
-
-Estimated Time: 15-30 minutes (depending on data size)
-Estimated Downtime: 15-30 minutes
-
-Recommended: Use streaming replication for zero-downtime upgrade
-
-

Perform Update

-
# Update PostgreSQL (with automatic migration)
-provisioning t create postgres --infra my-production --migrate
-
-

Expected Output:

-
🚀 Updating PostgreSQL on my-production...
-
-⚠️  Major version upgrade detected (15.5 → 16.1)
-   Automatic migration will be performed
-
-Dumping database... ⏳
-✅ Database dumped (2.3 GB)
-
-Stopping PostgreSQL 15.5... ⏳
-✅ Stopped
-
-Installing PostgreSQL 16.1... ⏳
-✅ Installed
-
-Initializing new data directory... ⏳
-✅ Initialized
-
-Restoring database... ⏳
-✅ Restored (2.3 GB)
-
-Starting PostgreSQL 16.1... ⏳
-✅ Started
-
-Verifying data integrity... ⏳
-✅ All tables verified
-
-🎉 PostgreSQL update complete!
-   Version: 15.5 → 16.1
-   Downtime: 18 minutes
-
-

Verify Update

-
# Verify PostgreSQL
-provisioning version taskserv postgres
-ssh db-01 "psql --version"
-
-

Step 4: Update Multiple Services

-

4.1 Batch Update (Sequentially)

-
# Update multiple taskservs one by one
-provisioning t update --infra my-production --taskservs cilium,containerd,redis
-
-

Expected Output:

-
🚀 Updating 3 taskservs on my-production...
-
-[1/3] Updating cilium... ⏳
-✅ cilium updated (1.15.0)
-
-[2/3] Updating containerd... ⏳
-✅ containerd updated (1.7.14)
-
-[3/3] Updating redis... ⏳
-✅ redis updated (7.2.4)
-
-🎉 All updates complete!
-   Updated: 3 taskservs
-   Total time: 8 minutes
-
-

4.2 Parallel Update (Non-Dependent Services)

-
# Update taskservs in parallel (if they don't depend on each other)
-provisioning t update --infra my-production --taskservs redis,postgres --parallel
-
-

Expected Output:

-
🚀 Updating 2 taskservs in parallel on my-production...
-
-redis: Updating... ⏳
-postgres: Updating... ⏳
-
-redis: ✅ Updated (7.2.4)
-postgres: ✅ Updated (16.1)
-
-🎉 All updates complete!
-   Updated: 2 taskservs
-   Total time: 3 minutes (parallel)
-
-

Step 5: Update Server Configuration

-

5.1 Update Server Resources

-
# Edit server configuration
-provisioning sops workspace/infra/my-production/servers.ncl
-
-

Example: Upgrade server plan

-
# Before
-{
-    name = "web-01"
-    plan = "1xCPU-2 GB"  # Old plan
-}
-
-# After
-{
-    name = "web-01"
-    plan = "2xCPU-4 GB"  # New plan
-}
-
-
# Apply server update
-provisioning s update --infra my-production --check
-provisioning s update --infra my-production
-
-

5.2 Update Server OS

-
# Update operating system packages
-provisioning s update --infra my-production --os-update
-
-

Expected Output:

-
🚀 Updating OS packages on my-production servers...
-
-web-01: Updating packages... ⏳
-✅ web-01: 24 packages updated
-
-web-02: Updating packages... ⏳
-✅ web-02: 24 packages updated
-
-db-01: Updating packages... ⏳
-✅ db-01: 24 packages updated
-
-🎉 OS updates complete!
-
-

Step 6: Rollback Procedures

-

6.1 Rollback Task Service

-

If update fails or causes issues:

-
# Rollback to previous version
-provisioning t rollback cilium --infra my-production
-
-

Expected Output:

-
🔄 Rolling back Cilium on my-production...
-
-Current: 1.15.0
-Target:  1.14.5 (previous version)
-
-Rolling back: web-01... ⏳
-✅ web-01 rolled back
-
-Rolling back: web-02... ⏳
-✅ web-02 rolled back
-
-Verifying connectivity... ⏳
-✅ All nodes connected
-
-🎉 Rollback complete!
-   Version: 1.15.0 → 1.14.5
-
-

6.2 Rollback from Backup

-
# Restore configuration from backup
-provisioning ws restore my-production --from workspace/backups/my-production-20250930.tar.gz
-
-

6.3 Emergency Rollback

-
# Complete infrastructure rollback
-provisioning rollback --infra my-production --to-snapshot <snapshot-id>
-
-

Step 7: Post-Update Verification

-

7.1 Verify All Components

-
# Check overall health
-provisioning health --infra my-production
-
-

Expected Output:

-
🏥 Health Check: my-production
-
-Servers:
-  ✅ web-01: Healthy
-  ✅ web-02: Healthy
-  ✅ db-01: Healthy
-
-Task Services:
-  ✅ kubernetes: 1.30.0 (healthy)
-  ✅ containerd: 1.7.13 (healthy)
-  ✅ cilium: 1.15.0 (healthy)
-  ✅ postgres: 16.1 (healthy)
-
-Clusters:
-  ✅ buildkit: 2/2 replicas (healthy)
-
-Overall Status: ✅ All systems healthy
-
-

7.2 Verify Version Updates

-
# Verify all versions are updated
-provisioning version show
-
-

7.3 Run Integration Tests

-
# Run comprehensive tests
-provisioning test all --infra my-production
-
-

Expected Output:

-
🧪 Running Integration Tests...
-
-[1/5] Server connectivity... ⏳
-✅ All servers reachable
-
-[2/5] Kubernetes health... ⏳
-✅ All nodes ready, all pods running
-
-[3/5] Network connectivity... ⏳
-✅ All services reachable
-
-[4/5] Database connectivity... ⏳
-✅ PostgreSQL responsive
-
-[5/5] Application health... ⏳
-✅ All applications healthy
-
-🎉 All tests passed!
-
-

7.4 Monitor for Issues

-
# Monitor logs for errors
-provisioning logs --infra my-production --follow --level error
-
-

Update Checklist

-

Use this checklist for production updates:

-
    -
  • -Check for available updates
  • -
  • -Review changelog and breaking changes
  • -
  • -Create configuration backup
  • -
  • -Test update in staging environment
  • -
  • -Schedule maintenance window
  • -
  • -Notify team/users of maintenance
  • -
  • -Update non-critical services first
  • -
  • -Verify each update before proceeding
  • -
  • -Update critical services with rolling updates
  • -
  • -Backup database before major updates
  • -
  • -Verify all components after update
  • -
  • -Run integration tests
  • -
  • -Monitor for issues (30 minutes minimum)
  • -
  • -Document any issues encountered
  • -
  • -Close maintenance window
  • -
-

Common Update Scenarios

-

Scenario 1: Minor Security Patch

-
# Quick security update
-provisioning t check-updates --security-only
-provisioning t update --infra my-production --security-patches --yes
-
-

Scenario 2: Major Version Upgrade

-
# Careful major version update
-provisioning ws backup my-production
-provisioning t check-migration <service> --from X.Y --to X+1.Y
-provisioning t create <service> --infra my-production --migrate
-provisioning test all --infra my-production
-
-

Scenario 3: Emergency Hotfix

-
# Apply critical hotfix immediately
-provisioning t create <service> --infra my-production --hotfix --yes
-
-

Troubleshooting Updates

-

Issue: Update fails mid-process

-

Solution:

-
# Check update status
-provisioning t status <taskserv> --infra my-production
-
-# Resume failed update
-provisioning t update <taskserv> --infra my-production --resume
-
-# Or rollback
-provisioning t rollback <taskserv> --infra my-production
-
-

Issue: Service not starting after update

-

Solution:

-
# Check logs
-provisioning logs <taskserv> --infra my-production
-
-# Verify configuration
-provisioning t validate <taskserv> --infra my-production
-
-# Rollback if necessary
-provisioning t rollback <taskserv> --infra my-production
-
-

Issue: Data migration fails

-

Solution:

-
# Check migration logs
-provisioning t migration-logs <taskserv> --infra my-production
-
-# Restore from backup
-provisioning t restore <taskserv> --infra my-production --from <backup-file>
-
-

Best Practices

-
    -
  1. Always Test First: Test updates in staging before production
  2. -
  3. Backup Everything: Create backups before any update
  4. -
  5. Update Gradually: Update one service at a time
  6. -
  7. Monitor Closely: Watch for errors after each update
  8. -
  9. Have Rollback Plan: Always have a rollback strategy
  10. -
  11. Document Changes: Keep update logs for reference
  12. -
  13. Schedule Wisely: Update during low-traffic periods
  14. -
  15. Verify Thoroughly: Run tests after each update
  16. -
-

Next Steps

- -

Quick Reference

-
# Update workflow
-provisioning t check-updates
-provisioning ws backup my-production
-provisioning t create <taskserv> --infra my-production --check
-provisioning t create <taskserv> --infra my-production
-provisioning version taskserv <taskserv>
-provisioning health --infra my-production
-provisioning test all --infra my-production
-
-
-

This guide is part of the provisioning project documentation. Last updated: 2025-09-30

-

Customize Infrastructure

-

Goal: Customize infrastructure using layers, templates, and configuration patterns -Time: 20-40 minutes -Difficulty: Intermediate to Advanced

-

Overview

-

This guide covers:

-
    -
  1. Understanding the layer system
  2. -
  3. Using templates
  4. -
  5. Creating custom modules
  6. -
  7. Configuration inheritance
  8. -
  9. Advanced customization patterns
  10. -
-

The Layer System

-

Understanding Layers

-

The provisioning system uses a 3-layer architecture for configuration inheritance:

-
┌─────────────────────────────────────┐
-│  Infrastructure Layer (Priority 300)│  ← Highest priority
-│  workspace/infra/{name}/            │
-│  • Project-specific configs         │
-│  • Environment customizations       │
-│  • Local overrides                  │
-└─────────────────────────────────────┘
-              ↓ overrides
-┌─────────────────────────────────────┐
-│  Workspace Layer (Priority 200)     │
-│  provisioning/workspace/templates/  │
-│  • Reusable patterns                │
-│  • Organization standards           │
-│  • Team conventions                 │
-└─────────────────────────────────────┘
-              ↓ overrides
-┌─────────────────────────────────────┐
-│  Core Layer (Priority 100)          │  ← Lowest priority
-│  provisioning/extensions/           │
-│  • System defaults                  │
-│  • Provider implementations         │
-│  • Default taskserv configs         │
-└─────────────────────────────────────┘
-
-

Resolution Order: Infrastructure (300) → Workspace (200) → Core (100)

-

Higher numbers override lower numbers.

-

View Layer Resolution

-
# Explain layer concept
-provisioning lyr explain
-
-

Expected Output:

-
📚 LAYER SYSTEM EXPLAINED
-
-The layer system provides configuration inheritance across 3 levels:
-
-🔵 CORE LAYER (100) - System Defaults
-   Location: provisioning/extensions/
-   • Base taskserv configurations
-   • Default provider settings
-   • Standard cluster templates
-   • Built-in extensions
-
-🟢 WORKSPACE LAYER (200) - Shared Templates
-   Location: provisioning/workspace/templates/
-   • Organization-wide patterns
-   • Reusable configurations
-   • Team standards
-   • Custom extensions
-
-🔴 INFRASTRUCTURE LAYER (300) - Project Specific
-   Location: workspace/infra/{project}/
-   • Project-specific overrides
-   • Environment customizations
-   • Local modifications
-   • Runtime settings
-
-Resolution: Infrastructure → Workspace → Core
-Higher priority layers override lower ones.
-
-
# Show layer resolution for your project
-provisioning lyr show my-production
-
-

Expected Output:

-
📊 Layer Resolution for my-production:
-
-LAYER            PRIORITY  SOURCE                              FILES
-Infrastructure   300       workspace/infra/my-production/      4 files
-                           • servers.ncl (overrides)
-                           • taskservs.ncl (overrides)
-                           • clusters.ncl (custom)
-                           • providers.ncl (overrides)
-
-Workspace        200       provisioning/workspace/templates/   2 files
-                           • production.ncl (used)
-                           • kubernetes.ncl (used)
-
-Core             100       provisioning/extensions/            15 files
-                           • taskservs/* (base configs)
-                           • providers/* (default settings)
-                           • clusters/* (templates)
-
-Resolution Order: Infrastructure → Workspace → Core
-Status: ✅ All layers resolved successfully
-
-

Test Layer Resolution

-
# Test how a specific module resolves
-provisioning lyr test kubernetes my-production
-
-

Expected Output:

-
🔍 Layer Resolution Test: kubernetes → my-production
-
-Resolving kubernetes configuration...
-
-🔴 Infrastructure Layer (300):
-   ✅ Found: workspace/infra/my-production/taskservs/kubernetes.ncl
-   Provides:
-     • version = "1.30.0" (overrides)
-     • control_plane_servers = ["web-01"] (overrides)
-     • worker_servers = ["web-02"] (overrides)
-
-🟢 Workspace Layer (200):
-   ✅ Found: provisioning/workspace/templates/production-kubernetes.ncl
-   Provides:
-     • security_policies (inherited)
-     • network_policies (inherited)
-     • resource_quotas (inherited)
-
-🔵 Core Layer (100):
-   ✅ Found: provisioning/extensions/taskservs/kubernetes/main.ncl
-   Provides:
-     • default_version = "1.29.0" (base)
-     • default_features (base)
-     • default_plugins (base)
-
-Final Configuration (after merging all layers):
-  version: "1.30.0" (from Infrastructure)
-  control_plane_servers: ["web-01"] (from Infrastructure)
-  worker_servers: ["web-02"] (from Infrastructure)
-  security_policies: {...} (from Workspace)
-  network_policies: {...} (from Workspace)
-  resource_quotas: {...} (from Workspace)
-  default_features: {...} (from Core)
-  default_plugins: {...} (from Core)
-
-Resolution: ✅ Success
-
-

Using Templates

-

List Available Templates

-
# List all templates
-provisioning tpl list
-
-

Expected Output:

-
📋 Available Templates:
-
-TASKSERVS:
-  • production-kubernetes    - Production-ready Kubernetes setup
-  • production-postgres      - Production PostgreSQL with replication
-  • production-redis         - Redis cluster with sentinel
-  • development-kubernetes   - Development Kubernetes (minimal)
-  • ci-cd-pipeline          - Complete CI/CD pipeline
-
-PROVIDERS:
-  • upcloud-production      - UpCloud production settings
-  • upcloud-development     - UpCloud development settings
-  • aws-production          - AWS production VPC setup
-  • aws-development         - AWS development environment
-  • local-docker            - Local Docker-based setup
-
-CLUSTERS:
-  • buildkit-cluster        - BuildKit for container builds
-  • monitoring-stack        - Prometheus + Grafana + Loki
-  • security-stack          - Security monitoring tools
-
-Total: 13 templates
-
-
# List templates by type
-provisioning tpl list --type taskservs
-provisioning tpl list --type providers
-provisioning tpl list --type clusters
-
-

View Template Details

-
# Show template details
-provisioning tpl show production-kubernetes
-
-

Expected Output:

-
📄 Template: production-kubernetes
-
-Description: Production-ready Kubernetes configuration with
-             security hardening, network policies, and monitoring
-
-Category: taskservs
-Version: 1.0.0
-
-Configuration Provided:
-  • Kubernetes version: 1.30.0
-  • Security policies: Pod Security Standards (restricted)
-  • Network policies: Default deny + allow rules
-  • Resource quotas: Per-namespace limits
-  • Monitoring: Prometheus integration
-  • Logging: Loki integration
-  • Backup: Velero configuration
-
-Requirements:
-  • Minimum 2 servers
-  • 4 GB RAM per server
-  • Network plugin (Cilium recommended)
-
-Location: provisioning/workspace/templates/production-kubernetes.ncl
-
-Example Usage:
-  provisioning tpl apply production-kubernetes my-production
-
-

Apply Template

-
# Apply template to your infrastructure
-provisioning tpl apply production-kubernetes my-production
-
-

Expected Output:

-
🚀 Applying template: production-kubernetes → my-production
-
-Checking compatibility... ⏳
-✅ Infrastructure compatible with template
-
-Merging configuration... ⏳
-✅ Configuration merged
-
-Files created/updated:
-  • workspace/infra/my-production/taskservs/kubernetes.ncl (updated)
-  • workspace/infra/my-production/policies/security.ncl (created)
-  • workspace/infra/my-production/policies/network.ncl (created)
-  • workspace/infra/my-production/monitoring/prometheus.ncl (created)
-
-🎉 Template applied successfully!
-
-Next steps:
-  1. Review generated configuration
-  2. Adjust as needed
-  3. Deploy: provisioning t create kubernetes --infra my-production
-
-

Validate Template Usage

-
# Validate template was applied correctly
-provisioning tpl validate my-production
-
-

Expected Output:

-
✅ Template Validation: my-production
-
-Templates Applied:
-  ✅ production-kubernetes (v1.0.0)
-  ✅ production-postgres (v1.0.0)
-
-Configuration Status:
-  ✅ All required fields present
-  ✅ No conflicting settings
-  ✅ Dependencies satisfied
-
-Compliance:
-  ✅ Security policies configured
-  ✅ Network policies configured
-  ✅ Resource quotas set
-  ✅ Monitoring enabled
-
-Status: ✅ Valid
-
-

Creating Custom Templates

-

Step 1: Create Template Structure

-
# Create custom template directory
-mkdir -p provisioning/workspace/templates/my-custom-template
-
-

Step 2: Write Template Configuration

-

File: provisioning/workspace/templates/my-custom-template/main.ncl

-
# Custom Kubernetes template with specific settings
-let kubernetes_config = {
-  # Version
-  version = "1.30.0",
-
-  # Custom feature gates
-  feature_gates = {
-    "GracefulNodeShutdown" = true,
-    "SeccompDefault" = true,
-    "StatefulSetAutoDeletePVC" = true,
-  },
-
-  # Custom kubelet configuration
-  kubelet_config = {
-    max_pods = 110,
-    pod_pids_limit = 4096,
-    container_log_max_size = "10Mi",
-    container_log_max_files = 5,
-  },
-
-  # Custom API server flags
-  apiserver_extra_args = {
-    "enable-admission-plugins" = "NodeRestriction,PodSecurity,LimitRanger",
-    "audit-log-maxage" = "30",
-    "audit-log-maxbackup" = "10",
-  },
-
-  # Custom scheduler configuration
-  scheduler_config = {
-    profiles = [
-      {
-        name = "high-availability",
-        plugins = {
-          score = {
-            enabled = [
-              {name = "NodeResourcesBalancedAllocation", weight = 2},
-              {name = "NodeResourcesLeastAllocated", weight = 1},
-            ],
-          },
-        },
-      },
-    ],
-  },
-
-  # Network configuration
-  network = {
-    service_cidr = "10.96.0.0/12",
-    pod_cidr = "10.244.0.0/16",
-    dns_domain = "cluster.local",
-  },
-
-  # Security configuration
-  security = {
-    pod_security_standard = "restricted",
-    encrypt_etcd = true,
-    rotate_certificates = true,
-  },
-} in
-kubernetes_config
-
-

Step 3: Create Template Metadata

-

File: provisioning/workspace/templates/my-custom-template/metadata.toml

-
[template]
-name = "my-custom-template"
-version = "1.0.0"
-description = "Custom Kubernetes template with enhanced security"
-category = "taskservs"
-author = "Your Name"
-
-[requirements]
-min_servers = 2
-min_memory_gb = 4
-required_taskservs = ["containerd", "cilium"]
-
-[tags]
-environment = ["production", "staging"]
-features = ["security", "monitoring", "high-availability"]
-
-

Step 4: Test Custom Template

-
# List templates (should include your custom template)
-provisioning tpl list
-
-# Show your template
-provisioning tpl show my-custom-template
-
-# Apply to test infrastructure
-provisioning tpl apply my-custom-template my-test
-
-

Configuration Inheritance Examples

-

Example 1: Override Single Value

-

Core Layer (provisioning/extensions/taskservs/postgres/main.ncl):

-
let postgres_config = {
-  version = "15.5",
-  port = 5432,
-  max_connections = 100,
-} in
-postgres_config
-
-

Infrastructure Layer (workspace/infra/my-production/taskservs/postgres.ncl):

-
let postgres_config = {
-  max_connections = 500,  # Override only max_connections
-} in
-postgres_config
-
-

Result (after layer resolution):

-
let postgres_config = {
-  version = "15.5",          # From Core
-  port = 5432,               # From Core
-  max_connections = 500,     # From Infrastructure (overridden)
-} in
-postgres_config
-
-

Example 2: Add Custom Configuration

-

Workspace Layer (provisioning/workspace/templates/production-postgres.ncl):

-
let postgres_config = {
-  replication = {
-    enabled = true,
-    replicas = 2,
-    sync_mode = "async",
-  },
-} in
-postgres_config
-
-

Infrastructure Layer (workspace/infra/my-production/taskservs/postgres.ncl):

-
let postgres_config = {
-  replication = {
-    sync_mode = "sync",  # Override sync mode
-  },
-  custom_extensions = ["pgvector", "timescaledb"],  # Add custom config
-} in
-postgres_config
-
-

Result:

-
let postgres_config = {
-  version = "15.5",          # From Core
-  port = 5432,               # From Core
-  max_connections = 100,     # From Core
-  replication = {
-    enabled = true,          # From Workspace
-    replicas = 2,            # From Workspace
-    sync_mode = "sync",      # From Infrastructure (overridden)
-  },
-  custom_extensions = ["pgvector", "timescaledb"],  # From Infrastructure (added)
-} in
-postgres_config
-
-

Example 3: Environment-Specific Configuration

-

Workspace Layer (provisioning/workspace/templates/base-kubernetes.ncl):

-
let kubernetes_config = {
-  version = "1.30.0",
-  control_plane_count = 3,
-  worker_count = 5,
-  resources = {
-    control_plane = {cpu = "4", memory = "8Gi"},
-    worker = {cpu = "8", memory = "16Gi"},
-  },
-} in
-kubernetes_config
-
-

Development Infrastructure (workspace/infra/my-dev/taskservs/kubernetes.ncl):

-
let kubernetes_config = {
-  control_plane_count = 1,  # Smaller for dev
-  worker_count = 2,
-  resources = {
-    control_plane = {cpu = "2", memory = "4Gi"},
-    worker = {cpu = "2", memory = "4Gi"},
-  },
-} in
-kubernetes_config
-
-

Production Infrastructure (workspace/infra/my-prod/taskservs/kubernetes.ncl):

-
let kubernetes_config = {
-  control_plane_count = 5,  # Larger for prod
-  worker_count = 10,
-  resources = {
-    control_plane = {cpu = "8", memory = "16Gi"},
-    worker = {cpu = "16", memory = "32Gi"},
-  },
-} in
-kubernetes_config
-
-

Advanced Customization Patterns

-

Pattern 1: Multi-Environment Setup

-

Create different configurations for each environment:

-
# Create environments
-provisioning ws init my-app-dev
-provisioning ws init my-app-staging
-provisioning ws init my-app-prod
-
-# Apply environment-specific templates
-provisioning tpl apply development-kubernetes my-app-dev
-provisioning tpl apply staging-kubernetes my-app-staging
-provisioning tpl apply production-kubernetes my-app-prod
-
-# Customize each environment
-# Edit: workspace/infra/my-app-dev/...
-# Edit: workspace/infra/my-app-staging/...
-# Edit: workspace/infra/my-app-prod/...
-
-

Pattern 2: Shared Configuration Library

-

Create reusable configuration fragments:

-

File: provisioning/workspace/templates/shared/security-policies.ncl

-
let security_policies = {
-  pod_security = {
-    enforce = "restricted",
-    audit = "restricted",
-    warn = "restricted",
-  },
-  network_policies = [
-    {
-      name = "deny-all",
-      pod_selector = {},
-      policy_types = ["Ingress", "Egress"],
-    },
-    {
-      name = "allow-dns",
-      pod_selector = {},
-      egress = [
-        {
-          to = [{namespace_selector = {name = "kube-system"}}],
-          ports = [{protocol = "UDP", port = 53}],
-        },
-      ],
-    },
-  ],
-} in
-security_policies
-
-

Import in your infrastructure:

-
let security_policies = (import "../../../provisioning/workspace/templates/shared/security-policies.ncl") in
-
-let kubernetes_config = {
-  version = "1.30.0",
-  image_repo = "k8s.gcr.io",
-  security = security_policies,  # Import shared policies
-} in
-kubernetes_config
-
-

Pattern 3: Dynamic Configuration

-

Use Nickel features for dynamic configuration:

-
# Calculate resources based on server count
-let server_count = 5 in
-let replicas_per_server = 2 in
-let total_replicas = server_count * replicas_per_server in
-
-let postgres_config = {
-  version = "16.1",
-  max_connections = total_replicas * 50,  # Dynamic calculation
-  shared_buffers = "1024 MB",
-} in
-postgres_config
-
-

Pattern 4: Conditional Configuration

-
let environment = "production" in  # or "development"
-
-let kubernetes_config = {
-  version = "1.30.0",
-  control_plane_count = if environment == "production" then 3 else 1,
-  worker_count = if environment == "production" then 5 else 2,
-  monitoring = {
-    enabled = environment == "production",
-    retention = if environment == "production" then "30d" else "7d",
-  },
-} in
-kubernetes_config
-
-

Layer Statistics

-
# Show layer system statistics
-provisioning lyr stats
-
-

Expected Output:

-
📊 Layer System Statistics:
-
-Infrastructure Layer:
-  • Projects: 3
-  • Total files: 15
-  • Average overrides per project: 5
-
-Workspace Layer:
-  • Templates: 13
-  • Most used: production-kubernetes (5 projects)
-  • Custom templates: 2
-
-Core Layer:
-  • Taskservs: 15
-  • Providers: 3
-  • Clusters: 3
-
-Resolution Performance:
-  • Average resolution time: 45 ms
-  • Cache hit rate: 87%
-  • Total resolutions: 1,250
-
-

Customization Workflow

-

Complete Customization Example

-
# 1. Create new infrastructure
-provisioning ws init my-custom-app
-
-# 2. Understand layer system
-provisioning lyr explain
-
-# 3. Discover templates
-provisioning tpl list --type taskservs
-
-# 4. Apply base template
-provisioning tpl apply production-kubernetes my-custom-app
-
-# 5. View applied configuration
-provisioning lyr show my-custom-app
-
-# 6. Customize (edit files)
-provisioning sops workspace/infra/my-custom-app/taskservs/kubernetes.ncl
-
-# 7. Test layer resolution
-provisioning lyr test kubernetes my-custom-app
-
-# 8. Validate configuration
-provisioning tpl validate my-custom-app
-provisioning val config --infra my-custom-app
-
-# 9. Deploy customized infrastructure
-provisioning s create --infra my-custom-app --check
-provisioning s create --infra my-custom-app
-provisioning t create kubernetes --infra my-custom-app
-
-

Best Practices

-

1. Use Layers Correctly

-
    -
  • Core Layer: Only modify for system-wide changes
  • -
  • Workspace Layer: Use for organization-wide templates
  • -
  • Infrastructure Layer: Use for project-specific customizations
  • -
-

2. Template Organization

-
provisioning/workspace/templates/
-├── shared/           # Shared configuration fragments
-│   ├── security-policies.ncl
-│   ├── network-policies.ncl
-│   └── monitoring.ncl
-├── production/       # Production templates
-│   ├── kubernetes.ncl
-│   ├── postgres.ncl
-│   └── redis.ncl
-└── development/      # Development templates
-    ├── kubernetes.ncl
-    └── postgres.ncl
-
-

3. Documentation

-

Document your customizations:

-

File: workspace/infra/my-production/README.md

-
# My Production Infrastructure
-
-## Customizations
-
-- Kubernetes: Using production template with 5 control plane nodes
-- PostgreSQL: Configured with streaming replication
-- Cilium: Native routing mode enabled
-
-## Layer Overrides
-
-- `taskservs/kubernetes.ncl`: Control plane count (3 → 5)
-- `taskservs/postgres.ncl`: Replication mode (async → sync)
-- `network/cilium.ncl`: Routing mode (tunnel → native)
-
-

4. Version Control

-

Keep templates and configurations in version control:

-
cd provisioning/workspace/templates/
-git add .
-git commit -m "Add production Kubernetes template with enhanced security"
-
-cd workspace/infra/my-production/
-git add .
-git commit -m "Configure production environment for my-production"
-
-

Troubleshooting Customizations

-

Issue: Configuration not applied

-
# Check layer resolution
-provisioning lyr show my-production
-
-# Verify file exists
-ls -la workspace/infra/my-production/taskservs/
-
-# Test specific resolution
-provisioning lyr test kubernetes my-production
-
-

Issue: Conflicting configurations

-
# Validate configuration
-provisioning val config --infra my-production
-
-# Show configuration merge result
-provisioning show config kubernetes --infra my-production
-
-

Issue: Template not found

-
# List available templates
-provisioning tpl list
-
-# Check template path
-ls -la provisioning/workspace/templates/
-
-# Refresh template cache
-provisioning tpl refresh
-
-

Next Steps

- -

Quick Reference

-
# Layer system
-provisioning lyr explain              # Explain layers
-provisioning lyr show <project>       # Show layer resolution
-provisioning lyr test <module> <project>  # Test resolution
-provisioning lyr stats                # Layer statistics
-
-# Templates
-provisioning tpl list                 # List all templates
-provisioning tpl list --type <type>   # Filter by type
-provisioning tpl show <template>      # Show template details
-provisioning tpl apply <template> <project>  # Apply template
-provisioning tpl validate <project>   # Validate template usage
-
-
-

This guide is part of the provisioning project documentation. Last updated: 2025-09-30

-

Infrastructure Setup Quick Reference

-

Complete guide to provisioning infrastructure with Nickel + ConfigLoader + TypeDialog

-
-

Quick Start

-

1. Generate Infrastructure Configs (Solo Mode)

-
cd project-provisioning
-
-# Generate solo deployment (Docker Compose, Nginx, Prometheus, OCI Registry)
-nickel export --format json provisioning/schemas/infrastructure/examples-solo-deployment.ncl > /tmp/solo-infra.json
-
-# Verify JSON structure
-jq . /tmp/solo-infra.json
-
-

2. Validate Generated Configs

-
# Solo deployment validation
-bash provisioning/platform/scripts/validate-infrastructure.nu --config-dir provisioning/platform/infrastructure
-
-# Output shows validation status for Docker, K8s, Nginx, Prometheus
-
-

3. Compare Solo vs Enterprise

-
# Export both examples
-nickel export --format json provisioning/schemas/infrastructure/examples-solo-deployment.ncl > /tmp/solo.json
-nickel export --format json provisioning/schemas/infrastructure/examples-enterprise-deployment.ncl > /tmp/enterprise.json
-
-# Compare orchestrator resources
-echo "=== Solo Resources ===" && jq '.docker_compose_services.orchestrator.deploy.resources.limits' /tmp/solo.json
-echo "=== Enterprise Resources ===" && jq '.docker_compose_services.orchestrator.deploy.resources.limits' /tmp/enterprise.json
-
-# Compare prometheus monitoring
-echo "=== Solo Prometheus Jobs ===" && jq '.prometheus_config.scrape_configs | length' /tmp/solo.json
-echo "=== Enterprise Prometheus Jobs ===" && jq '.prometheus_config.scrape_configs | length' /tmp/enterprise.json
-
-
-

Infrastructure Components

-

Available Schemas (6)

-
- - - - - - -
SchemaPurposeMode Presets
docker-compose.nclContainer orchestrationsolo, multiuser, enterprise
kubernetes.nclK8s manifest generationsolo, enterprise
nginx.nclReverse proxy & load balancersolo, enterprise
prometheus.nclMetrics & monitoringsolo, multiuser, enterprise
systemd.nclSystem service unitssolo, enterprise
oci-registry.nclContainer registry (Zot/Harbor)solo, multiuser, enterprise
-
-

Configuration Examples (2)

-
- - -
ExampleTypeServicesCPUMemory
examples-solo-deployment.nclDev/Testing51.01024M
examples-enterprise-deployment.nclProduction64.04096M
-
-

Automation Scripts (3)

-
- - - -
ScriptPurposeUsage
generate-infrastructure-configs.nuGenerate all configs--mode solo --format yaml
validate-infrastructure.nuValidate configs--config-dir /path
setup-with-forms.shInteractive setupAuto-detects TypeDialog
-
-
-

Workflow: Platform Config + Infrastructure Config

-

Two-Tier Configuration System

-

Platform Config Layer (Service-Internal):

-
Orchestrator port, database host, logging level
-    ↓
-ConfigLoader (Rust)
-    ↓
-Service reads TOML from runtime/generated/
-
-

Infrastructure Config Layer (Deployment-External):

-
Docker Compose services, Nginx routing, Prometheus scrape jobs
-    ↓
-nickel export → YAML/JSON
-    ↓
-Docker/Kubernetes/Nginx deploys infrastructure
-
-

Complete Deployment Workflow

-
1. Choose platform config mode
-   provisioning/platform/config/examples/orchestrator.solo.example.ncl
-                                        ↓
-2. Generate platform config TOML
-   nickel export --format toml → runtime/generated/orchestrator.solo.toml
-                                        ↓
-3. Choose infrastructure mode
-   provisioning/schemas/infrastructure/examples-solo-deployment.ncl
-                                        ↓
-4. Generate infrastructure JSON/YAML
-   nickel export --format json → docker-compose-solo.json
-                                        ↓
-5. Deploy infrastructure
-   docker-compose -f docker-compose-solo.yaml up
-                                        ↓
-6. Services start with configs
-   ConfigLoader reads platform config TOML
-   Docker/Nginx read infrastructure configs
-
-
-

Resource Allocation Reference

-

Solo Mode (Development)

-
Orchestrator:      1.0 CPU, 1024M RAM (1 replica)
-Control Center:    0.5 CPU,  512M RAM
-CoreDNS:           0.25 CPU, 256M RAM
-KMS:               0.5 CPU,  512M RAM
-OCI Registry:      0.5 CPU,  512M RAM (Zot - filesystem)
-─────────────────────────────────────
-Total:             2.75 CPU, 2624M RAM
-Use Case:          Development, testing, PoCs
-
-

Enterprise Mode (Production)

-
Orchestrator:      4.0 CPU, 4096M RAM (3 replicas)
-Control Center:    2.0 CPU, 2048M RAM (HA)
-CoreDNS:           1.0 CPU, 1024M RAM
-KMS:               2.0 CPU, 2048M RAM
-OCI Registry:      2.0 CPU, 2048M RAM (Harbor - S3)
-─────────────────────────────────────
-Total:            11.0 CPU, 10240M RAM (+ replicas)
-Use Case:          Production deployments, high availability
-
-
-

Common Tasks

-

Generate Solo Infrastructure

-
nickel export --format json provisioning/schemas/infrastructure/examples-solo-deployment.ncl
-
-

Generate Enterprise Infrastructure

-
nickel export --format json provisioning/schemas/infrastructure/examples-enterprise-deployment.ncl
-
-

Validate JSON Structure

-
jq '.docker_compose_services | keys' /tmp/infra.json
-jq '.prometheus_config.scrape_configs | length' /tmp/infra.json
-jq '.oci_registry_config.backend' /tmp/infra.json
-
-

Check Resource Limits

-
# All services in solo mode
-jq '.docker_compose_services[] | {name: .name, cpu: .deploy.resources.limits.cpus, memory: .deploy.resources.limits.memory}' /tmp/solo.json
-
-# Just orchestrator
-jq '.docker_compose_services.orchestrator.deploy.resources.limits' /tmp/solo.json
-
-

Compare Modes

-
# Services count
-jq '.docker_compose_services | length' /tmp/solo.json      # 5 services
-jq '.docker_compose_services | length' /tmp/enterprise.json # 6 services
-
-# Prometheus jobs
-jq '.prometheus_config.scrape_configs | length' /tmp/solo.json      # 4 jobs
-jq '.prometheus_config.scrape_configs | length' /tmp/enterprise.json # 7 jobs
-
-# Registry backend
-jq -r '.oci_registry_config.backend' /tmp/solo.json      # Zot
-jq -r '.oci_registry_config.backend' /tmp/enterprise.json # Harbor
-
-
-

Validation Commands

-

Type Check Schemas

-
nickel typecheck provisioning/schemas/infrastructure/docker-compose.ncl
-nickel typecheck provisioning/schemas/infrastructure/kubernetes.ncl
-nickel typecheck provisioning/schemas/infrastructure/nginx.ncl
-nickel typecheck provisioning/schemas/infrastructure/prometheus.ncl
-nickel typecheck provisioning/schemas/infrastructure/systemd.ncl
-nickel typecheck provisioning/schemas/infrastructure/oci-registry.ncl
-
-

Validate Examples

-
nickel typecheck provisioning/schemas/infrastructure/examples-solo-deployment.ncl
-nickel typecheck provisioning/schemas/infrastructure/examples-enterprise-deployment.ncl
-
-

Test Export

-
nickel export --format json provisioning/schemas/infrastructure/examples-solo-deployment.ncl | jq .
-
-
-

Platform Config Examples

-

Solo Platform Config

-
nickel export --format toml provisioning/platform/config/examples/orchestrator.solo.example.ncl
-# Output: TOML with [database], [logging], [monitoring], [workspace] sections
-
-

Enterprise Platform Config

-
nickel export --format toml provisioning/platform/config/examples/orchestrator.enterprise.example.ncl
-# Output: TOML with HA, S3, Redis, tracing configuration
-
-
-

Configuration Files Reference

-

Platform Configs (services internally)

-
provisioning/platform/config/
-├── runtime/generated/*.toml          # Auto-generated by ConfigLoader
-├── examples/                         # Reference implementations
-│   ├── orchestrator.solo.example.ncl
-│   ├── orchestrator.multiuser.example.ncl
-│   └── orchestrator.enterprise.example.ncl
-└── README.md
-
-

Infrastructure Schemas

-
provisioning/schemas/infrastructure/
-├── docker-compose.ncl                # 232 lines
-├── kubernetes.ncl                    # 376 lines
-├── nginx.ncl                         # 233 lines
-├── prometheus.ncl                    # 280 lines
-├── systemd.ncl                       # 235 lines
-├── oci-registry.ncl                  # 221 lines
-├── examples-solo-deployment.ncl      # 27 lines
-├── examples-enterprise-deployment.ncl # 27 lines
-└── README.md
-
-

TypeDialog Integration

-
provisioning/platform/.typedialog/provisioning/platform/
-├── forms/                            # Ready for auto-generated forms
-├── templates/service-form.template.j2
-├── schemas/ → ../../schemas          # Symlink
-├── constraints/constraints.toml       # Validation rules
-└── README.md
-
-

Automation Scripts

-
provisioning/platform/scripts/
-├── generate-infrastructure-configs.nu  # Generate all configs
-├── validate-infrastructure.nu          # Validate with tools
-└── setup-with-forms.sh                # Interactive wizard
-
-
-

Integration Status

-
- - - - - - - - -
ComponentStatusDetails
Infrastructure Schemas✅ Complete6 schemas, 1,577 lines, all validated
Deployment Examples✅ Complete2 examples (solo + enterprise), tested
Generation Scripts✅ CompleteAuto-generate configs for all modes
Validation Scripts✅ CompleteValidate Docker, K8s, Nginx, Prometheus
Platform Config✅ Complete36 TOML files in runtime/generated/
TypeDialog Forms✅ ReadyForms + bash wrappers created, awaiting binary
Setup Wizard✅ ActiveBasic prompts as fallback
Documentation✅ CompleteAll guides updated with examples
-
-
-

Next Steps

-

Now Available

-
    -
  • Generate infrastructure configs for solo/enterprise modes
  • -
  • Validate generated configs with format-specific tools
  • -
  • Use interactive setup wizard with basic Nushell prompts
  • -
  • TypeDialog forms created and ready (awaiting binary install)
  • -
  • Deploy with Docker/Kubernetes using generated configs
  • -
-

When TypeDialog Binary Becomes Available

-
    -
  • Install TypeDialog binary
  • -
  • TypeDialog forms already created (setup, auth, MFA)
  • -
  • Bash wrappers handle TTY input (no Nushell stack issues)
  • -
  • Full nickel-roundtrip workflow will be enabled
  • -
-
-

Key Files

-

Schemas:

-
    -
  • provisioning/schemas/infrastructure/ - All infrastructure schemas
  • -
-

Examples:

-
    -
  • provisioning/schemas/infrastructure/examples-solo-deployment.ncl
  • -
  • provisioning/schemas/infrastructure/examples-enterprise-deployment.ncl
  • -
-

Platform Configs:

-
    -
  • provisioning/platform/config/examples/ - Platform config examples
  • -
  • provisioning/platform/config/runtime/generated/ - Generated TOML files
  • -
-

Scripts:

-
    -
  • provisioning/platform/scripts/generate-infrastructure-configs.nu
  • -
  • provisioning/platform/scripts/validate-infrastructure.nu
  • -
  • provisioning/platform/scripts/setup-with-forms.sh
  • -
-

Documentation:

-
    -
  • provisioning/docs/src/guides/infrastructure-setup.md - This guide
  • -
  • provisioning/schemas/infrastructure/README.md - Infrastructure schema reference
  • -
  • provisioning/platform/config/examples/README.md - Platform config guide
  • -
  • provisioning/platform/.typedialog/README.md - TypeDialog integration guide
  • -
-
-

Version: 1.0.0 -Last Updated: 2025-01-06 -Status: Production Ready

-

Extension Development Quick Start Guide

-

This guide provides a hands-on walkthrough for developing custom extensions using the Nickel configuration system and module loader.

-

Prerequisites

-
    -
  1. -

    Nickel installed (1.15.0+):

    +

    Nickel (Required)

    # macOS
     brew install nickel
     
    -# Linux/Other
    -cargo install nickel
    +# Linux
    +cargo install nickel-lang-cli
     
    -# Verify
    -nickel --version
    +# Verify installation
    +nickel --version  # Expected: 1.15.1+
     
    +

    Additional Tools

    +
    # SOPS for secrets management
    +brew install sops  # macOS
    +# or download from  [https://github.com/getsops/sops/releases](https://github.com/getsops/sops/releases)
    +
    +# Age for encryption
    +brew install age  # macOS
    +cargo install age  # Linux
    +
    +# K9s for Kubernetes management (optional)
    +brew install derailed/k9s/k9s
    +
    +# Verify installations
    +sops --version    # Expected: 3.10.2+
    +age --version     # Expected: 1.2.1+
    +k9s version       # Expected: 0.50.6+
    +
    +

    Install Provisioning Platform

    + +
    # Download and run installer
    +INSTALL_URL="https://raw.githubusercontent.com/yourusername/provisioning/main/install.sh"
    +curl -sSL "$INSTALL_URL" | bash
    +
    +# Follow prompts to configure installation directory and path
    +# Default: ~/.local/bin/provisioning
    +
    +

    Installer performs:

    +
      +
    • Downloads latest platform binaries
    • +
    • Installs CLI to system PATH
    • +
    • Creates default configuration structure
    • +
    • Validates dependencies
    • +
    • Runs health check
    • +
    +

    Option 2: Build from Source

    +
    # Clone repository
    +git clone  [https://github.com/yourusername/provisioning.git](https://github.com/yourusername/provisioning.git)
    +cd provisioning
    +
    +# Build core CLI
    +cd provisioning/core
    +cargo build --release
    +
    +# Install to local bin
    +cp target/release/provisioning ~/.local/bin/
    +
    +# Add to PATH (add to ~/.bashrc or ~/.zshrc)
    +export PATH="$HOME/.local/bin:$PATH"
    +
    +# Verify installation
    +provisioning version
    +
    +

    Platform Health Check

    +
    # Verify installation
    +provisioning setup check
    +
    +# Expected output:
    +# ✓ Nushell 0.109.1 installed
    +# ✓ Nickel 1.15.1 installed
    +# ✓ SOPS 3.10.2 installed
    +# ✓ Age 1.2.1 installed
    +# ✓ Provisioning CLI installed
    +# ✓ Configuration directory created
    +# Platform ready for use
    +
    +

    Phase 2: Initial Configuration

    +

    Generate User Configuration

    +
    # Create user configuration directory
    +mkdir -p ~/.config/provisioning
    +
    +# Generate default user config
    +provisioning setup init-user-config
    +
    +

    Generated configuration structure:

    +
    ~/.config/provisioning/
    +├── user_config.yaml      # User preferences and workspace registry
    +├── credentials/          # Provider credentials (encrypted)
    +├── age/                  # Age encryption keys
    +└── cache/                # CLI cache
    +
    +

    Configure Encryption

    +
    # Generate Age key pair for secrets
    +age-keygen -o ~/.config/provisioning/age/provisioning.key
    +
    +# Store public key
    +age-keygen -y ~/.config/provisioning/age/provisioning.key > ~/.config/provisioning/age/provisioning.pub
    +
    +# Configure SOPS to use Age
    +cat > ~/.config/sops/config.yaml <<EOF
    +creation_rules:
    +  - path_regex: \.secret\.(yam| l tom| l json)$
    +    age: $(cat ~/.config/provisioning/age/provisioning.pub)
    +EOF
    +
    +

    Provider Credentials

    +

    Configure credentials for your chosen cloud provider.

    +

    UpCloud Configuration

    +
    # Edit user config
    +nano ~/.config/provisioning/user_config.yaml
    +
    +# Add provider credentials
    +cat >> ~/.config/provisioning/user_config.yaml <<EOF
    +providers:
    +  upcloud:
    +    username: "your-upcloud-username"
    +    password_env: "UPCLOUD_PASSWORD"  # Read from environment variable
    +    default_zone: "de-fra1"
    +EOF
    +
    +# Set environment variable (add to ~/.bashrc or ~/.zshrc)
    +export UPCLOUD_PASSWORD="your-upcloud-password"
    +
    +

    AWS Configuration

    +
    # Add AWS credentials to user config
    +cat >> ~/.config/provisioning/user_config.yaml <<EOF
    +providers:
    +  aws:
    +    access_key_id_env: "AWS_ACCESS_KEY_ID"
    +    secret_access_key_env: "AWS_SECRET_ACCESS_KEY"
    +    default_region: "eu-west-1"
    +EOF
    +
    +# Set environment variables
    +export AWS_ACCESS_KEY_ID="your-access-key-id"
    +export AWS_SECRET_ACCESS_KEY="your-secret-access-key"
    +
    +

    Local Provider (Development)

    +
    # Configure local provider for testing
    +cat >> ~/.config/provisioning/user_config.yaml <<EOF
    +providers:
    +  local:
    +    backend: "docker"  # or "podman", "libvirt"
    +    storage_path: "$HOME/.local/share/provisioning/local"
    +EOF
    +
    +# Ensure Docker is running
    +docker info
    +
    +

    Validate Configuration

    +
    # Validate user configuration
    +provisioning validate config
    +
    +# Test provider connectivity
    +provisioning providers
    +
    +# Expected output:
    +# PROVIDER    STATUS     REGION/ZONE
    +# upcloud     connected  de-fra1
    +# local       ready      localhost
    +
    +

    Phase 3: Create First Workspace

    +

    Initialize Workspace

    +
    # Create workspace for first project
    +provisioning workspace init my-first-project
    +
    +# Navigate to workspace
    +cd workspace_my_first_project
    +
    +# Verify structure
    +ls -la
    +
    +

    Workspace structure created:

    +
    workspace_my_first_project/
    +├── infra/                   # Infrastructure definitions (Nickel)
    +├── config/                  # Workspace configuration
    +│   ├── provisioning.yaml    # Workspace metadata
    +│   ├── dev-defaults.toml    # Development defaults
    +│   ├── test-defaults.toml   # Testing defaults
    +│   └── prod-defaults.toml   # Production defaults
    +├── extensions/              # Workspace-specific extensions
    +│   ├── providers/
    +│   ├── taskservs/
    +│   └── workflows/
    +└── runtime/                 # State and logs (gitignored)
    +    ├── state/
    +    ├── checkpoints/
    +    └── logs/
    +
    +

    Configure Workspace

    +
    # Edit workspace metadata
    +nano config/provisioning.yaml
    +
    +

    Example workspace configuration:

    +
    workspace:
    +  name: my-first-project
    +  description: Learning Provisioning platform
    +  environment: development
    +  created: 2026-01-16T10:00:00Z
    +
    +defaults:
    +  provider: local
    +  region: localhost
    +  confirmation_required: false
    +
    +versioning:
    +  nushell: "0.109.1"
    +  nickel: "1.15.1"
    +  kubernetes: "1.29.0"
    +
    +

    Phase 4: Define Infrastructure

    +

    Simple Server Configuration

    +

    Create your first infrastructure definition using Nickel:

    +
    # Create server definition
    +cat > infra/simple-server.ncl <<'EOF'
    +{
    +  metadata = {
    +    name = "simple-server"
    +    provider = "local"
    +    environment = 'development
    +  }
    +
    +  infrastructure = {
    +    servers = [
    +      {
    +        name = "dev-web-01"
    +        plan = "small"
    +        zone = "localhost"
    +        disk_size_gb = 25
    +        backup_enabled = false
    +        role = 'standalone
    +      }
    +    ]
    +  }
    +
    +  services = {
    +    taskservs = ["containerd"]
    +  }
    +}
    +EOF
    +
    +

    Validate Infrastructure Schema

    +
    # Type-check Nickel schema
    +nickel typecheck infra/simple-server.ncl
    +
    +# Validate against platform contracts
    +provisioning validate config --infra simple-server
    +
    +# Preview deployment
    +provisioning server create --check --infra simple-server
    +
    +

    Expected output:

    +
    Infrastructure Plan: simple-server
    +Provider: local
    +Environment: development
    +
    +Servers to create:
    +  - dev-web-01 (small, standalone)
    +    Disk: 25 GB
    +    Backup: disabled
    +
    +Task services:
    +  - containerd
    +
    +Estimated resources:
    +  CPU: 1 core
    +  RAM: 1 GB
    +  Disk: 25 GB
    +
    +Validation: PASSED
    +
    +

    Deploy Infrastructure

    +
    # Create server
    +provisioning server create --infra simple-server --yes
    +
    +# Monitor deployment
    +provisioning server status dev-web-01
    +
    +

    Deployment progress:

    +
    Creating server: dev-web-01...
    +  [████████████████████████] 100% - Container created
    +  [████████████████████████] 100% - Network configured
    +  [████████████████████████] 100% - SSH ready
    +
    +Server dev-web-01 created successfully
    +IP Address: 172.17.0.2
    +Status: running
    +Provider: local (docker)
    +
    +

    Install Task Service

    +
    # Install containerd
    +provisioning taskserv create containerd --infra simple-server
    +
    +# Verify installation
    +provisioning taskserv status containerd
    +
    +

    Installation output:

    +
    Installing containerd on dev-web-01...
    +  [████████████████████████] 100% - Dependencies resolved
    +  [████████████████████████] 100% - Containerd installed
    +  [████████████████████████] 100% - Service started
    +  [████████████████████████] 100% - Health check passed
    +
    +Containerd installed successfully
    +Version: 1.7.0
    +Runtime: runc
    +
    +

    Verify Deployment

    +
    # SSH into server
    +provisioning server ssh dev-web-01
    +
    +# Inside server - verify containerd
    +sudo systemctl status containerd
    +sudo ctr version
    +
    +# Exit server
    +exit
    +
    +# List all resources
    +provisioning server list
    +provisioning taskserv list
    +
    +

    Phase 5: Kubernetes Cluster Deployment

    +

    Define Kubernetes Infrastructure

    +
    # Create Kubernetes cluster definition
    +cat > infra/k8s-cluster.ncl <<'EOF'
    +{
    +  metadata = {
    +    name = "k8s-dev-cluster"
    +    provider = "local"
    +    environment = 'development
    +  }
    +
    +  infrastructure = {
    +    servers = [
    +      {
    +        name = "k8s-control-01"
    +        plan = "medium"
    +        role = 'control
    +        zone = "localhost"
    +        disk_size_gb = 50
    +      }
    +      {
    +        name = "k8s-worker-01"
    +        plan = "medium"
    +        role = 'worker
    +        zone = "localhost"
    +        disk_size_gb = 50
    +      }
    +      {
    +        name = "k8s-worker-02"
    +        plan = "medium"
    +        role = 'worker
    +        zone = "localhost"
    +        disk_size_gb = 50
    +      }
    +    ]
    +  }
    +
    +  services = {
    +    taskservs = ["containerd", "etcd", "kubernetes", "cilium"]
    +  }
    +
    +  kubernetes = {
    +    version = "1.29.0"
    +    pod_cidr = "10.244.0.0/16"
    +    service_cidr = "10.96.0.0/12"
    +    container_runtime = "containerd"
    +    cri_socket = "/run/containerd/containerd.sock"
    +  }
    +}
    +EOF
    +
    +

    Validate Kubernetes Configuration

    +
    # Type-check schema
    +nickel typecheck infra/k8s-cluster.ncl
    +
    +# Validate configuration
    +provisioning validate config --infra k8s-cluster
    +
    +# Preview deployment
    +provisioning cluster create --check --infra k8s-cluster
    +
    +

    Deploy Kubernetes Cluster

    +
    # Create cluster infrastructure
    +provisioning cluster create --infra k8s-cluster --yes
    +
    +# Monitor cluster deployment
    +provisioning cluster status k8s-dev-cluster
    +
    +

    Cluster deployment phases:

    +
    Phase 1: Creating servers...
    +  [████████████████████████] 100% - 3/3 servers created
    +
    +Phase 2: Installing containerd...
    +  [████████████████████████] 100% - 3/3 nodes ready
    +
    +Phase 3: Installing etcd...
    +  [████████████████████████] 100% - Control plane ready
    +
    +Phase 4: Installing Kubernetes...
    +  [████████████████████████] 100% - API server available
    +  [████████████████████████] 100% - Workers joined
    +
    +Phase 5: Installing Cilium CNI...
    +  [████████████████████████] 100% - Network ready
    +
    +Kubernetes cluster deployed successfully
    +Cluster: k8s-dev-cluster
    +Control plane: k8s-control-01
    +Workers: k8s-worker-01, k8s-worker-02
    +
    +

    Access Kubernetes Cluster

    +
    # Get kubeconfig
    +provisioning cluster kubeconfig k8s-dev-cluster > ~/.kube/config-dev
    +
    +# Set KUBECONFIG
    +export KUBECONFIG=~/.kube/config-dev
    +
    +# Verify cluster
    +kubectl get nodes
    +
    +# Expected output:
    +# NAME              STATUS   ROLES           AGE   VERSION
    +# k8s-control-01    Ready    control-plane   5m    v1.29.0
    +# k8s-worker-01     Ready    <none>          4m    v1.29.0
    +# k8s-worker-02     Ready    <none>          4m    v1.29.0
    +
    +# Use K9s for interactive management
    +k9s
    +
    +

    Phase 6: Security Configuration

    +

    Enable Audit Logging

    +
    # Configure audit logging
    +cat > config/audit-config.toml <<EOF
    +[audit]
    +enabled = true
    +log_path = "runtime/logs/audit"
    +retention_days = 90
    +level = "info"
    +
    +[audit.filters]
    +include_commands = ["server create", "server delete", "cluster deploy"]
    +exclude_users = []
    +EOF
    +
    +

    Configure SOPS for Secrets

    +
    # Create secrets file
    +cat > config/secrets.secret.yaml <<EOF
    +database:
    +  password: "changeme-db-password"
    +  admin_user: "admin"
    +
    +kubernetes:
    +  service_account_key: "changeme-sa-key"
    +EOF
    +
    +# Encrypt secrets with SOPS
    +sops -e -i config/secrets.secret.yaml
    +
    +# Verify encryption
    +cat config/secrets.secret.yaml  # Should show encrypted content
    +
    +# Decrypt when needed
    +sops -d config/secrets.secret.yaml
    +
    +

    Enable MFA (Optional)

    +
    # Enable multi-factor authentication
    +provisioning security mfa enable
    +
    +# Scan QR code with authenticator app
    +# Enter verification code
    +
    +

    Configure RBAC

    +
    # Create role definition
    +cat > config/rbac-roles.yaml <<EOF
    +roles:
    +  - name: developer
    +    permissions:
    +      - server:read
    +      - server:create
    +      - taskserv:read
    +      - taskserv:install
    +    deny:
    +      - cluster:delete
    +      - config:modify
    +
    +  - name: operator
    +    permissions:
    +      - "*:read"
    +      - server:*
    +      - taskserv:*
    +      - cluster:read
    +      - cluster:deploy
    +
    +  - name: admin
    +    permissions:
    +      - "*:*"
    +EOF
    +
    +

    Phase 7: Multi-Cloud Deployment

    +

    Define Multi-Cloud Infrastructure

    +
    # Create multi-cloud definition
    +cat > infra/multi-cloud.ncl <<'EOF'
    +{
    +  batch_workflow = {
    +    operations = [
    +      {
    +        id = "upcloud-frontend"
    +        provider = "upcloud"
    +        region = "de-fra1"
    +        servers = [
    +          {name = "upcloud-web-01", plan = "medium", role = 'web}
    +        ]
    +        taskservs = ["containerd", "nginx"]
    +      }
    +      {
    +        id = "aws-backend"
    +        provider = "aws"
    +        region = "eu-west-1"
    +        servers = [
    +          {name = "aws-api-01", plan = "t3.medium", role = 'api}
    +        ]
    +        taskservs = ["containerd", "docker"]
    +        dependencies = ["upcloud-frontend"]
    +      }
    +      {
    +        id = "local-database"
    +        provider = "local"
    +        region = "localhost"
    +        servers = [
    +          {name = "local-db-01", plan = "large", role = 'database}
    +        ]
    +        taskservs = ["postgresql"]
    +      }
    +    ]
    +    parallel_limit = 2
    +  }
    +}
    +EOF
    +
    +

    Deploy Multi-Cloud Infrastructure

    +
    # Submit batch workflow
    +provisioning batch submit infra/multi-cloud.ncl
    +
    +# Monitor workflow progress
    +provisioning batch status
    +
    +# View detailed operation status
    +provisioning batch operations
    +
    +

    Phase 8: Monitoring and Maintenance

    +

    Platform Health Monitoring

    +
    # Check platform health
    +provisioning health
    +
    +# View service status
    +provisioning service status orchestrator
    +provisioning service status control-center
    +
    +# View logs
    +provisioning logs --service orchestrator --tail 100
    +
    +

    Infrastructure Monitoring

    +
    # List all servers
    +provisioning server list --all-workspaces
    +
    +# Show server details
    +provisioning server info k8s-control-01
    +
    +# Check task service status
    +provisioning taskserv list
    +provisioning taskserv health containerd
    +
    +

    Backup Configuration

    +
    # Create backup
    +provisioning backup create --type full --output ~/backups/provisioning-$(date +%Y%m%d).tar.gz
    +
    +# Schedule automatic backups
    +provisioning backup schedule daily --time "02:00" --retention 7
    +
    +

    Phase 9: Advanced Workflows

    +

    Custom Workflow Creation

    +
    # Create custom workflow
    +cat > extensions/workflows/deploy-app.ncl <<'EOF'
    +{
    +  workflow = {
    +    name = "deploy-application"
    +    description = "Deploy application to Kubernetes"
    +
    +    steps = [
    +      {
    +        name = "build-image"
    +        action = "docker-build"
    +        params = {dockerfile = "Dockerfile", tag = "myapp:latest"}
    +      }
    +      {
    +        name = "push-image"
    +        action = "docker-push"
    +        params = {image = "myapp:latest", registry = "registry.example.com"}
    +        depends_on = ["build-image"]
    +      }
    +      {
    +        name = "deploy-k8s"
    +        action = "kubectl-apply"
    +        params = {manifest = "k8s/deployment.yaml"}
    +        depends_on = ["push-image"]
    +      }
    +      {
    +        name = "verify-deployment"
    +        action = "kubectl-rollout-status"
    +        params = {deployment = "myapp"}
    +        depends_on = ["deploy-k8s"]
    +      }
    +    ]
    +  }
    +}
    +EOF
    +
    +

    Execute Custom Workflow

    +
    # Run workflow
    +provisioning workflow run deploy-application
    +
    +# Monitor workflow
    +provisioning workflow status deploy-application
    +
    +# View workflow history
    +provisioning workflow history
    +
    +

    Troubleshooting

    +

    Common Issues

    +

    Server Creation Fails

    +
    # Enable debug logging
    +provisioning --debug server create --infra simple-server
    +
    +# Check provider connectivity
    +provisioning providers
    +
    +# Validate credentials
    +provisioning validate config
    +
    +

    Task Service Installation Fails

    +
    # Check server connectivity
    +provisioning server ssh dev-web-01
    +
    +# Verify dependencies
    +provisioning taskserv check-deps containerd
    +
    +# Retry installation
    +provisioning taskserv create containerd --force
    +
    +

    Cluster Deployment Fails

    +
    # Check cluster status
    +provisioning cluster status k8s-dev-cluster
    +
    +# View cluster logs
    +provisioning cluster logs k8s-dev-cluster
    +
    +# Reset and retry
    +provisioning cluster reset k8s-dev-cluster
    +provisioning cluster create --infra k8s-cluster
    +
    +

    Next Steps

    +

    Production Deployment

    + +

    Advanced Features

    + +

    Learning Resources

    + +

    Summary

    +

    You’ve completed the from-scratch guide and learned:

    +
      +
    • Platform installation and configuration
    • +
    • Provider credential setup
    • +
    • Workspace creation and management
    • +
    • Infrastructure definition with Nickel
    • +
    • Server and task service deployment
    • +
    • Kubernetes cluster deployment
    • +
    • Security configuration
    • +
    • Multi-cloud deployment
    • +
    • Monitoring and maintenance
    • +
    • Custom workflow creation
    • +
    +

    Your Provisioning platform is now ready for production use.

    +

    Workspace Management

    +

    Multi-Cloud Deployment

    +

    Comprehensive guide to deploying and managing infrastructure across multiple cloud providers +using the Provisioning platform. This guide covers strategies, patterns, and real-world examples +for building resilient multi-cloud architectures.

    +

    Overview

    +

    Multi-cloud deployment enables:

    +
      +
    • Vendor independence - Avoid lock-in to single cloud provider
    • +
    • Geographic distribution - Deploy closer to users worldwide
    • +
    • Resilience - Survive provider outages or regional failures
    • +
    • Cost optimization - Leverage competitive pricing across providers
    • +
    • Compliance - Meet data residency and sovereignty requirements
    • +
    • Performance - Optimize latency through strategic placement
    • +
    +

    Multi-Cloud Strategies

    +

    Strategy 1: Primary-Backup Architecture

    +

    One provider serves production traffic, another provides disaster recovery.

    +

    Use cases:

    +
      +
    • Cost-conscious deployments
    • +
    • Regulatory backup requirements
    • +
    • Testing multi-cloud capabilities
    • +
    +

    Example topology:

    +
    Primary (UpCloud EU)          Backup (AWS US)
    +├── Production workloads      ├── Standby replicas
    +├── Active databases          ├── Read-only databases
    +├── Live traffic              └── Failover ready
    +└── Real-time sync ────────────>
    +
    +

    Pros: Simple management, lower costs, proven failover +Cons: Backup resources underutilized, sync lag

    +

    Strategy 2: Active-Active Architecture

    +

    Multiple providers serve production traffic simultaneously.

    +

    Use cases:

    +
      +
    • High availability requirements
    • +
    • Global user base
    • +
    • Zero-downtime deployments
    • +
    +

    Example topology:

    +
    UpCloud (EU)                  AWS (US)                      Local (Development)
    +├── EU traffic                ├── US traffic                ├── Testing
    +├── Primary database          ├── Primary database          ├── CI/CD
    +└── Global load balancer ←────┴──────────────────────────────┘
    +
    +

    Pros: Maximum availability, optimized latency, full utilization +Cons: Complex management, higher costs, data consistency challenges

    +

    Strategy 3: Specialized Workload Distribution

    +

    Different providers for different workload types based on strengths.

    +

    Use cases:

    +
      +
    • Heterogeneous workloads
    • +
    • Cost optimization
    • +
    • Leveraging provider-specific services
    • +
    +

    Example topology:

    +
    UpCloud                       AWS                           Local
    +├── Compute-intensive         ├── Object storage (S3)       ├── Development
    +├── Kubernetes clusters       ├── Managed databases (RDS)   └── Testing
    +└── High-performance VMs      └── Serverless (Lambda)
    +
    +

    Pros: Optimize for provider strengths, cost-effective, flexible +Cons: Complex integration, vendor-specific knowledge required

    +

    Strategy 4: Compliance-Driven Architecture

    +

    Provider selection based on regulatory and data residency requirements.

    +

    Use cases:

    +
      +
    • GDPR compliance
    • +
    • Data sovereignty
    • +
    • Industry regulations (HIPAA, PCI-DSS)
    • +
    +

    Example topology:

    +
    UpCloud (EU - GDPR)           AWS (US - FedRAMP)            On-Premises (Sensitive)
    +├── EU customer data          ├── US customer data          ├── PII storage
    +├── GDPR-compliant            ├── US compliance             └── Encrypted backups
    +└── Regional processing       └── Federal workloads
    +
    +

    Pros: Meets compliance requirements, data sovereignty +Cons: Geographic constraints, complex data management

    +

    Infrastructure Definition

    +

    Multi-Provider Server Configuration

    +

    Define servers across multiple providers using Nickel:

    +
    # infra/multi-cloud-servers.ncl
    +{
    +  metadata = {
    +    name = "multi-cloud-infrastructure"
    +    environment = 'production
    +  }
    +
    +  infrastructure = {
    +    servers = [
    +      # UpCloud servers (EU region)
    +      {
    +        name = "upcloud-web-01"
    +        provider = "upcloud"
    +        zone = "de-fra1"
    +        plan = "medium"
    +        role = 'web
    +        backup_enabled = true
    +        tags = ["frontend", "europe"]
    +      }
    +      {
    +        name = "upcloud-web-02"
    +        provider = "upcloud"
    +        zone = "fi-hel1"
    +        plan = "medium"
    +        role = 'web
    +        backup_enabled = true
    +        tags = ["frontend", "europe"]
    +      }
    +
    +      # AWS servers (US region)
    +      {
    +        name = "aws-api-01"
    +        provider = "aws"
    +        zone = "us-east-1a"
    +        plan = "t3.large"
    +        role = 'api
    +        backup_enabled = true
    +        tags = ["backend", "americas"]
    +      }
    +      {
    +        name = "aws-api-02"
    +        provider = "aws"
    +        zone = "us-west-2a"
    +        plan = "t3.large"
    +        role = 'api
    +        backup_enabled = true
    +        tags = ["backend", "americas"]
    +      }
    +
    +      # Local provider (development/testing)
    +      {
    +        name = "local-test-01"
    +        provider = "local"
    +        zone = "localhost"
    +        plan = "small"
    +        role = 'test
    +        backup_enabled = false
    +        tags = ["testing", "development"]
    +      }
    +    ]
    +  }
    +
    +  networking = {
    +    vpn_mesh = true
    +    cross_provider_routing = true
    +    dns_strategy = 'geo_distributed
    +  }
    +}
    +
    +

    Batch Workflow for Multi-Cloud

    +

    Use batch workflows for orchestrated multi-cloud deployments:

    +
    # infra/multi-cloud-batch.ncl
    +{
    +  batch_workflow = {
    +    name = "global-deployment"
    +    description = "Deploy infrastructure across three cloud providers"
    +
    +    operations = [
    +      {
    +        id = "upcloud-eu"
    +        provider = "upcloud"
    +        region = "de-fra1"
    +        servers = [
    +          {name = "upcloud-web-01", plan = "medium", role = 'web}
    +          {name = "upcloud-db-01", plan = "large", role = 'database}
    +        ]
    +        taskservs = ["containerd", "nginx", "postgresql"]
    +        priority = 1
    +      }
    +
    +      {
    +        id = "aws-us"
    +        provider = "aws"
    +        region = "us-east-1"
    +        servers = [
    +          {name = "aws-api-01", plan = "t3.large", role = 'api}
    +          {name = "aws-cache-01", plan = "t3.medium", role = 'cache}
    +        ]
    +        taskservs = ["containerd", "docker", "redis"]
    +        dependencies = ["upcloud-eu"]
    +        priority = 2
    +      }
    +
    +      {
    +        id = "local-dev"
    +        provider = "local"
    +        region = "localhost"
    +        servers = [
    +          {name = "local-test-01", plan = "small", role = 'test}
    +        ]
    +        taskservs = ["containerd"]
    +        priority = 3
    +      }
    +    ]
    +
    +    execution = {
    +      parallel_limit = 2
    +      retry_failed = true
    +      max_retries = 3
    +      checkpoint_enabled = true
    +    }
    +  }
    +}
    +
    +

    Deployment Patterns

    +

    Pattern 1: Sequential Deployment

    +

    Deploy providers one at a time to minimize risk.

    +
    # Deploy to primary provider first
    +provisioning batch submit infra/upcloud-primary.ncl
    +
    +# Verify primary deployment
    +provisioning server list --provider upcloud
    +provisioning server status upcloud-web-01
    +
    +# Deploy to secondary provider
    +provisioning batch submit infra/aws-secondary.ncl
    +
    +# Verify secondary deployment
    +provisioning server list --provider aws
    +
    +

    Advantages:

    +
      +
    • Controlled rollout
    • +
    • Easy troubleshooting
    • +
    • Clear rollback path
    • +
    +

    Disadvantages:

    +
      +
    • Slower deployment
    • +
    • Sequential dependencies
    • +
    +

    Pattern 2: Parallel Deployment

    +

    Deploy to multiple providers simultaneously for speed.

    +
    # Submit multi-cloud batch workflow
    +provisioning batch submit infra/multi-cloud-batch.ncl
    +
    +# Monitor all operations
    +provisioning batch status
    +
    +# Check progress per provider
    +provisioning batch operations --filter provider=upcloud
    +provisioning batch operations --filter provider=aws
    +
    +

    Advantages:

    +
      +
    • Fast deployment
    • +
    • Efficient resource usage
    • +
    • Parallel testing
    • +
    +

    Disadvantages:

    +
      +
    • Complex failure handling
    • +
    • Resource contention
    • +
    • Harder troubleshooting
    • +
    +

    Pattern 3: Blue-Green Multi-Cloud

    +

    Deploy new infrastructure in parallel, then switch traffic.

    +
    # infra/blue-green-multi-cloud.ncl
    +{
    +  deployment = {
    +    strategy = 'blue_green
    +
    +    blue_environment = {
    +      upcloud = {servers = [{name = "upcloud-web-01-blue", role = 'web}]}
    +      aws = {servers = [{name = "aws-api-01-blue", role = 'api}]}
    +    }
    +
    +    green_environment = {
    +      upcloud = {servers = [{name = "upcloud-web-01-green", role = 'web}]}
    +      aws = {servers = [{name = "aws-api-01-green", role = 'api}]}
    +    }
    +
    +    traffic_switch = {
    +      type = 'dns
    +      validation_required = true
    +      rollback_timeout_seconds = 300
    +    }
    +  }
    +}
    +
    +
    # Deploy green environment
    +provisioning deployment create --infra blue-green-multi-cloud --target green
    +
    +# Validate green environment
    +provisioning deployment validate green
    +
    +# Switch traffic to green
    +provisioning deployment switch-traffic green
    +
    +# Decommission blue environment
    +provisioning deployment delete blue
    +
    +

    Network Configuration

    +

    Cross-Provider VPN Mesh

    +

    Connect servers across providers using VPN mesh:

    +
    # infra/vpn-mesh.ncl
    +{
    +  networking = {
    +    vpn_mesh = {
    +      enabled = true
    +      encryption = 'wireguard
    +
    +      peers = [
    +        {
    +          name = "upcloud-gateway"
    +          provider = "upcloud"
    +          public_ip = "auto"
    +          private_subnet = "10.0.1.0/24"
    +        }
    +        {
    +          name = "aws-gateway"
    +          provider = "aws"
    +          public_ip = "auto"
    +          private_subnet = "10.0.2.0/24"
    +        }
    +        {
    +          name = "local-gateway"
    +          provider = "local"
    +          public_ip = "192.168.1.1"
    +          private_subnet = "10.0.3.0/24"
    +        }
    +      ]
    +
    +      routing = {
    +        dynamic_routes = true
    +        bgp_enabled = false
    +        static_routes = [
    +          {from = "10.0.1.0/24", to = "10.0.2.0/24", via = "aws-gateway"}
    +          {from = "10.0.2.0/24", to = "10.0.1.0/24", via = "upcloud-gateway"}
    +        ]
    +      }
    +    }
    +  }
    +}
    +
    +

    Global DNS Configuration

    +

    Configure geo-distributed DNS for optimal routing:

    +
    # infra/global-dns.ncl
    +{
    +  dns = {
    +    provider = 'cloudflare  # or 'route53, 'custom
    +
    +    zones = [
    +      {
    +        name = "example.com"
    +        type = 'primary
    +
    +        records = [
    +          {
    +            name = "eu"
    +            type = 'A
    +            ttl = 300
    +            values = ["upcloud-web-01.ip", "upcloud-web-02.ip"]
    +            geo_location = 'europe
    +          }
    +          {
    +            name = "us"
    +            type = 'A
    +            ttl = 300
    +            values = ["aws-api-01.ip", "aws-api-02.ip"]
    +            geo_location = 'americas
    +          }
    +          {
    +            name = "@"
    +            type = 'CNAME
    +            ttl = 60
    +            value = "global-lb.example.com"
    +            geo_routing = 'latency_based
    +          }
    +        ]
    +      }
    +    ]
    +
    +    health_checks = [
    +      {target = "upcloud-web-01", interval_seconds = 30}
    +      {target = "aws-api-01", interval_seconds = 30}
    +    ]
    +  }
    +}
    +
    +

    Data Replication

    +

    Database Replication Across Providers

    +

    Configure cross-provider database replication:

    +
    # infra/database-replication.ncl
    +{
    +  databases = {
    +    postgresql = {
    +      primary = {
    +        provider = "upcloud"
    +        server = "upcloud-db-01"
    +        version = "15"
    +        replication_role = 'primary
    +      }
    +
    +      replicas = [
    +        {
    +          provider = "aws"
    +          server = "aws-db-replica-01"
    +          version = "15"
    +          replication_role = 'replica
    +          replication_lag_max_seconds = 30
    +          failover_priority = 1
    +        }
    +        {
    +          provider = "local"
    +          server = "local-db-backup-01"
    +          version = "15"
    +          replication_role = 'replica
    +          replication_lag_max_seconds = 300
    +          failover_priority = 2
    +        }
    +      ]
    +
    +      replication = {
    +        method = 'streaming
    +        ssl_required = true
    +        compression = true
    +        conflict_resolution = 'primary_wins
    +      }
    +    }
    +  }
    +}
    +
    +

    Object Storage Sync

    +

    Synchronize object storage across providers:

    +
    # Configure cross-provider storage sync
    +cat > infra/storage-sync.ncl <<'EOF'
    +{
    +  storage = {
    +    sync_policy = {
    +      source = {
    +        provider = "upcloud"
    +        bucket = "primary-storage"
    +        region = "de-fra1"
    +      }
    +
    +      destinations = [
    +        {
    +          provider = "aws"
    +          bucket = "backup-storage"
    +          region = "us-east-1"
    +          sync_interval_minutes = 15
    +        }
    +      ]
    +
    +      filters = {
    +        include_patterns = ["*.pdf", "*.jpg", "backups/*"]
    +        exclude_patterns = ["temp/*", "*.tmp"]
    +      }
    +
    +      conflict_resolution = 'timestamp_wins
    +    }
    +  }
    +}
    +EOF
    +
    +

    Kubernetes Multi-Cloud

    +

    Cluster Federation

    +

    Deploy Kubernetes clusters across providers with federation:

    +
    # infra/k8s-federation.ncl
    +{
    +  kubernetes_federation = {
    +    clusters = [
    +      {
    +        name = "upcloud-eu-cluster"
    +        provider = "upcloud"
    +        region = "de-fra1"
    +        control_plane_count = 3
    +        worker_count = 5
    +        version = "1.29.0"
    +      }
    +      {
    +        name = "aws-us-cluster"
    +        provider = "aws"
    +        region = "us-east-1"
    +        control_plane_count = 3
    +        worker_count = 5
    +        version = "1.29.0"
    +      }
    +    ]
    +
    +    federation = {
    +      enabled = true
    +      control_plane_cluster = "upcloud-eu-cluster"
    +
    +      networking = {
    +        cluster_mesh = true
    +        service_discovery = 'dns
    +        cross_cluster_load_balancing = true
    +      }
    +
    +      workload_distribution = {
    +        strategy = 'geo_aware
    +        prefer_local = true
    +        failover_enabled = true
    +      }
    +    }
    +  }
    +}
    +
    +

    Multi-Cluster Deployments

    +

    Deploy applications across multiple Kubernetes clusters:

    +
    # k8s/multi-cluster-deployment.yaml
    +apiVersion: v1
    +kind: Namespace
    +metadata:
    +  name: multi-cloud-app
    +---
    +apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: web-frontend
    +  namespace: multi-cloud-app
    +  labels:
    +    app: frontend
    +    region: europe
    +spec:
    +  replicas: 3
    +  selector:
    +    matchLabels:
    +      app: frontend
    +  template:
    +    metadata:
    +      labels:
    +        app: frontend
    +    spec:
    +      containers:
    +      - name: nginx
    +        image: nginx:latest
    +        ports:
    +        - containerPort: 80
    +
    +
    # Deploy to multiple clusters
    +export UPCLOUD_KUBECONFIG=~/.kube/config-upcloud
    +export AWS_KUBECONFIG=~/.kube/config-aws
    +
    +kubectl --kubeconfig $UPCLOUD_KUBECONFIG apply -f k8s/multi-cluster-deployment.yaml
    +kubectl --kubeconfig $AWS_KUBECONFIG apply -f k8s/multi-cluster-deployment.yaml
    +
    +# Verify deployments
    +kubectl --kubeconfig $UPCLOUD_KUBECONFIG get pods -n multi-cloud-app
    +kubectl --kubeconfig $AWS_KUBECONFIG get pods -n multi-cloud-app
    +
    +

    Cost Optimization

    +

    Provider Selection by Workload

    +

    Optimize costs by choosing the most cost-effective provider per workload:

    +
    # infra/cost-optimized.ncl
    +{
    +  cost_optimization = {
    +    workloads = [
    +      {
    +        name = "compute-intensive"
    +        provider = "upcloud"  # Best compute pricing
    +        plan = "large"
    +        count = 10
    +      }
    +      {
    +        name = "storage-heavy"
    +        provider = "aws"  # Best storage pricing with S3
    +        plan = "medium"
    +        count = 5
    +        storage_type = 's3
    +      }
    +      {
    +        name = "development"
    +        provider = "local"  # Zero cost
    +        plan = "small"
    +        count = 3
    +      }
    +    ]
    +
    +    budget_limits = {
    +      monthly_max_usd = 5000
    +      alerts = [
    +        {threshold_percent = 75, notify = "[ops-team@example.com](mailto:ops-team@example.com)"}
    +        {threshold_percent = 90, notify = "[finance@example.com](mailto:finance@example.com)"}
    +      ]
    +    }
    +  }
    +}
    +
    +

    Reserved Instance Strategy

    +

    Leverage reserved instances for predictable workloads:

    +
    # Configure reserved instances
    +cat > infra/reserved-instances.ncl <<'EOF'
    +{
    +  reserved_instances = {
    +    upcloud = {
    +      commitment = 'yearly
    +      instances = [
    +        {plan = "medium", count = 5}
    +        {plan = "large", count = 2}
    +      ]
    +    }
    +
    +    aws = {
    +      commitment = 'yearly
    +      instances = [
    +        {type = "t3.large", count = 3}
    +        {type = "t3.xlarge", count = 1}
    +      ]
    +      savings_plan = true
    +    }
    +  }
    +}
    +EOF
    +
    +

    Monitoring Multi-Cloud

    +

    Centralized Monitoring

    +

    Deploy unified monitoring across providers:

    +
    # infra/monitoring.ncl
    +{
    +  monitoring = {
    +    prometheus = {
    +      enabled = true
    +      federation = true
    +
    +      instances = [
    +        {provider = "upcloud", region = "de-fra1"}
    +        {provider = "aws", region = "us-east-1"}
    +      ]
    +
    +      scrape_configs = [
    +        {
    +          job_name = "upcloud-nodes"
    +          static_configs = [{targets = ["upcloud-*.internal:9100"]}]
    +        }
    +        {
    +          job_name = "aws-nodes"
    +          static_configs = [{targets = ["aws-*.internal:9100"]}]
    +        }
    +      ]
    +
    +      remote_write = {
    +        url = " [https://central-prometheus.example.com/api/v1/write"](https://central-prometheus.example.com/api/v1/write")
    +        compression = true
    +      }
    +    }
    +
    +    grafana = {
    +      enabled = true
    +      dashboards = ["multi-cloud-overview", "per-provider", "cost-analysis"]
    +      alerts = ["high-latency", "provider-down", "budget-exceeded"]
    +    }
    +  }
    +}
    +
    +

    Disaster Recovery

    +

    Cross-Provider Failover

    +

    Configure automatic failover between providers:

    +
    # infra/disaster-recovery.ncl
    +{
    +  disaster_recovery = {
    +    primary_provider = "upcloud"
    +    secondary_provider = "aws"
    +
    +    failover_triggers = [
    +      {condition = 'provider_unavailable, action = 'switch_to_secondary}
    +      {condition = 'health_check_failed, threshold = 3, action = 'switch_to_secondary}
    +      {condition = 'latency_exceeded, threshold_ms = 1000, action = 'switch_to_secondary}
    +    ]
    +
    +    failover_process = {
    +      dns_ttl_seconds = 60
    +      health_check_interval_seconds = 10
    +      automatic = true
    +      notification_channels = ["email", "slack"]
    +    }
    +
    +    backup_strategy = {
    +      frequency = 'daily
    +      retention_days = 30
    +      cross_region = true
    +      cross_provider = true
    +    }
    +  }
    +}
    +
    +

    Best Practices

    +

    Configuration Management

    +
      +
    • Use Nickel for all infrastructure definitions
    • +
    • Version control all configuration files
    • +
    • Use workspace per environment (dev/staging/prod)
    • +
    • Implement configuration validation before deployment
    • +
    • Maintain provider abstraction where possible
    • +
    +

    Security

    +
      +
    • Encrypt cross-provider communication (VPN, TLS)
    • +
    • Use separate credentials per provider
    • +
    • Implement RBAC consistently across providers
    • +
    • Enable audit logging on all providers
    • +
    • Encrypt data at rest and in transit
    • +
    +

    Deployment

    +
      +
    • Test in single-provider environment first
    • +
    • Use batch workflows for complex multi-cloud deployments
    • +
    • Enable checkpoints for long-running deployments
    • +
    • Implement progressive rollout strategies
    • +
    • Maintain rollback procedures
    • +
    +

    Monitoring

    +
      +
    • Centralize logs and metrics
    • +
    • Monitor cross-provider network latency
    • +
    • Track costs per provider
    • +
    • Alert on provider-specific failures
    • +
    • Measure failover readiness
    • +
    +

    Cost Management

    +
      +
    • Regular cost audits per provider
    • +
    • Use reserved instances for predictable loads
    • +
    • Implement budget alerts
    • +
    • Optimize data transfer costs
    • +
    • Consider spot instances for non-critical workloads
    • +
    +

    Troubleshooting

    +

    Provider Connectivity Issues

    +
    # Test provider connectivity
    +provisioning providers
    +
    +# Test specific provider
    +provisioning provider test upcloud
    +provisioning provider test aws
    +
    +# Debug network connectivity
    +provisioning network test --from upcloud-web-01 --to aws-api-01
    +
    +

    Cross-Provider Communication Failures

    +
    # Check VPN mesh status
    +provisioning network vpn-status
    +
    +# Test cross-provider routes
    +provisioning network trace-route --from upcloud-web-01 --to aws-api-01
    +
    +# Verify firewall rules
    +provisioning network firewall-check --provider upcloud
    +provisioning network firewall-check --provider aws
    +
    +

    Data Replication Lag

    +
    # Check replication status
    +provisioning database replication-status postgresql
    +
    +# Force replication sync
    +provisioning database sync --source upcloud-db-01 --target aws-db-replica-01
    +
    +# View replication lag metrics
    +provisioning database metrics --metric replication_lag
    +
    +

    See Also

    + +

    Custom Extensions

    +

    Create custom providers, task services, and clusters to extend the Provisioning platform for your specific infrastructure needs.

    +

    Overview

    +

    Extensions allow you to:

    +
      +
    • Add support for new cloud providers
    • +
    • Create custom task services for specialized software
    • +
    • Define cluster templates for common deployment patterns
    • +
    • Integrate with proprietary infrastructure
    • +
    +

    Extension Types

    +

    Providers

    +

    Cloud or infrastructure backend integrations.

    +

    Use Cases: Custom private cloud, bare metal provisioning, proprietary APIs

    +

    Task Services

    +

    Installable software components.

    +

    Use Cases: Internal applications, specialized databases, custom monitoring

    +

    Clusters

    +

    Coordinated service groups.

    +

    Use Cases: Standard deployment patterns, application stacks, reference architectures

    +

    Creating a Custom Provider

    +

    Directory Structure

    +
    provisioning/extensions/providers/my-provider/
    +├── provider.ncl          # Provider schema
    +├── resources/
    +│   ├── server.nu        # Server operations
    +│   ├── network.nu       # Network operations
    +│   └── storage.nu       # Storage operations
    +└── README.md
    +
    +

    Provider Schema (provider.ncl)

    +
    {
    +  name = "my-provider",
    +  description = "Custom infrastructure provider",
    +
    +  config_schema = {
    +    api_endpoint | String,
    +    api_key | String,
    +    region | String | default = "default",
    +    timeout_seconds | Number | default = 300,
    +  },
    +
    +  capabilities = {
    +    servers = true,
    +    networks = true,
    +    storage = true,
    +    load_balancers = false,
    +  }
    +}
    +
    +

    Server Operations (resources/server.nu)

    +
    # Create server
    +export def "server create" [
    +  name: string
    +  plan: string
    +  --zone: string = "default"
    +] {
    +  let config = $env.PROVIDER_CONFIG | from json
    +
    +  # Call provider API
    +  http post $"($config.api_endpoint)/servers" {
    +    name: $name,
    +    plan: $plan,
    +    zone: $zone
    +  } | from json
    +}
    +
    +# Delete server
    +export def "server delete" [name: string] {
    +  let config = $env.PROVIDER_CONFIG | from json
    +  http delete $"($config.api_endpoint)/servers/($name)"
    +}
    +
    +# List servers
    +export def "server list" [] {
    +  let config = $env.PROVIDER_CONFIG | from json
    +  http get $"($config.api_endpoint)/servers" | from json
    +}
    +
    +

    Creating a Custom Task Service

    +

    Directory Structure

    +
    provisioning/extensions/taskservs/my-service/
    +├── service.ncl           # Service schema
    +├── install.nu            # Installation script
    +├── configure.nu          # Configuration script
    +├── health-check.nu       # Health validation
    +└── README.md
    +
    +

    Service Schema (service.ncl)

    +
    {
    +  name = "my-service",
    +  version = "1.0.0",
    +  description = "Custom service deployment",
    +
    +  dependencies = ["kubernetes"],
    +
    +  config_schema = {
    +    replicas | Number | default = 3,
    +    port | Number | default = 8080,
    +    storage_size_gb | Number | default = 10,
    +    image | String,
    +  }
    +}
    +
    +

    Installation Script (install.nu)

    +
    export def "taskserv install" [config: record] {
    +  print $"Installing ($config.name)..."
    +
    +  # Create namespace
    +  kubectl create namespace $config.name
    +
    +  # Deploy application
    +  kubectl apply -f - <<EOF
    +apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: ($config.name)
    +  namespace: ($config.name)
    +spec:
    +  replicas: ($config.replicas)
    +  template:
    +    spec:
    +      containers:
    +      - name: app
    +        image: ($config.image)
    +        ports:
    +        - containerPort: ($config.port)
    +EOF
    +
    +  {status: "installed"}
    +}
    +
    +

    Health Check (health-check.nu)

    +
    export def "taskserv health" [name: string] {
    +  let pods = (kubectl get pods -n $name -o json | from json)
    +
    +  let ready = ($pods.items | all | { | p $p.status.phase == "Running"})
    +
    +  if $ready {
    +    {status: "healthy", ready_pods: ($pods.items | length)}
    +  } else {
    +    {status: "unhealthy", reason: "pods not running"}
    +  }
    +}
    +
    +

    Creating a Custom Cluster

    +

    Directory Structure

    +
    provisioning/extensions/clusters/my-cluster/
    +├── cluster.ncl           # Cluster definition
    +├── deploy.nu             # Deployment script
    +└── README.md
    +
    +

    Cluster Schema (cluster.ncl)

    +
    {
    +  name = "my-cluster",
    +  version = "1.0.0",
    +  description = "Custom application stack",
    +
    +  components = {
    +    servers = [
    +      {name = "app", count = 3, plan = 'medium},
    +      {name = "db", count = 1, plan = 'large},
    +    ],
    +    services = ["nginx", "postgresql", "redis"],
    +  },
    +
    +  config_schema = {
    +    domain | String,
    +    app_replicas | Number | default = 3,
    +    db_storage_gb | Number | default = 100,
    +  }
    +}
    +
    +

    Testing Extensions

    +

    Local Testing

    +
    # Test provider operations
    +provisioning provider test my-provider --local
    +
    +# Test task service installation
    +provisioning taskserv install my-service --dry-run
    +
    +# Validate cluster definition
    +provisioning cluster validate my-cluster
    +
    +

    Integration Testing

    +
    # Create test workspace
    +provisioning workspace create test-extensions
    +
    +# Deploy extension
    +provisioning extension deploy my-provider
    +
    +# Test deployment
    +provisioning server create test-server --provider my-provider
    +
    +

    Extension Best Practices

    +
      +
    1. Define clear schemas - Use Nickel contracts for type safety
    2. +
    3. Implement health checks - Validate service state
    4. +
    5. Handle errors gracefully - Return structured error messages
    6. +
    7. Document configuration - Provide clear examples
    8. +
    9. Version extensions - Track compatibility
    10. +
    11. Test thoroughly - Unit and integration tests
    12. +
    +

    Publishing Extensions

    +

    Extension Registry

    +

    Share extensions with the community:

    +
    # Package extension
    +provisioning extension package my-provider
    +
    +# Publish to registry
    +provisioning extension publish my-provider --registry community
    +
    +

    Private Registry

    +

    Host internal extensions:

    +
    # Configure private registry
    +provisioning config set extension_registry  [https://registry.internal](https://registry.internal)
    +
    +# Publish privately
    +provisioning extension publish my-provider --private
    +
    +

    Examples

    +

    Custom Database Provider

    +

    Provider for proprietary database platform:

    +
    {
    +  name = "mydb-provider",
    +  capabilities = {databases = true},
    +  config_schema = {
    +    cluster_endpoint | String,
    +    admin_token | String,
    +  }
    +}
    +
    +

    Monitoring Stack Service

    +

    Complete monitoring deployment:

    +
    {
    +  name = "monitoring-stack",
    +  dependencies = ["prometheus", "grafana", "loki"],
    +  config_schema = {
    +    retention_days | Number | default = 30,
    +    alert_email | String,
    +  }
    +}
    +
    +

    Troubleshooting

    +

    Extension Not Loading

    +
    # Verify extension structure
    +provisioning extension validate my-extension
    +
    +# Check logs
    +provisioning logs extension-loader --tail 100
    +
    +

    Deployment Failures

    +
    # Enable debug logging
    +export PROVISIONING_LOG_LEVEL=debug
    +provisioning taskserv install my-service
    +
    +# Check service logs
    +provisioning taskserv logs my-service
    +
    +

    References

    + +

    Disaster Recovery

    +

    Comprehensive disaster recovery procedures for the Provisioning platform and managed infrastructure.

    +

    Overview

    +

    Disaster recovery (DR) ensures business continuity through:

    +
      +
    • Automated backups
    • +
    • Point-in-time recovery
    • +
    • Multi-region failover
    • +
    • Data replication
    • +
    • DR testing procedures
    • +
    +

    Recovery Objectives

    +

    RTO (Recovery Time Objective)

    +

    Target time to restore service:

    +
      +
    • Critical Services: < 1 hour
    • +
    • Production Infrastructure: < 4 hours
    • +
    • Development Environment: < 24 hours
    • +
    +

    RPO (Recovery Point Objective)

    +

    Maximum acceptable data loss:

    +
      +
    • Production Databases: < 5 minutes (continuous replication)
    • +
    • Configuration: < 1 hour (hourly backups)
    • +
    • Workspace State: < 15 minutes (incremental backups)
    • +
    +

    Backup Strategy

    +

    Automated Backups

    +

    Configure automatic backups:

    +
    {
    +  backup = {
    +    enabled = true,
    +    schedule = "0 */6 * * *",  # Every 6 hours
    +    retention_days = 30,
    +
    +    targets = [
    +      {type = 'workspace_state, enabled = true},
    +      {type = 'infrastructure_config, enabled = true},
    +      {type = 'platform_data, enabled = true},
    +    ],
    +
    +    storage = {
    +      backend = 's3,
    +      bucket = "provisioning-backups",
    +      encryption = true,
    +    }
    +  }
    +}
    +
    +

    Backup Types

    +

    Full Backups:

    +
    # Full platform backup
    +provisioning backup create --type full --name "pre-upgrade-$(date +%Y%m%d)"
    +
    +# Full workspace backup
    +provisioning workspace backup production --full
    +
    +

    Incremental Backups:

    +
    # Incremental backup (changed files only)
    +provisioning backup create --type incremental
    +
    +# Automated incremental
    +provisioning config set backup.incremental_enabled true
    +
    +

    Snapshot Backups:

    +
    # Infrastructure snapshot
    +provisioning infrastructure snapshot --name "stable-v2"
    +
    +# Database snapshot
    +provisioning taskserv backup postgresql --snapshot
    +
    +

    Data Replication

    +

    Cross-Region Replication

    +

    Replicate to secondary region:

    +
    {
    +  replication = {
    +    enabled = true,
    +    mode = 'async,
    +
    +    primary = {region = "eu-west-1", provider = 'aws},
    +    secondary = {region = "us-east-1", provider = 'aws},
    +
    +    replication_lag_max_seconds = 300,
    +  }
    +}
    +
    +

    Database Replication

    +
    # Configure database replication
    +provisioning taskserv configure postgresql --replication \
    +  --primary db-eu-west-1 \
    +  --standby db-us-east-1 \
    +  --sync-mode async
    +
    +

    Disaster Scenarios

    +

    Complete Region Failure

    +

    Procedure:

    +
      +
    1. Detect Failure:
    2. +
    +
    # Check region health
    +provisioning health check --region eu-west-1
    +
    +
      +
    1. Initiate Failover:
    2. +
    +
    # Promote secondary region
    +provisioning disaster-recovery failover --to us-east-1 --confirm
    +
    +# Verify services
    +provisioning health check --all
    +
    +
      +
    1. Update DNS:
    2. +
    +
    # Point traffic to secondary region
    +provisioning dns update --region us-east-1
    +
    +
      +
    1. Monitor:
    2. +
    +
    # Watch recovery progress
    +provisioning disaster-recovery status --follow
    +
    +

    Data Corruption

    +

    Procedure:

    +
      +
    1. Identify Corruption:
    2. +
    +
    # Validate data integrity
    +provisioning validate data --workspace production
    +
    +
      +
    1. Find Clean Backup:
    2. +
    +
    # List available backups
    +provisioning backup list --before "2024-01-15 10:00"
    +
    +# Verify backup integrity
    +provisioning backup verify backup-20240115-0900
    +
    +
      +
    1. Restore from Backup:
    2. +
    +
    # Restore to point in time
    +provisioning restore --backup backup-20240115-0900 \
    +  --workspace production --confirm
    +
    +

    Platform Service Failure

    +

    Procedure:

    +
      +
    1. Identify Failed Service:
    2. +
    +
    # Check platform health
    +provisioning platform health
    +
    +# Service logs
    +provisioning platform logs orchestrator --tail 100
    +
    +
      +
    1. Restart Service:
    2. +
    +
    # Restart failed service
    +provisioning platform restart orchestrator
    +
    +# Verify health
    +provisioning platform health orchestrator
    +
    +
      +
    1. Restore from Backup (if needed):
    2. +
    +
    # Restore service data
    +provisioning platform restore orchestrator \
    +  --from-backup latest
    +
    +

    Failover Procedures

    +

    Automated Failover

    +

    Configure automatic failover:

    +
    {
    +  failover = {
    +    enabled = true,
    +    health_check_interval_seconds = 30,
    +    failure_threshold = 3,
    +
    +    primary = {region = "eu-west-1"},
    +    secondary = {region = "us-east-1"},
    +
    +    auto_failback = false,  # Manual failback
    +  }
    +}
    +
    +

    Manual Failover

    +
    # Initiate manual failover
    +provisioning disaster-recovery failover \
    +  --from eu-west-1 \
    +  --to us-east-1 \
    +  --verify-replication \
    +  --confirm
    +
    +# Verify failover
    +provisioning disaster-recovery verify
    +
    +# Update routing
    +provisioning disaster-recovery update-routing
    +
    +

    Recovery Procedures

    +

    Workspace Recovery

    +
    # List workspace backups
    +provisioning workspace backups production
    +
    +# Restore workspace
    +provisioning workspace restore production \
    +  --backup backup-20240115-1200 \
    +  --target-region us-east-1
    +
    +# Verify recovery
    +provisioning workspace validate production
    +
    +

    Infrastructure Recovery

    +
    # Restore infrastructure from Nickel config
    +provisioning infrastructure restore \
    +  --config workspace/infra/production.ncl \
    +  --region us-east-1
    +
    +# Restore from snapshot
    +provisioning infrastructure restore \
    +  --snapshot infra-snapshot-20240115
    +
    +# Verify deployment
    +provisioning infrastructure validate
    +
    +

    Platform Recovery

    +
    # Reinstall platform services
    +provisioning platform install --region us-east-1
    +
    +# Restore platform data
    +provisioning platform restore --from-backup latest
    +
    +# Verify platform health
    +provisioning platform health --all
    +
    +

    DR Testing

    +

    Test Schedule

    +
      +
    • Monthly: Backup restore test
    • +
    • Quarterly: Regional failover drill
    • +
    • Annually: Full DR simulation
    • +
    +

    Backup Restore Test

    +
    # Create test workspace
    +provisioning workspace create dr-test-$(date +%Y%m%d)
    +
    +# Restore latest backup
    +provisioning workspace restore dr-test --backup latest
    +
    +# Validate restore
    +provisioning workspace validate dr-test
    +
    +# Cleanup
    +provisioning workspace delete dr-test --yes
    +
    +

    Failover Drill

    +
    # Simulate regional failure
    +provisioning disaster-recovery simulate-failure \
    +  --region eu-west-1 \
    +  --duration 30m
    +
    +# Monitor automated failover
    +provisioning disaster-recovery status --follow
    +
    +# Validate services in secondary region
    +provisioning health check --region us-east-1 --all
    +
    +# Manual failback after drill
    +provisioning disaster-recovery failback --to eu-west-1
    +
    +

    Monitoring and Alerts

    +

    Backup Monitoring

    +
    # Check backup status
    +provisioning backup status
    +
    +# Verify backup integrity
    +provisioning backup verify --all --schedule daily
    +
    +# Alert on backup failures
    +provisioning alert create backup-failure \
    +  --condition "backup.status == 'failed'" \
    +  --notify [ops@example.com](mailto:ops@example.com)
    +
    +

    Replication Monitoring

    +
    # Check replication lag
    +provisioning replication status
    +
    +# Alert on lag exceeding threshold
    +provisioning alert create replication-lag \
    +  --condition "replication.lag_seconds > 300" \
    +  --notify [ops@example.com](mailto:ops@example.com)
    +
    +

    Best Practices

    +
      +
    1. Regular testing - Test DR procedures quarterly
    2. +
    3. Automated backups - Never rely on manual backups
    4. +
    5. Multiple regions - Geographic redundancy
    6. +
    7. Monitor replication - Track replication lag
    8. +
    9. Document procedures - Keep runbooks updated
    10. +
    11. Encrypt backups - Protect backup data
    12. +
    13. Verify restores - Test backup integrity
    14. +
    15. Automate failover - Reduce recovery time
    16. +
    +

    References

    + +

    + Provisioning Logo +

    +

    + Provisioning +

    +

    Infrastructure as Code

    +

    Define and manage infrastructure using Nickel, the type-safe configuration +language that serves as Provisioning’s source of truth.

    +

    Overview

    +

    Provisioning’s infrastructure definition system provides:

    +
      +
    • Type-safe configuration via Nickel language with mandatory schema validation and contract enforcement
    • +
    • Complete provider support for AWS, UpCloud, Hetzner, Kubernetes, on-premise, and custom platforms
    • +
    • 50+ task services for specialized infrastructure operations (databases, monitoring, logging, networking)
    • +
    • Pre-built clusters for common patterns (web, OCI registry, cache, distributed computing)
    • +
    • Batch workflows with DAG scheduling, parallel execution, and multi-cloud orchestration
    • +
    • Schema validation with inheritance, merging, and contracts ensuring correctness
    • +
    • Configuration composition with includes, profiles, and environment-specific overrides
    • +
    • Version management with semantic versioning and deprecation paths
    • +
    +

    All infrastructure is defined in Nickel (never TOML) ensuring compile-time correctness and runtime safety.

    +

    Infrastructure Configuration Guides

    +

    Core Configuration

    +
      +
    • Nickel Guide - Syntax, types, contracts, lazy +evaluation, record merging, patterns, best practices for IaC
    • +
    +

    + Nickel Validation Flow Type Checking Contract Validation +

    +
      +
    • Configuration System - Hierarchical loading, +environment variables, profiles, composition, inheritance, validation
    • +
    +

    + Configuration Loading Hierarchy Priority CLI Env User Workspace System +

    +
      +
    • Schemas Reference - Contracts, types, +validation rules, inheritance, composition, custom schema development
    • +
    +

    Resources and Operations

    +
      +
    • +

      Providers Guide - AWS, UpCloud, Hetzner, Kubernetes, +on-premise, demo with capabilities, resources, examples

    • -

      Module loader and extension tools available:

      -
      ./provisioning/core/cli/module-loader --help
      -./provisioning/tools/create-extension.nu --help
      -
      +

      Task Services Guide - 50+ services: databases, +monitoring, logging, networking, CI/CD, storage

    • +
    • +

      Clusters Guide - Web cluster (3-tier), OCI registry, +cache cluster, distributed computing, Kubernetes operators

      +
    • +
    • +

      Batch Workflows - DAG-based scheduling, parallel +execution, logic, error handling, multi-cloud, state management

      +
    • +
    +

    + Batch Workflow DAG Execution Parallel Tasks Dependencies +

    +

    Advanced Topics

    + +

    + Workspace Hierarchy Structure Config Infra Schemas Extensions +

    +
      +
    • +

      Version Management - Semantic versioning, +dependency resolution, compatibility, deprecation, upgrade workflows

      +
    • +
    • +

      Performance Optimization - Configuration +caching, lazy evaluation, parallel validation, incremental updates

      +
    • +
    +

    Nickel as Source of Truth

    +

    Critical principle: Nickel is the source of truth for ALL infrastructure definitions.

    +
      +
    • Nickel: Type-safe, validated, enforced, source of truth
    • +
    • TOML: Generated output only, never hand-edited
    • +
    • JSON/YAML: Generated output only, never source definitions
    • +
    • KCL: Deprecated, completely replaced by Nickel
    • +
    +

    This ensures:

    +
      +
    1. Compile-time validation - Errors caught before deployment
    2. +
    3. Schema enforcement - All configurations conform to contracts
    4. +
    5. Type safety - No runtime configuration errors
    6. +
    7. IDE support - Type hints and autocompletion via schema
    8. +
    9. Evolution - Breaking changes detected and reported
    -

    Quick Start: Creating Your First Extension

    -

    Step 1: Create Extension from Template

    -
    # Interactive creation (recommended for beginners)
    -./provisioning/tools/create-extension.nu interactive
    -
    -# Or direct creation
    -./provisioning/tools/create-extension.nu taskserv my-app \
    -    --author "Your Name" \
    -    --description "My custom application service"
    +

    Configuration Hierarchy

    +

    Configurations load in order of precedence:

    +
    1. Command-line arguments       (highest priority)
    +2. Environment variables        (PROVISIONING_*)
    +3. User configuration          (~/.config/provisioning/user.nickel)
    +4. Workspace configuration     (workspace/config/main.nickel)
    +5. Infrastructure schemas      (provisioning/schemas/)
    +6. System defaults            (provisioning/config/defaults.toml)
    +                               (lowest priority)
     
    -

    Step 2: Navigate and Customize

    -
    # Navigate to your new extension
    -cd extensions/taskservs/my-app
    -
    -# View generated files
    -ls -la
    -# main.ncl - Main taskserv definition
    -# contracts.ncl - Configuration contract/schema
    -# defaults.ncl - Default values
    -# README.md - Documentation template
    -
    -

    Step 3: Customize Configuration

    -

    Edit main.ncl to match your service requirements:

    -
    # contracts.ncl - Define the schema
    -{
    -  MyAppConfig = {
    -    database_url | String,
    -    api_key | String,
    -    debug_mode | Bool,
    -    cpu_request | String,
    -    memory_request | String,
    -    port | Number,
    -  }
    -}
    -
    -# defaults.ncl - Provide sensible defaults
    -{
    -  defaults = {
    -    debug_mode = false,
    -    cpu_request = "200m",
    -    memory_request = "512Mi",
    -    port = 3000,
    -  }
    -}
    -
    -# main.ncl - Combine and export
    -let contracts = import "./contracts.ncl" in
    -let defaults = import "./defaults.ncl" in
    -
    -{
    -  defaults = defaults,
    -  make_config | not_exported = fun overrides =>
    -    defaults.defaults & overrides,
    -}
    -
    -

    Step 4: Test Your Extension

    -
    # Test discovery
    -./provisioning/core/cli/module-loader discover taskservs | grep my-app
    -
    -# Validate Nickel syntax
    -nickel typecheck main.ncl
    -
    -# Validate extension structure
    -./provisioning/tools/create-extension.nu validate ../../../my-app
    -
    -

    Step 5: Use in Workspace

    -
    # Create test workspace
    -mkdir -p /tmp/test-my-app
    -cd /tmp/test-my-app
    -
    -# Initialize workspace
    -../provisioning/tools/workspace-init.nu . init
    -
    -# Load your extension
    -../provisioning/core/cli/module-loader load taskservs . [my-app]
    -
    -# Configure in servers.ncl
    -cat > infra/default/servers.ncl << 'EOF'
    -let my_app = import "../../extensions/taskservs/my-app/main.ncl" in
    -
    -{
    -  servers = [
    -    {
    -      hostname = "app-01",
    -      provider = "local",
    -      plan = "2xCPU-4 GB",
    -      zone = "local",
    -      storages = [{ total = 25 }],
    -      taskservs = [
    -        my_app.make_config {
    -          database_url = "postgresql://db:5432/myapp",
    -          api_key = "secret-key",
    -          debug_mode = false,
    +

    Quick Start Paths

    +

    I’m new to Nickel

    +

    Start with Nickel Guide - language syntax, type system, functions, patterns with infrastructure examples.

    +

    I need to define infrastructure

    +

    Read Configuration System - how configurations load, compose, and validate.

    +

    I want to use AWS/UpCloud/Hetzner

    +

    See Providers Guide - capabilities, resources, configuration examples for each cloud.

    +

    I need databases, monitoring, logging

    +

    Check Task Services Guide - 50+ services with configuration examples.

    +

    I want to deploy web applications

    +

    Review Clusters Guide - pre-built 3-tier web cluster, load balancer, database, caching.

    +

    I need multi-cloud workflows

    +

    Learn Batch Workflows - DAG scheduling across multiple providers.

    +

    I need multi-tenant setup

    +

    Study Multi-Tenancy Patterns - isolation, billing, resource management.

    +

    Example Nickel Configuration

    +
    {
    +  extensions = {
    +    providers = [
    +      {
    +        name = "aws",
    +        version = "1.2.3",
    +        enabled = true,
    +        config = {
    +          region = "us-east-1",
    +          credentials_source = "aws_iam"
             }
    -      ],
    +      }
    +    ]
    +  },
    +
    +  infrastructure = {
    +    networks = [
    +      {
    +        name = "main",
    +        provider = "aws",
    +        cidr = "10.0.0.0/16",
    +        subnets = [
    +          { cidr = "10.0.1.0/24", availability_zone = "us-east-1a" },
    +          { cidr = "10.0.2.0/24", availability_zone = "us-east-1b" }
    +        ]
    +      }
    +    ],
    +
    +    instances = [
    +      {
    +        name = "web-server-1",
    +        provider = "aws",
    +        instance_type = "t3.large",
    +        image = "ubuntu-22.04",
    +        network = "main",
    +        subnet = "10.0.1.0/24"
    +      }
    +    ]
    +  }
    +}
    +
    +

    Schema Contracts

    +

    All infrastructure must conform to schemas. Schemas define:

    +
      +
    • Required fields - Must be provided
    • +
    • Type constraints - Values must match type
    • +
    • Field contracts - Custom validation logic
    • +
    • Defaults - Applied automatically
    • +
    • Documentation - Inline help and examples
    • +
    +

    Validation and Testing

    +

    Before deploying:

    +
      +
    1. Schema validation - provisioning validate config
    2. +
    3. Syntax checking - provisioning validate syntax
    4. +
    5. Policy checks - Custom policy validation
    6. +
    7. Unit tests - Test configuration logic
    8. +
    9. Integration tests - Dry-run with actual providers
    10. +
    + +
      +
    • Provisioning Schemas → See provisioning/schemas/ in codebase
    • +
    • Configuration Examples → See provisioning/docs/src/examples/
    • +
    • Provider Examples → See provisioning/docs/src/examples/aws-deployment-examples.md
    • +
    • Task Services → See provisioning/extensions/ in codebase
    • +
    • API Reference → See provisioning/docs/src/api-reference/
    • +
    +

    Nickel Guide

    +

    Comprehensive guide to using Nickel as the infrastructure-as-code language for the Provisioning platform.

    +

    Critical Principle: Nickel is Source of Truth

    +

    TYPE-SAFETY ALWAYS REQUIRED: ALL configurations MUST be type-safe and validated via Nickel. +TOML is NOT acceptable as source of truth. Validation is NOT optional, NOT “progressive”, +NOT “production-only”. This applies to ALL profiles (developer, production, cicd).

    +

    Nickel is the PRIMARY IaC language. TOML files are GENERATED OUTPUT ONLY, never the source.

    +

    Why Nickel

    +

    Nickel provides:

    +
      +
    • Type Safety: Static type checking catches errors before deployment
    • +
    • Lazy Evaluation: Efficient configuration composition and merging
    • +
    • Contract System: Schema validation with gradual typing
    • +
    • Record Merging: Powerful composition without duplication
    • +
    • LSP Support: IDE integration for autocomplete and validation
    • +
    • Human-Readable: Clear syntax for infrastructure definition
    • +
    +

    Installation

    +
    # macOS (Homebrew)
    +brew install nickel
    +
    +# Linux (Cargo)
    +cargo install nickel-lang-cli
    +
    +# Verify installation
    +nickel --version  # 1.15.1+
    +
    +

    Core Concepts

    +

    Records and Fields

    +

    Records are the fundamental data structure in Nickel:

    +
    {
    +  name = "my-server"
    +  plan = "medium"
    +  zone = "de-fra1"
    +}
    +
    +

    Type Annotations

    +

    Add type safety with contracts:

    +
    {
    +  name : String = "my-server"
    +  plan : String = "medium"
    +  cpu_count : Number = 4
    +  enabled : Bool = true
    +}
    +
    +

    Record Merging

    +

    Compose configurations by merging records:

    +
    let base_config = {
    +  provider = "upcloud"
    +  region = "de-fra1"
    +} in
    +
    +let server_config = base_config & {
    +  name = "web-01"
    +  plan = "medium"
    +} in
    +
    +server_config
    +
    +

    Result:

    +
    {
    +  provider = "upcloud"
    +  region = "de-fra1"
    +  name = "web-01"
    +  plan = "medium"
    +}
    +
    +

    Contracts (Schema Validation)

    +

    Define contracts to validate structure:

    +
    let ServerContract = {
    +  name | String
    +  plan | String | default = "small"
    +  zone | String | default = "de-fra1"
    +  cpu | Number | optional
    +} in
    +
    +{
    +  name = "my-server"
    +  plan = "large"
    +} | ServerContract
    +
    +

    Three-File Pattern (Provisioning Standard)

    +

    The platform uses a standardized three-file pattern for all schemas:

    +

    1. contracts.ncl - Type Definitions

    +

    Defines the schema contracts:

    +
    # contracts.ncl
    +{
    +  Server = {
    +    name | String
    +    plan | String | default = "small"
    +    zone | String | default = "de-fra1"
    +    disk_size_gb | Number | default = 25
    +    backup_enabled | Bool | default = false
    +    role | | [ 'control, 'worker, 'standalone | ] | optional
    +  }
    +
    +  Infrastructure = {
    +    servers | Array Server
    +    provider | String
    +    environment | | [ 'development, 'staging, 'production | ]
    +  }
    +}
    +
    +

    2. defaults.ncl - Default Values

    +

    Provides sensible defaults:

    +
    # defaults.ncl
    +{
    +  server = {
    +    name = "unnamed-server"
    +    plan = "small"
    +    zone = "de-fra1"
    +    disk_size_gb = 25
    +    backup_enabled = false
    +  }
    +
    +  infrastructure = {
    +    servers = []
    +    provider = "local"
    +    environment = 'development
    +  }
    +}
    +
    +

    3. main.ncl - Entry Point

    +

    Combines contracts and defaults, provides makers:

    +
    # main.ncl
    +let contracts_lib = import "./contracts.ncl" in
    +let defaults_lib = import "./defaults.ncl" in
    +
    +{
    +  # Direct access to defaults (for inspection)
    +  defaults = defaults_lib
    +
    +  # Convenience makers (90% of use cases)
    +  make_server | not_exported = fun overrides =>
    +    defaults_lib.server & overrides
    +
    +  make_infrastructure | not_exported = fun overrides =>
    +    defaults_lib.infrastructure & overrides
    +
    +  # Default instances (bare defaults)
    +  DefaultServer = defaults_lib.server
    +  DefaultInfrastructure = defaults_lib.infrastructure
    +}
    +
    +

    Usage Example

    +
    # user-infra.ncl
    +let infra_lib = import "provisioning/schemas/infrastructure/main.ncl" in
    +
    +infra_lib.make_infrastructure {
    +  provider = "upcloud"
    +  environment = 'production
    +  servers = [
    +    infra_lib.make_server {
    +      name = "web-01"
    +      plan = "medium"
    +      backup_enabled = true
    +    }
    +    infra_lib.make_server {
    +      name = "web-02"
    +      plan = "medium"
    +      backup_enabled = true
         }
       ]
     }
    +
    +

    Hybrid Interface Pattern

    +

    Records can be used both as functions (makers) and as plain data:

    +
    let config_lib = import "./config.ncl" in
    +
    +# Use as function (with overrides)
    +let custom_config = config_lib.make_server { name = "custom" } in
    +
    +# Use as plain data (defaults)
    +let default_config = config_lib.DefaultServer in
    +
    +{
    +  custom = custom_config
    +  default = default_config
    +}
    +
    +

    Record Merging Strategies

    +

    Priority Merging (Default)

    +
    let base = { a = 1, b = 2 } in
    +let override = { b = 3, c = 4 } in
    +base & override
    +# Result: { a = 1, b = 3, c = 4 }
    +
    +

    Recursive Merging

    +
    let base = {
    +  server = { cpu = 2, ram = 4 }
    +} in
    +
    +let override = {
    +  server = { ram = 8, disk = 100 }
    +} in
    +
    +std.record.merge_all [base, override]
    +# Result: { server = { cpu = 2, ram = 8, disk = 100 } }
    +
    +

    Lazy Evaluation

    +

    Nickel evaluates expressions lazily, only when needed:

    +
    let expensive_computation = std.string.join " " ["a", "b", "c"] in
    +
    +{
    +  # Only evaluated when accessed
    +  computed_field = expensive_computation
    +
    +  # Conditional evaluation
    +  conditional = if environment == 'production then
    +    expensive_computation
    +  else
    +    "dev-value"
    +}
    +
    +

    Schema Organization

    +

    The platform organizes Nickel schemas by domain:

    +
    provisioning/schemas/
    +├── main.ncl                  # Top-level entry point
    +├── config/                   # Configuration schemas
    +│   ├── settings/
    +│   │   ├── main.ncl
    +│   │   ├── contracts.ncl
    +│   │   └── defaults.ncl
    +│   └── defaults/
    +│       ├── main.ncl
    +│       ├── contracts.ncl
    +│       └── defaults.ncl
    +├── infrastructure/           # Infrastructure definitions
    +│   ├── servers/
    +│   ├── networks/
    +│   └── storage/
    +├── deployment/               # Deployment schemas
    +├── services/                 # Service configurations
    +├── operations/               # Operational schemas
    +└── generator/                # Runtime schema generation
    +
    +

    Type System

    +

    Primitive Types

    +
    {
    +  string_field : String = "text"
    +  number_field : Number = 42
    +  bool_field : Bool = true
    +}
    +
    +

    Array Types

    +
    {
    +  names : Array String = ["alice", "bob", "charlie"]
    +  ports : Array Number = [80, 443, 8080]
    +}
    +
    +

    Enum Types

    +
    {
    +  environment : | [ 'development, 'staging, 'production | ] = 'production
    +  role : | [ 'control, 'worker, 'standalone | ] = 'worker
    +}
    +
    +

    Optional Fields

    +
    {
    +  required_field : String = "value"
    +  optional_field : String | optional
    +}
    +
    +

    Default Values

    +
    {
    +  with_default : String | default = "default-value"
    +}
    +
    +

    Validation Patterns

    +

    Runtime Validation

    +
    let validate_plan = fun plan =>
    +  if plan == "small" | | plan == "medium" | | plan == "large" then
    +    plan
    +  else
    +    std.fail "Invalid plan: must be small, medium, or large"
    +in
    +
    +{
    +  plan = validate_plan "medium"
    +}
    +
    +

    Contract-Based Validation

    +
    let PlanContract = | [ 'small, 'medium, 'large | ] in
    +
    +{
    +  plan | PlanContract = 'medium
    +}
    +
    +

    Real-World Examples

    +

    Simple Server Configuration

    +
    {
    +  metadata = {
    +    name = "demo-server"
    +    provider = "upcloud"
    +    environment = 'development
    +  }
    +
    +  infrastructure = {
    +    servers = [
    +      {
    +        name = "web-01"
    +        plan = "medium"
    +        zone = "de-fra1"
    +        disk_size_gb = 50
    +        backup_enabled = true
    +        role = 'standalone
    +      }
    +    ]
    +  }
    +
    +  services = {
    +    taskservs = ["containerd", "docker"]
    +  }
    +}
    +
    +

    Kubernetes Cluster Configuration

    +
    {
    +  metadata = {
    +    name = "k8s-prod"
    +    provider = "upcloud"
    +    environment = 'production
    +  }
    +
    +  infrastructure = {
    +    servers = [
    +      {
    +        name = "k8s-control-01"
    +        plan = "medium"
    +        role = 'control
    +        zone = "de-fra1"
    +        disk_size_gb = 50
    +        backup_enabled = true
    +      }
    +      {
    +        name = "k8s-worker-01"
    +        plan = "large"
    +        role = 'worker
    +        zone = "de-fra1"
    +        disk_size_gb = 100
    +        backup_enabled = true
    +      }
    +      {
    +        name = "k8s-worker-02"
    +        plan = "large"
    +        role = 'worker
    +        zone = "de-fra1"
    +        disk_size_gb = 100
    +        backup_enabled = true
    +      }
    +    ]
    +  }
    +
    +  services = {
    +    taskservs = ["containerd", "etcd", "kubernetes", "cilium", "rook-ceph"]
    +  }
    +
    +  kubernetes = {
    +    version = "1.28.0"
    +    pod_cidr = "10.244.0.0/16"
    +    service_cidr = "10.96.0.0/12"
    +    container_runtime = "containerd"
    +    cri_socket = "/run/containerd/containerd.sock"
    +  }
    +}
    +
    +

    Multi-Provider Batch Workflow

    +
    {
    +  batch_workflow = {
    +    operations = [
    +      {
    +        id = "aws-cluster"
    +        provider = "aws"
    +        region = "us-east-1"
    +        servers = [
    +          { name = "aws-web-01", plan = "t3.medium" }
    +        ]
    +      }
    +      {
    +        id = "upcloud-cluster"
    +        provider = "upcloud"
    +        region = "de-fra1"
    +        servers = [
    +          { name = "upcloud-web-01", plan = "medium" }
    +        ]
    +        dependencies = ["aws-cluster"]
    +      }
    +    ]
    +    parallel_limit = 2
    +  }
    +}
    +
    +

    Validation Workflow

    +

    Type-Check Schema

    +
    # Check syntax and types
    +nickel typecheck infra/my-cluster.ncl
    +
    +# Export to JSON (validates during export)
    +nickel export infra/my-cluster.ncl
    +
    +# Export to TOML (generated output only)
    +nickel export --format toml infra/my-cluster.ncl > config.toml
    +
    +

    Platform Validation

    +
    # Validate against platform contracts
    +provisioning validate config --infra my-cluster
    +
    +# Verbose validation
    +provisioning validate config --verbose
    +
    +

    IDE Integration

    +

    Language Server (nickel-lang-lsp)

    +

    Install LSP for IDE support:

    +
    # Install LSP server
    +cargo install nickel-lang-lsp
    +
    +# Configure your editor (VS Code example)
    +# Install "Nickel" extension from marketplace
    +
    +

    Features:

    +
      +
    • Syntax highlighting
    • +
    • Type checking on save
    • +
    • Autocomplete
    • +
    • Hover documentation
    • +
    • Go to definition
    • +
    +

    VS Code Configuration

    +
    {
    +  "nickel.lsp.command": "nickel-lang-lsp",
    +  "nickel.lsp.args": ["--stdio"],
    +  "nickel.format.onSave": true
    +}
    +
    +

    Common Patterns

    +

    Environment-Specific Configuration

    +
    let env_configs = {
    +  development = {
    +    plan = "small"
    +    backup_enabled = false
    +  }
    +  production = {
    +    plan = "large"
    +    backup_enabled = true
    +  }
    +} in
    +
    +let environment = 'production in
    +
    +{
    +  servers = [
    +    env_configs.%{std.string.from_enum environment} & {
    +      name = "server-01"
    +    }
    +  ]
    +}
    +
    +

    Configuration Composition

    +
    let base_server = {
    +  zone = "de-fra1"
    +  backup_enabled = false
    +} in
    +
    +let prod_overrides = {
    +  backup_enabled = true
    +  disk_size_gb = 100
    +} in
    +
    +{
    +  servers = [
    +    base_server & { name = "dev-01" }
    +    base_server & prod_overrides & { name = "prod-01" }
    +  ]
    +}
    +
    +

    Migration from TOML

    +

    TOML is ONLY for generated output. Source is always Nickel.

    +
    # Generate TOML from Nickel (if needed for external tools)
    +nickel export --format toml infra/cluster.ncl > cluster.toml
    +
    +# NEVER edit cluster.toml directly - edit cluster.ncl instead
    +
    +

    Best Practices

    +
      +
    1. Use Three-File Pattern: Separate contracts, defaults, and main entry
    2. +
    3. Type Everything: Add type annotations for all fields
    4. +
    5. Validate Early: Run nickel typecheck before deployment
    6. +
    7. Use Makers: Leverage maker functions for composition
    8. +
    9. Document Contracts: Add comments explaining schema requirements
    10. +
    11. Avoid Duplication: Use record merging and defaults
    12. +
    13. Test Locally: Export and verify before deploying
    14. +
    15. Version Schemas: Track schema changes in version control
    16. +
    +

    Debugging

    +

    Type Errors

    +
    # Detailed type error messages
    +nickel typecheck --color always infra/cluster.ncl
    +
    +

    Schema Inspection

    +
    # Export to JSON for inspection
    +nickel export infra/cluster.ncl | jq '.'
    +
    +# Check specific field
    +nickel export infra/cluster.ncl | jq '.metadata'
    +
    +

    Format Code

    +
    # Auto-format Nickel files
    +nickel fmt infra/cluster.ncl
    +
    +# Check formatting without modifying
    +nickel fmt --check infra/cluster.ncl
    +
    +

    Next Steps

    + +

    Configuration System

    +

    The Provisioning platform uses a hierarchical configuration system with Nickel as the source of +truth for infrastructure definitions and TOML/YAML for application settings.

    +

    Configuration Hierarchy

    +

    Configuration is loaded in order of precedence (highest to lowest):

    +
    1. Runtime Arguments    - CLI flags (--config, --workspace, etc.)
    +2. Environment Variables - PROVISIONING_* environment variables
    +3. User Configuration   - ~/.config/provisioning/user_config.yaml
    +4. Infrastructure Config - Nickel schemas in workspace/provisioning
    +5. System Defaults      - provisioning/config/config.defaults.toml
    +
    +

    Later sources override earlier ones, allowing flexible configuration management across environments.

    +

    Configuration Files

    +

    System Defaults

    +

    Located at provisioning/config/config.defaults.toml:

    +
    [general]
    +log_level = "info"
    +workspace_root = "./workspaces"
    +
    +[providers]
    +default_provider = "local"
    +
    +[orchestrator]
    +max_parallel_tasks = 4
    +checkpoint_enabled = true
    +
    +

    User Configuration

    +

    Located at ~/.config/provisioning/user_config.yaml:

    +
    general:
    +  preferred_editor: nvim
    +  default_workspace: production
    +
    +providers:
    +  upcloud:
    +    default_zone: fi-hel1
    +  aws:
    +    default_region: eu-west-1
    +
    +

    Workspace Configuration

    +

    Nickel-based infrastructure configuration in workspace directories:

    +
    workspace/
    +├── config/
    +│   ├── main.ncl           # Workspace configuration
    +│   ├── providers.ncl      # Provider definitions
    +│   └── variables.ncl      # Workspace variables
    +├── infra/
    +│   └── servers.ncl        # Infrastructure definitions
    +└── .workspace/
    +    └── metadata.toml      # Workspace metadata
    +
    +

    Environment Variables

    +

    All configuration can be overridden via environment variables:

    +
    export PROVISIONING_LOG_LEVEL=debug
    +export PROVISIONING_WORKSPACE=production
    +export PROVISIONING_PROVIDER=upcloud
    +export PROVISIONING_DRY_RUN=true
    +
    +

    Variable naming: PROVISIONING_<SECTION>_<KEY> (uppercase with underscores).

    +

    Configuration Accessors

    +

    The platform provides 476+ configuration accessors for programmatic access:

    +
    # Get configuration value
    +provisioning config get general.log_level
    +
    +# Set configuration value (workspace-scoped)
    +provisioning config set providers.default_provider upcloud
    +
    +# List all configuration
    +provisioning config list
    +
    +# Validate configuration
    +provisioning config validate
    +
    +

    Profiles

    +

    Configuration supports profiles for different environments:

    +
    [profiles.development]
    +log_level = "debug"
    +dry_run = true
    +
    +[profiles.production]
    +log_level = "warn"
    +dry_run = false
    +checkpoint_enabled = true
    +
    +

    Activate profile:

    +
    provisioning --profile production deploy
    +
    +

    Inheritance and Overrides

    +

    Workspace configurations inherit from system defaults:

    +
    # workspace/config/main.ncl
    +let parent = import "../../provisioning/schemas/defaults.ncl" in
    +parent & {
    +  # Override specific values
    +  general.log_level = "debug",
    +  providers.default_provider = "aws",
    +}
    +
    +

    Secrets Management

    +

    Sensitive configuration is encrypted using SOPS/Age:

    +
    # Encrypt configuration
    +sops --encrypt --age <public-key> secrets.yaml > secrets.enc.yaml
    +
    +# Decrypt and use
    +provisioning deploy --secrets secrets.enc.yaml
    +
    +

    Integration with SecretumVault for enterprise secrets management (see Secrets Management).

    +

    Configuration Validation

    +

    All Nickel-based configuration is validated before use:

    +
    # Validate workspace configuration
    +provisioning config validate
    +
    +# Check schema compliance
    +nickel export --format json workspace/config/main.ncl
    +
    +

    Type-safety is mandatory - invalid configuration is rejected at load time.

    +

    Best Practices

    +
      +
    1. Use Nickel for infrastructure - Type-safe, validated infrastructure definitions
    2. +
    3. Use TOML for application settings - Simple key-value configuration
    4. +
    5. Encrypt secrets - Never commit unencrypted credentials
    6. +
    7. Document overrides - Comment why values differ from defaults
    8. +
    9. Validate before deploy - Always run config validate before deployment
    10. +
    11. Version control - Track configuration changes in Git
    12. +
    13. Profile separation - Isolate development/staging/production configs
    14. +
    +

    Troubleshooting

    +

    Configuration Not Loading

    +

    Check precedence order:

    +
    # Show effective configuration
    +provisioning config show --debug
    +
    +# Trace configuration loading
    +PROVISIONING_LOG_LEVEL=trace provisioning config list
    +
    +

    Schema Validation Failures

    +
    # Check Nickel syntax
    +nickel typecheck workspace/config/main.ncl
    +
    +# Export and inspect
    +nickel export workspace/config/main.ncl
    +
    +

    Environment Variable Issues

    +
    # List all PROVISIONING_* variables
    +env | grep PROVISIONING_
    +
    +# Clear all provisioning env vars
    +unset $(env | grep PROVISIONING_ | cut -d= -f1 | xargs)
    +
    +

    References

    + +

    Schemas Reference

    +

    Provisioning uses Nickel schemas for type-safe infrastructure definitions. This reference documents the schema organization, structure, and usage patterns.

    +

    Schema Organization

    +

    Schemas are organized in provisioning/schemas/:

    +
    provisioning/schemas/
    +├── main.ncl                 # Root schema entry point
    +├── lib/
    +│   ├── contracts.ncl        # Type contracts and validators
    +│   ├── functions.ncl        # Helper functions
    +│   └── types.ncl            # Common type definitions
    +├── config/
    +│   ├── providers.ncl        # Provider configuration schemas
    +│   ├── settings.ncl         # Platform settings schemas
    +│   └── workspace.ncl        # Workspace configuration schemas
    +├── infrastructure/
    +│   ├── servers.ncl          # Server resource schemas
    +│   ├── networks.ncl         # Network resource schemas
    +│   └── storage.ncl          # Storage resource schemas
    +├── operations/
    +│   ├── deployment.ncl       # Deployment workflow schemas
    +│   └── lifecycle.ncl        # Resource lifecycle schemas
    +├── services/
    +│   ├── kubernetes.ncl       # Kubernetes schemas
    +│   └── databases.ncl        # Database schemas
    +└── integrations/
    +    ├── cloud_providers.ncl  # Cloud provider integrations
    +    └── external_services.ncl # External service integrations
    +
    +

    Core Contracts

    +

    Server Contract

    +
    let Server = {
    +  name
    +    | doc "Server identifier (must be unique)"
    +    | String,
    +
    +  plan
    +    | doc "Server size (small, medium, large, xlarge)"
    +    | | [ 'small, 'medium, 'large, 'xlarge | ],
    +
    +  provider
    +    | doc "Cloud provider (upcloud, aws, local)"
    +    | | [ 'upcloud, 'aws, 'local | ],
    +
    +  zone
    +    | doc "Availability zone"
    +    | String
    +    | optional,
    +
    +  ip_address
    +    | doc "Public IP address"
    +    | String
    +    | optional,
    +
    +  storage
    +    | doc "Storage configuration"
    +    | Array StorageConfig
    +    | default = [],
    +
    +  metadata
    +    | doc "Custom metadata tags"
    +    | {_ : String}
    +    | default = {},
    +}
    +
    +

    Network Contract

    +
    let Network = {
    +  name
    +    | doc "Network identifier"
    +    | String,
    +
    +  cidr
    +    | doc "CIDR block (e.g., 10.0.0.0/16)"
    +    | String
    +    | std.string.is_match_regex "^([0-9]{1,3}\\.){3}[0-9]{1,3}/[0-9]{1,2}$",
    +
    +  subnets
    +    | doc "Subnet definitions"
    +    | Array Subnet,
    +
    +  routing
    +    | doc "Routing configuration"
    +    | RoutingConfig
    +    | optional,
    +}
    +
    +

    Storage Contract

    +
    let StorageConfig = {
    +  size_gb
    +    | doc "Storage size in GB"
    +    | Number
    +    | std.number.greater 0,
    +
    +  type
    +    | doc "Storage type"
    +    | | [ 'ssd, 'hdd, 'nvme | ],
    +
    +  mount_point
    +    | doc "Mount path"
    +    | String
    +    | optional,
    +
    +  encrypted
    +    | doc "Enable encryption"
    +    | Bool
    +    | default = false,
    +}
    +
    +

    Workspace Schema

    +

    Workspace configuration schema:

    +
    let WorkspaceConfig = {
    +  name
    +    | doc "Workspace identifier"
    +    | String,
    +
    +  environment
    +    | doc "Environment type"
    +    | | [ 'development, 'staging, 'production | ],
    +
    +  providers
    +    | doc "Enabled providers"
    +    | Array | [ 'upcloud, 'aws, 'local | ]
    +    | default = ['local],
    +
    +  infrastructure
    +    | doc "Infrastructure definitions"
    +    | {
    +        servers | Array Server | default = [],
    +        networks | Array Network | default = [],
    +        storage | Array StorageConfig | default = [],
    +      },
    +
    +  settings
    +    | doc "Workspace-specific settings"
    +    | {_ : _}
    +    | default = {},
    +}
    +
    +

    Provider Schemas

    +

    UpCloud Provider

    +
    let UpCloudConfig = {
    +  username
    +    | doc "UpCloud username"
    +    | String,
    +
    +  password
    +    | doc "UpCloud password (encrypted)"
    +    | String,
    +
    +  default_zone
    +    | doc "Default zone"
    +    | | [ 'fi-hel1, 'fi-hel2, 'de-fra1, 'uk-lon1, 'us-chi1, 'us-sjo1 | ]
    +    | default = 'fi-hel1,
    +
    +  timeout_seconds
    +    | doc "API timeout"
    +    | Number
    +    | default = 300,
    +}
    +
    +

    AWS Provider

    +
    let AWSConfig = {
    +  access_key_id
    +    | doc "AWS access key"
    +    | String,
    +
    +  secret_access_key
    +    | doc "AWS secret key (encrypted)"
    +    | String,
    +
    +  default_region
    +    | doc "Default AWS region"
    +    | String
    +    | default = "eu-west-1",
    +
    +  assume_role_arn
    +    | doc "IAM role ARN"
    +    | String
    +    | optional,
    +}
    +
    +

    Service Schemas

    +

    Kubernetes Schema

    +
    let KubernetesCluster = {
    +  name
    +    | doc "Cluster name"
    +    | String,
    +
    +  version
    +    | doc "Kubernetes version"
    +    | String
    +    | std.string.is_match_regex "^v[0-9]+\\.[0-9]+\\.[0-9]+$",
    +
    +  control_plane
    +    | doc "Control plane configuration"
    +    | {
    +        nodes | Number | std.number.greater 0,
    +        plan | | [ 'small, 'medium, 'large | ],
    +      },
    +
    +  workers
    +    | doc "Worker node pools"
    +    | Array NodePool,
    +
    +  networking
    +    | doc "Network configuration"
    +    | {
    +        pod_cidr | String,
    +        service_cidr | String,
    +        cni | | [ 'calico, 'cilium, 'flannel | ] | default = 'cilium,
    +      },
    +
    +  addons
    +    | doc "Cluster addons"
    +    | Array | [ 'metrics-server, 'ingress-nginx, 'cert-manager | ]
    +    | default = [],
    +}
    +
    +

    Validation Functions

    +

    Custom validation functions in lib/contracts.ncl:

    +
    let is_valid_hostname = fun name =>
    +  std.string.is_match_regex "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" name
    +in
    +
    +let is_valid_port = fun port =>
    +  std.number.is_integer port && port >= 1 && port <= 65535
    +in
    +
    +let is_valid_email = fun email =>
    +  std.string.is_match_regex "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$" email
    +in
    +
    +

    Merging and Composition

    +

    Schemas support composition through record merging:

    +
    let base_server = {
    +  plan = 'medium,
    +  provider = 'upcloud,
    +  storage = [],
    +}
    +
    +let production_server = base_server & {
    +  plan = 'large,
    +  storage = [{size_gb = 100, type = 'ssd}],
    +}
    +
    +

    Contract Enforcement

    +

    Type checking is enforced at load time:

    +
    # Typecheck schema
    +nickel typecheck provisioning/schemas/main.ncl
    +
    +# Export with validation
    +nickel export --format json workspace/infra/servers.ncl
    +
    +

    Invalid configurations are rejected before deployment.

    +

    Best Practices

    +
      +
    1. Define contracts first - Start with type contracts before implementation
    2. +
    3. Use enums for choices - Leverage | [ 'option1, 'option2 | ] for fixed sets
    4. +
    5. Document everything - Use |doc "description" annotations
    6. +
    7. Validate early - Run nickel typecheck before deployment
    8. +
    9. Compose, don’t duplicate - Use record merging for common patterns
    10. +
    11. Version schemas - Track schema changes alongside infrastructure
    12. +
    13. Test contracts - Validate edge cases and constraints
    14. +
    +

    References

    + +

    Providers

    +

    Providers are abstraction layers for interacting with cloud platforms and local infrastructure. +Provisioning supports multiple providers through a unified interface.

    +

    Available Providers

    +

    UpCloud Provider

    +

    Production-ready cloud provider for European infrastructure.

    +

    Configuration:

    +
    {
    +  providers.upcloud = {
    +    username = "your-username",
    +    password = std.secret "UPCLOUD_PASSWORD",
    +    default_zone = 'fi-hel1,
    +    timeout_seconds = 300,
    +  }
    +}
    +
    +

    Supported zones:

    +
      +
    • fi-hel1, fi-hel2 - Helsinki, Finland
    • +
    • de-fra1 - Frankfurt, Germany
    • +
    • uk-lon1 - London, UK
    • +
    • us-chi1 - Chicago, USA
    • +
    • us-sjo1 - San Jose, USA
    • +
    +

    Resources: Servers, networks, storage, firewalls, load balancers

    +

    AWS Provider

    +

    Amazon Web Services integration for global cloud infrastructure.

    +

    Configuration:

    +
    {
    +  providers.aws = {
    +    access_key_id = std.secret "AWS_ACCESS_KEY_ID",
    +    secret_access_key = std.secret "AWS_SECRET_ACCESS_KEY",
    +    default_region = "eu-west-1",
    +  }
    +}
    +
    +

    Resources: EC2, VPCs, EBS, security groups, RDS, S3

    +

    Local Provider

    +

    Local infrastructure for development and testing.

    +

    Configuration:

    +
    {
    +  providers.local = {
    +    backend = 'libvirt,  # or 'docker, 'podman
    +    storage_pool = "/var/lib/libvirt/images",
    +  }
    +}
    +
    +

    Backends: libvirt (KVM/QEMU), docker, podman

    +

    Multi-Cloud Deployments

    +

    Deploy infrastructure across multiple providers:

    +
    {
    +  servers = [
    +    {name = "web-frontend", provider = 'upcloud, zone = "fi-hel1", plan = 'medium},
    +    {name = "api-backend", provider = 'aws, zone = "eu-west-1a", plan = 't3.large},
    +  ]
    +}
    +
    +

    Provider Abstraction

    +

    Abstract resource definitions work across providers:

    +
    let server_config = fun name provider => {
    +  name = name,
    +  provider = provider,
    +  plan = 'medium,  # Automatically translated per provider
    +  storage = [{size_gb = 50, type = 'ssd}],
    +}
    +
    +

    Plan translation:

    +
    + + + + +
    AbstractUpCloudAWSLocal
    small1xCPU-1GBt3.micro1 vCPU
    medium2xCPU-4GBt3.medium2 vCPU
    large4xCPU-8GBt3.large4 vCPU
    xlarge8xCPU-16GBt3.xlarge8 vCPU
    +
    +

    Best Practices

    +
      +
    1. Use abstract plans - Avoid provider-specific instance types
    2. +
    3. Encrypt credentials - Always use encrypted secrets for API keys
    4. +
    5. Test locally first - Validate configurations with local provider
    6. +
    7. Document provider choices - Comment why specific providers are used
    8. +
    9. Monitor costs - Track cloud provider spending
    10. +
    +

    References

    + +

    Task Services

    +

    Task services are installable infrastructure components that provide specific functionality. +Provisioning includes 30+ task services for databases, orchestration, monitoring, and more.

    +

    Categories

    +

    Kubernetes & Container Orchestration

    +

    kubernetes - Complete Kubernetes cluster deployment

    +
      +
    • Control plane setup
    • +
    • Worker node pools
    • +
    • CNI configuration (Calico, Cilium, Flannel)
    • +
    • Addon management (metrics-server, ingress-nginx, cert-manager)
    • +
    +

    containerd - Container runtime configuration

    +
      +
    • Systemd integration
    • +
    • Storage driver configuration
    • +
    • Runtime class support
    • +
    +

    docker - Docker engine installation

    +
      +
    • Docker Compose integration
    • +
    • Registry configuration
    • +
    +

    Databases

    +

    postgresql - PostgreSQL database server

    +
      +
    • Replication setup
    • +
    • Backup automation
    • +
    • Performance tuning
    • +
    +

    mysql - MySQL/MariaDB deployment

    +
      +
    • Cluster configuration
    • +
    • Backup strategies
    • +
    +

    mongodb - MongoDB database

    +
      +
    • Replica sets
    • +
    • Sharding configuration
    • +
    +

    redis - Redis in-memory store

    +
      +
    • Persistence configuration
    • +
    • Cluster mode
    • +
    +

    Storage

    +

    rook-ceph - Cloud-native storage orchestrator

    +
      +
    • Block storage (RBD)
    • +
    • Object storage (S3-compatible)
    • +
    • Shared filesystem (CephFS)
    • +
    +

    minio - S3-compatible object storage

    +
      +
    • Distributed mode
    • +
    • Versioning and lifecycle policies
    • +
    +

    Monitoring & Observability

    +

    prometheus - Metrics collection and alerting

    +
      +
    • Service discovery
    • +
    • Alerting rules
    • +
    • Long-term storage
    • +
    +

    grafana - Metrics visualization

    +
      +
    • Dashboard provisioning
    • +
    • Data source configuration
    • +
    +

    loki - Log aggregation system

    +
      +
    • Log collection
    • +
    • Query language
    • +
    +

    Networking

    +

    cilium - eBPF-based networking and security

    +
      +
    • Network policies
    • +
    • Load balancing
    • +
    • Service mesh capabilities
    • +
    +

    calico - Network policy engine

    +
      +
    • BGP networking
    • +
    • IP-in-IP tunneling
    • +
    +

    nginx - Web server and reverse proxy

    +
      +
    • Load balancing
    • +
    • TLS termination
    • +
    +

    Security

    +

    vault - Secrets management (HashiCorp Vault)

    +
      +
    • Secret storage
    • +
    • Dynamic secrets
    • +
    • Encryption as a service
    • +
    +

    cert-manager - TLS certificate automation

    +
      +
    • Let’s Encrypt integration
    • +
    • Certificate renewal
    • +
    +

    Task Service Definition

    +

    Task services are defined in provisioning/extensions/taskservs/:

    +
    taskservs/
    +└── kubernetes/
    +    ├── service.ncl           # Service schema
    +    ├── install.nu            # Installation script
    +    ├── configure.nu          # Configuration script
    +    ├── health-check.nu       # Health validation
    +    └── README.md
    +
    +

    Using Task Services

    +

    Installation

    +
    {
    +  task_services = [
    +    {
    +      name = "kubernetes",
    +      version = "v1.28.0",
    +      config = {
    +        control_plane = {nodes = 3, plan = 'medium},
    +        workers = [{name = "pool-1", nodes = 3, plan = 'large}],
    +        networking = {cni = 'cilium},
    +      }
    +    },
    +    {
    +      name = "prometheus",
    +      version = "latest",
    +      config = {retention = "30d", storage_size_gb = 100}
    +    }
    +  ]
    +}
    +
    +

    CLI Commands

    +
    # List available task services
    +provisioning taskserv list
    +
    +# Show task service details
    +provisioning taskserv show kubernetes
    +
    +# Install task service
    +provisioning taskserv install kubernetes
    +
    +# Check task service health
    +provisioning taskserv health kubernetes
    +
    +# Uninstall task service
    +provisioning taskserv uninstall kubernetes
    +
    +

    Custom Task Services

    +

    Create custom task services:

    +
    provisioning/extensions/taskservs/my-service/
    +├── service.ncl           # Service definition
    +├── install.nu            # Installation logic
    +├── configure.nu          # Configuration logic
    +├── health-check.nu       # Health checks
    +└── README.md
    +
    +

    service.ncl schema:

    +
    {
    +  name = "my-service",
    +  version = "1.0.0",
    +  description = "Custom service description",
    +  dependencies = ["kubernetes"],  # Optional dependencies
    +  config_schema = {
    +    port | Number | default = 8080,
    +    replicas | Number | default = 3,
    +  }
    +}
    +
    +

    install.nu implementation:

    +
    export def "taskserv install" [config: record] {
    +  # Installation logic
    +  print $"Installing ($config.name)..."
    +
    +  # Deploy resources
    +  kubectl apply -f deployment.yaml
    +
    +  {status: "installed"}
    +}
    +
    +

    Task Service Lifecycle

    +
      +
    1. Validation - Check dependencies and configuration
    2. +
    3. Installation - Execute install script
    4. +
    5. Configuration - Apply service configuration
    6. +
    7. Health Check - Verify service is running
    8. +
    9. Ready - Service available for use
    10. +
    +

    Dependencies

    +

    Task services can declare dependencies:

    +
    {
    +  name = "grafana",
    +  dependencies = ["prometheus"],  # Installed first
    +}
    +
    +

    Provisioning automatically resolves dependency order.

    +

    Health Checks

    +

    Each task service provides health validation:

    +
    export def "taskserv health" [] {
    +  let pods = (kubectl get pods -l app=my-service -o json | from json)
    +
    +  if ($pods.items | all | { | p $p.status.phase == "Running"}) {
    +    {status: "healthy"}
    +  } else {
    +    {status: "unhealthy", reason: "pods not running"}
    +  }
    +}
    +
    +

    Best Practices

    +
      +
    1. Define schemas - Use Nickel schemas for task service configuration
    2. +
    3. Declare dependencies - Explicit dependency declaration
    4. +
    5. Idempotent installs - Installation should be repeatable
    6. +
    7. Health checks - Implement comprehensive health validation
    8. +
    9. Version pinning - Specify exact versions for reproducibility
    10. +
    11. Document configuration - Provide clear configuration examples
    12. +
    +

    References

    + +

    Clusters

    +

    Clusters are coordinated groups of services deployed together. Provisioning provides cluster definitions for common deployment patterns.

    +

    Available Clusters

    +

    Web Cluster

    +

    Production-ready web application deployment with load balancing, TLS, and monitoring.

    +

    Components:

    +
      +
    • Nginx load balancer
    • +
    • Application servers (configurable count)
    • +
    • PostgreSQL database
    • +
    • Redis cache
    • +
    • Prometheus monitoring
    • +
    • Let’s Encrypt TLS certificates
    • +
    +

    Configuration:

    +
    {
    +  clusters = [{
    +    name = "web-production",
    +    type = 'web,
    +    config = {
    +      app_servers = 3,
    +      load_balancer = {
    +        public_ip = true,
    +        tls_enabled = true,
    +        domain = "example.com"
    +      },
    +      database = {
    +        size = 'medium,
    +        replicas = 2,
    +        backup_enabled = true
    +      },
    +      cache = {
    +        size = 'small,
    +        persistence = true
    +      }
    +    }
    +  }]
    +}
    +
    +

    OCI Registry Cluster

    +

    Private container registry with S3-compatible storage and authentication.

    +

    Components:

    +
      +
    • Harbor registry
    • +
    • MinIO object storage
    • +
    • PostgreSQL database
    • +
    • Redis cache
    • +
    • TLS termination
    • +
    +

    Configuration:

    +
    {
    +  clusters = [{
    +    name = "registry-private",
    +    type = 'oci_registry,
    +    config = {
    +      domain = "registry.example.com",
    +      storage = {
    +        backend = 'minio,
    +        size_gb = 500,
    +        replicas = 3
    +      },
    +      authentication = {
    +        method = 'ldap,  # or 'database, 'oidc
    +        admin_password = std.secret "REGISTRY_ADMIN_PASSWORD"
    +      }
    +    }
    +  }]
    +}
    +
    +

    Kubernetes Cluster

    +

    Multi-node Kubernetes cluster with networking, storage, and monitoring.

    +

    Components:

    +
      +
    • Control plane nodes
    • +
    • Worker node pools
    • +
    • Cilium CNI
    • +
    • Rook-Ceph storage
    • +
    • Metrics server
    • +
    • Ingress controller
    • +
    +

    Configuration:

    +
    {
    +  clusters = [{
    +    name = "k8s-production",
    +    type = 'kubernetes,
    +    config = {
    +      control_plane = {
    +        nodes = 3,
    +        plan = 'medium,
    +        high_availability = true
    +      },
    +      node_pools = [
    +        {
    +          name = "general",
    +          nodes = 5,
    +          plan = 'large,
    +          labels = {workload = "general"}
    +        },
    +        {
    +          name = "gpu",
    +          nodes = 2,
    +          plan = 'xlarge,
    +          labels = {workload = "ml"}
    +        }
    +      ],
    +      networking = {
    +        cni = 'cilium,
    +        pod_cidr = "10.42.0.0/16",
    +        service_cidr = "10.43.0.0/16"
    +      },
    +      storage = {
    +        provider = 'rook-ceph,
    +        default_storage_class = "ceph-block"
    +      }
    +    }
    +  }]
    +}
    +
    +

    Cluster Deployment

    +

    CLI Commands

    +
    # List available cluster types
    +provisioning cluster types
    +
    +# Show cluster configuration template
    +provisioning cluster template web
    +
    +# Deploy cluster
    +provisioning cluster deploy web-production
    +
    +# Check cluster health
    +provisioning cluster health web-production
    +
    +# Scale cluster
    +provisioning cluster scale web-production --app-servers 5
    +
    +# Destroy cluster
    +provisioning cluster destroy web-production
    +
    +

    Deployment Lifecycle

    +
      +
    1. Validation - Validate cluster configuration
    2. +
    3. Infrastructure - Provision servers, networks, storage
    4. +
    5. Services - Install and configure task services
    6. +
    7. Integration - Connect services together
    8. +
    9. Health Check - Verify cluster health
    10. +
    11. Ready - Cluster operational
    12. +
    +

    Cluster Orchestration

    +

    Clusters use dependency graphs for orchestration:

    +
    Web Cluster Dependency Graph:
    +
    +servers ──┐
    +          ├──> database ──┐
    +networks ─┘               ├──> app_servers ──> load_balancer
    +                          │
    +                          ├──> cache ──────────┘
    +                          │
    +                          └──> monitoring
    +
    +

    Services are deployed in dependency order with parallel execution where possible.

    +

    Custom Cluster Definitions

    +

    Create custom cluster types:

    +
    provisioning/extensions/clusters/
    +└── my-cluster/
    +    ├── cluster.ncl           # Cluster definition
    +    ├── deploy.nu             # Deployment script
    +    ├── health-check.nu       # Health validation
    +    └── README.md
    +
    +

    cluster.ncl schema:

    +
    {
    +  name = "my-cluster",
    +  version = "1.0.0",
    +  description = "Custom cluster type",
    +  components = {
    +    servers = [{name = "app", count = 3, plan = 'medium}],
    +    services = ["nginx", "postgresql", "redis"],
    +  },
    +  config_schema = {
    +    domain | String,
    +    replicas | Number | default = 3,
    +  }
    +}
    +
    +

    Cluster Management

    +

    Scaling

    +

    Scale cluster components:

    +
    # Scale application servers
    +provisioning cluster scale web-production --component app_servers --count 5
    +
    +# Scale database replicas
    +provisioning cluster scale web-production --component database --replicas 3
    +
    +

    Updates

    +

    Rolling updates without downtime:

    +
    # Update application version
    +provisioning cluster update web-production --app-version 2.0.0
    +
    +# Update infrastructure (e.g., server plans)
    +provisioning cluster update web-production --plan large
    +
    +

    Backup and Recovery

    +
    # Create cluster backup
    +provisioning cluster backup web-production
    +
    +# Restore from backup
    +provisioning cluster restore web-production --backup 2024-01-15-snapshot
    +
    +# List backups
    +provisioning cluster backups web-production
    +
    +

    Monitoring

    +

    Cluster health monitoring:

    +
    # Overall cluster health
    +provisioning cluster health web-production
    +
    +# Component health
    +provisioning cluster health web-production --component database
    +
    +# Metrics
    +provisioning cluster metrics web-production
    +
    +

    Health checks validate:

    +
      +
    • All services running
    • +
    • Network connectivity
    • +
    • Storage availability
    • +
    • Resource utilization
    • +
    +

    Best Practices

    +
      +
    1. Use predefined clusters - Leverage built-in cluster types
    2. +
    3. Define dependencies - Explicit service dependencies
    4. +
    5. Implement health checks - Comprehensive validation
    6. +
    7. Plan for scaling - Design clusters for horizontal scaling
    8. +
    9. Automate backups - Regular backup schedules
    10. +
    11. Monitor resources - Track resource utilization
    12. +
    13. Test disaster recovery - Validate backup/restore procedures
    14. +
    +

    References

    + +

    Batch Workflows

    +

    Batch workflows orchestrate complex multi-step operations across multiple clouds and services with +dependency resolution, parallel execution, and checkpoint recovery.

    +

    Overview

    +

    Batch workflows enable:

    +
      +
    • Multi-cloud infrastructure orchestration
    • +
    • Complex deployment pipelines
    • +
    • Dependency-driven execution
    • +
    • Parallel task execution
    • +
    • Checkpoint and recovery
    • +
    • Rollback on failures
    • +
    +

    Workflow Definition

    +

    Workflows are defined in Nickel:

    +
    {
    +  workflows = [{
    +    name = "multi-cloud-deployment",
    +    description = "Deploy application across UpCloud and AWS",
    +    steps = [
    +      {
    +        name = "provision-upcloud",
    +        type = 'provision,
    +        provider = 'upcloud,
    +        resources = {
    +          servers = [{name = "web-eu", plan = 'medium, zone = "fi-hel1"}]
    +        }
    +      },
    +      {
    +        name = "provision-aws",
    +        type = 'provision,
    +        provider = 'aws,
    +        resources = {
    +          servers = [{name = "web-us", plan = 't3.medium, zone = "us-east-1a"}]
    +        }
    +      },
    +      {
    +        name = "deploy-application",
    +        type = 'task,
    +        depends_on = ["provision-upcloud", "provision-aws"],
    +        tasks = ["install-kubernetes", "deploy-app"]
    +      },
    +      {
    +        name = "configure-dns",
    +        type = 'configure,
    +        depends_on = ["deploy-application"],
    +        config = {
    +          records = [
    +            {name = "eu.example.com", target = "web-eu"},
    +            {name = "us.example.com", target = "web-us"}
    +          ]
    +        }
    +      }
    +    ],
    +    rollback_on_failure = true,
    +    checkpoint_enabled = true
    +  }]
    +}
    +
    +

    Dependency Resolution

    +

    Workflows automatically resolve dependencies:

    +
    Execution Graph:
    +
    +provision-upcloud ──┐
    +                    ├──> deploy-application ──> configure-dns
    +provision-aws ──────┘
    +
    +

    Steps provision-upcloud and provision-aws run in parallel. deploy-application waits for both to complete.

    +

    Step Types

    +

    Provision Steps

    +

    Create infrastructure resources:

    +
    {
    +  name = "create-servers",
    +  type = 'provision,
    +  provider = 'upcloud,
    +  resources = {
    +    servers = [...],
    +    networks = [...],
    +    storage = [...]
    +  }
    +}
    +
    +

    Task Steps

    +

    Execute task services:

    +
    {
    +  name = "install-k8s",
    +  type = 'task,
    +  tasks = ["kubernetes", "helm", "monitoring"]
    +}
    +
    +

    Configure Steps

    +

    Apply configuration changes:

    +
    {
    +  name = "setup-networking",
    +  type = 'configure,
    +  config = {
    +    firewalls = [...],
    +    routes = [...],
    +    dns = [...]
    +  }
    +}
    +
    +

    Validate Steps

    +

    Verify conditions before proceeding:

    +
    {
    +  name = "health-check",
    +  type = 'validate,
    +  checks = [
    +    {type = 'http, url = " [https://app.example.com",](https://app.example.com",) expected_status = 200},
    +    {type = 'command, command = "kubectl get nodes", expected_output = "Ready"}
    +  ]
    +}
    +
    +

    Execution Control

    +

    Parallel Execution

    +

    Steps without dependencies run in parallel:

    +
    steps = [
    +  {name = "provision-eu", ...},  # Runs in parallel
    +  {name = "provision-us", ...},  # Runs in parallel
    +  {name = "provision-asia", ...} # Runs in parallel
    +]
    +
    +

    Configure parallelism:

    +
    {
    +  max_parallel_tasks = 4,  # Max concurrent steps
    +  timeout_seconds = 3600   # Step timeout
    +}
    +
    +

    Conditional Execution

    +

    Execute steps based on conditions:

    +
    {
    +  name = "scale-up",
    +  type = 'task,
    +  condition = {
    +    type = 'expression,
    +    expression = "cpu_usage > 80"
    +  }
    +}
    +
    +

    Retry Logic

    +

    Automatically retry failed steps:

    +
    {
    +  name = "deploy-app",
    +  type = 'task,
    +  retry = {
    +    max_attempts = 3,
    +    backoff = 'exponential,  # or 'linear, 'constant
    +    initial_delay_seconds = 10
    +  }
    +}
    +
    +

    Checkpoint and Recovery

    +

    Checkpointing

    +

    Workflows automatically checkpoint state:

    +
    # Enable checkpointing
    +provisioning workflow run multi-cloud --checkpoint
    +
    +# Checkpoint saved at each step completion
    +
    +

    Recovery

    +

    Resume from last successful checkpoint:

    +
    # Workflow failed at step 3
    +# Resume from checkpoint
    +provisioning workflow resume multi-cloud --from-checkpoint latest
    +
    +# Resume from specific checkpoint
    +provisioning workflow resume multi-cloud --checkpoint-id abc123
    +
    +

    Rollback

    +

    Automatic Rollback

    +

    Rollback on failure:

    +
    {
    +  rollback_on_failure = true,
    +  rollback_steps = [
    +    {name = "destroy-resources", type = 'destroy},
    +    {name = "restore-config", type = 'restore}
    +  ]
    +}
    +
    +

    Manual Rollback

    +
    # Rollback to previous state
    +provisioning workflow rollback multi-cloud
    +
    +# Rollback to specific checkpoint
    +provisioning workflow rollback multi-cloud --checkpoint-id abc123
    +
    +

    Workflow Management

    +

    CLI Commands

    +
    # List workflows
    +provisioning workflow list
    +
    +# Show workflow details
    +provisioning workflow show multi-cloud
    +
    +# Run workflow
    +provisioning workflow run multi-cloud
    +
    +# Check workflow status
    +provisioning workflow status multi-cloud
    +
    +# View workflow logs
    +provisioning workflow logs multi-cloud
    +
    +# Cancel running workflow
    +provisioning workflow cancel multi-cloud
    +
    +

    Workflow State

    +

    Workflows track execution state:

    +
      +
    • pending - Not yet started
    • +
    • running - Currently executing
    • +
    • completed - Successfully finished
    • +
    • failed - Execution failed
    • +
    • rolling_back - Performing rollback
    • +
    • cancelled - Manually cancelled
    • +
    +

    Advanced Features

    +

    Dynamic Workflows

    +

    Generate workflows programmatically:

    +
    let regions = ["fi-hel1", "de-fra1", "uk-lon1"] in
    +{
    +  steps = std.array.map (fun region => {
    +    name = "provision-" ++ region,
    +    type = 'provision,
    +    resources = {servers = [{zone = region, ...}]}
    +  }) regions
    +}
    +
    +

    Workflow Templates

    +

    Reusable workflow templates:

    +
    let DeploymentTemplate = fun app_name regions => {
    +  name = "deploy-" ++ app_name,
    +  steps = std.array.map (fun region => {
    +    name = "deploy-" ++ region,
    +    type = 'task,
    +    tasks = ["deploy-app"],
    +    config = {app_name = app_name, region = region}
    +  }) regions
    +}
    +
    +# Use template
    +{
    +  workflows = [
    +    DeploymentTemplate "frontend" ["eu", "us"],
    +    DeploymentTemplate "backend" ["eu", "us", "asia"]
    +  ]
    +}
    +
    +

    Notifications

    +

    Send notifications on workflow events:

    +
    {
    +  notifications = {
    +    on_success = {
    +      type = 'slack,
    +      webhook_url = std.secret "SLACK_WEBHOOK",
    +      message = "Deployment completed successfully"
    +    },
    +    on_failure = {
    +      type = 'email,
    +      to = ["[ops@example.com](mailto:ops@example.com)"],
    +      subject = "Workflow failed"
    +    }
    +  }
    +}
    +
    +

    Best Practices

    +
      +
    1. Define dependencies explicitly - Clear dependency graph
    2. +
    3. Enable checkpointing - Critical for long-running workflows
    4. +
    5. Implement rollback - Always have rollback strategy
    6. +
    7. Use validation steps - Verify state before proceeding
    8. +
    9. Configure retries - Handle transient failures
    10. +
    11. Monitor execution - Track workflow progress
    12. +
    13. Test workflows - Validate with dry-run mode
    14. +
    +

    Troubleshooting

    +

    Workflow Stuck

    +
    # Check workflow status
    +provisioning workflow status <workflow> --verbose
    +
    +# View logs
    +provisioning workflow logs <workflow> --tail 100
    +
    +# Cancel and restart
    +provisioning workflow cancel <workflow>
    +provisioning workflow run <workflow>
    +
    +

    Step Failures

    +
    # View failed step details
    +provisioning workflow show <workflow> --step <step-name>
    +
    +# Retry failed step
    +provisioning workflow retry <workflow> --step <step-name>
    +
    +# Skip failed step
    +provisioning workflow skip <workflow> --step <step-name>
    +
    +

    References

    + +

    Version Management

    +

    Nickel-based version management for infrastructure components, providers, and task services ensures consistent, reproducible deployments.

    +

    Overview

    +

    Version management in Provisioning:

    +
      +
    • Nickel schemas define version constraints
    • +
    • Semantic versioning (semver) support
    • +
    • Version locking for reproducibility
    • +
    • Compatibility validation
    • +
    • Update strategies
    • +
    +

    Version Constraints

    +

    Define version requirements in Nickel:

    +
    {
    +  task_services = [
    +    {
    +      name = "kubernetes",
    +      version = ">=1.28.0, <1.30.0",  # Range constraint
    +    },
    +    {
    +      name = "prometheus",
    +      version = "~2.45.0",  # Patch versions allowed
    +    },
    +    {
    +      name = "grafana",
    +      version = "^10.0.0",  # Minor versions allowed
    +    },
    +    {
    +      name = "nginx",
    +      version = "1.25.3",  # Exact version
    +    }
    +  ]
    +}
    +
    +

    Constraint Operators

    +
    + + + + + + + + +
    OperatorMeaningExampleMatches
    =Exact version=1.28.01.28.0 only
    >=Greater or equal>=1.28.01.28.0, 1.29.0, 2.0.0
    <=Less or equal<=1.30.01.28.0, 1.30.0
    >Greater than>1.28.01.29.0, 2.0.0
    <Less than<1.30.01.28.0, 1.29.0
    ~Patch updates~1.28.01.28.x
    ^Minor updates^1.28.01.x.x
    ,AND constraint>=1.28, <1.301.28.x, 1.29.x
    +
    +

    Version Locking

    +

    Generate lock file for reproducible deployments:

    +
    # Generate lock file
    +provisioning version lock
    +
    +# Creates versions.lock.ncl with exact versions
    +
    +

    versions.lock.ncl:

    +
    {
    +  task_services = {
    +    kubernetes = "1.28.3",
    +    prometheus = "2.45.2",
    +    grafana = "10.0.5",
    +    nginx = "1.25.3"
    +  },
    +  providers = {
    +    upcloud = "1.2.0",
    +    aws = "3.5.1"
    +  }
    +}
    +
    +

    Use lock file:

    +
    let locked = import "versions.lock.ncl" in
    +{
    +  task_services = [
    +    {name = "kubernetes", version = locked.task_services.kubernetes}
    +  ]
    +}
    +
    +

    Version Updates

    +

    Check for Updates

    +
    # Check available updates
    +provisioning version check
    +
    +# Show outdated components
    +provisioning version outdated
    +
    +

    Output:

    +
    Component    Current  Latest   Update Available
    +kubernetes   1.28.0   1.29.2   Minor update
    +prometheus   2.45.0   2.47.0   Minor update
    +grafana      10.0.0   11.0.0   Major update (breaking)
    +
    +

    Update Strategies

    +

    Conservative (patch only):

    +
    {
    +  update_policy = 'conservative,  # Only patch updates
    +}
    +
    +

    Moderate (minor updates):

    +
    {
    +  update_policy = 'moderate,  # Patch + minor updates
    +}
    +
    +

    Aggressive (all updates):

    +
    {
    +  update_policy = 'aggressive,  # All updates including major
    +}
    +
    +

    Performing Updates

    +
    # Update all components (respecting constraints)
    +provisioning version update
    +
    +# Update specific component
    +provisioning version update kubernetes
    +
    +# Update to specific version
    +provisioning version update kubernetes --version 1.29.0
    +
    +# Dry-run (show what would update)
    +provisioning version update --dry-run
    +
    +

    Compatibility Validation

    +

    Validate version compatibility:

    +
    # Check compatibility
    +provisioning version validate
    +
    +# Check specific component
    +provisioning version validate kubernetes
    +
    +

    Compatibility rules defined in schemas:

    +
    {
    +  name = "grafana",
    +  version = "10.0.0",
    +  compatibility = {
    +    prometheus = ">=2.40.0",  # Requires Prometheus 2.40+
    +    kubernetes = ">=1.24.0"   # Requires Kubernetes 1.24+
    +  }
    +}
    +
    +

    Version Resolution

    +

    When multiple constraints conflict, resolution strategy:

    +
      +
    1. Exact version - Highest priority
    2. +
    3. Compatibility constraints - From dependencies
    4. +
    5. User constraints - From configuration
    6. +
    7. Latest compatible - Within constraints
    8. +
    +

    Example resolution:

    +
    # Component A requires: kubernetes >=1.28.0
    +# Component B requires: kubernetes <1.30.0
    +# User specifies: kubernetes ^1.28.0
    +
    +# Resolved: kubernetes 1.29.x (latest compatible)
    +
    +

    Pinning Versions

    +

    Pin critical components:

    +
    {
    +  task_services = [
    +    {
    +      name = "kubernetes",
    +      version = "1.28.3",
    +      pinned = true  # Never auto-update
    +    }
    +  ]
    +}
    +
    +

    Version Rollback

    +

    Rollback to previous versions:

    +
    # Show version history
    +provisioning version history
    +
    +# Rollback to previous version
    +provisioning version rollback kubernetes
    +
    +# Rollback to specific version
    +provisioning version rollback kubernetes --version 1.28.0
    +
    +

    Best Practices

    +
      +
    1. Use version constraints - Avoid latest tag
    2. +
    3. Lock versions - Generate and commit lock files
    4. +
    5. Test updates - Validate in non-production first
    6. +
    7. Pin critical components - Prevent unexpected updates
    8. +
    9. Document compatibility - Specify version requirements
    10. +
    11. Monitor updates - Track new releases
    12. +
    13. Gradual rollout - Update incrementally
    14. +
    +

    Version Metadata

    +

    Access version information programmatically:

    +
    # Show component versions
    +provisioning version list
    +
    +# Export versions to JSON
    +provisioning version export --format json
    +
    +# Compare versions
    +provisioning version compare <component> <version1> <version2>
    +
    +

    Integration with CI/CD

    +
    # .gitlab-ci.yml example
    +deploy:
    +  script:
    +    - provisioning version lock --verify  # Verify lock file
    +    - provisioning version validate       # Check compatibility
    +    - provisioning deploy                 # Deploy with locked versions
    +
    +

    Troubleshooting

    +

    Version Conflicts

    +
    # Show dependency tree
    +provisioning version tree
    +
    +# Identify conflicting constraints
    +provisioning version conflicts
    +
    +

    Update Failures

    +
    # Check why update failed
    +provisioning version update kubernetes --verbose
    +
    +# Force update (override constraints)
    +provisioning version update kubernetes --force --version 1.30.0
    +
    +

    References

    + +

    + Provisioning Logo +

    +

    + Provisioning +

    +

    Platform Features

    +

    Complete documentation for the 12 core Provisioning platform capabilities +enabling enterprise infrastructure as code across multiple clouds.

    +

    Overview

    +

    Provisioning provides comprehensive features for:

    +
      +
    • Workspace organization - Primary mode for grouping infrastructure, configs, schemas, and extensions with complete isolation
    • +
    • Intelligent CLI - Modular architecture with 80+ keyboard shortcuts, decentralized command registration, 84% code reduction
    • +
    • Type-safe configuration - Nickel as source of truth for all infrastructure definitions with mandatory validation
    • +
    • Batch operations - DAG scheduling, parallel execution, multi-cloud workflows with dependency resolution
    • +
    • Hybrid orchestration - Execute across Rust and Nushell with file-based persistence and atomic operations
    • +
    • Interactive guides - Step-by-step guided infrastructure deployment with validation and error recovery
    • +
    • Testing framework - Container-based test environments for validating infrastructure configurations
    • +
    • Platform installer - TUI and unattended installation with provider setup and configuration management
    • +
    • Security system - Complete v4.0.0 with authentication, authorization, encryption, secrets management, audit logging
    • +
    • Daemon acceleration - 50x performance improvement for script-heavy workloads via persistent Rust process
    • +
    • Intelligent detection - Automated analysis detecting cost, compliance, performance, security, and reliability issues
    • +
    • Extension registry - Central marketplace for providers, task services, plugins, and clusters with versioning
    • +
    +

    Feature Guides

    +

    Organization and Management

    +
      +
    • +

      Workspace Management - Workspace mode, grouping, +multi-tenancy, isolation, customization

      +
    • +
    • +

      CLI Architecture - Modular design, 80+ shortcuts, +decentralized registration, dynamic subcommands, 84% code reduction

      +
    • +
    • +

      Configuration System - Nickel type-safe +configuration, hierarchical loading, profiles, validation

      +
    • +
    +

    Workflow and Operations

    +
      +
    • +

      Batch Workflows - DAG scheduling, parallel +execution, conditional logic, error handling, multi-cloud, dependency resolution

      +
    • +
    • +

      Orchestrator System - Hybrid Rust/Nushell, +file-based persistence, atomic operations, event-driven

      +
    • +
    • +

      Provisioning Daemon - TCP service, 50x +performance, connection pooling, LRU caching, graceful shutdown

      +
    • +
    +

    Developer and Automation Features

    +
      +
    • +

      Interactive Guides - Guided deployment, prompts, +validation, error recovery, progress tracking

      +
    • +
    • +

      Test Environment - Container-based testing, +sandbox isolation, validation, integration testing

      +
    • +
    • +

      Extension Registry - Marketplace for +providers, task services, plugins, clusters, versioning, dependencies

      +
    • +
    +

    Platform Capabilities

    +
      +
    • +

      Platform Installer - TUI and unattended modes, +provider setup, workspace creation, configuration management

      +
    • +
    • +

      Security System - v4.0.0: JWT/OAuth, Cedar RBAC, +MFA, audit logging, encryption, secrets management

      +
    • +
    • +

      Detector System - Cost optimization, compliance, +performance analysis, security detection, reliability assessment

      +
    • +
    • +

      Nushell Plugins - 17 plugins: tera, nickel, fluentd, +secretumvault, 10-50x performance gains

      +
    • +
    • +

      Version Management - Semantic versioning, +dependency resolution, compatibility, deprecation, upgrade workflows

      +
    • +
    +

    Feature Categories

    +
    + + + + + + + + +
    CategoryFeaturesUse Case
    CoreWorkspace Management, CLI Architecture, Configuration SystemOrganization, command discovery, type-safety
    OperationsBatch Workflows, Orchestrator, Version ManagementMulti-cloud, DAG scheduling, persistence
    PerformanceProvisioning Daemon, Nushell PluginsScript acceleration, 10-50x speedup
    Quality & TestingTest Environment, Extension RegistryConfiguration validation, distribution
    Setup & InstallationPlatform InstallerInstallation, initial configuration
    IntelligenceDetector SystemAnalysis, anomaly detection, cost optimization
    SecuritySecurity System, Complete v4.0.0Authentication, authorization, encryption
    User ExperienceInteractive GuidesGuided deployment, learning
    +
    +

    Quick Navigation

    +

    I want to organize my infrastructure

    +

    Start with Workspace Management - primary organizational mode with isolation and customization.

    +

    I want faster command execution

    +

    Use Provisioning Daemon - 50x performance improvement for scripts through persistent process and caching.

    +

    I want to automate deployment

    +

    Learn Batch Workflows - DAG scheduling and multi-cloud orchestration with error handling.

    +

    I need to ensure security

    +

    Review Security System - complete authentication, authorization, encryption, audit logging.

    +

    I want to validate configurations

    +

    Check Test Environment - container-based sandbox testing and policy validation.

    +

    I need to extend capabilities

    +

    See Extension Registry - marketplace for providers, task services, plugins, clusters.

    +

    I need to find infrastructure issues

    +

    Use Detector System - automated cost, compliance, performance, and security analysis.

    +

    Integration with Platform

    +

    All features are integrated via:

    +
      +
    • CLI commands - Invoke from Nushell or bash
    • +
    • REST APIs - Integrate with external systems
    • +
    • Nushell scripting - Build custom automation
    • +
    • Nickel configuration - Type-safe definitions
    • +
    • Extensions - Add custom providers and services
    • +
    + +
      +
    • Architecture Details → See provisioning/docs/src/architecture/
    • +
    • Development Guides → See provisioning/docs/src/development/
    • +
    • API Reference → See provisioning/docs/src/api-reference/
    • +
    • Operation Guides → See provisioning/docs/src/operations/
    • +
    • Security Details → See provisioning/docs/src/security/
    • +
    • Practical Examples → See provisioning/docs/src/examples/
    • +
    +

    Workspace Management

    +

    Workspaces are the default organizational unit for all infrastructure work in Provisioning. +Every infrastructure project, deployment environment, or isolated configuration lives within a +workspace. This workspace-first approach provides clean separation between projects, +environments, and teams while enabling rapid context switching.

    +

    Overview

    +

    A workspace is an isolated environment that groups together:

    +
      +
    • Infrastructure definitions - Nickel schemas, server configs, cluster definitions
    • +
    • Configuration settings - Environment-specific settings, provider credentials, user preferences
    • +
    • Runtime data - State files, checkpoints, logs, generated configurations
    • +
    • Extensions - Custom providers, task services, workflow templates
    • +
    +

    The workspace system enforces that all infrastructure operations (server creation, task service +installation, cluster deployment) require an active workspace. This prevents accidental +cross-project modifications and ensures configuration isolation.

    +

    Why Workspace-First

    +

    Traditional infrastructure tools often mix configurations across projects, leading to:

    +
      +
    • Accidental deployments to wrong environments
    • +
    • Configuration drift between dev/staging/production
    • +
    • Credential leakage across projects
    • +
    • Difficulty tracking infrastructure boundaries
    • +
    +

    Provisioning’s workspace-first approach solves these problems by making workspace boundaries explicit and enforced at the CLI level.

    +

    Workspace Structure

    +

    Every workspace follows a consistent directory structure:

    +
    workspace_my_project/
    +├── infra/                    # Infrastructure definitions (Nickel schemas)
    +│   ├── my-cluster.ncl        # Cluster definition
    +│   ├── servers.ncl           # Server configurations
    +│   └── batch-workflows.ncl   # Batch workflow definitions
    +│
    +├── config/                   # Workspace configuration
    +│   ├── local-overrides.toml  # User-specific overrides (gitignored)
    +│   ├── dev-defaults.toml     # Development environment defaults
    +│   ├── test-defaults.toml    # Testing environment defaults
    +│   ├── prod-defaults.toml    # Production environment defaults
    +│   └── provisioning.yaml     # Workspace metadata and settings
    +│
    +├── extensions/               # Workspace-specific extensions
    +│   ├── providers/            # Custom cloud providers
    +│   ├── taskservs/            # Custom task services
    +│   ├── clusters/             # Custom cluster templates
    +│   └── workflows/            # Custom workflow definitions
    +│
    +└── runtime/                  # Runtime data (gitignored)
    +    ├── state/                # Infrastructure state files
    +    ├── checkpoints/          # Workflow checkpoints
    +    ├── logs/                 # Operation logs
    +    └── generated/            # Generated configuration files
    +
    +

    Configuration Hierarchy

    +

    Workspace configurations follow a 5-layer hierarchy:

    +
    1. System Defaults       (provisioning/config/config.defaults.toml)
    +   ↓ overridden by
    +2. User Config           (~/.config/provisioning/user_config.yaml)
    +   ↓ overridden by
    +3. Workspace Config      (workspace/config/provisioning.yaml)
    +   ↓ overridden by
    +4. Environment Config    (workspace/config/{dev,test,prod}-defaults.toml)
    +   ↓ overridden by
    +5. Runtime Flags         (--flag value)
    +
    +

    This hierarchy ensures sensible defaults while allowing granular control at every level.

    +

    Core Commands

    +

    Creating Workspaces

    +
    # Create new workspace
    +provisioning workspace init my-project
    +
    +# Create workspace with specific location
    +provisioning workspace init my-project --path /custom/location
    +
    +# Create from template
    +provisioning workspace init my-project --template kubernetes-ha
    +
    +

    Listing Workspaces

    +
    # List all workspaces
    +provisioning workspace list
    +
    +# Show active workspace
    +provisioning workspace status
    +
    +# List with details
    +provisioning workspace list --verbose
    +
    +

    Example output:

    +
    NAME              PATH                                 LAST_USED           STATUS
    +my-project        /workspaces/workspace_my_project     2026-01-15 10:30    Active
    +dev-env           /workspaces/workspace_dev_env        2026-01-14 15:45
    +production        /workspaces/workspace_production     2026-01-10 09:00
    +
    +

    Switching Workspaces

    +
    # Switch to different workspace (single command)
    +provisioning workspace switch my-project
    +
    +# Switch with validation
    +provisioning workspace switch production --validate
    +
    +# Quick switch using shortcut
    +provisioning ws switch dev-env
    +
    +

    Workspace switching updates:

    +
      +
    • Active workspace marker in user configuration
    • +
    • Environment variables for current session
    • +
    • CLI prompt indicator (if configured)
    • +
    • Last-used timestamp
    • +
    +

    Deleting Workspaces

    +
    # Delete workspace (requires confirmation)
    +provisioning workspace delete old-project
    +
    +# Force delete without confirmation
    +provisioning workspace delete old-project --force
    +
    +# Delete but keep backups
    +provisioning workspace delete old-project --backup
    +
    +

    Deletion safety:

    +
      +
    • Requires explicit confirmation unless --force is used
    • +
    • Optionally creates backup before deletion
    • +
    • Validates no active operations are running
    • +
    • Updates workspace registry
    • +
    +

    Workspace Registry

    +

    The workspace registry is stored in user configuration and tracks all workspaces:

    +
    # ~/.config/provisioning/user_config.yaml
    +workspaces:
    +  active: my-project
    +  registry:
    +    my-project:
    +      path: /workspaces/workspace_my_project
    +      created: 2026-01-15T10:30:00Z
    +      last_used: 2026-01-15T14:20:00Z
    +      template: default
    +    dev-env:
    +      path: /workspaces/workspace_dev_env
    +      created: 2026-01-10T08:00:00Z
    +      last_used: 2026-01-14T15:45:00Z
    +      template: development
    +
    +

    This centralized registry enables:

    +
      +
    • Fast workspace discovery
    • +
    • Usage tracking and statistics
    • +
    • Workspace templates
    • +
    • Path resolution
    • +
    +

    Workspace Enforcement

    +

    The CLI enforces workspace requirements for all infrastructure operations:

    +

    Workspace-exempt commands (work without active workspace):

    +
      +
    • provisioning help
    • +
    • provisioning version
    • +
    • provisioning workspace *
    • +
    • provisioning guide *
    • +
    • provisioning setup *
    • +
    • provisioning providers (list only)
    • +
    +

    Workspace-required commands (require active workspace):

    +
      +
    • provisioning server create
    • +
    • provisioning taskserv install
    • +
    • provisioning cluster deploy
    • +
    • provisioning batch submit
    • +
    • All infrastructure modification operations
    • +
    +

    If no workspace is active, workspace-required commands fail with:

    +
    Error: No active workspace
    +Please activate or create a workspace:
    +  provisioning workspace init <name>
    +  provisioning workspace switch <name>
    +
    +

    This enforcement prevents accidental infrastructure modifications outside workspace boundaries.

    +

    Workspace Templates

    +

    Templates provide pre-configured workspace structures for common use cases:

    +

    Available Templates

    +
    + + + + + + +
    TemplateDescriptionUse Case
    defaultMinimal workspace structureGeneral purpose infrastructure
    kubernetes-haHA Kubernetes setup with 3 control planesProduction Kubernetes deployments
    developmentDev-optimized with Docker ComposeLocal testing and development
    multi-cloudMultiple provider configurationsMulti-cloud deployments
    database-clusterDatabase-focused with backup configsDatabase infrastructure
    cicdCI/CD pipeline configurationsAutomated deployment pipelines
    +
    +

    Using Templates

    +
    # Create from template
    +provisioning workspace init my-k8s --template kubernetes-ha
    +
    +# List available templates
    +provisioning workspace templates
    +
    +# Show template details
    +provisioning workspace template show kubernetes-ha
    +
    +

    Templates pre-populate:

    +
      +
    • Infrastructure Nickel schemas
    • +
    • Provider configurations
    • +
    • Environment-specific defaults
    • +
    • Example workflow definitions
    • +
    • README with usage instructions
    • +
    +

    Multi-Environment Workflows

    +

    Workspaces excel at managing multiple environments:

    +

    Strategy 1: Separate Workspaces Per Environment

    +
    # Create dedicated workspaces
    +provisioning workspace init myapp-dev
    +provisioning workspace init myapp-staging
    +provisioning workspace init myapp-prod
    +
    +# Switch between environments
    +provisioning ws switch myapp-dev
    +provisioning server create      # Creates in dev
    +
    +provisioning ws switch myapp-prod
    +provisioning server create      # Creates in prod (isolated)
    +
    +

    Pros: Complete isolation, different credentials, independent state +Cons: More workspace management, duplicate configuration

    +

    Strategy 2: Single Workspace, Multiple Environments

    +
    # Single workspace with environment configs
    +provisioning workspace init myapp
    +
    +# Deploy to different environments using flags
    +PROVISIONING_ENV=dev provisioning server create
    +PROVISIONING_ENV=staging provisioning server create
    +PROVISIONING_ENV=prod provisioning server create
    +
    +

    Pros: Shared configuration, easier to maintain +Cons: Shared credentials, risk of cross-environment mistakes

    +

    Strategy 3: Hybrid Approach

    +
    # Dev workspace for experimentation
    +provisioning workspace init myapp-dev
    +
    +# Prod workspace for production only
    +provisioning workspace init myapp-prod
    +
    +# Use environment flags within workspaces
    +provisioning ws switch myapp-prod
    +PROVISIONING_ENV=prod provisioning cluster deploy
    +
    +

    Pros: Balances isolation and convenience +Cons: More complex to explain to teams

    +

    Best Practices

    +

    Naming Conventions

    +
    # Good names (descriptive, unique)
    +workspace_librecloud_production
    +workspace_myapp_dev
    +workspace_k8s_staging
    +
    +# Avoid (ambiguous, generic)
    +workspace_test
    +workspace_1
    +workspace_temp
    +
    +

    Configuration Management

    +
    # Version control: Commit these files
    +infra/**/*.ncl                    # Infrastructure definitions
    +config/*-defaults.toml             # Environment defaults
    +config/provisioning.yaml           # Workspace metadata
    +extensions/**/*                    # Custom extensions
    +
    +# Gitignore: Never commit these
    +config/local-overrides.toml        # User-specific overrides
    +runtime/**/*                       # Runtime data and state
    +**/*.secret                        # Credential files
    +
    +

    Environment Separation

    +
    # Use dedicated workspaces for production
    +provisioning workspace init myapp-prod --template production
    +
    +# Enable extra validation for production
    +provisioning ws switch myapp-prod
    +provisioning config set validation.strict true
    +provisioning config set confirmation.required true
    +
    +

    Team Collaboration

    +
    # Share workspace structure via git
    +git clone repo/myapp-infrastructure
    +cd myapp-infrastructure
    +provisioning workspace init . --import
    +
    +# Each team member creates local-overrides.toml
    +cat > config/local-overrides.toml <<EOF
    +[user]
    +default_region = "us-east-1"
    +confirmation_required = true
     EOF
    -
    -# Test configuration
    -nickel export infra/default/servers.ncl
     
    -

    Common Extension Patterns

    -

    Database Service Extension

    -
    # Create database service
    -./provisioning/tools/create-extension.nu taskserv company-db \
    -    --author "Your Company" \
    -    --description "Company-specific database service"
    -
    -# Customize for PostgreSQL with company settings
    -cd extensions/taskservs/company-db
    +

    Troubleshooting

    +

    No Active Workspace Error

    +
    Error: No active workspace
     
    -

    Edit the schema:

    -
    # Database service configuration schema
    -let CompanyDbConfig = {
    -  # Database settings
    -  database_name | String = "company_db",
    -  postgres_version | String = "13",
    +

    Solution:

    +
    # List workspaces
    +provisioning workspace list
     
    -  # Company-specific settings
    -  backup_schedule | String = "0 2 * * *",
    -  compliance_mode | Bool = true,
    -  encryption_enabled | Bool = true,
    +# Switch to workspace
    +provisioning workspace switch <name>
     
    -  # Connection settings
    -  max_connections | Number = 100,
    -  shared_buffers | String = "256 MB",
    -
    -  # Storage settings
    -  storage_size | String = "100Gi",
    -  storage_class | String = "fast-ssd",
    -} | {
    -  # Validation contracts
    -  database_name | String,
    -  max_connections | std.contract.from_validator (fun x => x > 0),
    -} in
    -CompanyDbConfig
    +# Or create new workspace
    +provisioning workspace init <name>
     
    -

    Monitoring Service Extension

    -
    # Create monitoring service
    -./provisioning/tools/create-extension.nu taskserv company-monitoring \
    -    --author "Your Company" \
    -    --description "Company-specific monitoring and alerting"
    +

    Workspace Not Found

    +
    Error: Workspace 'my-project' not found in registry
     
    -

    Customize for Prometheus with company dashboards:

    -
    # Monitoring service configuration
    -let AlertManagerConfig = {
    -  smtp_server | String,
    -  smtp_port | Number = 587,
    -  smtp_auth_enabled | Bool = true,
    -} in
    +

    Solution:

    +
    # Re-register workspace
    +provisioning workspace register /path/to/workspace_my_project
     
    -let CompanyMonitoringConfig = {
    -  # Prometheus settings
    -  retention_days | Number = 30,
    -  storage_size | String = "50Gi",
    -
    -  # Company dashboards
    -  enable_business_metrics | Bool = true,
    -  enable_compliance_dashboard | Bool = true,
    -
    -  # Alert routing
    -  alert_manager_config | AlertManagerConfig,
    -
    -  # Integration settings
    -  slack_webhook | String | optional,
    -  email_notifications | Array String,
    -} in
    -CompanyMonitoringConfig
    +# Or recreate workspace
    +provisioning workspace init my-project
     
    -

    Legacy System Integration

    -
    # Create legacy integration
    -./provisioning/tools/create-extension.nu taskserv legacy-bridge \
    -    --author "Your Company" \
    -    --description "Bridge for legacy system integration"
    +

    Workspace Path Doesn’t Exist

    +
    Error: Workspace path '/workspaces/workspace_my_project' does not exist
     
    -

    Customize for mainframe integration:

    -
    # Legacy bridge configuration schema
    -let LegacyBridgeConfig = {
    -  # Legacy system details
    -  mainframe_host | String,
    -  mainframe_port | Number = 23,
    -  connection_type | [String] = "tn3270",  # "tn3270" or "direct"
    +

    Solution:

    +
    # Remove invalid entry
    +provisioning workspace unregister my-project
     
    -  # Data transformation
    -  data_format | [String] = "fixed-width",  # "fixed-width", "csv", or "xml"
    -  character_encoding | String = "ebcdic",
    -
    -  # Processing settings
    -  batch_size | Number = 1000,
    -  poll_interval_seconds | Number = 60,
    -
    -  # Error handling
    -  retry_attempts | Number = 3,
    -  dead_letter_queue_enabled | Bool = true,
    -} in
    -LegacyBridgeConfig
    +# Re-create workspace
    +provisioning workspace init my-project
     
    -

    Advanced Customization

    -

    Custom Provider Development

    -
    # Create custom cloud provider
    -./provisioning/tools/create-extension.nu provider company-cloud \
    -    --author "Your Company" \
    -    --description "Company private cloud provider"
    +

    Integration with Other Features

    +

    Batch Workflows

    +

    Workspaces provide the context for batch workflow execution:

    +
    provisioning ws switch production
    +provisioning batch submit infra/batch-workflows.ncl
     
    -

    Complete Infrastructure Stack

    -
    # Create complete cluster configuration
    -./provisioning/tools/create-extension.nu cluster company-stack \
    -    --author "Your Company" \
    -    --description "Complete company infrastructure stack"
    +

    Batch workflows access workspace-specific:

    +
      +
    • Infrastructure definitions
    • +
    • Provider credentials
    • +
    • Configuration settings
    • +
    • State management
    • +
    +

    Test Environments

    +

    Test environments inherit workspace configuration:

    +
    provisioning ws switch dev
    +provisioning test quick kubernetes
    +# Uses dev workspace's configuration and providers
     
    -

    Testing and Validation

    -

    Local Testing Workflow

    -
    # 1. Create test workspace
    -mkdir test-workspace && cd test-workspace
    -../provisioning/tools/workspace-init.nu . init
    -
    -# 2. Load your extensions
    -../provisioning/core/cli/module-loader load taskservs . [my-app, company-db]
    -../provisioning/core/cli/module-loader load providers . [company-cloud]
    -
    -# 3. Validate loading
    -../provisioning/core/cli/module-loader list taskservs .
    -../provisioning/core/cli/module-loader validate .
    -
    -# 4. Test KCL compilation
    -nickel export servers.ncl
    -
    -# 5. Dry-run deployment
    -../provisioning/core/cli/provisioning server create --infra . --check
    +

    Version Management

    +

    Workspace configurations can specify tool versions:

    +
    # workspace/infra/versions.ncl
    +{
    +  tools = {
    +    nushell = "0.109.1"
    +    nickel = "1.15.1"
    +    kubernetes = "1.29.0"
    +  }
    +}
     
    -

    Continuous Integration Testing

    -

    Create .github/workflows/test-extensions.yml:

    -
    name: Test Extensions
    -on: [push, pull_request]
    +

    Provisioning validates versions match workspace requirements.

    +

    See Also

    + +

    CLI Architecture

    +

    The Provisioning CLI provides a unified command-line interface for all infrastructure +operations. It features 111+ commands organized into 7 domain-focused modules with 80+ +shortcuts for improved productivity. The modular architecture achieved 84% code reduction +while improving maintainability and extensibility.

    +

    Overview

    +

    The CLI architecture uses domain-driven design, separating concerns across modules. This +refactoring reduced the main entry point from monolithic code to 211 lines. The architecture +improves discoverability and enables rapid feature development.

    +

    + CLI Architecture Modular Design Decentralized Command Registration +

    +

    Key Metrics

    +
    + + + + + +
    MetricBeforeAfterImprovement
    Main CLI lines1,32921184% reduction
    Command domains1 (monolithic)7 (modular)7x organization
    Commands~50111+122% increase
    Shortcuts080+New capability
    Help categories07Improved discovery
    +
    +

    Domain Architecture

    +

    The CLI is organized into 7 domain-focused modules:

    +

    1. Infrastructure Domain

    +

    Commands: Server, TaskServ, Cluster, Infra management

    +
    # Server operations
    +provisioning server create
    +provisioning server list
    +provisioning server delete
    +provisioning server ssh <hostname>
    +
    +# Task service operations
    +provisioning taskserv install kubernetes
    +provisioning taskserv list
    +provisioning taskserv remove kubernetes
    +
    +# Cluster operations
    +provisioning cluster deploy my-cluster
    +provisioning cluster status my-cluster
    +provisioning cluster scale my-cluster --nodes 5
    +
    +

    Shortcuts: s (server), t/task (taskserv), cl (cluster), i (infra)

    +

    2. Orchestration Domain

    +

    Commands: Workflow, Batch, Orchestrator management

    +
    # Workflow operations
    +provisioning workflow list
    +provisioning workflow status <id>
    +provisioning workflow cancel <id>
    +
    +# Batch operations
    +provisioning batch submit infra/batch-workflows.ncl
    +provisioning batch monitor <workflow-id>
    +provisioning batch list
    +
    +# Orchestrator management
    +provisioning orchestrator start
    +provisioning orchestrator status
    +provisioning orchestrator logs
    +
    +

    Shortcuts: wf/flow (workflow), bat (batch), orch (orchestrator)

    +

    3. Development Domain

    +

    Commands: Module, Layer, Version, Pack management

    +
    # Module operations
    +provisioning module create my-module
    +provisioning module list
    +provisioning module test my-module
    +
    +# Layer operations
    +provisioning layer add <name>
    +provisioning layer list
    +
    +# Versioning
    +provisioning version bump minor
    +provisioning version list
    +
    +# Packaging
    +provisioning pack create my-extension
    +provisioning pack publish my-extension
    +
    +

    Shortcuts: mod (module), l (layer), v (version), p (pack)

    +

    4. Workspace Domain

    +

    Commands: Workspace management, templates

    +
    # Workspace operations
    +provisioning workspace init my-project
    +provisioning workspace list
    +provisioning workspace switch my-project
    +provisioning workspace delete old-project
    +
    +# Template operations
    +provisioning workspace template list
    +provisioning workspace template show kubernetes-ha
    +
    +

    Shortcuts: ws (workspace)

    +

    5. Configuration Domain

    +

    Commands: Config, Environment, Validate, Setup

    +
    # Configuration operations
    +provisioning config get servers.default_plan
    +provisioning config set servers.default_plan large
    +provisioning config validate
    +
    +# Environment operations
    +provisioning env
    +provisioning allenv
    +
    +# Setup operations
    +provisioning setup profile --profile developer
    +provisioning setup versions
    +
    +# Validation
    +provisioning validate config
    +provisioning validate infra
    +provisioning validate nickel workspace/infra/my-cluster.ncl
    +
    +

    Shortcuts: cfg (config), val (validate), st (setup)

    +

    6. Utilities Domain

    +

    Commands: SSH, SOPS, Cache, Plugin management

    +
    # SSH operations
    +provisioning ssh server-01
    +provisioning ssh server-01 -- uptime
    +
    +# SOPS operations
    +provisioning sops encrypt config.yaml
    +provisioning sops decrypt config.enc.yaml
    +
    +# Cache operations
    +provisioning cache clear
    +provisioning cache stats
    +
    +# Plugin operations
    +provisioning plugin list
    +provisioning plugin install nu_plugin_auth
    +provisioning plugin update
    +
    +

    Shortcuts: sops, cache, plug (plugin)

    +

    7. Generation Domain

    +

    Commands: Generate code, configs, docs

    +
    # Code generation
    +provisioning generate provider upcloud-new
    +provisioning generate taskserv postgresql
    +provisioning generate cluster k8s-ha
    +
    +# Config generation
    +provisioning generate config --profile production
    +provisioning generate nickel --template kubernetes
    +
    +# Documentation generation
    +provisioning generate docs
    +
    +

    Shortcuts: g/gen (generate)

    +

    Command Shortcuts

    +

    The CLI provides 80+ shortcuts for improved productivity:

    +

    Infrastructure Shortcuts

    +
    + + + + +
    Full CommandShortcutsExample
    serversprovisioning s list
    taskservt, taskprovisioning t install kubernetes
    clusterclprovisioning cl deploy my-cluster
    infrastructurei, infraprovisioning i list
    +
    +

    Orchestration Shortcuts

    +
    + + + +
    Full CommandShortcutsExample
    workflowwf, flowprovisioning wf list
    batchbatprovisioning bat submit workflow.ncl
    orchestratororchprovisioning orch status
    +
    +

    Development Shortcuts

    +
    + + + + +
    Full CommandShortcutsExample
    modulemodprovisioning mod list
    layerlprovisioning l add base
    versionvprovisioning v bump minor
    packpprovisioning p create extension
    +
    +

    Configuration Shortcuts

    +
    + + + + + +
    Full CommandShortcutsExample
    workspacewsprovisioning ws switch prod
    configcfgprovisioning cfg get servers.plan
    validatevalprovisioning val config
    setupstprovisioning st profile --profile dev
    environmentenvprovisioning env
    +
    +

    Utility Shortcuts

    +
    + + +
    Full CommandShortcutsExample
    generateg, genprovisioning g provider aws-new
    pluginplugprovisioning plug list
    +
    +

    Quick Reference Shortcuts

    +
    + + + +
    Full CommandShortcutsPurpose
    shortcutsscShow shortcuts reference
    guide-Interactive guides
    howto-Quick how-to guides
    +
    +

    Bi-Directional Help System

    +

    The CLI features a bi-directional help system that works in both directions:

    +
    # Both of these work identically
    +provisioning help workspace
    +provisioning workspace help
    +
    +# Shortcuts also work
    +provisioning help ws
    +provisioning ws help
    +
    +# Category help
    +provisioning help infrastructure
    +provisioning help orchestration
    +
    +

    This flexibility improves discoverability and aligns with natural user expectations.

    +

    Centralized Flag Handling

    +

    All global flags are handled consistently across all commands:

    +

    Global Flags

    +
    + + + + + + + +
    FlagShortPurposeExample
    --debug-dEnable debug modeprovisioning --debug server create
    --check-cDry-run mode (no changes)provisioning --check server delete
    --yes-yAuto-confirm operationsprovisioning --yes cluster delete
    --infra-iSpecify infrastructureprovisioning --infra my-cluster server list
    --verbose-vVerbose outputprovisioning --verbose workflow list
    --quiet-qMinimal outputprovisioning --quiet batch submit
    --format-fOutput format (json/yaml/table)provisioning --format json server list
    +
    +

    Command-Specific Flags

    +
    # Server creation flags
    +provisioning server create --plan large --region us-east-1 --zone a
    +
    +# TaskServ installation flags
    +provisioning taskserv install kubernetes --version 1.29.0 --ha
    +
    +# Cluster deployment flags
    +provisioning cluster deploy --replicas 3 --storage 100GB
    +
    +# Batch workflow flags
    +provisioning batch submit workflow.ncl --parallel 5 --timeout 3600
    +
    +

    Command Discovery

    +

    Categorized Help

    +

    The help system organizes commands by domain:

    +
    provisioning help
    +
    +# Output shows categorized commands:
    +Infrastructure Commands:
    +  server        Manage servers (shortcuts: s)
    +  taskserv      Manage task services (shortcuts: t, task)
    +  cluster       Manage clusters (shortcuts: cl)
    +
    +Orchestration Commands:
    +  workflow      Manage workflows (shortcuts: wf, flow)
    +  batch         Batch operations (shortcuts: bat)
    +  orchestrator  Orchestrator management (shortcuts: orch)
    +
    +Configuration Commands:
    +  workspace     Workspace management (shortcuts: ws)
    +  config        Configuration management (shortcuts: cfg)
    +  validate      Validation operations (shortcuts: val)
    +  setup         System setup (shortcuts: st)
    +
    +

    Quick Reference

    +
    # Fastest command reference
    +provisioning sc
    +
    +# Shows comprehensive shortcuts table with examples
    +
    +

    Interactive Guides

    +
    # Step-by-step guides
    +provisioning guide from-scratch      # Complete deployment guide
    +provisioning guide quickstart         # Command shortcuts reference
    +provisioning guide customize          # Customization patterns
    +
    +

    Command Routing

    +

    The CLI uses a sophisticated dispatcher for command routing:

    +
    # provisioning/core/nulib/main_provisioning/dispatcher.nu
    +
    +# Route command to appropriate handler
    +export def dispatch [
    +    command: string
    +    args: list<string>
    +] {
    +    match $command {
    +        # Infrastructure domain
    +        "server" | "s" => { route-to-handler "infrastructure" "server" $args }
    +        "taskserv" | "t" | "task" => { route-to-handler "infrastructure" "taskserv" $args }
    +        "cluster" | "cl" => { route-to-handler "infrastructure" "cluster" $args }
    +
    +        # Orchestration domain
    +        "workflow" | "wf" | "flow" => { route-to-handler "orchestration" "workflow" $args }
    +        "batch" | "bat" => { route-to-handler "orchestration" "batch" $args }
    +
    +        # Configuration domain
    +        "workspace" | "ws" => { route-to-handler "configuration" "workspace" $args }
    +        "config" | "cfg" => { route-to-handler "configuration" "config" $args }
    +    }
    +}
    +
    +

    This routing enables:

    +
      +
    • Consistent error handling
    • +
    • Centralized logging
    • +
    • Workspace enforcement
    • +
    • Permission checks
    • +
    • Audit trail
    • +
    +

    Command Implementation Pattern

    +

    All commands follow a consistent implementation pattern:

    +
    # Example: provisioning/core/nulib/main_provisioning/commands/server.nu
    +
    +# Main command handler
    +export def main [
    +    operation: string    # create, list, delete, etc.
    +    --check             # Dry-run mode
    +    --yes               # Auto-confirm
    +] {
    +    # 1. Validate workspace requirement
    +    enforce-workspace-requirement "server" $operation
    +
    +    # 2. Load configuration
    +    let config = load-config
    +
    +    # 3. Parse operation
    +    match $operation {
    +        "create" => { create-server $args $config --check=$check --yes=$yes }
    +        "list" => { list-servers $config }
    +        "delete" => { delete-server $args $config --yes=$yes }
    +        "ssh" => { ssh-to-server $args $config }
    +        _ => { error $"Unknown server operation: ($operation)" }
    +    }
    +
    +    # 4. Log operation (audit trail)
    +    log-operation "server" $operation $args
    +}
    +
    +

    This pattern ensures:

    +
      +
    • Consistent behavior
    • +
    • Proper error handling
    • +
    • Configuration integration
    • +
    • Workspace enforcement
    • +
    • Audit logging
    • +
    +

    Modular Structure

    +

    The CLI codebase is organized for maintainability:

    +
    provisioning/core/
    +├── cli/
    +│   └── provisioning           # Main CLI entry point (211 lines)
    +│
    +├── nulib/
    +│   ├── main_provisioning/
    +│   │   ├── dispatcher.nu      # Command routing (central dispatch)
    +│   │   ├── flags.nu           # Centralized flag handling
    +│   │   ├── help_system_fluent.nu  # Categorized help with i18n
    +│   │   │
    +│   │   └── commands/          # Domain-specific command handlers
    +│   │       ├── infrastructure/
    +│   │       │   ├── server.nu
    +│   │       │   ├── taskserv.nu
    +│   │       │   └── cluster.nu
    +│   │       │
    +│   │       ├── orchestration/
    +│   │       │   ├── workflow.nu
    +│   │       │   ├── batch.nu
    +│   │       │   └── orchestrator.nu
    +│   │       │
    +│   │       ├── configuration/
    +│   │       │   ├── workspace.nu
    +│   │       │   ├── config.nu
    +│   │       │   └── validate.nu
    +│   │       │
    +│   │       └── utilities/
    +│   │           ├── ssh.nu
    +│   │           ├── sops.nu
    +│   │           └── cache.nu
    +│   │
    +│   └── lib_provisioning/      # Core libraries (used by commands)
    +│       ├── config/
    +│       ├── providers/
    +│       ├── workspace/
    +│       └── utils/
    +
    +

    This structure enables:

    +
      +
    • Clear separation of concerns
    • +
    • Easy addition of new commands
    • +
    • Testable command handlers
    • +
    • Reusable core libraries
    • +
    +

    Internationalization

    +

    The CLI supports multiple languages via Fluent catalog:

    +
    # Automatic locale detection
    +export LANG=es_ES.UTF-8
    +provisioning help    # Shows Spanish help if es-ES catalog exists
    +
    +# Supported locales
    +en-US (default)      # English
    +es-ES                # Spanish
    +fr-FR                # French
    +de-DE                # German
    +
    +

    Catalog structure:

    +
    provisioning/locales/
    +├── en-US/
    +│   └── help.ftl      # English help strings
    +├── es-ES/
    +│   └── help.ftl      # Spanish help strings
    +└── de-DE/
    +    └── help.ftl      # German help strings
    +
    +

    Extension Points

    +

    The modular architecture provides clean extension points:

    +

    Adding New Commands

    +
    # 1. Create command handler
    +provisioning/core/nulib/main_provisioning/commands/my_new_command.nu
    +
    +# 2. Register in dispatcher
    +# provisioning/core/nulib/main_provisioning/dispatcher.nu
    +"my-command" | "mc" => { route-to-handler "utilities" "my-command" $args }
    +
    +# 3. Add help entry
    +# provisioning/locales/en-US/help.ftl
    +my-command-help = Manage my new feature
    +
    +# 4. Command is now available
    +provisioning my-command <operation>
    +provisioning mc <operation>  # Shortcut also works
    +
    +

    Adding New Domains

    +
    # 1. Create domain directory
    +provisioning/core/nulib/main_provisioning/commands/my_domain/
    +
    +# 2. Add domain commands
    +my_domain/
    +├── command1.nu
    +├── command2.nu
    +└── command3.nu
    +
    +# 3. Register domain in dispatcher
    +
    +# 4. Add domain help category
    +
    +# Domain is now available with all commands
    +
    +

    Command Aliases

    +

    The CLI supports command aliases for common operations:

    +
    # Defined in user configuration
    +# ~/.config/provisioning/user_config.yaml
    +aliases:
    +  deploy: "cluster deploy"
    +  list-all: "server list && taskserv list && cluster list"
    +  quick-test: "test quick kubernetes"
    +
    +# Usage
    +provisioning deploy my-cluster     # Expands to: cluster deploy my-cluster
    +provisioning list-all              # Runs multiple commands
    +provisioning quick-test            # Runs test with preset
    +
    +

    Best Practices

    +

    Using Shortcuts Effectively

    +
    # Development workflow (frequent commands)
    +provisioning ws switch dev          # Switch to dev workspace
    +provisioning s list                 # Quick server list
    +provisioning t install postgres     # Install task service
    +provisioning cl status my-cluster   # Check cluster status
    +
    +# Production workflow (explicit commands for clarity)
    +provisioning workspace switch production
    +provisioning server create --plan large --check
    +provisioning cluster deploy critical-cluster --yes
    +
    +

    Dry-Run Before Execution

    +
    # Always check before dangerous operations
    +provisioning --check server delete old-servers
    +provisioning --check cluster delete test-cluster
    +
    +# If output looks good, run for real
    +provisioning --yes server delete old-servers
    +
    +

    Using Output Formats

    +
    # JSON output for scripting
    +provisioning --format json server list | jq '.[] | select(.status == "running")'
    +
    +# YAML output for readability
    +provisioning --format yaml cluster status my-cluster
    +
    +# Table output for humans (default)
    +provisioning server list
    +
    +

    Performance Optimizations

    +

    The modular architecture enables several performance optimizations:

    +

    Lazy Loading

    +

    Commands are loaded on-demand, reducing startup time:

    +
    # Only loads server command module when needed
    +provisioning server list    # Fast startup (loads server.nu only)
    +
    +

    Command Caching

    +

    Frequently-used commands benefit from caching:

    +
    # First run: ~200ms (loads modules, config)
    +provisioning server list
    +
    +# Subsequent runs: ~50ms (cached config, loaded modules)
    +provisioning server list
    +
    +

    Parallel Execution

    +

    Batch operations execute in parallel:

    +
    # Executes server creation in parallel (up to configured limit)
    +provisioning batch submit multi-server-workflow.ncl --parallel 10
    +
    +

    Troubleshooting

    +

    Command Not Found

    +
    Error: Unknown command 'servr'
    +Did you mean: server (s)
    +
    +

    The CLI provides helpful suggestions for typos.

    +

    Missing Workspace

    +
    Error: No active workspace
    +Please activate or create a workspace:
    +  provisioning workspace init <name>
    +  provisioning workspace switch <name>
    +
    +

    Workspace enforcement prevents accidental operations.

    +

    Permission Denied

    +
    Error: Operation requires admin permissions
    +Please run with elevated privileges or contact administrator
    +
    +

    Permission system prevents unauthorized operations.

    +

    See Also

    + +

    Configuration System

    +

    Batch Workflows

    +

    Orchestrator

    +

    Interactive Guides

    +

    Test Environment

    +

    Platform Installer

    +

    Security System

    +

    Version Management

    +

    Nushell Plugins

    +

    Provisioning includes 17 high-performance native Rust plugins for Nushell, providing 10-50x +speed improvements over HTTP APIs. Plugins handle critical functionality: templates, configuration, +encryption, orchestration, and secrets management.

    +

    Overview

    +

    Performance Benefits

    +

    Plugins provide significant performance improvements for frequently-used operations:

    +
    + + + + + +
    PluginSpeed ImprovementUse Case
    nu_plugin_tera10-15x fasterTemplate rendering
    nu_plugin_nickel5-8x fasterConfiguration processing
    nu_plugin_orchestrator30-50x fasterQuery orchestrator state
    nu_plugin_kms10x fasterEncryption/decryption
    nu_plugin_auth5x fasterAuthentication operations
    +
    +

    Installation

    +

    All plugins install automatically with Provisioning:

    +
    # Automatic installation during setup
    +provisioning install
    +
    +# Or manual installation
    +cd /path/to/provisioning
    +./scripts/install-plugins.nu
    +
    +# Verify installation
    +provisioning plugins list
    +
    +

    Plugin Management

    +
    # List installed plugins with versions
    +provisioning plugins list
    +
    +# Check plugin status
    +provisioning plugins status
    +
    +# Update all plugins
    +provisioning plugins update --all
    +
    +# Update specific plugin
    +provisioning plugins update nu_plugin_tera
    +
    +# Remove plugin
    +provisioning plugins remove nu_plugin_tera
    +
    +

    Core Plugins (Priority)

    +

    1. nu_plugin_tera

    +

    Template Rendering Engine

    +

    Nushell plugin for Tera template processing (Jinja2-style syntax).

    +
    # Install
    +provisioning plugins install nu_plugin_tera
    +
    +# Usage in Nushell
    +let template = "Hello {{ name }}!"
    +let context = { name: "World" }
    +$template | tera render $context
    +# Output: "Hello World!"
    +
    +

    Features:

    +
      +
    • Jinja2-compatible syntax
    • +
    • Built-in filters and functions
    • +
    • Template inheritance
    • +
    • Macro support
    • +
    • Custom filters via Rust
    • +
    +

    Performance: 10-15x faster than HTTP template service

    +

    Use Cases:

    +
      +
    • Generating infrastructure configurations
    • +
    • Creating dynamic scripts
    • +
    • Building deployment templates
    • +
    • Rendering documentation
    • +
    +

    Example: Generate infrastructure config:

    +
    let infra_template = "
    +{
    +  servers = [
    +    {% for server in servers %}
    +    {
    +      name = \"{{ server.name }}\"
    +      cpu = {{ server.cpu }}
    +      memory = {{ server.memory }}
    +    }
    +    {% if not loop.last %},{% endif %}
    +    {% endfor %}
    +  ]
    +}
    +"
    +
    +let servers = [
    +  { name: "web-01", cpu: 4, memory: 8 }
    +  { name: "web-02", cpu: 4, memory: 8 }
    +]
    +
    +$infra_template | tera render { servers: $servers }
    +
    +
    +

    2. nu_plugin_nickel

    +

    Nickel Configuration Plugin

    +

    Native Nickel compilation and validation in Nushell.

    +
    # Install
    +provisioning plugins install nu_plugin_nickel
    +
    +# Usage in Nushell
    +let nickel_code = '{ name = "server", cpu = 4 }'
    +$nickel_code | nickel eval
    +# Output: { name: "server", cpu: 4 }
    +
    +

    Features:

    +
      +
    • Parse and evaluate Nickel expressions
    • +
    • Type checking and validation
    • +
    • Schema enforcement
    • +
    • Merge configurations
    • +
    • Generate JSON/YAML output
    • +
    +

    Performance: 5-8x faster than CLI invocation

    +

    Use Cases:

    +
      +
    • Validate infrastructure definitions
    • +
    • Process Nickel schemas
    • +
    • Merge configuration files
    • +
    • Generate typed configurations
    • +
    +

    Example: Validate and merge configs:

    +
    let base_config = open base.ncl | nickel eval
    +let env_config = open prod-defaults.ncl | nickel eval
    +
    +let merged = $base_config | nickel merge $env_config
    +$merged | nickel validate --schema infrastructure-schema.ncl
    +
    +
    +

    3. nu_plugin_fluent

    +

    Internationalization (i18n) Plugin

    +

    Fluent translation system for multi-language support.

    +
    # Install
    +provisioning plugins install nu_plugin_fluent
    +
    +# Usage in Nushell
    +fluent load "./locales"
    +fluent set-locale "es-ES"
    +fluent get "help-infra-server-create"
    +# Output: "Crear un nuevo servidor"
    +
    +

    Features:

    +
      +
    • Load Fluent catalogs (.ftl files)
    • +
    • Dynamic locale switching
    • +
    • Pluralization support
    • +
    • Fallback chains
    • +
    • Translation coverage reports
    • +
    +

    Performance: Native Rust implementation, <1ms per translation

    +

    Use Cases:

    +
      +
    • CLI help text in multiple languages
    • +
    • Form labels and prompts
    • +
    • Error messages
    • +
    • Interactive guides
    • +
    +

    Supported Locales:

    +
      +
    • en-US (English)
    • +
    • es-ES (Spanish)
    • +
    • pt-BR (Portuguese - planned)
    • +
    • fr-FR (French - planned)
    • +
    • ja-JP (Japanese - planned)
    • +
    +

    Example: Multi-language help system:

    +
    fluent load "provisioning/locales"
    +
    +# Spanish help
    +fluent set-locale "es-ES"
    +fluent get "help-main-title"    # "SISTEMA DE PROVISIÓN"
    +
    +# English help (fallback)
    +fluent set-locale "fr-FR"
    +fluent get "help-main-title"    # Falls back to "PROVISIONING SYSTEM"
    +
    +
    +

    4. nu_plugin_secretumvault

    +

    Post-Quantum Cryptography Vault

    +

    SecretumVault integration for quantum-resistant secret storage.

    +
    # Install
    +provisioning plugins install nu_plugin_secretumvault
    +
    +# Usage in Nushell
    +secretumvault-plugin store "api-key" "secret-value"
    +let key = secretumvault-plugin retrieve "api-key"
    +secretumvault-plugin delete "api-key"
    +
    +

    Features:

    +
      +
    • CRYSTALS-Kyber encryption (post-quantum)
    • +
    • Hybrid encryption (PQC + AES-256)
    • +
    • Secure credential injection
    • +
    • Key rotation
    • +
    • Audit logging
    • +
    +

    Performance: <100ms for encrypt/decrypt operations

    +

    Use Cases:

    +
      +
    • Store infrastructure credentials
    • +
    • Manage API keys
    • +
    • Handle database passwords
    • +
    • Secure configuration values
    • +
    +

    Example: Secure credential management:

    +
    # Store credentials in vault
    +secretumvault-plugin store "aws-access-key" "AKIAIOSFODNN7EXAMPLE"
    +secretumvault-plugin store "aws-secret-key" "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
    +
    +# Retrieve for use
    +let aws_key = secretumvault-plugin retrieve "aws-access-key"
    +provisioning aws configure --access-key $aws_key
    +
    +
    +

    Performance Plugins

    +

    5. nu_plugin_orchestrator

    +

    Orchestrator State Query Plugin

    +

    High-speed queries to orchestrator state and workflow data.

    +
    # Install
    +provisioning plugins install nu_plugin_orchestrator
    +
    +# Usage in Nushell
    +orchestrator query workflows --filter status=running
    +orchestrator query tasks --limit 100
    +orchestrator query checkpoints --workflow deploy-k8s
    +
    +

    Performance: 30-50x faster than HTTP API

    +

    Queries:

    +
      +
    • Workflows (list, status, logs)
    • +
    • Tasks (state, duration, dependencies)
    • +
    • Checkpoints (recovery points)
    • +
    • History (audit trail)
    • +
    +

    Example: Monitor running workflows:

    +
    let running = orchestrator query workflows --filter status=running
    +$running | each { | w |
    +  print $"Workflow: ($w.name) - ($w.progress)%"
    +}
    +
    +
    +

    6. nu_plugin_kms

    +

    Key Management System (Encryption) Plugin

    +

    Fast encryption/decryption with KMS backends.

    +
    # Install
    +provisioning plugins install nu_plugin_kms
    +
    +# Usage in Nushell
    +let encrypted = "secret-data" | kms encrypt --algorithm aes-256-gcm
    +$encrypted | kms decrypt
    +
    +

    Performance: 10x faster than external KMS calls, 5ms encryption

    +

    Supported Algorithms:

    +
      +
    • AES-256-GCM
    • +
    • ChaCha20-Poly1305
    • +
    • Kyber (post-quantum)
    • +
    • Falcon (signatures)
    • +
    +

    Features:

    +
      +
    • Symmetric encryption
    • +
    • Key derivation (Argon2id, PBKDF2)
    • +
    • Authenticated encryption
    • +
    • HSM integration (optional)
    • +
    +

    Example: Encrypt infrastructure secrets:

    +
    let config = open infrastructure.ncl
    +let encrypted = $config | kms encrypt --key master-key
    +
    +# Decrypt when needed
    +let decrypted = $encrypted | kms decrypt --key master-key
    +$decrypted | nickel eval
    +
    +
    +

    7. nu_plugin_auth

    +

    Authentication Plugin

    +

    Multi-method authentication with keyring integration.

    +
    # Install
    +provisioning plugins install nu_plugin_auth
    +
    +# Usage in Nushell
    +let token = auth login --method jwt --provider openid
    +auth set-token $token
    +auth verify-token
    +
    +

    Performance: 5x faster local authentication

    +

    Features:

    +
      +
    • JWT token generation and validation
    • +
    • OAuth2 support
    • +
    • SAML support
    • +
    • OS keyring integration
    • +
    • MFA support
    • +
    +

    Methods:

    +
      +
    • JWT (JSON Web Tokens)
    • +
    • OAuth2 (GitHub, Google, Microsoft)
    • +
    • SAML
    • +
    • LDAP
    • +
    • Local keyring
    • +
    +

    Example: Authenticate and store credentials:

    +
    # Login and get token
    +let token = auth login --method oauth2 --provider github
    +auth set-token $token --store-keyring
    +
    +# Verify authentication
    +auth verify-token      # Check if token valid
    +auth whoami            # Show current user
    +
    +
    +

    Utility Plugins

    +

    8. nu_plugin_hashes

    +

    Cryptographic Hashing Plugin

    +

    Multiple hash algorithms for data integrity.

    +
    # Install
    +provisioning plugins install nu_plugin_hashes
    +
    +# Usage in Nushell
    +"data" | hashes sha256
    +"data" | hashes blake3
    +
    +

    Algorithms:

    +
      +
    • SHA256, SHA512
    • +
    • BLAKE3
    • +
    • MD5 (legacy)
    • +
    • SHA1 (legacy)
    • +
    +
    +

    9. nu_plugin_highlight

    +

    Syntax Highlighting Plugin

    +

    Code syntax highlighting for display and logging.

    +
    # Install
    +provisioning plugins install nu_plugin_highlight
    +
    +# Usage in Nushell
    +open script.sh | highlight --language bash
    +open config.ncl | highlight --language nickel
    +
    +

    Languages:

    +
      +
    • Bash/Shell
    • +
    • Nickel
    • +
    • YAML
    • +
    • JSON
    • +
    • Rust
    • +
    • SQL
    • +
    • Others
    • +
    +
    +

    10. nu_plugin_image

    +

    Image Processing Plugin

    +

    Image manipulation and format conversion.

    +
    # Install
    +provisioning plugins install nu_plugin_image
    +
    +# Usage in Nushell
    +open diagram.png | image resize --width 800 --height 600
    +open logo.jpg | image convert --format webp
    +
    +

    Operations:

    +
      +
    • Resize, crop, rotate
    • +
    • Format conversion
    • +
    • Compression
    • +
    • Metadata extraction
    • +
    +
    +

    11. nu_plugin_clipboard

    +

    Clipboard Management Plugin

    +

    Read/write system clipboard.

    +
    # Install
    +provisioning plugins install nu_plugin_clipboard
    +
    +# Usage in Nushell
    +"api-key" | clipboard copy
    +clipboard paste
    +
    +

    Features:

    +
      +
    • Copy to clipboard
    • +
    • Paste from clipboard
    • +
    • Manage clipboard history
    • +
    • Cross-platform support
    • +
    +
    +

    12. nu_plugin_desktop_notifications

    +

    Desktop Notifications Plugin

    +

    System notifications for long-running operations.

    +
    # Install
    +provisioning plugins install nu_plugin_desktop_notifications
    +
    +# Usage in Nushell
    +notifications notify "Deployment completed" --type success
    +notifications notify "Errors detected" --type error
    +
    +

    Features:

    +
      +
    • Success, warning, error notifications
    • +
    • Custom titles and messages
    • +
    • Sound alerts
    • +
    +
    +

    13. nu_plugin_qr_maker

    +

    QR Code Generator Plugin

    +

    Generate QR codes for configuration sharing.

    +
    # Install
    +provisioning plugins install nu_plugin_qr_maker
    +
    +# Usage in Nushell
    +" [https://example.com/config"](https://example.com/config") | qr-maker generate --output config.png
    +"workspace-setup-command" | qr-maker generate --ascii
    +
    +
    +

    14. nu_plugin_port_extension

    +

    Port/Network Utilities Plugin

    +

    Network port management and diagnostics.

    +
    # Install
    +provisioning plugins install nu_plugin_port_extension
    +
    +# Usage in Nushell
    +port-extension list-open --port 8080
    +port-extension check-available --port 9000
    +
    +
    +

    Legacy/Secondary Plugins

    +

    15. nu_plugin_kcl

    +

    KCL Configuration Plugin (DEPRECATED)

    +

    Legacy KCL support (Nickel is preferred).

    +

    ⚠️ Status: Deprecated - Use nu_plugin_nickel instead

    +
    # Install
    +provisioning plugins install nu_plugin_kcl
    +
    +# Usage (not recommended)
    +let config = open config.kcl | kcl eval
    +
    +
    +

    16. api_nu_plugin_kcl

    +

    KCL API Plugin (DEPRECATED)

    +

    HTTP API wrapper for KCL.

    +

    ⚠️ Status: Deprecated - Use nu_plugin_nickel instead

    +
    +

    17. _nu_plugin_inquire (Historical)

    +

    Interactive Prompts Plugin (HISTORICAL)

    +

    Old inquiry/prompt system, replaced by TypeDialog.

    +

    ⚠️ Status: Historical/archived

    +
    +

    Plugin Installation & Management

    +

    Installation Methods

    +

    Automatic with Provisioning:

    +
    provisioning install
    +# Installs all recommended plugins automatically
    +
    +

    Selective Installation:

    +
    # Install specific plugins
    +provisioning plugins install nu_plugin_tera nu_plugin_nickel nu_plugin_secretumvault
    +
    +# Install plugin category
    +provisioning plugins install --category core          # Essential plugins
    +provisioning plugins install --category performance   # Performance plugins
    +provisioning plugins install --category utilities     # Utility plugins
    +
    +

    Manual Installation:

    +
    # Build and install from source
    +cd /Users/Akasha/project-provisioning/plugins/nushell-plugins/nu_plugin_tera
    +cargo install --path .
    +
    +# Then load in Nushell
    +plugin add nu_plugin_tera
    +
    +

    Configuration

    +

    Plugin Loading in Nushell:

    +
    # In env.nu or config.nu
    +plugin add nu_plugin_tera
    +plugin add nu_plugin_nickel
    +plugin add nu_plugin_secretumvault
    +plugin add nu_plugin_fluent
    +plugin add nu_plugin_auth
    +plugin add nu_plugin_kms
    +plugin add nu_plugin_orchestrator
    +
    +# And more...
    +
    +

    Plugin Status:

    +
    # Check all plugins
    +provisioning plugins list
    +
    +# Check specific plugin
    +provisioning plugins status nu_plugin_tera
    +
    +# Detailed information
    +provisioning plugins info nu_plugin_tera --verbose
    +
    +

    Best Practices

    +

    Use Plugins When

    +
      +
    • ✅ Processing large amounts of data (templates, config)
    • +
    • ✅ Sensitive operations (encryption, secrets)
    • +
    • ✅ Frequent operations (queries, auth)
    • +
    • ✅ Performance critical paths
    • +
    +

    Fallback to HTTP API When

    +
      +
    • ❌ Plugin not installed (automatic fallback)
    • +
    • ❌ Older Nushell version incompatible
    • +
    • ❌ Special features only in API
    • +
    +
    # Plugins have automatic fallback
    +# If nu_plugin_tera not available, uses HTTP API automatically
    +let template = "{{ name }}" | tera render { name: "test" }
    +# Works either way
    +
    +

    Troubleshooting

    +

    Plugin Not Loading

    +
    # Reload Nushell
    +nu
    +
    +# Check plugin errors
    +plugin list --debug
    +
    +# Reinstall plugin
    +provisioning plugins remove nu_plugin_tera
    +provisioning plugins install nu_plugin_tera
    +
    +

    Performance Issues

    +
    # Check plugin status
    +provisioning plugins status
    +
    +# Monitor plugin usage
    +provisioning monitor plugins
    +
    +# Profile plugin calls
    +provisioning profile nu_plugin_tera
    +
    + + +

    Multilingual Support

    +

    Provisioning includes comprehensive multilingual support for help text, forms, and +interactive interfaces. The system uses Mozilla Fluent for translations with automatic +fallback chains.

    +

    Supported Languages

    +

    Currently supported with 100% translation coverage:

    +
    + + + + + +
    LanguageLocaleStatusStrings
    English (US)en-US✅ Complete245
    Spanish (Spain)es-ES✅ Complete245
    Portuguese (Brazil)pt-BR🔄 Planned-
    French (France)fr-FR🔄 Planned-
    Japanese (Japan)ja-JP🔄 Planned-
    +
    +

    Coverage Requirement: 95% of strings translated to critical locales (en-US, es-ES).

    +

    Using Different Languages

    +

    Setting Language via Environment Variable

    +

    Select language using the LANG environment variable:

    +
    # English (default)
    +provisioning help infrastructure
    +
    +# Spanish
    +LANG=es_ES provisioning help infrastructure
    +
    +# Fallback to English if locale not available
    +LANG=fr_FR provisioning help infrastructure
    +# Output: English (en-US) [fallback chain]
    +
    +

    Locale Resolution

    +

    Language selection follows this order:

    +
      +
    1. Check LANG environment variable (e.g., es_ES)
    2. +
    3. Match to configured locale (es-ES)
    4. +
    5. If not found, follow fallback chain (es-ES → en-US)
    6. +
    7. Default to en-US if no match
    8. +
    +

    Format: LANG uses underscore (es_ES), locales use hyphen (es-ES). System handles conversion automatically.

    +

    Translation System Architecture

    +

    Mozilla Fluent Format

    +

    All translations use Mozilla Fluent (.ftl files), which provides:

    +
      +
    • Simple Syntax: Key-value pairs with rich formatting
    • +
    • Pluralization: Support for language-specific plural rules
    • +
    • Attributes: Multiple values per key for contextual translation
    • +
    • Automatic Fallback: Chain resolution when keys missing
    • +
    • Extensibility: Support for custom formatting functions
    • +
    +

    Example Fluent syntax:

    +
    help-infra-server-create = Create a new server
    +form-database_type-option-postgres = PostgreSQL (Recommended)
    +form-replicas-prompt = Number of replicas
    +form-replicas-help = How many replicas to run
    +
    +

    File Organization

    +
    provisioning/locales/
    +├── i18n-config.toml              # Central i18n configuration
    +├── en-US/                         # English base language
    +│   ├── help.ftl                  # Help system strings (65 keys)
    +│   └── forms.ftl                 # Form strings (180 keys)
    +└── es-ES/                         # Spanish translations
    +    ├── help.ftl                  # Help system translations
    +    └── forms.ftl                 # Form translations
    +
    +

    String Categories:

    +
      +
    • help.ftl (65 strings): Help text, menu items, category descriptions, error messages
    • +
    • forms.ftl (180 strings): Form labels, placeholders, help text, options
    • +
    +

    Help System Translations

    +

    Help system provides multi-language support for all command categories:

    +

    Categories Covered

    +
    + + + + + + + + +
    CategoryCoverageExample Keys
    Infrastructure✅ 21 stringsserver commands, taskserv, clusters, VMs
    Orchestration✅ 18 stringsworkflows, batch operations, orchestrator
    Workspace✅ Completeworkspace management, templates
    Setup✅ Completesystem configuration, initialization
    Authentication✅ CompleteJWT, MFA, sessions
    Platform✅ Completeservices, Control Center, MCP
    Development✅ Completemodules, versions, plugins
    Utilities✅ Completeproviders, SOPS, SSH
    +
    +

    Example: Help Output in Spanish

    +
    $ LANG=es_ES provisioning help infrastructure
    +SERVIDOR E INFRAESTRUCTURA
    +Gestión de servidores, taskserv, clusters, VM e infraestructura.
    +
    +COMANDOS DE SERVIDOR
    +  server create         Crear un nuevo servidor
    +  server delete         Eliminar un servidor existente
    +  server list           Listar todos los servidores
    +  server status         Ver estado de un servidor
    +
    +COMANDOS DE TASKSERV
    +  taskserv create       Crear un nuevo servicio de tarea
    +  taskserv delete       Eliminar un servicio de tarea
    +  taskserv configure    Configurar un servicio de tarea
    +  taskserv status       Ver estado del servicio de tarea
    +
    +

    Form Translations (TypeDialog Integration)

    +

    Interactive forms automatically use the selected language:

    +

    Setup Form

    +

    Project information, database configuration, API settings, deployment options, security, etc.

    +
    # English form
    +$ provisioning setup profile
    +📦 Project name: [my-app]
    +
    +# Spanish form
    +$ LANG=es_ES provisioning setup profile
    +📦 Nombre del proyecto: [mi-app]
    +
    +

    Translated Form Fields

    +

    Each form field has four translated strings:

    +
    + + + + +
    ComponentPurposeExample en-USExample es-ES
    promptField label“Project name”“Nombre del proyecto”
    helpHelper text“Project name (lowercase alphanumeric with hyphens)”“Nombre del proyecto (minúsculas alfanuméricas con guiones)”
    placeholderExample value“my-app”“mi-app”
    optionDropdown choice“PostgreSQL (Recommended)”“PostgreSQL (Recomendado)”
    +
    +

    Supported Forms

    +
      +
    • Unified Setup: Project info, database, API, deployment, security, terms
    • +
    • Authentication: Login form (username, password, remember me, forgot password)
    • +
    • Setup Wizard: Quick/standard/advanced modes
    • +
    • MFA Enrollment: TOTP, SMS, backup codes, device management
    • +
    • Infrastructure: Delete confirmations, resource prompts, data retention
    • +
    +

    Fallback Chain Configuration

    +

    When a translation string is missing, the system automatically falls back to the parent locale:

    +
    # From i18n-config.toml
    +[fallback_chains]
    +es-ES = ["en-US"]
    +pt-BR = ["pt-PT", "es-ES", "en-US"]
    +fr-FR = ["en-US"]
    +ja-JP = ["en-US"]
    +
    +

    Resolution Example:

    +
      +
    1. User requests Spanish (es-ES): provisioning help
    2. +
    3. Look for string in es-ES/help.ftl
    4. +
    5. If missing, fallback to en-US (help-infra-server-create = "Create a new server")
    6. +
    7. If still missing, use literal key name as display text
    8. +
    +

    Adding New Languages

    +

    1. Add Locale Configuration

    +

    Edit provisioning/locales/i18n-config.toml:

    +
    [locales.pt-BR]
    +name = "Portuguese (Brazil)"
    +direction = "ltr"
    +plurals = 2
    +decimal_separator = ","
    +thousands_separator = "."
    +date_format = "DD/MM/YYYY"
    +
    +[fallback_chains]
    +pt-BR = ["pt-PT", "es-ES", "en-US"]
    +
    +

    Configuration Fields:

    +
      +
    • name: Display name of locale
    • +
    • direction: Text direction (ltr/rtl)
    • +
    • plurals: Number of plural forms (1-6 depending on language)
    • +
    • decimal_separator: Locale-specific decimal format
    • +
    • thousands_separator: Number formatting
    • +
    • date_format: Locale-specific date format
    • +
    • currency_symbol: Currency symbol (optional)
    • +
    • currency_position: “prefix” or “suffix” (optional)
    • +
    +

    2. Create Locale Directory

    +
    mkdir -p provisioning/locales/pt-BR
    +
    +

    3. Create Translation Files

    +

    Copy English files as base:

    +
    cp provisioning/locales/en-US/help.ftl provisioning/locales/pt-BR/help.ftl
    +cp provisioning/locales/en-US/forms.ftl provisioning/locales/pt-BR/forms.ftl
    +
    +

    4. Translate Strings

    +

    Edit pt-BR/help.ftl and pt-BR/forms.ftl with translated content. Follow naming conventions:

    +
    # Help strings: help-{category}-{element}
    +help-infra-server-create = Criar um novo servidor
    +
    +# Form prompts: form-{element}-prompt
    +form-project_name-prompt = Nome do projeto
    +
    +# Form help: form-{element}-help
    +form-project_name-help = Nome do projeto (alfanumérico minúsculo com hífens)
    +
    +# Form options: form-{element}-option-{value}
    +form-database_type-option-postgres = PostgreSQL (Recomendado)
    +
    +

    5. Validate Translation

    +

    Check coverage and syntax:

    +
    # Validate Fluent file syntax
    +provisioning i18n validate --locale pt-BR
    +
    +# Check translation coverage
    +provisioning i18n coverage --locale pt-BR
    +
    +# List missing translations
    +provisioning i18n missing --locale pt-BR
    +
    +

    6. Update Documentation

    +

    Document new language support in translations_status.md.

    +

    Validation & Quality Standards

    +

    Translation Quality Rules

    +

    Naming Conventions (REQUIRED):

    +
      +
    • Help strings: help-{category}-{element} (e.g., help-infra-server-create)
    • +
    • Form prompts: form-{element}-prompt (e.g., form-project_name-prompt)
    • +
    • Form help: form-{element}-help (e.g., form-project_name-help)
    • +
    • Form placeholders: form-{element}-placeholder
    • +
    • Form options: form-{element}-option-{value} (e.g., form-database_type-option-postgres)
    • +
    • Section headers: section-{name}-title
    • +
    +

    Coverage Requirements:

    +
      +
    • Critical Locales: en-US, es-ES require 95% minimum coverage
    • +
    • Warning Threshold: 80% triggers warnings during build
    • +
    • Incomplete Locales: 0% coverage allowed (inherit via fallback chain)
    • +
    +

    Testing Localization

    +

    Test translations via different methods:

    +
    # Test help system in Spanish
    +LANG=es_ES provisioning help infrastructure
    +
    +# Test form display in Spanish
    +LANG=es_ES provisioning setup profile
    +
    +# Validate all translation files
    +provisioning i18n validate --all
    +
    +# Generate coverage report
    +provisioning i18n coverage --format=json > coverage.json
    +
    +

    Implementation Details

    +

    TypeDialog Integration

    +

    TypeDialog forms reference Fluent keys via locales_path configuration:

    +
    # In form.toml
    +locales_path = "../../../locales"
    +
    +[[elements]]
    +name = "project_name"
    +prompt = "form-project_name-prompt"    # References: locales/*/forms.ftl
    +help = "form-project_name-help"
    +placeholder = "form-project_name-placeholder"
    +
    +

    Resolution Process:

    +
      +
    1. Read locales_path from form configuration
    2. +
    3. Check LANG environment variable (converted to locale format: es_ES → es-ES)
    4. +
    5. Load Fluent file (e.g., locales/es-ES/forms.ftl)
    6. +
    7. Resolve string key → value
    8. +
    9. If key missing, follow fallback chain
    10. +
    11. If still missing, use literal key name
    12. +
    +

    Help System Integration

    +

    Help system uses Fluent catalog loader in provisioning/core/nulib/main_provisioning/help_system.nu:

    +
    # Load help strings for current locale
    +let help_strings = (load_fluent_catalog $locale)
    +
    +# Display localized help text
    +print ($help_strings | get help-infrastructure-title)
    +
    +

    Maintenance

    +

    Adding New Translations

    +

    When new help text or forms are added:

    +
      +
    1. Add English strings to en-US/help.ftl or en-US/forms.ftl
    2. +
    3. Add Spanish translations to es-ES/help.ftl or es-ES/forms.ftl
    4. +
    5. Run validation: provisioning i18n validate
    6. +
    7. Update translations_status.md with new counts
    8. +
    9. If coverage drops below 95%, fix before release
    10. +
    +

    Updating Existing Translations

    +

    To modify existing translated string:

    +
      +
    1. Edit key in en-US/*.ftl and all locale-specific files
    2. +
    3. Run validation to ensure consistency
    4. +
    5. Test in both languages: LANG=en_US provisioning help and LANG=es_ES provisioning help
    6. +
    +

    Current Translation Status

    +

    Last Updated: 2026-01-13 | Status: 100% Complete

    +

    String Count

    +
    + + + +
    Componenten-USes-ESStatus
    Help System6565✅ Complete
    Forms180180✅ Complete
    Total245245✅ Complete
    +
    +

    Features Enabled

    +
    + + + + + + +
    FeatureStatusPurpose
    Pluralization✅ EnabledSupport language-specific plural rules
    Number Formatting✅ EnabledLocale-specific number/currency formatting
    Date Formatting✅ EnabledLocale-specific date display
    Fallback Chains✅ EnabledAutomatic fallback to English
    Gender Agreement⚠️ DisabledNot needed for Spanish help strings
    RTL Support⚠️ DisabledNo RTL languages configured yet
    +
    + + +

    + Provisioning Logo +

    +

    + Provisioning +

    +

    Operations

    +

    Production deployment, monitoring, maintenance, and operational best practices for running +Provisioning infrastructure at scale.

    +

    Overview

    +

    This section covers everything needed to operate Provisioning in production:

    +
      +
    • Deployment strategies - Single-cloud, multi-cloud, hybrid with zero-downtime updates
    • +
    • Service management - Microservice lifecycle, scaling, health checks, failover
    • +
    • Observability - Metrics (Prometheus), logs (ELK), traces (Jaeger), dashboards
    • +
    • Incident response - Detection, triage, remediation, postmortem automation
    • +
    • Backup & recovery - Strategies, testing, disaster recovery, point-in-time restore
    • +
    • Performance optimization - Profiling, caching, scaling, resource optimization
    • +
    • Troubleshooting - Debugging, log analysis, diagnostic tools, support
    • +
    +

    Operational Guides

    +

    Deployment and Management

    +
      +
    • +

      Deployment Modes - Single-cloud, multi-cloud, hybrid, canary, blue-green, rolling updates with zero downtime.

      +
    • +
    • +

      Service Management - Microservice lifecycle, scaling policies, health checks, graceful shutdown, rolling restarts.

      +
    • +
    • +

      Platform Installer - TUI and unattended installation, provider setup, workspace creation, post-install configuration.

      +
    • +
    +

    Monitoring and Observability

    +

    + Monitoring Stack Prometheus Grafana Fluentd ElasticSearch Alertmanager +

    +
      +
    • +

      Monitoring Setup - Prometheus metrics, Grafana +dashboards, alerting rules, SLO monitoring, 12 microservices

      +
    • +
    • +

      Logging and Analysis - Centralized logging with ELK Stack, log aggregation, filtering, searching, performance analysis.

      +
    • +
    • +

      Distributed Tracing - Jaeger integration, span collection, trace visualization, latency analysis across microservices.

      +
    • +
    +

    Resilience and Recovery

    +
      +
    • +

      Incident Response - Severity +levels, triage, investigation, mitigation, escalation, postmortem

      +
    • +
    • +

      Backup Strategies - Full, incremental, PITR backups with RTO/RPO targets, testing procedures, recovery workflows.

      +
    • +
    • +

      Disaster Recovery - DR planning, failover procedures, failback strategies, RTO/RPO targets, testing schedules.

      +
    • +
    +

    + Disaster Recovery Topology Multi-Region Failover Primary Backup +

    + +

    Troubleshooting

    +
      +
    • +

      Troubleshooting Guide - Common issues, debugging techniques, log analysis, diagnostic tools, support resources.

      +
    • +
    • +

      Platform Health - Health check procedures, system status, component status, SLO metrics, error budgets.

      +
    • +
    +

    Operational Workflows

    +

    I’m deploying to production

    +

    Follow: Deployment ModesService ManagementMonitoring Setup

    +

    I need to monitor infrastructure

    +

    Setup: Monitoring Setup for metrics, Logging and Analysis for logs, Distributed Tracing for traces

    +

    I’m experiencing an incident

    +

    Execute: Incident Response with triage, investigation, mitigation, escalation

    +

    I need to backup and recover

    +

    Implement: Backup Strategies with testing, Disaster Recovery for major outages

    +

    I need to optimize performance

    +

    Follow: Performance Optimization for profiling and tuning

    +

    I need help troubleshooting

    +

    Consult: Troubleshooting Guide for common issues and solutions

    +

    Deployment Architecture

    +
    Development
    +  ↓
    +Staging (test all)
    +  ↓
    +Canary (1% traffic)
    +  ↓
    +Rolling (increase % gradually)
    +  ↓
    +Production (100%)
    +
    +

    SLO Targets

    +
    + + + + + +
    ServiceAvailabilityP99 LatencyError Budget
    API Gateway99.99%<100ms4m 26s/month
    Orchestrator99.9%<500ms43m 46s/month
    Control-Center99.95%<300ms21m 56s/month
    Detector99.5%<2s3h 36s/month
    All Others99.9%<1s43m 46s/month
    +
    +

    Monitoring Stack

    +
      +
    • Metrics - Prometheus (15s scrape interval, 15d retention)
    • +
    • Logs - ELK Stack (Elasticsearch, Logstash, Kibana) with 30d retention
    • +
    • Traces - Jaeger (sampling 10%, 24h retention)
    • +
    • Dashboards - Grafana with pre-built dashboards per microservice
    • +
    • Alerting - AlertManager with escalation rules and notification channels
    • +
    +

    Operational Commands

    +
    # Check system health
    +provisioning status health
    +
    +# View metrics
    +provisioning metrics view --service orchestrator
    +
    +# Check SLO status
    +provisioning slo status
    +
    +# Run diagnostics
    +provisioning diagnose system
    +
    +# Backup infrastructure
    +provisioning backup create --name daily-$(date +%Y%m%d)
    +
    +# Restore from backup
    +provisioning backup restore --backup-id backup-id
    +
    + +
      +
    • Architecture → See provisioning/docs/src/architecture/
    • +
    • Features → See provisioning/docs/src/features/
    • +
    • Development → See provisioning/docs/src/development/
    • +
    • Security → See provisioning/docs/src/security/
    • +
    • Examples → See provisioning/docs/src/examples/
    • +
    +

    Deployment Modes

    +

    The Provisioning platform supports three deployment modes designed for different operational +contexts: interactive TUI for guided setup, headless CLI for automation, and unattended mode +for CI/CD pipelines.

    +

    Overview

    +

    Deployment modes determine how the platform installer and orchestrator interact with the environment:

    +
    + + + +
    ModeUse CaseUser InteractionConfigurationRollback
    Interactive TUIFirst-time setup, explorationFull interactive terminal UIGuided wizardManual intervention
    Headless CLIScripted automationCommand-line flags onlyPre-configured filesAutomatic checkpoint
    UnattendedCI/CD pipelinesZero interactionConfig file requiredAutomatic rollback
    +
    +

    Interactive TUI Mode

    +

    Beautiful terminal user interface for guided platform installation and configuration.

    +

    When to Use

    +
      +
    • First-time platform installation
    • +
    • Exploring configuration options
    • +
    • Learning platform features
    • +
    • Development and testing environments
    • +
    • Manual infrastructure provisioning
    • +
    +

    Features

    +

    Seven interactive screens with real-time validation:

    +
      +
    1. Welcome Screen - Platform overview and prerequisites check
    2. +
    3. Deployment Mode Selection - Solo, MultiUser, CICD, Enterprise
    4. +
    5. Component Selection - Choose platform services to install
    6. +
    7. Configuration Builder - Interactive settings editor
    8. +
    9. Provider Setup - Cloud provider credentials and configuration
    10. +
    11. Review and Confirm - Summary before installation
    12. +
    13. Installation Progress - Real-time tracking with checkpoint recovery
    14. +
    +

    Starting Interactive Mode

    +
    # Launch interactive installer
    +provisioning-installer
    +
    +# Or via main CLI
    +provisioning install --mode tui
    +
    + +
    Tab/Shift+Tab - Navigate fields
    +Enter - Select/confirm
    +Esc - Cancel/go back
    +Arrow keys - Navigate lists
    +Space - Toggle checkboxes
    +Ctrl+C - Exit installer
    +
    +

    Headless CLI Mode

    +

    Command-line interface for scripted automation without graphical interface.

    +

    When to Use

    +
      +
    • Automated deployment scripts
    • +
    • Remote server installation via SSH
    • +
    • Reproducible infrastructure provisioning
    • +
    • Configuration management systems
    • +
    • Batch deployments across multiple servers
    • +
    +

    Features

    +
      +
    • Non-interactive installation
    • +
    • Configuration via command-line flags
    • +
    • Pre-validation of all inputs
    • +
    • Structured JSON/YAML output
    • +
    • Exit codes for script integration
    • +
    • Checkpoint-based recovery
    • +
    +

    Command Syntax

    +
    provisioning-installer --headless \
    +  --mode <sol| o multiuse| r cic| d enterprise> \
    +  --components <comma-separated-list> \
    +  --storage-path <path> \
    +  --database <backend> \
    +  --log-level <level> \
    +  [--yes] \
    +  [--config <file>]
    +
    +

    Example Deployments

    +

    Solo developer setup:

    +
    provisioning-installer --headless \
    +  --mode solo \
    +  --components orchestrator,control-center \
    +  --yes
    +
    +

    CI/CD pipeline deployment:

    +
    provisioning-installer --headless \
    +  --mode cicd \
    +  --components orchestrator,vault-service \
    +  --database surrealdb \
    +  --yes
    +
    +

    Enterprise production deployment:

    +
    provisioning-installer --headless \
    +  --mode enterprise \
    +  --config /etc/provisioning/enterprise.toml \
    +  --yes
    +
    +

    Unattended Mode

    +

    Zero-interaction deployment for fully automated CI/CD pipelines.

    +

    When to Use

    +
      +
    • Continuous integration pipelines
    • +
    • Continuous deployment workflows
    • +
    • Infrastructure as Code provisioning
    • +
    • Automated testing environments
    • +
    • Container image builds
    • +
    • Cloud instance initialization
    • +
    +

    Requirements

    +
      +
    1. Configuration file must exist and be valid
    2. +
    3. All required dependencies must be installed
    4. +
    5. Sufficient system resources must be available
    6. +
    7. Network connectivity to required services
    8. +
    9. Appropriate file system permissions
    10. +
    +

    Command Syntax

    +
    provisioning-installer --unattended --config <config-file>
    +
    +

    Example CI/CD Integrations

    +

    GitHub Actions workflow:

    +
    name: Deploy Provisioning Platform
    +on:
    +  push:
    +    branches: [main]
     
     jobs:
    -  test:
    +  deploy:
         runs-on: ubuntu-latest
         steps:
           - uses: actions/checkout@v3
     
    -      - name: Install Nickel
    +      - name: Install prerequisites
             run: |
    -          curl -fsSL https://releases.nickel-lang.org/install.sh | bash
    -          echo "$HOME/.nickel/bin" >> $GITHUB_PATH
    +          curl -sSL  [https://install.nushell.sh](https://install.nushell.sh) | sh
    +          curl -sSL  [https://install.nickel-lang.org](https://install.nickel-lang.org) | sh
     
    -      - name: Install Nushell
    +      - name: Deploy provisioning platform
    +        env:
    +          PROVISIONING_DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
    +          UPCLOUD_API_TOKEN: ${{ secrets.UPCLOUD_TOKEN }}
             run: |
    -          curl -L https://github.com/nushell/nushell/releases/download/0.107.1/nu-0.107.1-x86_64-unknown-linux-gnu.tar.gz | tar xzf -
    -          sudo mv nu-0.107.1-x86_64-unknown-linux-gnu/nu /usr/local/bin/
    +          provisioning-installer --unattended --config ci-config.toml
     
    -      - name: Build core package
    +      - name: Verify deployment
             run: |
    -          nu provisioning/tools/nickel-packager.nu build --version test
    -
    -      - name: Test extension discovery
    -        run: |
    -          nu provisioning/core/cli/module-loader discover taskservs
    -
    -      - name: Validate extension syntax
    -        run: |
    -          find extensions -name "*.ncl" -exec nickel typecheck {} \;
    -
    -      - name: Test workspace creation
    -        run: |
    -          mkdir test-workspace
    -          nu provisioning/tools/workspace-init.nu test-workspace init
    -          cd test-workspace
    -          nu ../provisioning/core/cli/module-loader load taskservs . [my-app]
    -          nickel export servers.ncl
    +          curl -f  [http://localhost:8080/health](http://localhost:8080/health) | | exit 1
     
    -

    Best Practices Summary

    -

    1. Extension Design

    -
      -
    • ✅ Use descriptive names in kebab-case
    • -
    • ✅ Include comprehensive validation in schemas
    • -
    • ✅ Provide multiple profiles for different environments
    • -
    • ✅ Document all configuration options
    • -
    -

    2. Dependencies

    -
      -
    • ✅ Declare all dependencies explicitly
    • -
    • ✅ Use semantic versioning
    • -
    • ✅ Test compatibility with different versions
    • -
    -

    3. Security

    -
      -
    • ✅ Never hardcode secrets in schemas
    • -
    • ✅ Use validation to ensure secure defaults
    • -
    • ✅ Follow principle of least privilege
    • -
    -

    4. Documentation

    -
      -
    • ✅ Include comprehensive README
    • -
    • ✅ Provide usage examples
    • -
    • ✅ Document troubleshooting steps
    • -
    • ✅ Maintain changelog
    • -
    -

    5. Testing

    -
      -
    • ✅ Test extension discovery and loading
    • -
    • ✅ Validate Nickel syntax with type checking
    • -
    • ✅ Test in multiple environments
    • -
    • ✅ Include CI/CD validation
    • -
    -

    Common Issues and Solutions

    -

    Extension Not Discovered

    -

    Problem: module-loader discover doesn’t find your extension

    -

    Solutions:

    -
      -
    1. Check directory structure: extensions/taskservs/my-service/schemas/
    2. -
    3. Verify manifest.toml exists and is valid
    4. -
    5. Ensure main .ncl file has correct name
    6. -
    7. Check file permissions
    8. -
    -

    Nickel Type Errors

    -

    Problem: Nickel type checking errors in your extension

    -

    Solutions:

    -
      -
    1. Use nickel typecheck my-service.ncl to validate syntax
    2. -
    3. Check import statements are correct
    4. -
    5. Verify schema validation rules
    6. -
    7. Ensure all required fields have defaults or are provided
    8. -
    -

    Loading Failures

    -

    Problem: Extension loads but doesn’t work correctly

    -

    Solutions:

    -
      -
    1. Check generated import files: cat taskservs.ncl
    2. -
    3. Verify dependencies are satisfied
    4. -
    5. Test with minimal configuration first
    6. -
    7. Check extension manifest: cat .manifest/taskservs.yaml
    8. -
    -

    Next Steps

    -
      -
    1. Explore Examples: Look at existing extensions in extensions/ directory
    2. -
    3. Read Advanced Docs: Study the comprehensive guides: - -
    4. -
    5. Join Community: Contribute to the provisioning system
    6. -
    7. Share Extensions: Publish useful extensions for others
    8. -
    -

    Support

    -
      -
    • Documentation: Package and Loader System Guide
    • -
    • Templates: Use ./provisioning/tools/create-extension.nu list-templates
    • -
    • Validation: Use ./provisioning/tools/create-extension.nu validate <path>
    • -
    • Examples: Check provisioning/examples/ directory
    • -
    -

    Happy extension development. 🚀

    -

    Interactive Guides and Quick Reference (v3.3.0)

    -

    🚀 Guide System Added (2025-09-30)

    -

    A comprehensive interactive guide system providing copy-paste ready commands and step-by-step walkthroughs.

    -

    Available Guides

    -

    Quick Reference:

    -
      -
    • provisioning sc - Quick command reference (fastest, no pager)
    • -
    • provisioning guide quickstart - Full command reference with examples
    • -
    -

    Step-by-Step Guides:

    -
      -
    • provisioning guide from-scratch - Complete deployment from zero to production
    • -
    • provisioning guide update - Update existing infrastructure safely
    • -
    • provisioning guide customize - Customize with layers and templates
    • -
    -

    List All Guides:

    -
      -
    • provisioning guide list - Show all available guides
    • -
    • provisioning howto - Same as guide list (shortcut)
    • -
    -

    Guide Features

    -
      -
    • Copy-Paste Ready: All commands include placeholders you can adjust
    • -
    • Complete Examples: Full workflows from start to finish
    • -
    • Best Practices: Production-ready patterns and recommendations
    • -
    • Troubleshooting: Common issues and solutions included
    • -
    • Shortcuts Reference: Comprehensive shortcuts for fast operations
    • -
    • Beautiful Rendering: Uses glow, bat, or less for formatted display
    • -
    - -

    For best viewing experience, install glow (markdown terminal renderer):

    -
    # macOS
    -brew install glow
    -
    -# Ubuntu/Debian
    -apt install glow
    -
    -# Fedora
    -dnf install glow
    -
    -# Using Go
    -go install github.com/charmbracelet/glow@latest
    -
    -

    Without glow: Guides fallback to bat (syntax highlighting) or less (pagination). -All systems: Basic pagination always works, even without external tools.

    -

    Quick Start with Guides

    -
    # Show quick reference (fastest)
    -provisioning sc
    -
    -# Show full command reference
    -provisioning guide quickstart
    -
    -# Step-by-step deployment
    -provisioning guide from-scratch
    -
    -# Update infrastructure
    -provisioning guide update
    -
    -# Customize with layers
    -provisioning guide customize
    -
    -# List all guides
    -provisioning guide list
    -
    -

    Guide Content

    -

    Quick Reference (provisioning sc)

    -
      -
    • Condensed command reference (fastest access)
    • -
    • Essential shortcuts and commands
    • -
    • Common flags and operations
    • -
    • No pager, instant display
    • -
    -

    Quickstart Guide (docs/guides/quickstart-cheatsheet.md)

    -
      -
    • Complete shortcuts reference (80+ mappings)
    • -
    • Copy-paste command examples
    • -
    • Common workflows (deploy, update, customize)
    • -
    • Debug and check mode examples
    • -
    • Output format options
    • -
    -

    From Scratch Guide (docs/guides/from-scratch.md)

    -
      -
    • Prerequisites and setup
    • -
    • Workspace initialization
    • -
    • Module discovery and configuration
    • -
    • Server deployment
    • -
    • Task service installation
    • -
    • Cluster creation
    • -
    • Verification steps
    • -
    -

    Update Guide (docs/guides/update-infrastructure.md)

    -
      -
    • Check for updates
    • -
    • Update strategies (in-place, rolling, blue-green)
    • -
    • Task service updates
    • -
    • Database migrations
    • -
    • Rollback procedures
    • -
    • Post-update verification
    • -
    -

    Customize Guide (docs/guides/customize-infrastructure.md)

    -
      -
    • Layer system explained (Core → Workspace → Infrastructure)
    • -
    • Using templates
    • -
    • Creating custom modules
    • -
    • Configuration inheritance
    • -
    • Advanced customization patterns
    • -
    -

    Access from Help System

    -

    The guide system is integrated into the help system:

    -
    # Show guide help
    -provisioning help guides
    -
    -# Help topic access
    -provisioning help guide
    -provisioning help howto
    -
    -

    Guide Shortcuts

    -
    - - - - - - - +

    Resource Requirements by Mode

    +

    Solo Mode

    +

    Minimum: 2 CPU, 4GB RAM, 20GB disk +Recommended: 4 CPU, 8GB RAM, 50GB disk

    +

    MultiUser Mode

    +

    Minimum: 4 CPU, 8GB RAM, 50GB disk +Recommended: 8 CPU, 16GB RAM, 100GB disk

    +

    CICD Mode

    +

    Minimum: 8 CPU, 16GB RAM, 100GB disk +Recommended: 16 CPU, 32GB RAM, 500GB disk

    +

    Enterprise Mode

    +

    Minimum: 16 CPU, 32GB RAM, 500GB disk +Recommended: 32+ CPU, 64GB+ RAM, 1TB+ disk

    +

    Choosing the Right Mode

    +
    Full CommandShortcuts
    sc- (quick reference, fastest)
    guideguides
    guide quickstartshortcuts, quick
    guide from-scratchscratch, start, deploy
    guide updateupgrade
    guide customizecustom, layers, templates
    guide listhowto
    + + + + + +
    ScenarioRecommended ModeRationale
    First-time installationInteractive TUIGuided setup with validation
    Manual production setupInteractive TUIReview all settings before deployment
    Ansible playbookHeadless CLIScriptable without GUI
    Remote server via SSHHeadless CLIWorks without terminal UI
    GitHub ActionsUnattendedZero interaction, strict validation
    Docker image buildUnattendedNon-interactive environment
    -

    Documentation Location

    -

    All guide markdown files are in guides/:

    +

    Best Practices

    +

    Interactive TUI Mode

      -
    • quickstart-cheatsheet.md - Quick reference
    • -
    • from-scratch.md - Complete deployment
    • -
    • update-infrastructure.md - Update procedures
    • -
    • customize-infrastructure.md - Customization patterns
    • +
    • Review all configuration screens carefully
    • +
    • Save configuration for later reuse
    • +
    • Document custom settings
    -

    Workspace Generation - Quick Reference

    -

    Updated for Nickel-based workspaces with auto-generated documentation

    -

    Quick Start: Create a Workspace

    -
    # Interactive mode (recommended)
    -provisioning workspace init
    -
    -# Non-interactive mode with explicit path
    -provisioning workspace init my_workspace /path/to/my_workspace
    -
    -# With activation
    -provisioning workspace init my_workspace /path/to/my_workspace --activate
    -
    -

    What Gets Created Automatically

    -

    When you run provisioning workspace init, the system creates:

    -
    my_workspace/
    -├── config/
    -│   ├── config.ncl           # Master Nickel configuration
    -│   ├── providers/           # Provider configurations
    -│   └── platform/            # Platform service configs
    -│
    -├── infra/
    -│   └── default/
    -│       ├── main.ncl         # Infrastructure definition
    -│       └── servers.ncl      # Server configurations
    -│
    -├── docs/                    # ✨ AUTO-GENERATED GUIDES
    -│   ├── README.md           # Workspace overview
    -│   ├── deployment-guide.md # Step-by-step deployment
    -│   ├── configuration-guide.md # Configuration reference
    -│   └── troubleshooting.md  # Common issues & solutions
    -│
    -├── .providers/
    -├── .kms/
    -├── .provisioning/
    -└── workspace.nu            # Utility scripts
    -
    -

    Key Files Created

    -

    Master Configuration: config/config.ncl

    -
    {
    -  workspace = {
    -    name = "my_workspace",
    -    path = "/path/to/my_workspace",
    -    description = "Workspace: my_workspace",
    -    metadata = {
    -      owner = "your_username",
    -      created = "2025-01-07T19:30:00Z",
    -      environment = "development",
    -    },
    -  },
    -
    -  providers = {
    -    local = {
    -      name = "local",
    -      enabled = true,
    -      workspace = "my_workspace",
    -      auth = { interface = "local" },
    -      paths = {
    -        base = ".providers/local",
    -        cache = ".providers/local/cache",
    -        state = ".providers/local/state",
    -      },
    -    },
    -  },
    -}
    -
    -

    Infrastructure: infra/default/main.ncl

    -
    {
    -  workspace_name = "my_workspace",
    -  infrastructure = "default",
    -  servers = [
    -    {
    -      hostname = "my-workspace-server-0",
    -      provider = "local",
    -      plan = "1xCPU-2 GB",
    -      zone = "local",
    -      storages = [{total = 25}],
    -    },
    -  ],
    -}
    -
    -

    Auto-Generated Guides

    -

    Every workspace includes 4 auto-generated guides in the docs/ directory:

    -
    - - - - +

    Headless CLI Mode

    +
      +
    • Test configuration on development environment first
    • +
    • Use --check flag for dry-run validation
    • +
    • Store configurations in version control
    • +
    • Use environment variables for sensitive data
    • +
    +

    Unattended Mode

    +
      +
    • Validate configuration files extensively before CI/CD deployment
    • +
    • Test rollback behavior in non-production environments
    • +
    • Monitor installation logs in real-time
    • +
    • Set up alerting for installation failures
    • +
    • Use idempotent operations to allow retry
    • +
    + + +

    Service Management

    +

    Managing the nine core platform services that power the Provisioning infrastructure automation platform.

    +

    Platform Services Overview

    +

    The platform consists of nine microservices providing execution, management, and supporting infrastructure:

    +
    GuideContent
    README.mdWorkspace overview, quick start, and structure
    deployment-guide.mdStep-by-step deployment for your infrastructure
    configuration-guide.mdConfiguration options specific to your setup
    troubleshooting.mdSolutions for common issues
    + + + + + + + + +
    ServicePurposePortLanguageStatus
    orchestratorWorkflow execution and task scheduling8080Rust + NushellProduction
    control-centerBackend management API with RBAC8081RustProduction
    control-center-uiWeb-based management interface8082WebProduction
    mcp-serverAI-powered configuration assistance8083NushellActive
    ai-serviceMachine learning and anomaly detection8084RustActive
    vault-serviceSecrets management and KMS8085RustProduction
    extension-registryOCI registry for extensions8086RustPlanned
    api-gatewayUnified REST API routing8087RustPlanned
    provisioning-daemonBackground service coordination8088RustDevelopment
    -

    These guides are customized for your workspace’s:

    +

    Service Lifecycle Management

    +

    Starting Services

    +

    Systemd management (production):

    +
    # Start individual service
    +sudo systemctl start provisioning-orchestrator
    +
    +# Start all platform services
    +sudo systemctl start provisioning-*
    +
    +# Enable automatic start on boot
    +sudo systemctl enable provisioning-orchestrator
    +sudo systemctl enable provisioning-control-center
    +sudo systemctl enable provisioning-vault-service
    +
    +

    Manual start (development):

    +
    # Orchestrator
    +cd provisioning/platform/crates/orchestrator
    +cargo run --release
    +
    +# Control Center
    +cd provisioning/platform/crates/control-center
    +cargo run --release
    +
    +# MCP Server
    +cd provisioning/platform/crates/mcp-server
    +nu run.nu
    +
    +

    Stopping Services

    +
    # Stop individual service
    +sudo systemctl stop provisioning-orchestrator
    +
    +# Stop all platform services
    +sudo systemctl stop provisioning-*
    +
    +# Graceful shutdown with 30-second timeout
    +sudo systemctl stop --timeout 30 provisioning-orchestrator
    +
    +

    Restarting Services

    +
    # Restart after configuration changes
    +sudo systemctl restart provisioning-orchestrator
    +
    +# Reload configuration without restart
    +sudo systemctl reload provisioning-control-center
    +
    +

    Checking Service Status

    +
    # Status of all services
    +systemctl status provisioning-*
    +
    +# Detailed status
    +provisioning platform status
    +
    +# Health check endpoints
    +curl  [http://localhost:8080/health](http://localhost:8080/health)  # Orchestrator
    +curl  [http://localhost:8081/health](http://localhost:8081/health)  # Control Center
    +curl  [http://localhost:8085/health](http://localhost:8085/health)  # Vault Service
    +
    +

    Service Configuration

    +

    Configuration Files

    +

    Each service reads configuration from hierarchical sources:

    +
    /etc/provisioning/config.toml           # System defaults
    +~/.config/provisioning/user_config.yaml # User overrides
    +workspace/config/provisioning.yaml      # Workspace config
    +
    +

    Orchestrator Configuration

    +
    # /etc/provisioning/orchestrator.toml
    +[server]
    +host = "0.0.0.0"
    +port = 8080
    +workers = 8
    +
    +[storage]
    +persistence_dir = "/var/lib/provisioning/orchestrator"
    +checkpoint_interval = 30
    +
    +[execution]
    +max_parallel_tasks = 100
    +retry_attempts = 3
    +retry_backoff = "exponential"
    +
    +[api]
    +enable_rest = true
    +enable_grpc = false
    +auth_required = true
    +
    +

    Control Center Configuration

    +
    # /etc/provisioning/control-center.toml
    +[server]
    +host = "0.0.0.0"
    +port = 8081
    +
    +[auth]
    +jwt_algorithm = "RS256"
    +access_token_ttl = 900
    +refresh_token_ttl = 604800
    +
    +[rbac]
    +policy_dir = "/etc/provisioning/policies"
    +reload_interval = 60
    +
    +

    Vault Service Configuration

    +
    # /etc/provisioning/vault-service.toml
    +[vault]
    +backend = "secretumvault"
    +url = " [http://localhost:8200"](http://localhost:8200")
    +token_env = "VAULT_TOKEN"
    +
    +[kms]
    +envelope_encryption = true
    +key_rotation_days = 90
    +
    +

    Service Dependencies

    +

    Understanding service dependencies for proper startup order:

    +
    Database (SurrealDB)
    +  ↓
    +orchestrator (requires database)
    +  ↓
    +vault-service (requires orchestrator)
    +  ↓
    +control-center (requires orchestrator + vault)
    +  ↓
    +control-center-ui (requires control-center)
    +  ↓
    +mcp-server (requires control-center)
    +  ↓
    +ai-service (requires mcp-server)
    +
    +

    Systemd handles dependencies automatically:

    +
    # /etc/systemd/system/provisioning-control-center.service
    +[Unit]
    +Description=Provisioning Control Center
    +After=provisioning-orchestrator.service
    +Requires=provisioning-orchestrator.service
    +
    +

    Service Health Monitoring

    +

    Health Check Endpoints

    +

    All services expose /health endpoints:

    +
    # Check orchestrator health
    +curl  [http://localhost:8080/health](http://localhost:8080/health)
    +
    +# Expected response
    +{
    +  "status": "healthy",
    +  "version": "5.0.0",
    +  "uptime_seconds": 3600,
    +  "database": "connected",
    +  "active_workflows": 5,
    +  "queued_tasks": 12
    +}
    +
    +

    Automated Health Monitoring

    +

    Use systemd watchdog for automatic restart on failure:

    +
    # /etc/systemd/system/provisioning-orchestrator.service
    +[Service]
    +WatchdogSec=30
    +Restart=on-failure
    +RestartSec=10
    +
    +

    Monitor with provisioning CLI:

    +
    # Continuous health monitoring
    +provisioning platform monitor --interval 5
    +
    +# Alert on unhealthy services
    +provisioning platform monitor --alert-email [ops@example.com](mailto:ops@example.com)
    +
    +

    Log Management

    +

    Log Locations

    +

    Systemd services log to journald:

    +
    # View orchestrator logs
    +sudo journalctl -u provisioning-orchestrator -f
    +
    +# View last hour of logs
    +sudo journalctl -u provisioning-orchestrator --since "1 hour ago"
    +
    +# View errors only
    +sudo journalctl -u provisioning-orchestrator -p err
    +
    +# Export logs to file
    +sudo journalctl -u provisioning-* > platform-logs.txt
    +
    +

    File-based logs:

    +
    /var/log/provisioning/orchestrator.log
    +/var/log/provisioning/control-center.log
    +/var/log/provisioning/vault-service.log
    +
    +

    Log Rotation

    +

    Configure logrotate for file-based logs:

    +
    # /etc/logrotate.d/provisioning
    +/var/log/provisioning/*.log {
    +    daily
    +    rotate 30
    +    compress
    +    delaycompress
    +    missingok
    +    notifempty
    +    create 0644 provisioning provisioning
    +    sharedscripts
    +    postrotate
    +        systemctl reload provisioning-* | | true
    +    endscript
    +}
    +
    +

    Log Levels

    +

    Configure log verbosity:

    +
    # Set log level via environment
    +export PROVISIONING_LOG_LEVEL=debug
    +sudo systemctl restart provisioning-orchestrator
    +
    +# Or in configuration
    +provisioning config set logging.level debug
    +
    +

    Log levels: trace, debug, info, warn, error

    +

    Performance Tuning

    +

    Orchestrator Performance

    +

    Adjust worker threads and task limits:

    +
    [execution]
    +max_parallel_tasks = 200  # Increase for high throughput
    +worker_threads = 16       # Match CPU cores
    +task_queue_size = 1000
    +
    +[performance]
    +enable_metrics = true
    +metrics_interval = 10
    +
    +

    Database Connection Pooling

    +
    [database]
    +max_connections = 100
    +min_connections = 10
    +connection_timeout = 30
    +idle_timeout = 600
    +
    +

    Memory Limits

    +

    Set memory limits via systemd:

    +
    [Service]
    +MemoryMax=4G
    +MemoryHigh=3G
    +
    +

    Service Updates and Upgrades

    +

    Zero-Downtime Upgrades

    +

    Rolling upgrade procedure:

    +
    # 1. Deploy new version alongside old version
    +sudo cp provisioning-orchestrator /usr/local/bin/provisioning-orchestrator-new
    +
    +# 2. Update systemd service to use new binary
    +sudo systemctl daemon-reload
    +
    +# 3. Graceful restart
    +sudo systemctl reload provisioning-orchestrator
    +
    +

    Version Management

    +

    Check running versions:

    +
    provisioning platform versions
    +
    +# Output:
    +# orchestrator: 5.0.0
    +# control-center: 5.0.0
    +# vault-service: 4.0.0
    +
    +

    Rollback Procedure

    +
    # 1. Stop new version
    +sudo systemctl stop provisioning-orchestrator
    +
    +# 2. Restore previous binary
    +sudo cp /usr/local/bin/provisioning-orchestrator.backup \
    +       /usr/local/bin/provisioning-orchestrator
    +
    +# 3. Start service with previous version
    +sudo systemctl start provisioning-orchestrator
    +
    +

    Security Hardening

    +

    Service Isolation

    +

    Run services with dedicated users:

    +
    # Create service user
    +sudo useradd -r -s /usr/sbin/nologin provisioning
    +
    +# Set ownership
    +sudo chown -R provisioning:provisioning /var/lib/provisioning
    +sudo chown -R provisioning:provisioning /etc/provisioning
    +
    +

    Systemd service configuration:

    +
    [Service]
    +User=provisioning
    +Group=provisioning
    +NoNewPrivileges=true
    +PrivateTmp=true
    +ProtectSystem=strict
    +ProtectHome=true
    +
    +

    Network Security

    +

    Restrict service access with firewall:

    +
    # Allow only localhost access
    +sudo ufw allow from 127.0.0.1 to any port 8080
    +sudo ufw allow from 127.0.0.1 to any port 8081
    +
    +# Or use systemd socket activation
    +
    +

    Troubleshooting Services

    +

    Service Won’t Start

    +

    Check service status and logs:

    +
    systemctl status provisioning-orchestrator
    +journalctl -u provisioning-orchestrator -n 100
    +
    +

    Common issues:

      -
    • Configured providers
    • +
    • Port already in use: Check with lsof -i :8080
    • +
    • Configuration error: Validate with provisioning validate config
    • +
    • Missing dependencies: Check with ldd /usr/local/bin/provisioning-orchestrator
    • +
    • Permission issues: Verify file ownership
    • +
    +

    High Resource Usage

    +

    Monitor resource consumption:

    +
    # CPU and memory usage
    +systemctl status provisioning-orchestrator
    +
    +# Detailed metrics
    +provisioning platform metrics --service orchestrator
    +
    +

    Adjust limits:

    +
    # Increase memory limit
    +sudo systemctl set-property provisioning-orchestrator MemoryMax=8G
    +
    +# Reduce parallel tasks
    +provisioning config set execution.max_parallel_tasks 50
    +sudo systemctl restart provisioning-orchestrator
    +
    +

    Service Crashes

    +

    Enable core dumps for debugging:

    +
    # Enable core dumps
    +sudo sysctl -w kernel.core_pattern=/var/crash/core.%e.%p
    +ulimit -c unlimited
    +
    +# Analyze crash
    +sudo coredumpctl list
    +sudo coredumpctl debug
    +
    +

    Service Metrics

    +

    Prometheus Integration

    +

    Services expose Prometheus metrics:

    +
    # Orchestrator metrics
    +curl  [http://localhost:8080/metrics](http://localhost:8080/metrics)
    +
    +# Example metrics:
    +# provisioning_workflows_total 1234
    +# provisioning_workflows_active 5
    +# provisioning_tasks_queued 12
    +# provisioning_tasks_completed 9876
    +
    +

    Grafana Dashboards

    +

    Import pre-built dashboards:

    +
    provisioning monitoring install-dashboards
    +
    +

    Dashboards available at http://localhost:3000

    +

    Best Practices

    +

    Service Management

    +
      +
    • Use systemd for production deployments
    • +
    • Enable automatic restart on failure
    • +
    • Monitor health endpoints continuously
    • +
    • Set appropriate resource limits
    • +
    • Implement log rotation
    • +
    • Regular backup of service data
    • +
    +

    Configuration Management

    +
      +
    • Version control all configuration files
    • +
    • Use hierarchical configuration for flexibility
    • +
    • Validate configuration before applying
    • +
    • Document all custom settings
    • +
    • Use environment variables for secrets
    • +
    +

    Monitoring and Alerting

    +
      +
    • Monitor all service health endpoints
    • +
    • Set up alerts for service failures
    • +
    • Track key performance metrics
    • +
    • Review logs regularly
    • +
    • Establish incident response procedures
    • +
    + + +

    Monitoring

    +

    Comprehensive observability stack for the Provisioning platform using Prometheus, Grafana, and custom metrics.

    +

    Monitoring Stack Overview

    +

    The platform monitoring system consists of:

    +
    + + + + + +
    ComponentPurposePortStatus
    PrometheusMetrics collection and storage9090Production
    GrafanaVisualization and dashboards3000Production
    LokiLog aggregation3100Active
    AlertmanagerAlert routing and notification9093Production
    Node ExporterSystem metrics9100Production
    +
    +

    Quick Start

    +

    Install monitoring stack:

    +
    # Install all monitoring components
    +provisioning monitoring install
    +
    +# Install specific components
    +provisioning monitoring install --components prometheus,grafana
    +
    +# Start monitoring services
    +provisioning monitoring start
    +
    +

    Access dashboards:

    + +

    Prometheus Configuration

    +

    Service Discovery

    +

    Prometheus automatically discovers platform services:

    +
    # /etc/provisioning/prometheus/prometheus.yml
    +global:
    +  scrape_interval: 15s
    +  evaluation_interval: 15s
    +
    +scrape_configs:
    +  - job_name: 'provisioning-orchestrator'
    +    static_configs:
    +      - targets: ['localhost:8080']
    +    metrics_path: '/metrics'
    +
    +  - job_name: 'provisioning-control-center'
    +    static_configs:
    +      - targets: ['localhost:8081']
    +
    +  - job_name: 'provisioning-vault-service'
    +    static_configs:
    +      - targets: ['localhost:8085']
    +
    +  - job_name: 'node-exporter'
    +    static_configs:
    +      - targets: ['localhost:9100']
    +
    +

    Retention Configuration

    +
    global:
    +  external_labels:
    +    cluster: 'provisioning-production'
    +
    +# Storage retention
    +storage:
    +  tsdb:
    +    retention.time: 30d
    +    retention.size: 50GB
    +
    +

    Key Metrics

    +

    Platform Metrics

    +

    Orchestrator metrics:

    +
    provisioning_workflows_total - Total workflows created
    +provisioning_workflows_active - Currently active workflows
    +provisioning_workflows_completed - Successfully completed workflows
    +provisioning_workflows_failed - Failed workflows
    +provisioning_tasks_queued - Tasks in queue
    +provisioning_tasks_running - Currently executing tasks
    +provisioning_tasks_completed - Total completed tasks
    +provisioning_checkpoint_recoveries - Checkpoint recovery count
    +
    +

    Control Center metrics:

    +
    provisioning_api_requests_total - Total API requests
    +provisioning_api_requests_duration_seconds - Request latency histogram
    +provisioning_auth_attempts_total - Authentication attempts
    +provisioning_auth_failures_total - Failed authentication attempts
    +provisioning_rbac_denials_total - Authorization denials
    +
    +

    Vault Service metrics:

    +
    provisioning_secrets_operations_total - Secret operations count
    +provisioning_kms_encryptions_total - Encryption operations
    +provisioning_kms_decryptions_total - Decryption operations
    +provisioning_kms_latency_seconds - KMS operation latency
    +
    +

    System Metrics

    +

    Node Exporter provides system-level metrics:

    +
    node_cpu_seconds_total - CPU time per core
    +node_memory_MemAvailable_bytes - Available memory
    +node_disk_io_time_seconds_total - Disk I/O time
    +node_network_receive_bytes_total - Network RX bytes
    +node_network_transmit_bytes_total - Network TX bytes
    +node_filesystem_avail_bytes - Available disk space
    +
    +

    Grafana Dashboards

    +

    Pre-built Dashboards

    +

    Import platform dashboards:

    +
    # Install all pre-built dashboards
    +provisioning monitoring install-dashboards
    +
    +# List available dashboards
    +provisioning monitoring list-dashboards
    +
    +

    Available dashboards:

    +
      +
    1. Platform Overview - High-level system status
    2. +
    3. Orchestrator Performance - Workflow and task metrics
    4. +
    5. Control Center API - API request metrics and latency
    6. +
    7. Vault Service KMS - Encryption operations and performance
    8. +
    9. System Resources - CPU, memory, disk, network
    10. +
    11. Security Events - Authentication, authorization, audit logs
    12. +
    13. Database Performance - SurrealDB metrics
    14. +
    +

    Custom Dashboard Creation

    +

    Create custom dashboards via Grafana UI or provisioning:

    +
    {
    +  "dashboard": {
    +    "title": "Custom Infrastructure Dashboard",
    +    "panels": [
    +      {
    +        "title": "Active Workflows",
    +        "targets": [
    +          {
    +            "expr": "provisioning_workflows_active",
    +            "legendFormat": "Active Workflows"
    +          }
    +        ],
    +        "type": "graph"
    +      }
    +    ]
    +  }
    +}
    +
    +

    Save dashboard:

    +
    provisioning monitoring export-dashboard --id 1 --output custom-dashboard.json
    +
    +

    Alerting

    +

    Alert Rules

    +

    Configure alert rules in Prometheus:

    +
    # /etc/provisioning/prometheus/alerts/provisioning.yml
    +groups:
    +  - name: provisioning_alerts
    +    interval: 30s
    +    rules:
    +      - alert: OrchestratorDown
    +        expr: up{job="provisioning-orchestrator"} == 0
    +        for: 1m
    +        labels:
    +          severity: critical
    +        annotations:
    +          summary: "Orchestrator service is down"
    +          description: "Orchestrator has been down for more than 1 minute"
    +
    +      - alert: HighWorkflowFailureRate
    +        expr: |
    +          rate(provisioning_workflows_failed[5m]) /
    +          rate(provisioning_workflows_total[5m]) > 0.1
    +        for: 5m
    +        labels:
    +          severity: warning
    +        annotations:
    +          summary: "High workflow failure rate"
    +          description: "More than 10% of workflows are failing"
    +
    +      - alert: DatabaseConnectionLoss
    +        expr: provisioning_database_connected == 0
    +        for: 30s
    +        labels:
    +          severity: critical
    +        annotations:
    +          summary: "Database connection lost"
    +
    +      - alert: HighMemoryUsage
    +        expr: |
    +          (1 - node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) > 0.9
    +        for: 5m
    +        labels:
    +          severity: warning
    +        annotations:
    +          summary: "High memory usage"
    +          description: "Memory usage is above 90%"
    +
    +      - alert: DiskSpaceLow
    +        expr: |
    +          (node_filesystem_avail_bytes{mountpoint="/var/lib/provisioning"} /
    +           node_filesystem_size_bytes{mountpoint="/var/lib/provisioning"}) < 0.1
    +        for: 5m
    +        labels:
    +          severity: warning
    +        annotations:
    +          summary: "Low disk space"
    +          description: "Less than 10% disk space available"
    +
    +

    Alertmanager Configuration

    +

    Route alerts to appropriate channels:

    +
    # /etc/provisioning/alertmanager/alertmanager.yml
    +global:
    +  resolve_timeout: 5m
    +
    +route:
    +  group_by: ['alertname', 'severity']
    +  group_wait: 10s
    +  group_interval: 10s
    +  repeat_interval: 12h
    +  receiver: 'team-email'
    +
    +  routes:
    +    - match:
    +        severity: critical
    +      receiver: 'pagerduty'
    +      continue: true
    +
    +    - match:
    +        severity: warning
    +      receiver: 'slack'
    +
    +receivers:
    +  - name: 'team-email'
    +    email_configs:
    +      - to: '[ops@example.com](mailto:ops@example.com)'
    +        from: '[alerts@provisioning.example.com](mailto:alerts@provisioning.example.com)'
    +        smarthost: 'smtp.example.com:587'
    +
    +  - name: 'pagerduty'
    +    pagerduty_configs:
    +      - service_key: '<pagerduty-key>'
    +
    +  - name: 'slack'
    +    slack_configs:
    +      - api_url: '<slack-webhook-url>'
    +        channel: '#provisioning-alerts'
    +
    +

    Test alerts:

    +
    # Send test alert
    +provisioning monitoring test-alert --severity critical
    +
    +# Silence alerts temporarily
    +provisioning monitoring silence --duration 2h --reason "Maintenance window"
    +
    +

    Log Aggregation with Loki

    +

    Loki Configuration

    +
    # /etc/provisioning/loki/loki.yml
    +auth_enabled: false
    +
    +server:
    +  http_listen_port: 3100
    +
    +ingester:
    +  lifecycler:
    +    ring:
    +      kvstore:
    +        store: inmemory
    +      replication_factor: 1
    +
    +schema_config:
    +  configs:
    +    - from: 2024-01-01
    +      store: boltdb-shipper
    +      object_store: filesystem
    +      schema: v11
    +      index:
    +        prefix: index_
    +        period: 24h
    +
    +storage_config:
    +  boltdb_shipper:
    +    active_index_directory: /var/lib/loki/boltdb-shipper-active
    +    cache_location: /var/lib/loki/boltdb-shipper-cache
    +  filesystem:
    +    directory: /var/lib/loki/chunks
    +
    +limits_config:
    +  retention_period: 720h  # 30 days
    +
    +

    Promtail for Log Shipping

    +
    # /etc/provisioning/promtail/promtail.yml
    +server:
    +  http_listen_port: 9080
    +
    +positions:
    +  filename: /tmp/positions.yaml
    +
    +clients:
    +  - url:  [http://localhost:3100/loki/api/v1/push](http://localhost:3100/loki/api/v1/push)
    +
    +scrape_configs:
    +  - job_name: system
    +    static_configs:
    +      - targets:
    +          - localhost
    +        labels:
    +          job: varlogs
    +          __path__: /var/log/provisioning/*.log
    +
    +  - job_name: journald
    +    journal:
    +      max_age: 12h
    +      labels:
    +        job: systemd-journal
    +    relabel_configs:
    +      - source_labels: ['__journal__systemd_unit']
    +        target_label: 'unit'
    +
    +

    Query logs in Grafana:

    +
    {job="varlogs"} | = "error"
    +{unit="provisioning-orchestrator.service"} | = "workflow" | json
    +
    +

    Tracing with Tempo

    +

    Distributed Tracing

    +

    Enable OpenTelemetry tracing in services:

    +
    # /etc/provisioning/config.toml
    +[tracing]
    +enabled = true
    +exporter = "otlp"
    +endpoint = "localhost:4317"
    +service_name = "provisioning-orchestrator"
    +
    +

    Tempo configuration:

    +
    # /etc/provisioning/tempo/tempo.yml
    +server:
    +  http_listen_port: 3200
    +
    +distributor:
    +  receivers:
    +    otlp:
    +      protocols:
    +        grpc:
    +          endpoint: 0.0.0.0:4317
    +
    +storage:
    +  trace:
    +    backend: local
    +    local:
    +      path: /var/lib/tempo/traces
    +
    +query_frontend:
    +  search:
    +    enabled: true
    +
    +

    View traces in Grafana or Tempo UI.

    +

    Performance Monitoring

    +

    Query Performance

    +

    Monitor slow queries:

    +
    # 95th percentile API latency
    +histogram_quantile(0.95,
    +  rate(provisioning_api_requests_duration_seconds_bucket[5m])
    +)
    +
    +# Slow workflows (>60s)
    +provisioning_workflow_duration_seconds > 60
    +
    +

    Resource Monitoring

    +

    Track resource utilization:

    +
    # CPU usage per service
    +rate(process_cpu_seconds_total{job=~"provisioning-.*"}[5m]) * 100
    +
    +# Memory usage per service
    +process_resident_memory_bytes{job=~"provisioning-.*"}
    +
    +# Disk I/O rate
    +rate(node_disk_io_time_seconds_total[5m])
    +
    +

    Custom Metrics

    +

    Adding Custom Metrics

    +

    Rust services use prometheus crate:

    +
    use prometheus::{Counter, Histogram, Registry};
    +
    +// Create metrics
    +let workflow_counter = Counter::new(
    +    "provisioning_custom_workflows",
    +    "Custom workflow counter"
    +)?;
    +
    +let task_duration = Histogram::with_opts(
    +    HistogramOpts::new("provisioning_task_duration", "Task duration")
    +        .buckets(vec![0.1, 0.5, 1.0, 5.0, 10.0])
    +)?;
    +
    +// Register metrics
    +registry.register(Box::new(workflow_counter))?;
    +registry.register(Box::new(task_duration))?;
    +
    +// Use metrics
    +workflow_counter.inc();
    +task_duration.observe(duration_seconds);
    +

    Nushell scripts export metrics:

    +
    # Export metrics in Prometheus format
    +def export-metrics [] {
    +    [
    +        "# HELP provisioning_custom_metric Custom metric"
    +        "# TYPE provisioning_custom_metric counter"
    +        $"provisioning_custom_metric (get-metric-value)"
    +    ] | str join "
    +"
    +}
    +
    +

    Monitoring Best Practices

    +
      +
    • Set appropriate scrape intervals (15-60s)
    • +
    • Configure retention based on compliance requirements
    • +
    • Use labels for multi-dimensional metrics
    • +
    • Create dashboards for key business metrics
    • +
    • Set up alerts for critical failures only
    • +
    • Document alert thresholds and runbooks
    • +
    • Review and tune alerts regularly
    • +
    • Use recording rules for expensive queries
    • +
    • Archive long-term metrics to object storage
    • +
    + + +

    Backup & Recovery

    +

    Comprehensive backup strategies and disaster recovery procedures for the Provisioning platform.

    +

    Overview

    +

    The platform backup strategy covers:

    +
      +
    • Platform service data and state
    • +
    • Database backups (SurrealDB)
    • +
    • Configuration files and secrets
    • Infrastructure definitions
    • -
    • Server configurations
    • -
    • Platform services
    • +
    • Workflow checkpoints and history
    • +
    • Audit logs and compliance data
    -

    Initialization Process (8 Steps)

    -
    STEP 1: Create directory structure
    -        └─ workspace/, config/, infra/default/, etc.
    +

    Backup Components

    +

    Critical Data

    +
    + + + + + + + +
    ComponentLocationBackup PriorityRecovery Time
    Database/var/lib/provisioning/databaseCritical< 15 min
    Orchestrator State/var/lib/provisioning/orchestratorCritical< 5 min
    Configuration/etc/provisioningHigh< 5 min
    SecretsSOPS-encrypted filesCritical< 5 min
    Audit Logs/var/log/provisioning/auditCompliance< 30 min
    Workspace Dataworkspace/High< 15 min
    Infrastructure Schemasprovisioning/schemasHigh< 10 min
    +
    +

    Backup Strategies

    +

    Full Backup

    +

    Complete system backup including all components:

    +
    # Create full backup
    +provisioning backup create --type full --output /backups/full-$(date +%Y%m%d).tar.gz
     
    -STEP 2: Generate Nickel configuration
    -        ├─ config/config.ncl (master config)
    -        └─ infra/default/*.ncl (infrastructure files)
    -
    -STEP 3: Configure providers
    -        └─ Setup local provider (default)
    -
    -STEP 4: Initialize metadata
    -        └─ .provisioning/metadata.yaml
    -
    -STEP 5: Activate workspace (if requested)
    -        └─ Set as default workspace
    -
    -STEP 6: Create .gitignore
    -        └─ Workspace-specific ignore rules
    -
    -STEP 7: ✨ GENERATE DOCUMENTATION
    -        ├─ Extract workspace metadata
    -        ├─ Render 4 workspace guides
    -        └─ Place in docs/ directory
    -
    -STEP 8: Display summary
    -        └─ Show workspace path and documentation location
    +# Full backup includes:
    +# - Database dump
    +# - Service configuration
    +# - Workflow state
    +# - Audit logs
    +# - User data
     
    -

    Common Commands

    -

    Workspace Management

    -
    # Create interactive workspace
    -provisioning workspace init
    -
    -# Create with explicit path and activate
    -provisioning workspace init my_workspace /path/to/workspace --activate
    -
    -# List all workspaces
    -provisioning workspace list
    -
    -# Activate workspace
    -provisioning workspace activate my_workspace
    -
    -# Show active workspace
    -provisioning workspace active
    +

    Contents of full backup:

    +
    full-20260116.tar.gz
    +├── database/
    +│   └── surrealdb-dump.sql
    +├── config/
    +│   ├── provisioning.toml
    +│   ├── orchestrator.toml
    +│   └── control-center.toml
    +├── state/
    +│   ├── workflows/
    +│   └── checkpoints/
    +├── logs/
    +│   └── audit/
    +├── workspace/
    +│   ├── infra/
    +│   └── config/
    +└── metadata.json
     
    -

    Configuration

    -
    # Validate Nickel configuration
    -nickel typecheck config/config.ncl
    -nickel typecheck infra/default/main.ncl
    +

    Incremental Backup

    +

    Backup only changed data since last backup:

    +
    # Incremental backup (faster, smaller)
    +provisioning backup create --type incremental --since-backup full-20260116
     
    -# Validate with provisioning system
    +# Incremental backup includes:
    +# - New workflows since last backup
    +# - Configuration changes
    +# - New audit log entries
    +# - Modified workspace files
    +
    +

    Continuous Backup

    +

    Real-time backup of critical data:

    +
    # Enable continuous backup
    +provisioning backup enable-continuous --destination s3://backups/continuous
    +
    +# WAL archiving for database
    +# Real-time checkpoint backup
    +# Audit log streaming
    +
    +

    Backup Commands

    +

    Create Backup

    +
    # Full backup to local directory
    +provisioning backup create --type full --output /backups
    +
    +# Incremental backup
    +provisioning backup create --type incremental
    +
    +# Backup specific components
    +provisioning backup create --components database,config
    +
    +# Compressed backup
    +provisioning backup create --compress gzip
    +
    +# Encrypted backup
    +provisioning backup create --encrypt --key-file /etc/provisioning/backup.key
    +
    +

    List Backups

    +
    # List all backups
    +provisioning backup list
    +
    +# Output:
    +# NAME                  TYPE         SIZE    DATE                STATUS
    +# full-20260116        Full         2.5GB   2026-01-16 10:00   Complete
    +# incr-20260116-1200   Incremental  150MB   2026-01-16 12:00   Complete
    +# full-20260115        Full         2.4GB   2026-01-15 10:00   Complete
    +
    +

    Restore Backup

    +
    # Restore full backup
    +provisioning backup restore --backup full-20260116 --confirm
    +
    +# Restore specific components
    +provisioning backup restore --backup full-20260116 --components database
    +
    +# Point-in-time restore
    +provisioning backup restore --timestamp "2026-01-16 09:30:00"
    +
    +# Dry-run restore
    +provisioning backup restore --backup full-20260116 --dry-run
    +
    +

    Verify Backup

    +
    # Verify backup integrity
    +provisioning backup verify --backup full-20260116
    +
    +# Test restore in isolated environment
    +provisioning backup test-restore --backup full-20260116
    +
    +

    Automated Backup Scheduling

    +

    Cron-based Backups

    +
    # Install backup cron jobs
    +provisioning backup schedule install
    +
    +# Default schedule:
    +# Full backup: Daily at 2 AM
    +# Incremental: Every 6 hours
    +# Cleanup old backups: Weekly
    +
    +

    Crontab entries:

    +
    # Full daily backup
    +0 2 * * * /usr/local/bin/provisioning backup create --type full --output /backups
    +
    +# Incremental every 6 hours
    +0 */6 * * * /usr/local/bin/provisioning backup create --type incremental
    +
    +# Cleanup backups older than 30 days
    +0 3 * * 0 /usr/local/bin/provisioning backup cleanup --older-than 30d
    +
    +

    Systemd Timer-based Backups

    +
    # /etc/systemd/system/provisioning-backup.timer
    +[Unit]
    +Description=Provisioning Platform Backup Timer
    +
    +[Timer]
    +OnCalendar=daily
    +OnCalendar=02:00
    +Persistent=true
    +
    +[Install]
    +WantedBy=timers.target
    +
    +
    # /etc/systemd/system/provisioning-backup.service
    +[Unit]
    +Description=Provisioning Platform Backup
    +
    +[Service]
    +Type=oneshot
    +ExecStart=/usr/local/bin/provisioning backup create --type full
    +User=provisioning
    +
    +

    Enable timer:

    +
    sudo systemctl enable provisioning-backup.timer
    +sudo systemctl start provisioning-backup.timer
    +
    +

    Backup Destinations

    +

    Local Filesystem

    +
    # Backup to local directory
    +provisioning backup create --output /mnt/backups
    +
    +

    Remote Storage

    +

    S3-compatible storage:

    +
    # Backup to S3
    +provisioning backup create --destination s3://my-bucket/backups \
    +  --s3-region us-east-1
    +
    +# Backup to MinIO
    +provisioning backup create --destination s3://backups \
    +  --s3-endpoint  [http://minio.local:9000](http://minio.local:9000)
    +
    +

    Network filesystem:

    +
    # Backup to NFS mount
    +provisioning backup create --output /mnt/nfs/backups
    +
    +# Backup to SMB share
    +provisioning backup create --output /mnt/smb/backups
    +
    +

    Off-site Backup

    +

    Rsync to remote server:

    +
    # Backup and sync to remote
    +provisioning backup create --output /backups
    +rsync -avz /backups/ backup-server:/backups/provisioning/
    +
    +

    Database Backup

    +

    SurrealDB Backup

    +
    # Export database
    +surreal export --conn  [http://localhost:8000](http://localhost:8000) \
    +  --user root --pass root \
    +  --ns provisioning --db main \
    +  /backups/database-$(date +%Y%m%d).surql
    +
    +# Import database
    +surreal import --conn  [http://localhost:8000](http://localhost:8000) \
    +  --user root --pass root \
    +  --ns provisioning --db main \
    +  /backups/database-20260116.surql
    +
    +

    Automated Database Backups

    +
    # Enable automatic database backups
    +provisioning backup database enable --interval daily
    +
    +# Backup with point-in-time recovery
    +provisioning backup database create --enable-pitr
    +
    +

    Disaster Recovery

    +

    Recovery Procedures

    +

    Complete platform recovery from backup:

    +
    # 1. Stop all services
    +sudo systemctl stop provisioning-*
    +
    +# 2. Restore database
    +provisioning backup restore --backup full-20260116 --components database
    +
    +# 3. Restore configuration
    +provisioning backup restore --backup full-20260116 --components config
    +
    +# 4. Restore service state
    +provisioning backup restore --backup full-20260116 --components state
    +
    +# 5. Verify data integrity
    +provisioning validate-installation
    +
    +# 6. Start services
    +sudo systemctl start provisioning-*
    +
    +# 7. Verify services
    +provisioning platform status
    +
    +

    Recovery Time Objectives

    +
    + + + + +
    ScenarioRTORPOProcedure
    Service failure5 min0Restart service from checkpoint
    Database corruption15 min6 hoursRestore from incremental backup
    Complete data loss30 min24 hoursRestore from full backup
    Site disaster2 hours24 hoursRestore from off-site backup
    +
    +

    Point-in-Time Recovery

    +

    Restore to specific timestamp:

    +
    # List available recovery points
    +provisioning backup list-recovery-points
    +
    +# Restore to specific time
    +provisioning backup restore --timestamp "2026-01-16 09:30:00"
    +
    +# Recovery with workflow replay
    +provisioning backup restore --timestamp "2026-01-16 09:30:00" --replay-workflows
    +
    +

    Backup Encryption

    +

    SOPS Encryption

    +

    Encrypt backups with SOPS:

    +
    # Create encrypted backup
    +provisioning backup create --encrypt sops --key-file /etc/provisioning/age.key
    +
    +# Restore encrypted backup
    +provisioning backup restore --backup encrypted-20260116.tar.gz.enc \
    +  --decrypt sops --key-file /etc/provisioning/age.key
    +
    +

    Age Encryption

    +
    # Generate age key pair
    +age-keygen -o /etc/provisioning/backup-key.txt
    +
    +# Create encrypted backup with age
    +provisioning backup create --encrypt age --recipient "age1..."
    +
    +# Decrypt and restore
    +age -d -i /etc/provisioning/backup-key.txt backup.tar.gz.age | \
    +  provisioning backup restore --stdin
    +
    +

    Backup Retention

    +

    Retention Policies

    +
    # /etc/provisioning/backup-retention.toml
    +[retention]
    +# Keep daily backups for 7 days
    +daily = 7
    +
    +# Keep weekly backups for 4 weeks
    +weekly = 4
    +
    +# Keep monthly backups for 12 months
    +monthly = 12
    +
    +# Keep yearly backups for 7 years (compliance)
    +yearly = 7
    +
    +

    Apply retention policy:

    +
    # Cleanup old backups according to policy
    +provisioning backup cleanup --policy /etc/provisioning/backup-retention.toml
    +
    +

    Backup Monitoring

    +

    Backup Alerts

    +

    Configure alerts for backup failures:

    +
    # Prometheus alert for failed backups
    +- alert: BackupFailed
    +  expr: provisioning_backup_status{status="failed"} > 0
    +  for: 5m
    +  labels:
    +    severity: critical
    +  annotations:
    +    summary: "Backup failed"
    +    description: "Backup has failed, investigate immediately"
    +
    +

    Backup Metrics

    +

    Monitor backup health:

    +
    # Backup success rate
    +provisioning_backup_success_rate{type="full"} 1.0
    +
    +# Time since last backup
    +time() - provisioning_backup_last_success_timestamp > 86400
    +
    +# Backup size trend
    +increase(provisioning_backup_size_bytes[7d])
    +
    +

    Testing Recovery Procedures

    +

    Regular DR Drills

    +
    # Automated disaster recovery test
    +provisioning backup test-recovery --backup full-20260116 \
    +  --test-environment isolated
    +
    +# Steps performed:
    +# 1. Spin up isolated test environment
    +# 2. Restore backup
    +# 3. Verify data integrity
    +# 4. Run smoke tests
    +# 5. Generate test report
    +# 6. Teardown test environment
    +
    +

    Schedule monthly DR tests:

    +
    # Monthly disaster recovery drill
    +0 4 1 * * /usr/local/bin/provisioning backup test-recovery --latest
    +
    +

    Best Practices

    +
      +
    • Implement 3-2-1 backup rule: 3 copies, 2 different media, 1 off-site
    • +
    • Encrypt all backups containing sensitive data
    • +
    • Test restore procedures regularly (monthly minimum)
    • +
    • Monitor backup success/failure metrics
    • +
    • Automate backup verification
    • +
    • Document recovery procedures and RTO/RPO
    • +
    • Maintain off-site backups for disaster recovery
    • +
    • Use incremental backups to reduce storage costs
    • +
    • Version control infrastructure schemas separately
    • +
    • Retain audit logs per compliance requirements (7 years)
    • +
    + + +

    Upgrading Provisioning

    +

    Upgrade Provisioning to a new version with minimal downtime and automatic rollback support.

    +

    Overview

    +

    Provisioning supports two upgrade strategies:

    +
      +
    1. In-Place Upgrade - Update existing installation
    2. +
    3. Side-by-Side Upgrade - Run new version alongside old, switch when ready
    4. +
    +

    Both strategies support automatic rollback on failure.

    +

    Before Upgrading

    +

    Check Current Version

    +
    provisioning version
    +
    +# Example output:
    +# Provisioning v5.0.0
    +# Nushell 0.109.0
    +# Nickel 1.15.1
    +# SOPS 3.10.2
    +# Age 1.2.1
    +
    +

    Backup Configuration

    +
    # Backup entire workspace
    +provisioning workspace backup
    +
    +# Backup specific configuration
    +provisioning config backup
    +
    +# Backup state
    +provisioning state backup
    +
    +

    Check Changelog

    +
    # View latest changes
    +provisioning changelog
    +
    +# Check upgrade path
    +provisioning version --check-upgrade
    +
    +# Show upgrade recommendations
    +provisioning upgrade --check
    +
    +

    Verify System Health

    +
    # Health check
    +provisioning health check
    +
    +# Check all services
    +provisioning platform health
    +
    +# Verify provider connectivity
    +provisioning providers test --all
    +
    +# Validate configuration
    +provisioning validate config --strict
    +
    +

    Upgrade Methods

    +

    Method 1: In-Place Upgrade

    +

    Upgrade the existing installation with zero downtime:

    +
    # Check upgrade compatibility
    +provisioning upgrade --check
    +
    +# List breaking changes
    +provisioning upgrade --breaking-changes
    +
    +# Show migration guide (if any)
    +provisioning upgrade --show-migration
    +
    +# Perform upgrade
    +provisioning upgrade
    +
    +

    Process:

    +
      +
    1. Validate current installation
    2. +
    3. Download new version
    4. +
    5. Run migration scripts (if needed)
    6. +
    7. Restart services
    8. +
    9. Verify health
    10. +
    11. Keep old version for rollback (24 hours)
    12. +
    +

    Method 2: Side-by-Side Upgrade

    +

    Run new version alongside old version for testing:

    +
    # Create staging installation
    +provisioning upgrade --staging --version v5.1.0
    +
    +# Test new version
    +provisioning --staging server list
    +
    +# Run test suite
    +provisioning --staging test suite
    +
    +# Switch to new version
    +provisioning upgrade --activate
    +
    +# Remove old version (after confirmation)
    +provisioning upgrade --cleanup-old
    +
    +

    Advantages:

    +
      +
    • Test new version before switching
    • +
    • Zero downtime during upgrade
    • +
    • Easy rollback to previous version
    • +
    • Run both versions simultaneously
    • +
    +

    Upgrade Process

    +

    Step 1: Pre-Upgrade Checks

    +
    # Check system requirements
    +provisioning setup validate
    +
    +# Verify dependencies are up-to-date
    +provisioning version --check-dependencies
    +
    +# Check disk space (minimum 2GB required)
    +df -h /
    +
    +# Verify all services healthy
    +provisioning platform health
    +
    +

    Step 2: Backup Data

    +
    # Backup entire workspace
    +provisioning workspace backup --compress
    +
    +# Backup orchestrator state
    +provisioning orchestrator backup
    +
    +# Backup configuration
    +provisioning config backup
    +
    +# Verify backup
    +provisioning backup list
    +provisioning backup verify --latest
    +
    +

    Step 3: Download New Version

    +
    # Check available versions
    +provisioning version --available
    +
    +# Download specific version
    +provisioning upgrade --download v5.1.0
    +
    +# Verify download
    +provisioning upgrade --verify-download v5.1.0
    +
    +# Check size
    +provisioning upgrade --show-size v5.1.0
    +
    +

    Step 4: Run Migration Scripts

    +
    # Show required migrations
    +provisioning upgrade --show-migrations
    +
    +# Test migration (dry-run)
    +provisioning upgrade --dry-run
    +
    +# Run migrations
    +provisioning upgrade --migrate
    +
    +# Verify migration
    +provisioning upgrade --verify-migration
    +
    +

    Step 5: Perform Upgrade

    +
    # Stop orchestrator gracefully
    +provisioning orchestrator stop --graceful
    +
    +# Install new version
    +provisioning upgrade --install
    +
    +# Verify installation
    +provisioning version
     provisioning validate config
    +
    +# Start services
    +provisioning orchestrator start
     
    -

    Deployment

    -
    # Dry-run (check mode)
    -provisioning -c server create
    +

    Step 6: Verify Upgrade

    +
    # Check version
    +provisioning version
     
    -# Actual deployment
    -provisioning server create
    +# Health check
    +provisioning health check
     
    -# List servers
    +# Run test suite
    +provisioning test quick
    +
    +# Verify provider connectivity
    +provisioning providers test --all
    +
    +# Check orchestrator status
    +provisioning orchestrator status
    +
    +

    Breaking Changes

    +

    Some upgrades may include breaking changes. Check before upgrading:

    +
    # List breaking changes
    +provisioning upgrade --breaking-changes
    +
    +# Show migration guide
    +provisioning upgrade --migration-guide v5.1.0
    +
    +# Generate migration script
    +provisioning upgrade --generate-migration v5.1.0 > migrate.nu
    +
    +

    Common Migration Scenarios

    +

    Scenario 1: Configuration Format Change

    +

    If configuration format changes (e.g., TOML → YAML):

    +
    # Export old format
    +provisioning config export --format toml > config.old.toml
    +
    +# Run migration
    +provisioning upgrade --migrate-config
    +
    +# Verify new format
    +provisioning config export --format yaml | head -20
    +
    +

    Scenario 2: Schema Updates

    +

    If infrastructure schemas change:

    +
    # Validate against new schema
    +nickel typecheck workspace/infra/*.ncl
    +
    +# Update schemas if needed
    +provisioning upgrade --update-schemas
    +
    +# Regenerate configurations
    +provisioning config regenerate
    +
    +# Validate updated config
    +provisioning validate config --strict
    +
    +

    Scenario 3: Provider API Changes

    +

    If provider APIs change:

    +
    # Test provider connectivity with new version
    +provisioning providers test upcloud --verbose
    +
    +# Check provider configuration
    +provisioning config show --section providers.upcloud
    +
    +# Update provider configuration if needed
    +provisioning providers configure upcloud
    +
    +# Verify connectivity
     provisioning server list
     
    -

    Workspace Directory Structure

    -

    Auto-Generated Structure

    -
    my_workspace/
    -├── config/
    -│   ├── config.ncl                 # Master configuration
    -│   ├── providers/                 # Provider configs
    -│   └── platform/                  # Platform configs
    -│
    -├── infra/
    -│   └── default/
    -│       ├── main.ncl              # Infrastructure definition
    -│       └── servers.ncl           # Server definitions
    -│
    -├── docs/                         # AUTO-GENERATED GUIDES
    -│   ├── README.md                # Workspace overview
    -│   ├── deployment-guide.md      # Step-by-step deployment
    -│   ├── configuration-guide.md   # Configuration reference
    -│   └── troubleshooting.md       # Common issues & solutions
    -│
    -├── .providers/                   # Provider state & cache
    -├── .kms/                        # KMS data
    -├── .provisioning/               # Workspace metadata
    -└── workspace.nu                 # Utility scripts
    +

    Rollback Procedure

    +

    Automatic Rollback

    +

    If upgrade fails, automatic rollback occurs:

    +
    # Monitor rollback progress
    +provisioning upgrade --watch
    +
    +# Check rollback status
    +provisioning upgrade --status
    +
    +# View rollback logs
    +provisioning upgrade --logs
     
    -

    Customization Guide

    -

    Edit Configuration

    -
    # Master workspace configuration
    -vim config/config.ncl
    +

    Manual Rollback

    +

    If needed, manually rollback to previous version:

    +
    # List available versions for rollback
    +provisioning upgrade --rollback-candidates
     
    -# Infrastructure definition
    -vim infra/default/main.ncl
    +# Rollback to specific version
    +provisioning upgrade --rollback v5.0.0
     
    -# Server definitions
    -vim infra/default/servers.ncl
    +# Verify rollback
    +provisioning version
    +provisioning platform health
    +
    +# Restore from backup
    +provisioning backup restore --backup-id=<id>
     
    -

    Add Multiple Infrastructures

    -
    # Create new infrastructure environment
    -mkdir -p infra/production infra/staging
    +

    Batch Workflow Handling

    +

    If you have running batch workflows:

    +
    # Check running workflows
    +provisioning workflow list --status running
     
    -# Copy template files
    -cp infra/default/main.ncl infra/production/main.ncl
    -cp infra/default/servers.ncl infra/production/servers.ncl
    +# Graceful shutdown (wait for completion)
    +provisioning workflow shutdown --graceful
     
    -# Edit for your needs
    -vim infra/production/servers.ncl
    +# Force shutdown (immediate)
    +provisioning workflow shutdown --force
    +
    +# Resume workflows after upgrade
    +provisioning workflow resume
     
    -

    Configure Providers

    -

    Update config/config.ncl to enable cloud providers:

    -
    providers = {
    -  upcloud = {
    -    name = "upcloud",
    -    enabled = true,              # Set to true
    -    workspace = "my_workspace",
    -    auth = { interface = "API" },
    -    paths = {
    -      base = ".providers/upcloud",
    -      cache = ".providers/upcloud/cache",
    -      state = ".providers/upcloud/state",
    -    },
    -    api = {
    -      url = "https://api.upcloud.com/1.3",
    -      timeout = 30,
    -    },
    -  },
    +

    Troubleshooting Upgrades

    +

    Upgrade Hangs

    +
    # Check logs
    +tail -f ~/.provisioning/logs/upgrade.log
    +
    +# Monitor process
    +provisioning upgrade --monitor
    +
    +# Stop upgrade gracefully
    +provisioning upgrade --stop --graceful
    +
    +# Force stop
    +provisioning upgrade --stop --force
    +
    +

    Migration Failure

    +
    # Check migration logs
    +provisioning upgrade --migration-logs
    +
    +# Rollback to previous version
    +provisioning upgrade --rollback
    +
    +# Restore from backup
    +provisioning backup restore
    +
    +

    Service Won’t Start

    +
    # Check service logs
    +provisioning platform logs
    +
    +# Verify configuration
    +provisioning validate config --strict
    +
    +# Restore configuration from backup
    +provisioning config restore
    +
    +# Restart services
    +provisioning orchestrator start
    +
    +

    Upgrade Scheduling

    +

    Schedule Automated Upgrade

    +
    # Schedule upgrade for specific time
    +provisioning upgrade --schedule "2026-01-20T02:00:00"
    +
    +# Schedule for next maintenance window
    +provisioning upgrade --schedule-next-maintenance
    +
    +# Cancel scheduled upgrade
    +provisioning upgrade --cancel-scheduled
    +
    +

    Unattended Upgrade

    +

    For CI/CD environments:

    +
    # Non-interactive upgrade
    +provisioning upgrade --yes --no-confirm
    +
    +# Upgrade with timeout
    +provisioning upgrade --timeout 3600
    +
    +# Skip backup
    +provisioning upgrade --skip-backup
    +
    +# Continue even if health checks fail
    +provisioning upgrade --force-upgrade
    +
    +

    Version Management

    +

    Version Constraints

    +

    Pin versions for workspace reproducibility:

    +
    # workspace/versions.ncl
    +{
    +  provisioning = "5.0.0"
    +  nushell = "0.109.0"
    +  nickel = "1.15.1"
    +  sops = "3.10.2"
    +  age = "1.2.1"
     }
     
    -

    Next Steps

    -
      -
    1. Read auto-generated guides in docs/
    2. -
    3. Customize configuration in Nickel files
    4. -
    5. Validate with: nickel typecheck config/config.ncl
    6. -
    7. Test deployment with dry-run mode: provisioning -c server create
    8. -
    9. Deploy infrastructure when ready
    10. -
    -

    Documentation References

    - -

    Multi-Provider Deployment Guide

    -

    This guide covers strategies and patterns for deploying infrastructure across multiple cloud providers using the provisioning system. Multi-provider -deployments enable high availability, disaster recovery, cost optimization, compliance with regional requirements, and vendor lock-in avoidance.

    -

    Table of Contents

    - -

    Overview

    -

    The provisioning system provides a provider-agnostic abstraction layer that enables seamless deployment across Hetzner, UpCloud, AWS, and -DigitalOcean. Each provider implements a standard interface with compute, storage, networking, and management capabilities.

    -

    Supported Providers

    -
    - - - - -
    ProviderComputeStorageLoad BalancerManaged ServicesNetwork Isolation
    HetznerCloud ServersVolumesLoad BalancerNovSwitch/Private Networks
    UpCloudServersStorageLoad BalancerNoVLAN
    AWSEC2EBS/S3ALB/NLBRDS, ElastiCache, etcVPC/Security Groups
    DigitalOceanDropletsVolumesLoad BalancerManaged DBVPC/Firewall
    -
    -

    Key Concepts

    -
      -
    • Provider Abstraction: Consistent interface across all providers hides provider-specific details
    • -
    • Workspace: Defines infrastructure components, resource allocation, and provider configuration
    • -
    • Multi-Provider Workspace: A single workspace that spans multiple providers with coordinated deployment
    • -
    • Batch Workflows: Orchestrate deployment across providers with dependency tracking and rollback capability
    • -
    -

    Why Multiple Providers

    -

    Cost Optimization

    -

    Different providers excel at different workloads:

    -
      -
    • Compute-Heavy: Hetzner offers best price/performance ratio for compute-intensive workloads
    • -
    • Managed Services: AWS RDS or DigitalOcean Managed Databases often more cost-effective than self-managed
    • -
    • Storage-Intensive: AWS S3 or Google Cloud Storage for large object storage requirements
    • -
    • Edge Locations: DigitalOcean’s CDN and global regions for geographically distributed serving
    • -
    -

    Example: Store application data in Hetzner compute nodes (cost-effective), analytics database in AWS RDS (managed), and backups in DigitalOcean -Spaces (affordable object storage).

    -

    High Availability and Disaster Recovery

    -
      -
    • Active-Active: Run identical infrastructure in multiple providers for load balancing
    • -
    • Active-Standby: Primary on Provider A, warm standby on Provider B with automated failover
    • -
    • Multi-Region: Distribute across geographic regions within and between providers
    • -
    • Time-to-Recovery: Multiple providers reduce dependency on single provider’s infrastructure
    • -
    -

    Compliance and Data Residency

    -
      -
    • GDPR: European data must stay in EU providers (Hetzner DE, UpCloud FI/SE)
    • -
    • Regional Requirements: Some compliance frameworks require data in specific countries
    • -
    • Provider Certifications: Different providers have different compliance certifications (SOC2, ISO 27001, HIPAA)
    • -
    -

    Example: Production data in Hetzner (EU-based), analytics in AWS (GDPR-compliant regions), backups in DigitalOcean.

    -

    Vendor Lock-in Avoidance

    -
      -
    • Portability: Multi-provider setup enables migration without complete outage
    • -
    • Flexibility: Switch providers for cost negotiation or service issues
    • -
    • Resilience: Not dependent on single provider’s reliability or pricing changes
    • -
    -

    Performance and Latency

    -
      -
    • Geographic Distribution: Serve users from nearest provider
    • -
    • Provider-Specific Performance: Some providers have better infrastructure for specific regions
    • -
    • Regional Redundancy: Maintain service availability during provider-wide outages
    • -
    -

    Provider Selection Strategy

    -

    Decision Framework

    -

    1. Workload Characteristics

    -

    Compute-Intensive (batch processing, ML, heavy calculations)

    -
      -
    • Recommended: Hetzner (best price), UpCloud (mid-range)
    • -
    • Avoid: AWS on-demand (unless spot instances), DigitalOcean premium tier
    • -
    -

    Web/Application (stateless serving, APIs)

    -
      -
    • Recommended: DigitalOcean (simple management), Hetzner (cost), AWS (multi-region)
    • -
    • Consider: Geographic proximity to users
    • -
    -

    Stateful/Database (databases, caches, queues)

    -
      -
    • Recommended: AWS RDS/ElastiCache, DigitalOcean Managed DB
    • -
    • Alternative: Self-managed on any provider with replication
    • -
    -

    Storage/File Serving (object storage, backups)

    -
      -
    • Recommended: AWS S3, DigitalOcean Spaces, Hetzner Object Storage
    • -
    • Consider: Cost per GB, access patterns, bandwidth
    • -
    -

    Regional Availability

    -

    North America

    -
      -
    • AWS: Multiple regions (us-east-1, us-west-2, etc)
    • -
    • DigitalOcean: NYC, SFO
    • -
    • Hetzner: Ashburn, Virginia
    • -
    • UpCloud: Multiple US locations
    • -
    -

    Europe

    -
      -
    • Hetzner: Falkenstein (DE), Nuremberg (DE), Helsinki (FI)
    • -
    • UpCloud: Multiple EU locations
    • -
    • AWS: eu-west-1 (IE), eu-central-1 (DE), etc
    • -
    • DigitalOcean: London, Frankfurt, Amsterdam
    • -
    -

    Asia

    -
      -
    • AWS: ap-southeast-1 (SG), ap-northeast-1 (Tokyo)
    • -
    • DigitalOcean: Singapore, Bangalore
    • -
    • Hetzner: Limited
    • -
    • UpCloud: Singapore
    • -
    -

    Recommendation for Multi-Region: Combine Hetzner (EU backbone), DigitalOcean (global presence), AWS (comprehensive regions).

    -

    Cost Analysis

    -

    Monthly Compute Comparison (2 vCPU, 4 GB RAM)

    -
    - - - - -
    ProviderPriceNotes
    Hetzner€6.90 (~$7.50)Cheapest, good performance
    DigitalOcean$24Premium pricing, simplicity
    UpCloud$30Mid-range, good support
    AWS t3.medium$60+On-demand pricing (spot: $18-25)
    -
    -

    Recommendations by Budget

    -

    Minimal Budget (<$50/month)

    -
      -
    • Single Hetzner server: €6.90
    • -
    • Alternative: DigitalOcean $24 + DigitalOcean Spaces for backup
    • -
    -

    Small Team ($100-500/month)

    -
      -
    • Hetzner primary (€50-150), DigitalOcean backup (60-80)
    • -
    • Good HA coverage with cost control
    • -
    -

    Enterprise ($1000+/month)

    -
      -
    • AWS primary (managed services, compliance)
    • -
    • Hetzner backup (cost-effective)
    • -
    • DigitalOcean edge locations (CDN)
    • -
    -

    Compliance and Certifications

    -
    - - - - -
    ProviderGDPRSOC 2ISO 27001HIPAAFIPSPCI-DSS
    Hetzner
    UpCloud
    AWS
    DigitalOcean
    -
    -

    Compliance Selection Matrix

    -
      -
    • GDPR Only: Hetzner, UpCloud (EU-based), all AWS/DO EU regions
    • -
    • HIPAA Required: AWS, DigitalOcean (DigitalOcean requires BAA)
    • -
    • FIPS Required: AWS (all regions)
    • -
    • PCI-DSS: All providers support, AWS most comprehensive
    • -
    -

    Workspace Configuration

    -

    Multi-Provider Workspace Structure

    -
    provisioning/examples/workspaces/my-multi-provider-app/
    -├── workspace.ncl                # Infrastructure definition
    -├── config.toml                  # Provider credentials, regions, defaults
    -├── README.md                    # Setup and deployment instructions
    -└── deploy.nu                    # Deployment orchestration script
    +

    Enforce version constraints:

    +
    # Check version compliance
    +provisioning version --check-constraints
    +
    +# Enforce constraint
    +provisioning version --strict-mode
     
    -

    Provider Credential Management

    -

    Environment Variables

    -

    Each provider requires authentication via environment variables:

    -
    # Hetzner
    -export HCLOUD_TOKEN="your-hetzner-api-token"
    -
    -# UpCloud
    -export UPCLOUD_USERNAME="your-upcloud-username"
    -export UPCLOUD_PASSWORD="your-upcloud-password"
    -
    -# AWS
    -export AWS_ACCESS_KEY_ID="your-access-key"
    -export AWS_SECRET_ACCESS_KEY="your-secret-key"
    -export AWS_DEFAULT_REGION="us-east-1"
    -
    -# DigitalOcean
    -export DIGITALOCEAN_TOKEN="your-do-api-token"
    -
    -

    Configuration File Structure (config.toml)

    -
    [providers]
    -
    -[providers.hetzner]
    -enabled = true
    -api_token_env = "HCLOUD_TOKEN"
    -default_region = "nbg1"
    -default_datacenter = "nbg1-dc8"
    -
    -[providers.upcloud]
    -enabled = true
    -username_env = "UPCLOUD_USERNAME"
    -password_env = "UPCLOUD_PASSWORD"
    -default_region = "fi-hel1"
    -
    -[providers.aws]
    -enabled = true
    -region = "us-east-1"
    -access_key_env = "AWS_ACCESS_KEY_ID"
    -secret_key_env = "AWS_SECRET_ACCESS_KEY"
    -
    -[providers.digitalocean]
    -enabled = true
    -token_env = "DIGITALOCEAN_TOKEN"
    -default_region = "nyc3"
    -
    -[workspace]
    -name = "my-multi-provider-app"
    -environment = "production"
    -owner = "platform-team"
    -
    -

    Multi-Provider Workspace Definition

    -

    Nickel workspace with multiple providers:

    -
    # workspace.ncl - Multi-provider infrastructure definition
    -
    -let hetzner = import "../../extensions/providers/hetzner/nickel/main.ncl" in
    -let upcloud = import "../../extensions/providers/upcloud/nickel/main.ncl" in
    -let aws = import "../../extensions/providers/aws/nickel/main.ncl" in
    -let digitalocean = import "../../extensions/providers/digitalocean/nickel/main.ncl" in
    -
    +

    Vendor Versions

    +

    Pin provider and task service versions:

    +
    # workspace/infra/versions.ncl
     {
    -  workspace_name = "multi-provider-app",
    -  description = "Multi-provider infrastructure example",
    -
    -  # Provider routing configuration
       providers = {
    -    primary_compute = "hetzner",
    -    secondary_compute = "digitalocean",
    -    database = "aws",
    -    backup = "upcloud"
    -  },
    -
    -  # Infrastructure defined per provider
    -  infrastructure = {
    -    # Hetzner: Primary compute tier
    -    primary_servers = hetzner.Server & {
    -      name = "primary-server",
    -      server_type = "cx31",
    -      image = "ubuntu-22.04",
    -      location = "nbg1",
    -      count = 3,
    -      ssh_keys = ["your-ssh-key"],
    -      firewalls = ["primary-fw"]
    -    },
    -
    -    # DigitalOcean: Secondary compute tier
    -    secondary_servers = digitalocean.Droplet & {
    -      name = "secondary-droplet",
    -      size = "s-2vcpu-4gb",
    -      image = "ubuntu-22-04-x64",
    -      region = "nyc3",
    -      count = 2
    -    },
    -
    -    # AWS: Managed database
    -    database = aws.RDS & {
    -      identifier = "prod-db",
    -      engine = "postgresql",
    -      engine_version = "14.6",
    -      instance_class = "db.t3.medium",
    -      allocated_storage = 100
    -    },
    -
    -    # UpCloud: Backup storage
    -    backup_storage = upcloud.Storage & {
    -      name = "backup-volume",
    -      size = 500,
    -      location = "fi-hel1"
    -    }
    +    upcloud = "2.0.0"
    +    aws = "5.0.0"
    +  }
    +  taskservs = {
    +    kubernetes = "1.28.0"
    +    postgres = "14.0"
       }
     }
     
    -

    Architecture Patterns

    -

    Pattern 1: Compute + Storage Split

    -

    Scenario: Cost-effective compute with specialized managed storage.

    -

    Example: Use Hetzner for compute (cheap), AWS S3 for object storage (reliable), managed database on AWS RDS.

    -

    Benefits

    +

    Best Practices

    +

    1. Plan Upgrades

      -
    • Compute optimization (Hetzner’s low cost)
    • -
    • Storage specialization (AWS S3 reliability and features)
    • -
    • Separation of concerns (different performance tuning)
    • +
    • Schedule during maintenance windows
    • +
    • Test in staging first
    • +
    • Communicate with team
    • +
    • Have rollback plan ready
    -

    Architecture

    -
                        ┌─────────────────────┐
    -                    │   Client Requests   │
    -                    └──────────┬──────────┘
    -                               │
    -                ┌──────────────┼──────────────┐
    -                │              │              │
    -         ┌──────▼─────┐  ┌────▼─────┐  ┌───▼──────┐
    -         │  Hetzner   │  │    AWS   │  │ AWS S3   │
    -         │  Servers   │  │    RDS   │  │ Storage  │
    -         │ (Compute)  │  │(Database)│  │(Backups) │
    -         └────────────┘  └──────────┘  └──────────┘
    +

    2. Backup Everything

    +
    # Complete backup before upgrade
    +provisioning workspace backup --compress
    +provisioning config backup
    +provisioning state backup
     
    -

    Nickel Configuration

    -
    let hetzner = import "../../extensions/providers/hetzner/nickel/main.ncl" in
    -let aws = import "../../extensions/providers/aws/nickel/main.ncl" in
    -
    -{
    -  compute = hetzner.Server & {
    -    name = "app-server",
    -    server_type = "cpx21",  # 4 vCPU, 8 GB RAM
    -    image = "ubuntu-22.04",
    -    location = "nbg1",
    -    count = 2,
    -    volumes = [
    -      {
    -        size = 100,
    -        format = "ext4",
    -        mount = "/app"
    -      }
    -    ]
    -  },
    -
    -  database = aws.RDS & {
    -    identifier = "app-database",
    -    engine = "postgresql",
    -    instance_class = "db.t3.medium",
    -    allocated_storage = 100
    -  },
    -
    -  backup_bucket = aws.S3 & {
    -    bucket = "app-backups",
    -    region = "us-east-1",
    -    versioning = true,
    -    lifecycle_rules = [
    -      {
    -        id = "delete-old-backups",
    -        days = 90,
    -        action = "delete"
    -      }
    -    ]
    -  }
    -}
    +

    3. Test Before Upgrading

    +
    # Use side-by-side upgrade to test
    +provisioning upgrade --staging
    +provisioning test suite
     
    -

    Network Configuration

    -

    Hetzner servers connect to AWS RDS via VPN or public endpoint:

    -
    # Network setup script
    -def setup_database_connection [] {
    -  let hetzner_servers = (hetzner_list_servers)
    -  let db_endpoint = (aws_get_rds_endpoint "app-database")
    +

    4. Monitor After Upgrade

    +
    # Watch orchestrator
    +provisioning orchestrator status --watch
     
    -  # Install PostgreSQL client
    -  $hetzner_servers | each {|server|
    -    ssh $server.ip "apt-get install -y postgresql-client"
    -    ssh $server.ip $"echo 'DB_HOST=($db_endpoint)' >> /app/.env"
    -  }
    -}
    +# Monitor platform health
    +provisioning platform monitor
    +
    +# Check logs
    +tail -f ~/.provisioning/logs/provisioning.log
     
    -

    Cost Analysis

    -

    Monthly estimate:

    +

    5. Document Changes

    +
    # Record what changed
    +provisioning upgrade --changelog > UPGRADE.md
    +
    +# Update team documentation
    +# Update runbooks
    +# Update dashboards
    +
    +

    Upgrade Policies

    +

    Automatic Updates

    +

    Enable automatic updates:

    +
    # ~/.config/provisioning/user_config.yaml
    +upgrade:
    +  auto_update: true
    +  check_interval: "daily"
    +  update_channel: "stable"
    +  auto_backup: true
    +
    +

    Update Channels

    +

    Choose update channel:

    +
    # Stable releases (recommended)
    +provisioning upgrade --channel stable
    +
    +# Beta releases
    +provisioning upgrade --channel beta
    +
    +# Development (nightly)
    +provisioning upgrade --channel development
    +
    +
      -
    • Hetzner cx31 × 2: €13.80 (~$15)
    • -
    • AWS RDS t3.medium: $60
    • -
    • AWS S3 (100 GB): $2.30
    • -
    • Total: ~$77/month (vs $120+ for all-AWS)
    • +
    • Initial Setup - First-time configuration
    • +
    • Platform Health - System monitoring
    • +
    • Backup & Recovery - Data protection
    -

    Pattern 2: Primary + Backup

    -

    Scenario: Active-standby deployment for disaster recovery.

    -

    Example: DigitalOcean primary datacenter, Hetzner warm standby with automated failover.

    -

    Benefits

    -
      -
    • Disaster recovery capability
    • -
    • Zero data loss (with replication)
    • -
    • Tested failover procedure
    • -
    • Cost-effective backup (warm standby vs hot standby)
    • -
    -

    Architecture

    -
             Primary (DigitalOcean NYC)        Backup (Hetzner DE)
    -         ┌──────────────────────┐          ┌─────────────────┐
    -         │   DigitalOcean LB    │◄────────►│ HAProxy Monitor │
    -         └──────────┬───────────┘          └────────┬────────┘
    -                    │                               │
    -         ┌──────────┴──────────┐                    │
    -         │                     │                    │
    -     ┌───▼───┐ ┌───▼───┐   ┌──▼──┐ ┌──────┐    ┌──▼───┐
    -     │ APP 1 │ │ APP 2 │   │ DB  │ │ ELK  │    │ WARM │
    -     │PRIMARY│ │PRIMARY│   │REPL │ │MON   │    │STANDBY
    -     └───────┘ └───────┘   └─────┘ └──────┘    └──────┘
    -         │                     │                    ▲
    -         └─────────────────────┼────────────────────┘
    -                        Async Replication
    +

    Troubleshooting

    +

    Common issues, debugging procedures, and resolution strategies for the Provisioning platform.

    +

    Quick Diagnosis

    +

    Run platform diagnostics:

    +
    # Comprehensive health check
    +provisioning diagnose
    +
    +# Check specific component
    +provisioning diagnose --component orchestrator
    +
    +# Generate diagnostic report
    +provisioning diagnose --report /tmp/diagnostics.txt
     
    -

    Failover Trigger

    -
    def monitor_primary_health [do_region, hetzner_region] {
    -  loop {
    -    let health = (do_health_check $do_region)
    -
    -    if $health.status == "degraded" or $health.status == "down" {
    -      print "Primary degraded, triggering failover"
    -      trigger_failover $hetzner_region
    -      break
    -    }
    -
    -    sleep 30sec
    -  }
    -}
    -
    -def trigger_failover [backup_region] {
    -  # 1. Promote backup database
    -  promote_replica_to_primary $backup_region
    -
    -  # 2. Update DNS to point to backup
    -  update_dns_to_backup $backup_region
    -
    -  # 3. Scale up backup servers
    -  scale_servers $backup_region 3
    -
    -  # 4. Verify traffic flowing
    -  wait_for_traffic_migration $backup_region 120sec
    -}
    -
    -

    Nickel Configuration

    -
    let digitalocean = import "../../extensions/providers/digitalocean/nickel/main.ncl" in
    -let hetzner = import "../../extensions/providers/hetzner/nickel/main.ncl" in
    -
    -{
    -  # Primary: DigitalOcean
    -  primary = {
    -    region = "nyc3",
    -    provider = "digitalocean",
    -
    -    servers = digitalocean.Droplet & {
    -      name = "primary-app",
    -      size = "s-2vcpu-4gb",
    -      count = 3,
    -      region = "nyc3",
    -      firewall = {
    -        inbound = [
    -          { protocol = "tcp", ports = "80", sources = ["0.0.0.0/0"] },
    -          { protocol = "tcp", ports = "443", sources = ["0.0.0.0/0"] },
    -          { protocol = "tcp", ports = "5432", sources = ["10.0.0.0/8"] }
    -        ]
    -      }
    -    },
    -
    -    database = digitalocean.Database & {
    -      name = "primary-db",
    -      engine = "pg",
    -      version = "14",
    -      size = "db-s-2vcpu-4gb",
    -      region = "nyc3"
    -    }
    -  },
    -
    -  # Backup: Hetzner (warm standby)
    -  backup = {
    -    region = "nbg1",
    -    provider = "hetzner",
    -
    -    servers = hetzner.Server & {
    -      name = "backup-app",
    -      server_type = "cx31",
    -      count = 1,  # Minimal for cost
    -      location = "nbg1",
    -      automount = true
    -    },
    -
    -    # Replica database (read-only until promoted)
    -    database_replica = hetzner.Volume & {
    -      name = "db-replica",
    -      size = 100,
    -      location = "nbg1"
    -    }
    -  },
    -
    -  replication = {
    -    type = "async",
    -    primary_to_backup = true,
    -    recovery_point_objective = 300  # 5 minutes
    -  }
    -}
    -
    -

    Failover Testing

    -
    # Test failover without affecting production
    -def test_failover_dry_run [config] {
    -  print "Starting failover dry-run test..."
    -
    -  # 1. Snapshot primary database
    -  let snapshot = (do_create_db_snapshot "primary-db")
    -
    -  # 2. Create temporary replica from snapshot
    -  let temp_replica = (hetzner_create_from_snapshot $snapshot)
    -
    -  # 3. Run traffic tests against temp replica
    -  let test_results = (run_integration_tests $temp_replica.ip)
    -
    -  # 4. Verify database consistency
    -  let consistency = (verify_db_consistency $temp_replica.ip)
    -
    -  # 5. Cleanup temp resources
    -  hetzner_destroy $temp_replica.id
    -  do_delete_snapshot $snapshot.id
    -
    -  {
    -    status: "passed",
    -    results: $test_results,
    -    consistency_check: $consistency
    -  }
    -}
    -
    -

    Pattern 3: Multi-Region High Availability

    -

    Scenario: Distributed deployment across 3+ geographic regions with global load balancing.

    -

    Example: DigitalOcean US (NYC), Hetzner EU (Germany), AWS Asia (Singapore) with DNS-based failover.

    -

    Benefits

    -
      -
    • Geographic distribution for low latency
    • -
    • Protection against regional outages
    • -
    • Compliance with data residency (data stays in region)
    • -
    • Load distribution across regions
    • -
    -

    Architecture

    -
                        ┌─────────────────┐
    -                    │  Global DNS     │
    -                    │  (Geofencing)   │
    -                    └────────┬────────┘
    -                    ┌────────┴────────┐
    -                    │                 │
    -         ┌──────────▼──────┐  ┌──────▼─────────┐  ┌─────────────┐
    -         │  DigitalOcean   │  │  Hetzner       │  │    AWS      │
    -         │  US/NYC Region  │  │  EU/Germany    │  │  Asia/SG    │
    -         ├─────────────────┤  ├────────────────┤  ├─────────────┤
    -         │ Droplets (3)    │  │ Servers (3)    │  │ EC2 (3)     │
    -         │ LB              │  │ HAProxy        │  │ ALB         │
    -         │ DB (Primary)    │  │ DB (Replica)   │  │ DB (Replica)│
    -         └─────────────────┘  └────────────────┘  └─────────────┘
    -                    │                 │                    │
    -                    └─────────────────┴────────────────────┘
    -                           Cross-Region Sync
    -
    -

    Global Load Balancing

    -
    def setup_global_dns [] {
    -  # Using Route53 or Cloudflare for DNS failover
    -  let regions = [
    -    { name: "us-nyc", provider: "digitalocean", endpoint: "us.app.example.com" },
    -    { name: "eu-de", provider: "hetzner", endpoint: "eu.app.example.com" },
    -    { name: "asia-sg", provider: "aws", endpoint: "asia.app.example.com" }
    -  ]
    -
    -  # Create health checks
    -  $regions | each {|region|
    -    configure_health_check $region.name $region.endpoint
    -  }
    -
    -  # Setup failover policy
    -  # Primary: US, Secondary: EU, Tertiary: Asia
    -  configure_dns_failover {
    -    primary: "us-nyc",
    -    secondary: "eu-de",
    -    tertiary: "asia-sg"
    -  }
    -}
    -
    -

    Nickel Configuration

    -
    {
    -  regions = {
    -    us_east = {
    -      provider = "digitalocean",
    -      region = "nyc3",
    -
    -      servers = digitalocean.Droplet & {
    -        name = "us-app",
    -        size = "s-2vcpu-4gb",
    -        count = 3,
    -        region = "nyc3"
    -      },
    -
    -      database = digitalocean.Database & {
    -        name = "us-db",
    -        engine = "pg",
    -        size = "db-s-2vcpu-4gb",
    -        region = "nyc3",
    -        replica_regions = ["eu-de", "asia-sg"]
    -      }
    -    },
    -
    -    eu_central = {
    -      provider = "hetzner",
    -      region = "nbg1",
    -
    -      servers = hetzner.Server & {
    -        name = "eu-app",
    -        server_type = "cx31",
    -        count = 3,
    -        location = "nbg1"
    -      }
    -    },
    -
    -    asia_southeast = {
    -      provider = "aws",
    -      region = "ap-southeast-1",
    -
    -      servers = aws.EC2 & {
    -        name = "asia-app",
    -        instance_type = "t3.medium",
    -        count = 3,
    -        region = "ap-southeast-1"
    -      }
    -    }
    -  },
    -
    -  global_config = {
    -    dns_provider = "route53",
    -    ttl = 60,
    -    health_check_interval = 30
    -  }
    -}
    -
    -

    Data Synchronization

    -
    # Multi-region data sync strategy
    -def sync_data_across_regions [primary_region, secondary_regions] {
    -  let sync_config = {
    -    strategy: "async",
    -    consistency: "eventual",
    -    conflict_resolution: "last-write-wins",
    -    replication_lag: "300s"  # 5 minute max lag
    -  }
    -
    -  # Setup replication from primary to all secondaries
    -  $secondary_regions | each {|region|
    -    setup_async_replication $primary_region $region $sync_config
    -  }
    -
    -  # Monitor replication lag
    -  loop {
    -    let lag = (check_replication_lag)
    -    if $lag > 300 {
    -      print "Warning: replication lag exceeds threshold"
    -      trigger_alert "replication-lag-warning"
    -    }
    -    sleep 60sec
    -  }
    -}
    -
    -

    Pattern 4: Hybrid Cloud

    -

    Scenario: On-premises infrastructure with public cloud providers for burst capacity and backup.

    -

    Example: On-premise data center + AWS for burst capacity + DigitalOcean for disaster recovery.

    -

    Benefits

    -
      -
    • Existing infrastructure utilization
    • -
    • Burst capacity in public cloud
    • -
    • Disaster recovery site
    • -
    • Compliance with on-premise requirements
    • -
    • Cost control (scale only when needed)
    • -
    -

    Architecture

    -
        On-Premises Data Center           Public Cloud (Burst)
    -    ┌─────────────────────────┐      ┌────────────────────┐
    -    │  Physical Servers       │◄────►│  AWS Auto-Scaling  │
    -    │  - App Tier (24 cores)  │      │  - Elasticity      │
    -    │  - DB Tier (48 cores)   │      │  - Pay-as-you-go   │
    -    │  - Storage (50 TB)       │      │  - CloudFront CDN  │
    -    └─────────────────────────┘      └────────────────────┘
    -               │                               ▲
    -               │ VPN Tunnel                    │
    -               └───────────────────────────────┘
    -
    -    On-Premises                        DR Site (DigitalOcean)
    -    │ Production                        │ Warm Standby
    -    ├─ 95% Utilization                  ├─ Cold VM Snapshots
    -    ├─ Full Data                        ├─ Async Replication
    -    ├─ Peak Load Handling               ├─ Ready for 15 min RTO
    -    │                                   │
    -
    -

    VPN Configuration

    -
    def setup_hybrid_vpn [] {
    -  # AWS VPN to on-premise datacenter
    -  let vpn_config = {
    -    type: "site-to-site",
    -    protocol: "ipsec",
    -    encryption: "aes-256",
    -    authentication: "sha256",
    -    on_prem_cidr: "192.168.0.0/16",
    -    aws_cidr: "10.0.0.0/16",
    -    do_cidr: "172.16.0.0/16"
    -  }
    -
    -  # Create AWS Site-to-Site VPN
    -  let vpn = (aws_create_vpn_connection $vpn_config)
    -
    -  # Configure on-prem gateway
    -  configure_on_prem_vpn_gateway $vpn
    -
    -  # Verify tunnel status
    -  wait_for_vpn_ready 300
    -}
    -
    -

    Nickel Configuration

    -
    {
    -  on_premises = {
    -    provider = "manual",
    -    gateway = "192.168.1.1",
    -    cidr = "192.168.0.0/16",
    -    bandwidth = "1gbps",
    -
    -    # Resources remain on-prem (managed manually)
    -    servers = {
    -      app_tier = { cores = 24, memory = 128 },
    -      db_tier = { cores = 48, memory = 256 },
    -      storage = { capacity = "50 TB" }
    -    }
    -  },
    -
    -  aws_burst_capacity = {
    -    provider = "aws",
    -    region = "us-east-1",
    -
    -    auto_scaling_group = aws.ASG & {
    -      name = "burst-asg",
    -      min_size = 0,
    -      desired_capacity = 0,
    -      max_size = 20,
    -      instance_type = "c5.2xlarge",
    -      scale_up_trigger = "on_prem_cpu > 80%",
    -      scale_down_trigger = "on_prem_cpu < 40%"
    -    },
    -
    -    cdn = aws.CloudFront & {
    -      origin = "on-prem-origin",
    -      regional_origins = ["us-east-1", "eu-west-1", "ap-southeast-1"]
    -    }
    -  },
    -
    -  dr_site = {
    -    provider = "digitalocean",
    -    region = "nyc3",
    -
    -    snapshot_storage = digitalocean.Droplet & {
    -      name = "dr-snapshot",
    -      size = "s-24vcpu-48gb",
    -      count = 0,  # Powered off until needed
    -      image = "on-prem-snapshot"
    -    }
    -  },
    -
    -  replication = {
    -    on_prem_to_aws: {
    -      strategy = "continuous",
    -      target = "aws-s3-bucket",
    -      retention = "7days"
    -    },
    -
    -    on_prem_to_do: {
    -      strategy = "nightly",
    -      target = "do-spaces-bucket",
    -      retention = "30days"
    -    }
    -  }
    -}
    -
    -

    Burst Capacity Orchestration

    -
    # Monitor on-prem and trigger AWS burst
    -def monitor_and_burst [] {
    -  loop {
    -    let on_prem_metrics = (collect_on_prem_metrics)
    -
    -    if $on_prem_metrics.cpu_avg > 80 {
    -      # Trigger AWS burst scaling
    -      let scale_size = ((100 - $on_prem_metrics.cpu_avg) / 10)
    -      scale_aws_burst $scale_size
    -    } else if $on_prem_metrics.cpu_avg < 40 {
    -      # Scale down AWS
    -      scale_aws_burst 0
    -    }
    -
    -    sleep 60sec
    -  }
    -}
    -
    -

    Implementation Examples

    -

    Example 1: Three-Provider Web Application

    -

    Scenario: Production web application with DigitalOcean web servers, AWS managed database, and Hetzner backup storage.

    -

    Architecture:

    -
      -
    • DigitalOcean: 3 web servers with load balancer (cost-effective compute)
    • -
    • AWS: RDS PostgreSQL database (managed, high availability)
    • -
    • Hetzner: Backup volumes (low-cost storage)
    • -
    -

    Files to Create:

    -

    workspace.ncl:

    -
    let digitalocean = import "../../extensions/providers/digitalocean/nickel/main.ncl" in
    -let aws = import "../../extensions/providers/aws/nickel/main.ncl" in
    -let hetzner = import "../../extensions/providers/hetzner/nickel/main.ncl" in
    -
    -{
    -  workspace_name = "three-provider-webapp",
    -  description = "Web application across three providers",
    -
    -  infrastructure = {
    -    web_tier = digitalocean.Droplet & {
    -      name = "web-server",
    -      region = "nyc3",
    -      size = "s-2vcpu-4gb",
    -      image = "ubuntu-22-04-x64",
    -      count = 3,
    -      firewall = {
    -        inbound_rules = [
    -          { protocol = "tcp", ports = "22", sources = { addresses = ["your-ip/32"] } },
    -          { protocol = "tcp", ports = "80", sources = { addresses = ["0.0.0.0/0"] } },
    -          { protocol = "tcp", ports = "443", sources = { addresses = ["0.0.0.0/0"] } }
    -        ],
    -        outbound_rules = [
    -          { protocol = "tcp", destinations = { addresses = ["0.0.0.0/0"] } }
    -        ]
    -      }
    -    },
    -
    -    load_balancer = digitalocean.LoadBalancer & {
    -      name = "web-lb",
    -      algorithm = "round_robin",
    -      region = "nyc3",
    -      forwarding_rules = [
    -        {
    -          entry_protocol = "http",
    -          entry_port = 80,
    -          target_protocol = "http",
    -          target_port = 80,
    -          certificate_id = null
    -        },
    -        {
    -          entry_protocol = "https",
    -          entry_port = 443,
    -          target_protocol = "http",
    -          target_port = 80,
    -          certificate_id = "your-cert-id"
    -        }
    -      ],
    -      sticky_sessions = {
    -        type = "cookies",
    -        cookie_name = "lb",
    -        cookie_ttl_seconds = 300
    -      }
    -    },
    -
    -    database = aws.RDS & {
    -      identifier = "webapp-db",
    -      engine = "postgres",
    -      engine_version = "14.6",
    -      instance_class = "db.t3.medium",
    -      allocated_storage = 100,
    -      storage_type = "gp3",
    -      multi_az = true,
    -      backup_retention_days = 30,
    -      subnet_group = "default",
    -      parameter_group = "default.postgres14",
    -      tags = [
    -        { key = "Environment", value = "production" },
    -        { key = "Application", value = "web-app" }
    -      ]
    -    },
    -
    -    backup_volume = hetzner.Volume & {
    -      name = "webapp-backups",
    -      size = 500,
    -      location = "nbg1",
    -      automount = false,
    -      format = "ext4"
    -    }
    -  }
    -}
    -
    -

    config.toml:

    -
    [workspace]
    -name = "three-provider-webapp"
    -environment = "production"
    -owner = "platform-team"
    -
    -[providers.digitalocean]
    -enabled = true
    -token_env = "DIGITALOCEAN_TOKEN"
    -default_region = "nyc3"
    -
    -[providers.aws]
    -enabled = true
    -region = "us-east-1"
    -access_key_env = "AWS_ACCESS_KEY_ID"
    -secret_key_env = "AWS_SECRET_ACCESS_KEY"
    -
    -[providers.hetzner]
    -enabled = true
    -token_env = "HCLOUD_TOKEN"
    -default_location = "nbg1"
    -
    -[deployment]
    -strategy = "rolling"
    -batch_size = 1
    -health_check_wait = 60
    -rollback_on_failure = true
    -
    -

    deploy.nu:

    -
    #!/usr/bin/env nu
    -
    -# Deploy three-provider web application
    -def main [environment = "staging"] {
    -  print "Deploying three-provider web application to ($environment)..."
    -
    -  # 1. Validate configuration
    -  print "Step 1: Validating configuration..."
    -  validate_config "workspace.ncl"
    -
    -  # 2. Create infrastructure
    -  print "Step 2: Creating infrastructure..."
    -  create_digitalocean_resources
    -  create_aws_resources
    -  create_hetzner_resources
    -
    -  # 3. Configure networking
    -  print "Step 3: Configuring networking..."
    -  setup_vpc_peering
    -  configure_security_groups
    -
    -  # 4. Deploy application
    -  print "Step 4: Deploying application..."
    -  deploy_app_to_web_servers
    -
    -  # 5. Verify deployment
    -  print "Step 5: Verifying deployment..."
    -  verify_health_checks
    -  verify_database_connectivity
    -  verify_backups
    -
    -  print "Deployment complete!"
    -}
    -
    -def validate_config [config_file] {
    -  print $"Validating ($config_file)..."
    -  nickel export $config_file | from json
    -}
    -
    -def create_digitalocean_resources [] {
    -  print "Creating DigitalOcean resources (3 droplets + load balancer)..."
    -  # Implementation
    -}
    -
    -def create_aws_resources [] {
    -  print "Creating AWS resources (RDS database)..."
    -  # Implementation
    -}
    -
    -def create_hetzner_resources [] {
    -  print "Creating Hetzner resources (backup volume)..."
    -  # Implementation
    -}
    -
    -def setup_vpc_peering [] {
    -  print "Setting up cross-provider networking..."
    -  # Implementation
    -}
    -
    -def configure_security_groups [] {
    -  print "Configuring security groups..."
    -  # Implementation
    -}
    -
    -def deploy_app_to_web_servers [] {
    -  print "Deploying application..."
    -  # Implementation
    -}
    -
    -def verify_health_checks [] {
    -  print "Verifying health checks..."
    -  # Implementation
    -}
    -
    -def verify_database_connectivity [] {
    -  print "Verifying database connectivity..."
    -  # Implementation
    -}
    -
    -def verify_backups [] {
    -  print "Verifying backup configuration..."
    -  # Implementation
    -}
    -
    -main $env.ENVIRONMENT?
    -
    -

    Example 2: Multi-Region Disaster Recovery

    -

    Scenario: Active-standby DR setup with DigitalOcean primary and Hetzner backup.

    -

    Architecture:

    -
      -
    • DigitalOcean NYC: Production environment (active)
    • -
    • Hetzner Germany: Warm standby (scales down until needed)
    • -
    • Async database replication
    • -
    • DNS-based failover
    • -
    • RPO: 5 minutes, RTO: 15 minutes
    • -
    -

    Example 3: Cost-Optimized Deployment

    -

    Scenario: Optimize across provider strengths: Hetzner compute, AWS managed services, DigitalOcean CDN.

    -

    Architecture:

    -
      -
    • Hetzner: 5 application servers (best compute price)
    • -
    • AWS: RDS database, ElastiCache (managed services)
    • -
    • DigitalOcean: Spaces for backups, CDN endpoints
    • -
    -

    Best Practices

    -

    1. Provider Selection

    -
      -
    • Document provider choices: Keep record of which workloads run where and why
    • -
    • Audit provider capabilities: Ensure chosen provider supports required features
    • -
    • Monitor provider health: Track outages and issues per provider
    • -
    • Cost tracking per provider: Understand where money is spent
    • -
    -

    2. Network Security

    -
      -
    • Encrypt inter-provider traffic: Use VPN, mTLS, or encrypted tunnels
    • -
    • Implement firewall rules: Limit traffic between providers to necessary ports
    • -
    • Use security groups: AWS-style security groups where available
    • -
    • Monitor network traffic: Detect unusual patterns across providers
    • -
    -

    3. Data Consistency

    -
      -
    • Choose replication strategy: Synchronous (consistency), asynchronous (performance)
    • -
    • Implement conflict resolution: Define how conflicts are resolved
    • -
    • Monitor replication lag: Alert on excessive lag
    • -
    • Test failover regularly: Verify data integrity during failover
    • -
    -

    4. Disaster Recovery

    -
      -
    • Define RPO/RTO targets: Recovery Point Objective and Recovery Time Objective
    • -
    • Document failover procedures: Step-by-step instructions
    • -
    • Test failover regularly: At least quarterly, ideally monthly
    • -
    • Maintain DR site readiness: Cold, warm, or hot standby based on RTO
    • -
    -

    5. Compliance and Governance

    -
      -
    • Data residency: Ensure data stays in required regions
    • -
    • Encryption at rest: Use provider-native encryption
    • -
    • Encryption in transit: TLS/mTLS for all inter-provider communication
    • -
    • Audit logging: Enable audit logs in all providers
    • -
    • Access control: Implement least privilege across all providers
    • -
    -

    6. Monitoring and Alerting

    -
      -
    • Unified monitoring: Aggregate metrics from all providers
    • -
    • Cross-provider dashboards: Visualize health across providers
    • -
    • Provider-specific alerts: Configure alerts per provider
    • -
    • Escalation procedures: Clear escalation for failures
    • -
    -

    7. Cost Management

    -
      -
    • Set budget alerts: Per provider and total
    • -
    • Reserved instances: Use provider discounts
    • -
    • Spot instances: AWS spot for non-critical workloads
    • -
    • Auto-scaling policies: Scale based on demand
    • -
    • Regular cost reviews: Monthly cost analysis and optimization
    • -
    -

    Troubleshooting

    -

    Issue: Network Connectivity Between Providers

    -

    Symptoms: Droplets can’t reach AWS database, high latency between regions

    +

    Common Issues

    +

    Services Won’t Start

    +

    Symptom: Service fails to start or crashes immediately

    Diagnosis:

    -
    # Check network connectivity
    -def diagnose_network_issue [source_ip, dest_ip] {
    -  print "Diagnosing network connectivity..."
    +
    # Check service status
    +systemctl status provisioning-orchestrator
     
    -  # 1. Check routing
    -  ssh $source_ip "ip route show"
    +# View recent logs
    +journalctl -u provisioning-orchestrator -n 100 --no-pager
     
    -  # 2. Check firewall rules
    -  check_security_groups $source_ip $dest_ip
    -
    -  # 3. Test connectivity
    -  ssh $source_ip "ping -c 3 $dest_ip"
    -  ssh $source_ip "traceroute $dest_ip"
    -
    -  # 4. Check DNS resolution
    -  ssh $source_ip "nslookup $dest_ip"
    -}
    +# Check configuration
    +provisioning validate config
     
    -

    Solutions:

    -
      -
    • Verify firewall rules allow traffic on required ports
    • -
    • Check VPN tunnel status if using site-to-site VPN
    • -
    • Verify DNS resolution in both providers
    • -
    • Check MTU size for jumbo frames (1500 bytes)
    • -
    • Enable debug logging on network components
    • -
    -

    Issue: Database Replication Lag

    -

    Symptoms: Secondary database lagging behind primary

    +

    Common Causes:

    +
      +
    1. Port already in use
    2. +
    +
    # Find process using port
    +lsof -i :8080
    +
    +# Kill conflicting process or change port in config
    +
    +
      +
    1. Configuration error
    2. +
    +
    # Validate configuration
    +provisioning validate config --strict
    +
    +# Check for syntax errors
    +nickel typecheck /etc/provisioning/config.ncl
    +
    +
      +
    1. Missing dependencies
    2. +
    +
    # Check binary dependencies
    +ldd /usr/local/bin/provisioning-orchestrator
    +
    +# Install missing libraries
    +sudo apt install <missing-library>
    +
    +
      +
    1. Permission issues
    2. +
    +
    # Fix ownership
    +sudo chown -R provisioning:provisioning /var/lib/provisioning
    +sudo chown -R provisioning:provisioning /etc/provisioning
    +
    +# Fix permissions
    +sudo chmod 750 /var/lib/provisioning
    +sudo chmod 640 /etc/provisioning/*.toml
    +
    +

    Database Connection Failures

    +

    Symptom: Services can’t connect to SurrealDB

    Diagnosis:

    -
    def check_replication_lag [] {
    -  # AWS RDS
    -  aws rds describe-db-instances --query 'DBInstances[].{ID:DBInstanceIdentifier,Lag:ReplicationLag}'
    +
    # Check database status
    +systemctl status surrealdb
     
    -  # DigitalOcean
    -  doctl databases backups list --format Name,Created
    -}
    +# Test database connectivity
    +curl  [http://localhost:8000/health](http://localhost:8000/health)
    +
    +# Check database logs
    +journalctl -u surrealdb -n 50
     
    -

    Solutions:

    -
      -
    • Check network bandwidth between providers
    • -
    • Review write throughput on primary
    • -
    • Monitor CPU/IO on secondary
    • -
    • Adjust replication thread pool size
    • -
    • Check for long-running queries blocking replication
    • -
    -

    Issue: Failover Not Working

    -

    Symptoms: Failover script fails, DNS not updating

    +

    Resolution:

    +
    # Restart database
    +sudo systemctl restart surrealdb
    +
    +# Verify connection string in config
    +provisioning config get database.url
    +
    +# Test manual connection
    +surreal sql --conn  [http://localhost:8000](http://localhost:8000) --user root --pass root
    +
    +

    High Resource Usage

    +

    Symptom: Service consuming excessive CPU or memory

    Diagnosis:

    -
    def test_failover_chain [] {
    -  # 1. Verify backup infrastructure is ready
    -  verify_backup_infrastructure
    +
    # Monitor resource usage
    +top -p $(pgrep provisioning-orchestrator)
     
    -  # 2. Test DNS failover
    -  test_dns_failover
    +# Detailed metrics
    +provisioning platform metrics --service orchestrator
     
    -  # 3. Verify database promotion
    -  test_db_promotion
    -
    -  # 4. Check application configuration
    -  verify_app_failover_config
    -}
    +# Check for resource leaks
     
    -

    Solutions:

    -
      -
    • Ensure backup infrastructure is powered on and running
    • -
    • Verify DNS TTL is appropriate (typically 60 seconds)
    • -
    • Test failover in staging environment first
    • -
    • Check VPN connectivity to backup provider
    • -
    • Verify database promotion scripts
    • -
    • Ensure application connection strings support both endpoints
    • -
    -

    Issue: Cost Spike Across Providers

    -

    Symptoms: Monthly bill unexpectedly high

    +

    Resolution:

    +
    # Adjust worker threads
    +provisioning config set execution.worker_threads 4
    +
    +# Reduce parallel tasks
    +provisioning config set execution.max_parallel_tasks 50
    +
    +# Increase memory limit
    +sudo systemctl set-property provisioning-orchestrator MemoryMax=8G
    +
    +# Restart service
    +sudo systemctl restart provisioning-orchestrator
    +
    +

    Workflow Failures

    +

    Symptom: Workflows fail or hang

    Diagnosis:

    -
    def analyze_cost_spike [] {
    -  print "Analyzing cost spike..."
    +
    # List failed workflows
    +provisioning workflow list --status failed
     
    -  # Compare current vs previous month
    -  let current = (get_current_month_costs)
    -  let previous = (get_previous_month_costs)
    -  let delta = ($current - $previous)
    +# View workflow details
    +provisioning workflow show <workflow-id>
     
    -  # Break down by provider
    -  $current | group-by provider | each {|group|
    -    let provider = ($group.0.provider)
    -    let cost = ($group | map {|x| $x.cost} | math sum)
    -    print $"($provider): $($cost)"
    -  }
    +# Check workflow logs
    +provisioning workflow logs <workflow-id>
     
    -  # Identify largest increases
    -  ($delta | sort-by cost_change | reverse | first 5)
    -}
    +# Inspect checkpoint state
    +provisioning workflow checkpoints <workflow-id>
     
    -

    Solutions:

    -
      -
    • Review auto-scaling activities
    • -
    • Check for unintended resource creation
    • -
    • Verify reserved instances are being used
    • -
    • Review data transfer costs (cross-region expensive)
    • -
    • Cancel idle resources
    • -
    • Contact provider support if billing seems incorrect
    • -
    -

    Conclusion

    -

    Multi-provider deployments provide significant benefits in cost optimization, reliability, and compliance. Start with a simple pattern (Compute + -Storage Split) and evolve to more complex patterns as needs grow. Always test failover procedures and maintain clear documentation of provider -responsibilities and network configurations.

    -

    For more information, see:

    -
      -
    • Provider-agnostic architecture guide
    • -
    • Batch workflow orchestration guide
    • -
    • Individual provider implementation guides
    • -
    -

    Multi-Provider Networking Guide

    -

    This comprehensive guide covers private networking, VPN tunnels, and secure communication across multiple cloud providers using Hetzner, UpCloud, AWS, -and DigitalOcean.

    -

    Table of Contents

    - -

    Overview

    -

    Multi-provider deployments require secure, private communication between resources across different cloud providers. This involves:

    -
      -
    • Private Networks: Isolated virtual networks within each provider (SDN)
    • -
    • VPN Tunnels: Encrypted connections between provider networks
    • -
    • Routing: Proper IP routing between provider networks
    • -
    • Security: Firewall rules and access control across providers
    • -
    • DNS: Private DNS for cross-provider resource discovery
    • -
    -

    Architecture

    -
    ┌──────────────────────────────────┐
    -│      DigitalOcean VPC            │
    -│  Network: 10.0.0.0/16            │
    -│  ┌────────────────────────────┐  │
    -│  │ Web Servers (10.0.1.0/24)  │  │
    -│  └────────────────────────────┘  │
    -└────────────┬─────────────────────┘
    -             │ IPSec VPN Tunnel
    -             │ Encrypted
    -             ├─────────────────────────────┐
    -             │                             │
    -┌────────────▼──────────────────┐  ┌──────▼─────────────────────┐
    -│      AWS VPC                  │  │   Hetzner vSwitch          │
    -│  Network: 10.1.0.0/16         │  │   Network: 10.2.0.0/16     │
    -│  ┌──────────────────────────┐ │  │ ┌─────────────────────────┐│
    -│  │ RDS Database (10.1.1.0) │ │  │ │ Backup (10.2.1.0)       ││
    -│  └──────────────────────────┘ │  │ └─────────────────────────┘│
    -└───────────────────────────────┘  └─────────────────────────────┘
    -         IPSec ▲                              IPSec ▲
    -         Tunnel │                             Tunnel │
    +

    Common Issues:

    +
      +
    1. Provider API errors
    2. +
    +
    # Check provider credentials
    +provisioning provider validate upcloud
    +
    +# Test provider connectivity
    +provisioning provider test upcloud
     
    -

    Provider SDN/Private Network Solutions

    -

    Hetzner: vSwitch

    -

    Product: vSwitch (Virtual Switch)

    -

    Characteristics:

    -
      -
    • Private networks for Cloud Servers
    • -
    • Multiple subnets per network
    • -
    • Layer 2 switching
    • -
    • IP-based traffic isolation
    • -
    • Free service (included with servers)
    • -
    -

    Features:

    -
      -
    • Custom IP ranges
    • -
    • Subnets and routing
    • -
    • Attached/detached servers
    • -
    • Static routes
    • -
    • Private networking without NAT
    • -
    -

    Configuration:

    -
    # Create private network
    -hcloud network create --name "app-network" --ip-range "10.0.0.0/16"
    +
      +
    1. Dependency resolution failures
    2. +
    +
    # Validate infrastructure schema
    +provisioning validate infra my-cluster.ncl
     
    -# Create subnet
    -hcloud network add-subnet app-network --ip-range "10.0.1.0/24" --network-zone eu-central
    -
    -# Attach server to network
    -hcloud server attach-to-network server-1 --network app-network --ip 10.0.1.10
    +# Check task service dependencies
    +provisioning taskserv deps kubernetes
     
    -

    UpCloud: VLAN (Virtual LAN)

    -

    Product: Private Networks (VLAN-based)

    -

    Characteristics:

    -
      -
    • Virtual LAN technology
    • -
    • Layer 2 connectivity
    • -
    • Multiple VLANs per account
    • -
    • No bandwidth charges
    • -
    • Simple configuration
    • -
    -

    Features:

    -
      -
    • Custom CIDR blocks
    • -
    • Multiple networks per account
    • -
    • Server attachment to VLANs
    • -
    • VLAN tagging support
    • -
    • Static routing
    • -
    -

    Configuration:

    -
    # Create private network
    -upctl network create --name "app-network" --ip-networks 10.0.0.0/16
    +
      +
    1. Timeout issues
    2. +
    +
    # Increase timeout
    +provisioning config set workflows.task_timeout 600
     
    -# Attach server to network
    -upctl server attach-network --server server-1 \
    -  --network app-network --ip-address 10.0.1.10
    +# Enable detailed logging
    +provisioning config set logging.level debug
     
    -

    AWS: VPC (Virtual Private Cloud)

    -

    Product: VPC with subnets and security groups

    -

    Characteristics:

    -
      -
    • Enterprise-grade networking
    • -
    • Multiple availability zones
    • -
    • Complex security models
    • -
    • NAT gateways and bastion hosts
    • -
    • Advanced routing
    • -
    -

    Features:

    -
      -
    • VPC peering
    • -
    • VPN connections
    • -
    • Internet gateways
    • -
    • NAT gateways
    • -
    • Security groups and NACLs
    • -
    • Route tables with multiple targets
    • -
    • Flow logs and VPC insights
    • -
    -

    Configuration:

    -
    # Create VPC
    -aws ec2 create-vpc --cidr-block 10.1.0.0/16
    -
    -# Create subnets
    -aws ec2 create-subnet --vpc-id vpc-12345 \
    -  --cidr-block 10.1.1.0/24 \
    -  --availability-zone us-east-1a
    -
    -# Create security group
    -aws ec2 create-security-group --group-name app-sg \
    -  --description "Application security group" --vpc-id vpc-12345
    -
    -

    DigitalOcean: VPC (Virtual Private Cloud)

    -

    Product: VPC

    -

    Characteristics:

    -
      -
    • Simple private networking
    • -
    • One VPC per region
    • -
    • Droplet attachment
    • -
    • Built-in firewall integration
    • -
    • No additional cost
    • -
    -

    Features:

    -
      -
    • Custom IP ranges
    • -
    • Droplet tagging and grouping
    • -
    • Firewall rule integration
    • -
    • Internal DNS resolution
    • -
    • Droplet-to-droplet communication
    • -
    -

    Configuration:

    -
    # Create VPC
    -doctl compute vpc create --name "app-vpc" --region nyc3 --ip-range 10.0.0.0/16
    -
    -# Attach droplet to VPC
    -doctl compute vpc member add vpc-id --droplet-ids 12345
    -
    -# Setup firewall with VPC
    -doctl compute firewall create --name app-fw --vpc-id vpc-id
    -
    -

    Private Network Configuration

    -

    Hetzner vSwitch Configuration (Nickel)

    -
    let hetzner = import "../../extensions/providers/hetzner/nickel/main.ncl" in
    -
    -{
    -  # Create private network
    -  private_network = hetzner.Network & {
    -    name = "app-network",
    -    ip_range = "10.0.0.0/16",
    -    labels = { "environment" = "production" }
    -  },
    -
    -  # Create subnet
    -  private_subnet = hetzner.Subnet & {
    -    network = "app-network",
    -    network_zone = "eu-central",
    -    ip_range = "10.0.1.0/24"
    -  },
    -
    -  # Server attached to network
    -  app_server = hetzner.Server & {
    -    name = "app-server",
    -    server_type = "cx31",
    -    image = "ubuntu-22.04",
    -    location = "nbg1",
    -
    -    # Attach to private network with static IP
    -    networks = [
    -      {
    -        network_name = "app-network",
    -        ip = "10.0.1.10"
    -      }
    -    ]
    -  }
    -}
    -
    -

    AWS VPC Configuration (Nickel)

    -
    let aws = import "../../extensions/providers/aws/nickel/main.ncl" in
    -
    -{
    -  # Create VPC
    -  vpc = aws.VPC & {
    -    cidr_block = "10.1.0.0/16",
    -    enable_dns_hostnames = true,
    -    enable_dns_support = true,
    -    tags = [
    -      { key = "Name", value = "app-vpc" }
    -    ]
    -  },
    -
    -  # Create subnet
    -  private_subnet = aws.Subnet & {
    -    vpc_id = "{{ vpc.id }}",
    -    cidr_block = "10.1.1.0/24",
    -    availability_zone = "us-east-1a",
    -    map_public_ip_on_launch = false,
    -    tags = [
    -      { key = "Name", value = "private-subnet" }
    -    ]
    -  },
    -
    -  # Create security group
    -  app_sg = aws.SecurityGroup & {
    -    name = "app-sg",
    -    description = "Application security group",
    -    vpc_id = "{{ vpc.id }}",
    -    ingress_rules = [
    -      {
    -        protocol = "tcp",
    -        from_port = 5432,
    -        to_port = 5432,
    -        source_security_group_id = "{{ app_sg.id }}"
    -      }
    -    ],
    -    tags = [
    -      { key = "Name", value = "app-sg" }
    -    ]
    -  },
    -
    -  # RDS in private subnet
    -  app_database = aws.RDS & {
    -    identifier = "app-db",
    -    engine = "postgres",
    -    instance_class = "db.t3.medium",
    -    allocated_storage = 100,
    -    db_subnet_group_name = "default",
    -    vpc_security_group_ids = ["{{ app_sg.id }}"],
    -    publicly_accessible = false
    -  }
    -}
    -
    -

    DigitalOcean VPC Configuration (Nickel)

    -
    let digitalocean = import "../../extensions/providers/digitalocean/nickel/main.ncl" in
    -
    -{
    -  # Create VPC
    -  private_vpc = digitalocean.VPC & {
    -    name = "app-vpc",
    -    region = "nyc3",
    -    ip_range = "10.0.0.0/16"
    -  },
    -
    -  # Droplets attached to VPC
    -  web_servers = digitalocean.Droplet & {
    -    name = "web-server",
    -    region = "nyc3",
    -    size = "s-2vcpu-4gb",
    -    image = "ubuntu-22-04-x64",
    -    count = 3,
    -
    -    # Attach to VPC
    -    vpc_uuid = "{{ private_vpc.id }}"
    -  },
    -
    -  # Firewall integrated with VPC
    -  app_firewall = digitalocean.Firewall & {
    -    name = "app-firewall",
    -    vpc_id = "{{ private_vpc.id }}",
    -    inbound_rules = [
    -      {
    -        protocol = "tcp",
    -        ports = "22",
    -        sources = { addresses = ["10.0.0.0/16"] }
    -      },
    -      {
    -        protocol = "tcp",
    -        ports = "443",
    -        sources = { addresses = ["0.0.0.0/0"] }
    -      }
    -    ]
    -  }
    -}
    -
    -

    VPN Tunnel Setup

    -

    IPSec VPN Between Providers

    -

    Use Case: Secure communication between DigitalOcean and AWS

    -

    Step 1: AWS Site-to-Site VPN Setup

    -
    # Create Virtual Private Gateway (VGW)
    -aws ec2 create-vpn-gateway \
    -  --type ipsec.1 \
    -  --amazon-side-asn 64512 \
    -  --tag-specifications "ResourceType=vpn-gateway,Tags=[{Key=Name,Value=app-vpn-gw}]"
    -
    -# Get VGW ID
    -VGW_ID="vgw-12345678"
    -
    -# Attach to VPC
    -aws ec2 attach-vpn-gateway \
    -  --vpn-gateway-id $VGW_ID \
    -  --vpc-id vpc-12345
    -
    -# Create Customer Gateway (DigitalOcean endpoint)
    -aws ec2 create-customer-gateway \
    -  --type ipsec.1 \
    -  --public-ip 203.0.113.12 \
    -  --bgp-asn 65000
    -
    -# Get CGW ID
    -CGW_ID="cgw-12345678"
    -
    -# Create VPN Connection
    -aws ec2 create-vpn-connection \
    -  --type ipsec.1 \
    -  --customer-gateway-id $CGW_ID \
    -  --vpn-gateway-id $VGW_ID \
    -  --options "StaticRoutesOnly=true"
    -
    -# Get VPN Connection ID
    -VPN_CONN_ID="vpn-12345678"
    -
    -# Enable static routing
    -aws ec2 enable-vpn-route-propagation \
    -  --route-table-id rtb-12345 \
    -  --vpn-connection-id $VPN_CONN_ID
    -
    -# Create static route for DigitalOcean network
    -aws ec2 create-route \
    -  --route-table-id rtb-12345 \
    -  --destination-cidr-block 10.0.0.0/16 \
    -  --gateway-id $VGW_ID
    -
    -

    Step 2: DigitalOcean Endpoint Configuration

    -

    Download VPN configuration from AWS:

    -
    # Get VPN configuration
    -aws ec2 describe-vpn-connections \
    -  --vpn-connection-ids $VPN_CONN_ID \
    -  --query 'VpnConnections[0].CustomerGatewayConfiguration' \
    -  --output text > vpn-config.xml
    -
    -

    Configure IPSec on DigitalOcean server (acting as VPN gateway):

    -
    # Install StrongSwan
    -ssh root@do-server
    -apt-get update
    -apt-get install -y strongswan strongswan-swanctl
    -
    -# Create ipsec configuration
    -cat > /etc/swanctl/conf.d/aws-vpn.conf <<'EOF'
    -connections {
    -  aws-vpn {
    -    remote_addrs = 203.0.113.1, 203.0.113.2  # AWS endpoints
    -    local_addrs = 203.0.113.12               # DigitalOcean endpoint
    -
    -    local {
    -      auth = psk
    -      id = 203.0.113.12
    -    }
    -
    -    remote {
    -      auth = psk
    -      id = 203.0.113.1
    -    }
    -
    -    children {
    -      aws-vpn {
    -        local_ts = 10.0.0.0/16                # DO network
    -        remote_ts = 10.1.0.0/16               # AWS VPC
    -
    -        esp_proposals = aes256-sha256
    -        rekey_time = 3600s
    -        rand_time = 540s
    -      }
    -    }
    -
    -    proposals = aes256-sha256-modp2048
    -    rekey_time = 28800s
    -    rand_time = 540s
    -  }
    -}
    -
    -secrets {
    -  ike-aws {
    -    secret = "SharedPreSharedKeyFromAWS123456789"
    -  }
    -}
    -EOF
    -
    -# Enable IP forwarding
    -sysctl -w net.ipv4.ip_forward=1
    -echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
    -
    -# Start StrongSwan
    -systemctl restart strongswan-swanctl
    -
    -# Verify connection
    -swanctl --stats
    -
    -

    Step 3: Add Route on DigitalOcean

    -
    # Add route to AWS VPC through VPN
    -ssh root@do-server
    -
    -ip route add 10.1.0.0/16 via 10.0.0.1 dev eth0
    -echo "10.1.0.0/16 via 10.0.0.1 dev eth0" >> /etc/network/interfaces
    -
    -# Enable forwarding on firewall
    -ufw allow from 10.1.0.0/16 to 10.0.0.0/16
    -
    -

    Wireguard VPN (Alternative, Simpler)

    -

    Advantages: Simpler, faster, modern

    -

    Create Wireguard Keypairs

    -
    # On DO server
    -ssh root@do-server
    -apt-get install -y wireguard wireguard-tools
    -
    -# Generate keypairs
    -wg genkey | tee /etc/wireguard/do_private.key | wg pubkey > /etc/wireguard/do_public.key
    -
    -# On AWS server
    -ssh ubuntu@aws-server
    -sudo apt-get install -y wireguard wireguard-tools
    -
    -sudo wg genkey | sudo tee /etc/wireguard/aws_private.key | wg pubkey > /etc/wireguard/aws_public.key
    -
    -

    Configure Wireguard on DigitalOcean

    -
    # /etc/wireguard/wg0.conf
    -cat > /etc/wireguard/wg0.conf <<'EOF'
    -[Interface]
    -PrivateKey = <contents-of-do_private.key>
    -Address = 10.10.0.1/24
    -ListenPort = 51820
    -
    -[Peer]
    -PublicKey = <contents-of-aws_public.key>
    -AllowedIPs = 10.10.0.2/32, 10.1.0.0/16
    -Endpoint = aws-server-public-ip:51820
    -PersistentKeepalive = 25
    -EOF
    -
    -chmod 600 /etc/wireguard/wg0.conf
    -
    -# Enable interface
    -wg-quick up wg0
    -
    -# Enable at boot
    -systemctl enable wg-quick@wg0
    -
    -

    Configure Wireguard on AWS

    -
    # /etc/wireguard/wg0.conf
    -cat > /etc/wireguard/wg0.conf <<'EOF'
    -[Interface]
    -PrivateKey = <contents-of-aws_private.key>
    -Address = 10.10.0.2/24
    -ListenPort = 51820
    -
    -[Peer]
    -PublicKey = <contents-of-do_public.key>
    -AllowedIPs = 10.10.0.1/32, 10.0.0.0/16
    -Endpoint = do-server-public-ip:51820
    -PersistentKeepalive = 25
    -EOF
    -
    -chmod 600 /etc/wireguard/wg0.conf
    -
    -# Enable interface
    -sudo wg-quick up wg0
    -sudo systemctl enable wg-quick@wg0
    -
    -

    Test Connectivity

    -
    # From DO server
    -ssh root@do-server
    -ping 10.10.0.2
    -
    -# From AWS server
    -ssh ubuntu@aws-server
    -sudo ping 10.10.0.1
    -
    -# Test actual services
    -curl -I http://10.1.1.10:5432  # Test AWS RDS from DO
    -
    -

    Multi-Provider Routing

    -

    Define Cross-Provider Routes (Nickel)

    -
    {
    -  # Route between DigitalOcean and AWS
    -  vpn_routes = {
    -    do_to_aws = {
    -      source_network = "10.0.0.0/16",  # DigitalOcean VPC
    -      destination_network = "10.1.0.0/16",  # AWS VPC
    -      gateway = "vpn-tunnel",
    -      metric = 100
    -    },
    -
    -    aws_to_do = {
    -      source_network = "10.1.0.0/16",
    -      destination_network = "10.0.0.0/16",
    -      gateway = "vpn-tunnel",
    -      metric = 100
    -    },
    -
    -    # Route to Hetzner through AWS (if AWS is central hub)
    -    aws_to_hz = {
    -      source_network = "10.1.0.0/16",
    -      destination_network = "10.2.0.0/16",
    -      gateway = "aws-vpn-gateway",
    -      metric = 150
    -    }
    -  }
    -}
    -
    -

    Static Routes on Hetzner

    -
    # Add route to AWS VPC
    -ip route add 10.1.0.0/16 via 10.0.0.1
    -
    -# Add route to DigitalOcean VPC
    -ip route add 10.0.0.0/16 via 10.2.0.1
    -
    -# Persist routes
    -cat >> /etc/network/interfaces <<'EOF'
    -# Routes to other providers
    -up ip route add 10.1.0.0/16 via 10.0.0.1
    -up ip route add 10.0.0.0/16 via 10.2.0.1
    -EOF
    -
    -

    AWS Route Tables

    -
    # Get main route table
    -RT_ID=$(aws ec2 describe-route-tables --filters Name=vpc-id,Values=vpc-12345 --query 'RouteTables[0].RouteTableId' --output text)
    -
    -# Add route to DigitalOcean network through VPN gateway
    -aws ec2 create-route \
    -  --route-table-id $RT_ID \
    -  --destination-cidr-block 10.0.0.0/16 \
    -  --gateway-id vgw-12345
    -
    -# Add route to Hetzner network
    -aws ec2 create-route \
    -  --route-table-id $RT_ID \
    -  --destination-cidr-block 10.2.0.0/16 \
    -  --gateway-id vgw-12345
    -
    -

    Security Considerations

    -

    1. Encryption

    -

    IPSec:

    -
      -
    • AES-256 encryption
    • -
    • SHA-256 hashing
    • -
    • 2048-bit Diffie-Hellman
    • -
    • Perfect Forward Secrecy (PFS)
    • -
    -

    Wireguard:

    -
      -
    • ChaCha20/Poly1305 or AES-GCM
    • -
    • Curve25519 key exchange
    • -
    • Automatic key rotation
    • -
    -
    # Verify IPSec configuration
    -swanctl --stats
    -
    -# Check encryption algorithms
    -swanctl --list-connections
    -
    -

    2. Firewall Rules

    -

    DigitalOcean Firewall:

    -
    inbound_rules = [
    -  # Allow VPN traffic from AWS
    -  {
    -    protocol = "udp",
    -    ports = "51820",
    -    sources = { addresses = ["aws-server-public-ip/32"] }
    -  },
    -  # Allow traffic from AWS VPC
    -  {
    -    protocol = "tcp",
    -    ports = "443",
    -    sources = { addresses = ["10.1.0.0/16"] }
    -  }
    -]
    -
    -

    AWS Security Group:

    -
    # Allow traffic from DigitalOcean VPC
    -aws ec2 authorize-security-group-ingress \
    -  --group-id sg-12345 \
    -  --protocol tcp \
    -  --port 443 \
    -  --source-security-group-cidr 10.0.0.0/16
    -
    -# Allow VPN from DigitalOcean
    -aws ec2 authorize-security-group-ingress \
    -  --group-id sg-12345 \
    -  --protocol udp \
    -  --port 51820 \
    -  --cidr "do-public-ip/32"
    -
    -

    Hetzner Firewall:

    -
    hcloud firewall create --name vpn-fw \
    -  --rules "direction=in protocol=udp destination_port=51820 source_ips=10.0.0.0/16;10.1.0.0/16"
    -
    -

    3. Network Segmentation

    -
    # Each provider has isolated subnets
    -networks = {
    -  do_web_tier = "10.0.1.0/24",      # Public-facing web
    -  do_app_tier = "10.0.2.0/24",      # Internal apps
    -  do_vpn_gateway = "10.0.3.0/24",   # VPN endpoint
    -
    -  aws_data_tier = "10.1.1.0/24",    # Databases
    -  aws_cache_tier = "10.1.2.0/24",   # Redis/Cache
    -  aws_vpn_endpoint = "10.1.3.0/24", # VPN endpoint
    -
    -  hz_backup_tier = "10.2.1.0/24",   # Backups
    -  hz_vpn_gateway = "10.2.2.0/24"    # VPN endpoint
    -}
    -
    -

    4. DNS Security

    -
    # Private DNS for internal services
    -# On each provider's VPC/network, configure:
    -
    -# DigitalOcean
    -10.0.1.10 web-1.internal
    -10.0.1.11 web-2.internal
    -10.1.1.10 database.internal
    -
    -# Add to /etc/hosts or configure Route53 private hosted zones
    -aws route53 create-hosted-zone \
    -  --name internal.example.com \
    -  --vpc VPCRegion=us-east-1,VPCId=vpc-12345 \
    -  --caller-reference internal-zone
    -
    -# Create A record
    -aws route53 change-resource-record-sets \
    -  --hosted-zone-id ZONE_ID \
    -  --change-batch file:///tmp/changes.json
    -
    -

    Implementation Examples

    -

    Complete Multi-Provider Network Setup (Nushell)

    -
    #!/usr/bin/env nu
    -
    -def setup_multi_provider_network [] {
    -  print "🌐 Setting up multi-provider network"
    -
    -  # Phase 1: Create networks on each provider
    -  print "\nPhase 1: Creating private networks..."
    -  create_digitalocean_vpc
    -  create_aws_vpc
    -  create_hetzner_network
    -
    -  # Phase 2: Create VPN endpoints
    -  print "\nPhase 2: Setting up VPN endpoints..."
    -  setup_aws_vpn_gateway
    -  setup_do_vpn_endpoint
    -  setup_hetzner_vpn_endpoint
    -
    -  # Phase 3: Configure routing
    -  print "\nPhase 3: Configuring routing..."
    -  configure_aws_routes
    -  configure_do_routes
    -  configure_hetzner_routes
    -
    -  # Phase 4: Verify connectivity
    -  print "\nPhase 4: Verifying connectivity..."
    -  verify_do_to_aws
    -  verify_aws_to_hetzner
    -  verify_hetzner_to_do
    -
    -  print "\n✅ Multi-provider network ready!"
    -}
    -
    -def create_digitalocean_vpc [] {
    -  print "  Creating DigitalOcean VPC..."
    -  let vpc = (doctl compute vpc create \
    -    --name "multi-provider-vpc" \
    -    --region "nyc3" \
    -    --ip-range "10.0.0.0/16" \
    -    --format ID \
    -    --no-header)
    -
    -  print $"    ✓ VPC created: ($vpc)"
    -}
    -
    -def create_aws_vpc [] {
    -  print "  Creating AWS VPC..."
    -  let vpc = (aws ec2 create-vpc \
    -    --cidr-block "10.1.0.0/16" \
    -    --tag-specifications "ResourceType=vpc,Tags=[{Key=Name,Value=multi-provider-vpc}]" | from json)
    -
    -  print $"    ✓ VPC created: ($vpc.Vpc.VpcId)"
    -
    -  # Create subnet
    -  let subnet = (aws ec2 create-subnet \
    -    --vpc-id $vpc.Vpc.VpcId \
    -    --cidr-block "10.1.1.0/24" | from json)
    -
    -  print $"    ✓ Subnet created: ($subnet.Subnet.SubnetId)"
    -}
    -
    -def create_hetzner_network [] {
    -  print "  Creating Hetzner vSwitch..."
    -  let network = (hcloud network create \
    -    --name "multi-provider-network" \
    -    --ip-range "10.2.0.0/16" \
    -    --format "json" | from json)
    -
    -  print $"    ✓ Network created: ($network.network.id)"
    -
    -  # Create subnet
    -  let subnet = (hcloud network add-subnet \
    -    multi-provider-network \
    -    --ip-range "10.2.1.0/24" \
    -    --network-zone "eu-central" \
    -    --format "json" | from json)
    -
    -  print $"    ✓ Subnet created"
    -}
    -
    -def setup_aws_vpn_gateway [] {
    -  print "  Setting up AWS VPN gateway..."
    -  let vgw = (aws ec2 create-vpn-gateway \
    -    --type "ipsec.1" \
    -    --tag-specifications "ResourceType=vpn-gateway,Tags=[{Key=Name,Value=multi-provider-vpn}]" | from json)
    -
    -  print $"    ✓ VPN gateway created: ($vgw.VpnGateway.VpnGatewayId)"
    -}
    -
    -def setup_do_vpn_endpoint [] {
    -  print "  Setting up DigitalOcean VPN endpoint..."
    -  # Would SSH into DO droplet and configure IPSec/Wireguard
    -  print "    ✓ VPN endpoint configured via SSH"
    -}
    -
    -def setup_hetzner_vpn_endpoint [] {
    -  print "  Setting up Hetzner VPN endpoint..."
    -  # Would SSH into Hetzner server and configure VPN
    -  print "    ✓ VPN endpoint configured via SSH"
    -}
    -
    -def configure_aws_routes [] {
    -  print "  Configuring AWS routes..."
    -  # Routes configured via AWS CLI
    -  print "    ✓ Routes to DO (10.0.0.0/16) configured"
    -  print "    ✓ Routes to Hetzner (10.2.0.0/16) configured"
    -}
    -
    -def configure_do_routes [] {
    -  print "  Configuring DigitalOcean routes..."
    -  print "    ✓ Routes to AWS (10.1.0.0/16) configured"
    -  print "    ✓ Routes to Hetzner (10.2.0.0/16) configured"
    -}
    -
    -def configure_hetzner_routes [] {
    -  print "  Configuring Hetzner routes..."
    -  print "    ✓ Routes to DO (10.0.0.0/16) configured"
    -  print "    ✓ Routes to AWS (10.1.0.0/16) configured"
    -}
    -
    -def verify_do_to_aws [] {
    -  print "  Verifying DigitalOcean to AWS connectivity..."
    -  # Ping or curl from DO to AWS
    -  print "    ✓ Connectivity verified (latency: 45 ms)"
    -}
    -
    -def verify_aws_to_hetzner [] {
    -  print "  Verifying AWS to Hetzner connectivity..."
    -  print "    ✓ Connectivity verified (latency: 65 ms)"
    -}
    -
    -def verify_hetzner_to_do [] {
    -  print "  Verifying Hetzner to DigitalOcean connectivity..."
    -  print "    ✓ Connectivity verified (latency: 78 ms)"
    -}
    -
    -setup_multi_provider_network
    -
    -

    Troubleshooting

    -

    Issue: No Connectivity Between Providers

    +

    Network Connectivity Issues

    +

    Symptom: Can’t reach external services or cloud providers

    Diagnosis:

    -
    # Test VPN tunnel status
    -swanctl --stats
    +
    # Test network connectivity
    +ping -c 3 upcloud.com
    +
    +# Check DNS resolution
    +nslookup api.upcloud.com
    +
    +# Test HTTPS connectivity
    +curl -v  [https://api.upcloud.com](https://api.upcloud.com)
    +
    +# Check proxy settings
    +env | grep -i proxy
    +
    +

    Resolution:

    +
    # Configure proxy if needed
    +export HTTPS_PROXY= [http://proxy.example.com:8080](http://proxy.example.com:8080)
    +provisioning config set network.proxy  [http://proxy.example.com:8080](http://proxy.example.com:8080)
    +
    +# Verify firewall rules
    +sudo ufw status
     
     # Check routing
     ip route show
    -
    -# Test connectivity
    -ping -c 3 10.1.1.10  # AWS target
    -traceroute 10.1.1.10
     
    -

    Solutions:

    +

    Authentication Failures

    +

    Symptom: API requests fail with 401 Unauthorized

    +

    Diagnosis:

    +
    # Check JWT token
    +provisioning auth status
    +
    +# Verify user credentials
    +provisioning auth whoami
    +
    +# Check authentication logs
    +journalctl -u provisioning-control-center | grep "auth"
    +
    +

    Resolution:

    +
    # Refresh authentication token
    +provisioning auth login --username admin
    +
    +# Reset user password
    +provisioning auth reset-password --username admin
    +
    +# Verify MFA configuration
    +provisioning auth mfa status
    +
    +

    Debugging Workflows

    +

    Enable Debug Logging

    +
    # Enable debug mode
    +export PROVISIONING_LOG_LEVEL=debug
    +provisioning workflow create my-cluster --debug
    +
    +# Or in configuration
    +provisioning config set logging.level debug
    +sudo systemctl restart provisioning-orchestrator
    +
    +

    Workflow State Inspection

    +
    # View workflow state
    +provisioning workflow state <workflow-id>
    +
    +# Export workflow state to JSON
    +provisioning workflow state <workflow-id> --format json > workflow-state.json
    +
    +# Inspect checkpoints
    +provisioning workflow checkpoints <workflow-id>
    +
    +

    Manual Workflow Retry

    +
    # Retry failed workflow from last checkpoint
    +provisioning workflow retry <workflow-id>
    +
    +# Retry from specific checkpoint
    +provisioning workflow retry <workflow-id> --from-checkpoint 3
    +
    +# Force retry (skip validation)
    +provisioning workflow retry <workflow-id> --force
    +
    +

    Performance Troubleshooting

    +

    Slow Workflow Execution

    +

    Diagnosis:

    +
    # Profile workflow execution
    +provisioning workflow profile <workflow-id>
    +
    +# Identify bottlenecks
    +provisioning workflow analyze <workflow-id>
    +
    +

    Optimization:

    +
    # Increase parallelism
    +provisioning config set execution.max_parallel_tasks 200
    +
    +# Optimize database queries
    +provisioning database analyze
    +
    +# Add caching
    +provisioning config set cache.enabled true
    +
    +

    Database Performance Issues

    +

    Diagnosis:

    +
    # Check database metrics
    +curl  [http://localhost:8000/metrics](http://localhost:8000/metrics)
    +
    +# Identify slow queries
    +provisioning database slow-queries
    +
    +# Check connection pool
    +provisioning database pool-status
    +
    +

    Optimization:

    +
    # Increase connection pool
    +provisioning config set database.max_connections 200
    +
    +# Add indexes
    +provisioning database create-indexes
    +
    +# Optimize vacuum settings
    +provisioning database vacuum
    +
    +

    Log Analysis

    +

    Centralized Log Viewing

    +
    # View all platform logs
    +journalctl -u provisioning-* -f
    +
    +# Filter by severity
    +journalctl -u provisioning-* -p err
    +
    +# Export logs for analysis
    +journalctl -u provisioning-* --since "1 hour ago" > /tmp/logs.txt
    +
    +

    Structured Log Queries

    +

    Using Loki with LogQL:

    +
    # Find errors in orchestrator
    +{job="provisioning-orchestrator"} | = "ERROR"
    +
    +# Workflow failures
    +{job="provisioning-orchestrator"} | json | status="failed"
    +
    +# API request latency over 1s
    +{job="provisioning-control-center"} | json | duration > 1
    +
    +

    Log Correlation

    +
    # Correlate logs by request ID
    +journalctl -u provisioning-* | grep "request_id=abc123"
    +
    +# Trace workflow execution
    +provisioning workflow trace <workflow-id>
    +
    +

    Advanced Debugging

    +

    Enable Rust Backtrace

    +
    # Enable backtrace for Rust services
    +export RUST_BACKTRACE=1
    +sudo systemctl restart provisioning-orchestrator
    +
    +# Full backtrace
    +export RUST_BACKTRACE=full
    +
    +

    Core Dump Analysis

    +
    # Enable core dumps
    +sudo sysctl -w kernel.core_pattern=/var/crash/core.%e.%p
    +ulimit -c unlimited
    +
    +# Analyze core dump
    +sudo coredumpctl list
    +sudo coredumpctl debug <pid>
    +
    +# In gdb:
    +(gdb) bt
    +(gdb) info threads
    +(gdb) thread apply all bt
    +
    +

    Network Traffic Analysis

    +
    # Capture API traffic
    +sudo tcpdump -i any -w /tmp/api-traffic.pcap port 8080
    +
    +# Analyze with tshark
    +tshark -r /tmp/api-traffic.pcap -Y "http"
    +
    +

    Getting Help

    +

    Collect Diagnostic Information

    +
    # Generate comprehensive diagnostic report
    +provisioning diagnose --full --output /tmp/diagnostics.tar.gz
    +
    +# Report includes:
    +# - Service status
    +# - Configuration files
    +# - Recent logs (last 1000 lines per service)
    +# - Resource usage metrics
    +# - Database status
    +# - Network connectivity tests
    +# - Workflow states
    +
    +

    Support Channels

      -
    1. Verify VPN tunnel is up: swanctl --up aws-vpn
    2. -
    3. Check firewall rules on both sides
    4. -
    5. Verify route table entries
    6. -
    7. Check security group rules
    8. -
    9. Verify DNS resolution
    10. +
    11. Check documentation: provisioning help <topic>
    12. +
    13. Search logs: journalctl -u provisioning-*
    14. +
    15. Review monitoring dashboards: http://localhost:3000
    16. +
    17. Run diagnostics: provisioning diagnose
    18. +
    19. Contact support with diagnostic report
    -

    Issue: High Latency Between Providers

    -

    Diagnosis:

    -
    # Measure latency
    -ping -c 10 10.1.1.10 | tail -1
    -
    -# Check packet loss
    -mtr -c 100 10.1.1.10
    -
    -# Check bandwidth
    -iperf3 -c 10.1.1.10 -t 10
    -
    -

    Solutions:

    +

    Preventive Measures

      -
    • Use geographically closer providers
    • -
    • Check VPN tunnel encryption overhead
    • -
    • Verify network bandwidth
    • -
    • Consider dedicated connections
    • +
    • Enable comprehensive monitoring and alerting
    • +
    • Implement regular health checks
    • +
    • Maintain up-to-date documentation
    • +
    • Test disaster recovery procedures monthly
    • +
    • Keep platform and dependencies updated
    • +
    • Review logs regularly for warning signs
    • +
    • Monitor resource utilization trends
    • +
    • Validate configuration changes before applying
    -

    Issue: DNS Not Resolving Across Providers

    -

    Diagnosis:

    -
    # Test internal DNS
    -nslookup database.internal
    -
    -# Check /etc/resolv.conf
    -cat /etc/resolv.conf
    -
    -# Test from another provider
    -ssh do-server "nslookup database.internal"
    -
    -

    Solutions:

    + -

    Issue: VPN Tunnel Drops

    -

    Diagnosis:

    -
    # Check connection logs
    -journalctl -u strongswan-swanctl -f
    -
    -# Monitor tunnel status
    -watch -n 1 'swanctl --stats'
    -
    -# Check timeout values
    -swanctl --list-connections
    -
    -

    Solutions:

    -
      -
    • Increase keepalive timeout
    • -
    • Enable DPD (Dead Peer Detection)
    • -
    • Check for firewall/ISP blocking
    • -
    • Verify public IP stability
    • -
    -

    Summary

    -

    Multi-provider networking requires:

    -

    Private Networks: VPC/vSwitch per provider -✓ VPN Tunnels: IPSec or Wireguard encryption -✓ Routing: Proper route tables and static routes -✓ Security: Firewall rules and access control -✓ Monitoring: Connectivity and latency checks

    -

    Start with simple two-provider setup (for example, DO + AWS), then expand to three or more providers.

    -

    For more information:

    - -

    DigitalOcean Provider Guide

    -

    This guide covers using DigitalOcean as a cloud provider in the provisioning system. DigitalOcean is known for simplicity, straightforward pricing, -and outstanding documentation, making it ideal for startups, small teams, and developers.

    -

    Table of Contents

    - -

    Overview

    -

    DigitalOcean offers a simplified cloud platform with competitive pricing and outstanding developer experience. Key characteristics:

    -
      -
    • Transparent Pricing: No hidden fees, simple per-resource pricing
    • -
    • Global Presence: Data centers in North America, Europe, and Asia
    • -
    • Managed Services: Databases, Kubernetes (DOKS), App Platform
    • -
    • Developer-Friendly: Outstanding documentation and community support
    • -
    • Performance: Consistent performance, modern infrastructure
    • -
    -

    DigitalOcean Pricing Model

    -

    Unlike AWS, DigitalOcean uses hourly billing with transparent monthly rates:

    -
      -
    • Droplets: $0.03/hour (typically billed monthly)
    • -
    • Volumes: $0.10/GB/month
    • -
    • Managed Database: Price varies by tier
    • -
    • Load Balancer: $10/month
    • -
    • Data Transfer: Generally included for inbound, charged for outbound
    • -
    -

    Supported Resources

    -
    - - - - - - - - - - - +

    Platform Health

    +

    Health monitoring, status checks, and system integrity validation for the Provisioning platform.

    +

    Health Check Overview

    +

    The platform provides multiple levels of health monitoring:

    +
    ResourceProduct NameStatus
    ComputeDroplets✓ Full support
    Block StorageVolumes✓ Full support
    Object StorageSpaces✓ Full support
    Load BalancerLoad Balancer✓ Full support
    DatabaseManaged Databases✓ Full support
    Container RegistryContainer Registry✓ Supported
    CDNCDN✓ Supported
    DNSDomains✓ Full support
    VPCVPC✓ Full support
    FirewallFirewall✓ Full support
    Reserved IPsReserved IPs✓ Supported
    + + + +
    LevelScopeFrequencyResponse Time
    Service HealthIndividual service statusEvery 10s< 100ms
    System HealthOverall platform statusEvery 30s< 500ms
    Infrastructure HealthManaged resourcesEvery 60s< 2s
    Dependency HealthExternal servicesEvery 60s< 1s
    -

    Why DigitalOcean

    -

    When to Choose DigitalOcean

    -

    DigitalOcean is ideal for:

    +

    Quick Health Check

    +
    # Check overall platform health
    +provisioning health
    +
    +# Output:
    +# ✓ Orchestrator: healthy (uptime: 5d 3h)
    +# ✓ Control Center: healthy
    +# ✓ Vault Service: healthy
    +# ✓ Database: healthy (connections: 45/100)
    +# ✓ Network: healthy
    +# ✗ MCP Server: degraded (high latency)
    +
    +# Exit code: 0 = healthy, 1 = degraded, 2 = unhealthy
    +
    +

    Service Health Endpoints

    +

    All services expose /health endpoints returning standardized responses.

    +

    Orchestrator Health

    +
    curl  [http://localhost:8080/health](http://localhost:8080/health)
    +
    +
    {
    +  "status": "healthy",
    +  "version": "5.0.0",
    +  "uptime_seconds": 432000,
    +  "checks": {
    +    "database": "healthy",
    +    "file_system": "healthy",
    +    "memory": "healthy"
    +  },
    +  "metrics": {
    +    "active_workflows": 12,
    +    "queued_tasks": 45,
    +    "completed_tasks": 9876,
    +    "worker_threads": 8
    +  },
    +  "timestamp": "2026-01-16T10:30:00Z"
    +}
    +
    +

    Health status values:

      -
    • Startups: Clear pricing, low minimum commitment
    • -
    • Small Teams: Simple management interface
    • -
    • Developers: Great documentation, API-driven
    • -
    • Regional Deployment: Global presence, predictable costs
    • -
    • Managed Services: Simple database and Kubernetes offerings
    • -
    • Web Applications: Outstanding fit for typical web workloads
    • +
    • healthy - Service operating normally
    • +
    • degraded - Service functional with reduced capacity
    • +
    • unhealthy - Service not functioning
    -

    DigitalOcean is NOT ideal for:

    -
      -
    • Highly Specialized Workloads: Limited service portfolio vs AWS
    • -
    • HIPAA/FedRAMP: Limited compliance options
    • -
    • Extreme Performance: Not focused on HPC
    • -
    • Enterprise with Complex Requirements: Better served by AWS
    • -
    -

    Cost Comparison

    -

    Monthly Comparison: 2 vCPU, 4 GB RAM

    -
      -
    • DigitalOcean: $24/month (constant pricing)
    • -
    • Hetzner: €6.90/month (~$7.50) - cheaper but harder to scale
    • -
    • AWS: $60/month on-demand (but $18 with spot)
    • -
    • UpCloud: $30/month
    • -
    -

    When DigitalOcean Wins:

    -
      -
    • Simplicity and transparency (no reserved instances needed)
    • -
    • Managed database costs
    • -
    • Small deployments (1-5 servers)
    • -
    • Applications using DigitalOcean-specific services
    • -
    -

    Setup and Configuration

    -

    Prerequisites

    -
      -
    • DigitalOcean account with billing enabled
    • -
    • API token from DigitalOcean Control Panel
    • -
    • doctl CLI installed (optional but recommended)
    • -
    • Provisioning system with DigitalOcean provider plugin
    • -
    -

    Step 1: Create DigitalOcean API Token

    +

    Control Center Health

    +
    curl  [http://localhost:8081/health](http://localhost:8081/health)
    +
    +
    {
    +  "status": "healthy",
    +  "version": "5.0.0",
    +  "checks": {
    +    "database": "healthy",
    +    "orchestrator": "healthy",
    +    "vault": "healthy",
    +    "auth": "healthy"
    +  },
    +  "metrics": {
    +    "active_sessions": 23,
    +    "api_requests_per_second": 156,
    +    "p95_latency_ms": 45
    +  }
    +}
    +
    +

    Vault Service Health

    +
    curl  [http://localhost:8085/health](http://localhost:8085/health)
    +
    +
    {
    +  "status": "healthy",
    +  "checks": {
    +    "kms_backend": "healthy",
    +    "encryption": "healthy",
    +    "key_rotation": "healthy"
    +  },
    +  "metrics": {
    +    "active_secrets": 234,
    +    "encryption_ops_per_second": 50,
    +    "kms_latency_ms": 3
    +  }
    +}
    +
    +

    System Health Checks

    +

    Comprehensive Health Check

    +
    # Run all health checks
    +provisioning health check --all
    +
    +# Check specific components
    +provisioning health check --components orchestrator,database,network
    +
    +# Output detailed report
    +provisioning health check --detailed --output /tmp/health-report.json
    +
    +

    Health Check Components

    +

    Platform health checking verifies:

      -
    1. Go to DigitalOcean Control Panel
    2. -
    3. Navigate to API > Tokens/Keys
    4. -
    5. Click Generate New Token
    6. -
    7. Set expiration to 90 days or custom
    8. -
    9. Select Read & Write scope
    10. -
    11. Copy the token (you can only view it once)
    12. +
    13. Service Availability - All services responding
    14. +
    15. Database Connectivity - SurrealDB reachable and responsive
    16. +
    17. Filesystem Health - Disk space and I/O performance
    18. +
    19. Network Connectivity - Internal and external connectivity
    20. +
    21. Resource Utilization - CPU, memory, disk within limits
    22. +
    23. Dependency Status - External services available
    24. +
    25. Security Status - Authentication and encryption functional
    -

    Step 2: Configure Environment Variables

    -
    # Add to ~/.bashrc, ~/.zshrc, or env file
    -export DIGITALOCEAN_TOKEN="dop_v1_xxxxxxxxxxxxxxxxxxxxxxxxxxxx"
    +

    Database Health

    +
    # Check database health
    +provisioning health database
     
    -# Optional: Default region for all operations
    -export DIGITALOCEAN_REGION="nyc3"
    +# Output:
    +# ✓ Connection: healthy (latency: 2ms)
    +# ✓ Disk usage: 45% (22GB / 50GB)
    +# ✓ Active connections: 45 / 100
    +# ✓ Query performance: healthy (avg: 15ms)
    +# ✗ Replication: warning (lag: 5s)
     
    -

    Step 3: Verify Configuration

    -
    # Using provisioning CLI
    -provisioning provider verify digitalocean
    +

    Detailed database metrics:

    +
    # Connection pool status
    +provisioning database pool-status
     
    -# Or using doctl
    -doctl auth init
    -doctl compute droplet list
    +# Slow query analysis
    +provisioning database slow-queries --threshold 1000ms
    +
    +# Storage usage
    +provisioning database storage-stats
     
    -

    Step 4: Configure Workspace

    -

    Create or update config.toml in your workspace:

    -
    [providers.digitalocean]
    -enabled = true
    -token_env = "DIGITALOCEAN_TOKEN"
    -default_region = "nyc3"
    +

    Filesystem Health

    +
    # Check disk space and I/O
    +provisioning health filesystem
     
    -[workspace]
    -provider = "digitalocean"
    -region = "nyc3"
    +# Output:
    +# ✓ Root filesystem: 65% used (325GB / 500GB)
    +# ✓ Data filesystem: 45% used (225GB / 500GB)
    +# ✓ I/O latency: healthy (avg: 5ms)
    +# ✗ Inodes: warning (85% used)
     
    -

    Available Resources

    -

    1. Droplets (Compute)

    -

    DigitalOcean’s core compute offering - cloud servers with hourly billing.

    -

    Resource Type: digitalocean.Droplet

    -

    Available Sizes:

    -
    - - - - - - - - -
    Size SlugvCPURAMStoragePrice/Month
    s-1vcpu-512 m-10gb1512 MB10 GB SSD$4
    s-1vcpu-1gb-25gb11 GB25 GB SSD$6
    s-2vcpu-2gb-50gb22 GB50 GB SSD$12
    s-2vcpu-4gb-80gb24 GB80 GB SSD$24
    s-4vcpu-8gb48 GB160 GB SSD$48
    s-6vcpu-16gb616 GB320 GB SSD$96
    c-224 GB50 GB SSD$40 (CPU-optimized)
    g-2vcpu-8gb28 GB50 GB SSD$60 (GPU)
    -
    -

    Key Features:

    -
      -
    • SSD storage
    • -
    • Hourly or monthly billing
    • -
    • Automatic backups
    • -
    • SSH key management
    • -
    • Private networking via VPC
    • -
    • Firewall rules
    • -
    • Monitoring and alerting
    • -
    -

    2. Volumes (Block Storage)

    -

    Persistent block storage that can be attached to Droplets.

    -

    Resource Type: digitalocean.Volume

    -

    Characteristics:

    -
      -
    • $0.10/GB/month
    • -
    • SSD-based
    • -
    • Snapshots for backup
    • -
    • Maximum 100 TB size
    • -
    • Automatic backups
    • -
    -

    3. Spaces (Object Storage)

    -

    S3-compatible object storage for files, backups, media.

    -

    Characteristics:

    -
      -
    • $5/month for 250 GB
    • -
    • Then $0.015/GB for additional storage
    • -
    • $0.01/GB outbound transfer
    • -
    • Versioning support
    • -
    • CDN integration available
    • -
    -

    4. Load Balancer

    -

    Layer 4/7 load balancing with health checks.

    -

    Price: $10/month

    -

    Features:

    -
      -
    • Round robin, least connections algorithms
    • -
    • Health checks on Droplets
    • -
    • SSL/TLS termination
    • -
    • Sticky sessions
    • -
    • HTTP/HTTPS support
    • -
    -

    5. Managed Databases

    -

    PostgreSQL, MySQL, and Redis databases.

    -

    Price Examples:

    -
      -
    • Single node PostgreSQL (1 GB RAM): $15/month
    • -
    • 3-node HA cluster: $60/month
    • -
    • Enterprise plans available
    • -
    -

    Features:

    -
      -
    • Automated backups
    • -
    • Read replicas
    • -
    • High availability option
    • -
    • Connection pooling
    • -
    • Monitoring dashboard
    • -
    -

    6. Kubernetes (DOKS)

    -

    Managed Kubernetes service.

    -

    Price: $12/month per cluster + node costs

    -

    Features:

    -
      -
    • Managed control plane
    • -
    • Autoscaling node pools
    • -
    • Integrated monitoring
    • -
    • Container Registry integration
    • -
    -

    7. CDN

    -

    Content Delivery Network for global distribution.

    -

    Price: $0.005/GB delivered

    -

    Features:

    -
      -
    • 600+ edge locations
    • -
    • Purge cache by path
    • -
    • Custom domains with SSL
    • -
    • Edge caching
    • -
    -

    8. Domains and DNS

    -

    Domain registration and DNS management.

    -

    Features:

    -
      -
    • Domain registration via Namecheap
    • -
    • Free DNS hosting
    • -
    • TTL control
    • -
    • MX records, CNAMEs, etc.
    • -
    -

    9. VPC (Virtual Private Cloud)

    -

    Private networking between resources.

    -

    Features:

    -
      -
    • Free tier (1 VPC included)
    • -
    • Isolation between resources
    • -
    • Custom IP ranges
    • -
    • Subnet management
    • -
    -

    10. Firewall

    -

    Network firewall rules.

    -

    Features:

    -
      -
    • Inbound/outbound rules
    • -
    • Protocol-specific (TCP, UDP, ICMP)
    • -
    • Source/destination filtering
    • -
    • Rule priorities
    • -
    -

    Nickel Schema Reference

    -

    Droplet Configuration

    -
    let digitalocean = import "../../extensions/providers/digitalocean/nickel/main.ncl" in
    -
    -digitalocean.Droplet & {
    -  # Required
    -  name = "my-droplet",
    -  region = "nyc3",
    -  size = "s-2vcpu-4gb",
    -
    -  # Optional
    -  image = "ubuntu-22-04-x64",  # Default: ubuntu-22-04-x64
    -  count = 1,  # Number of identical droplets
    -  ssh_keys = ["key-id-1"],
    -  backups = false,
    -  ipv6 = true,
    -  monitoring = true,
    -  vpc_uuid = "vpc-id",
    -
    -  # Volumes to attach
    -  volumes = [
    -    {
    -      size = 100,
    -      name = "data-volume",
    -      filesystem_type = "ext4",
    -      filesystem_label = "data"
    -    }
    -  ],
    -
    -  # Firewall configuration
    -  firewall = {
    -    inbound_rules = [
    -      {
    -        protocol = "tcp",
    -        ports = "22",
    -        sources = {
    -          addresses = ["0.0.0.0/0"],
    -          droplet_ids = [],
    -          tags = []
    -        }
    -      },
    -      {
    -        protocol = "tcp",
    -        ports = "80",
    -        sources = {
    -          addresses = ["0.0.0.0/0"]
    -        }
    -      },
    -      {
    -        protocol = "tcp",
    -        ports = "443",
    -        sources = {
    -          addresses = ["0.0.0.0/0"]
    -        }
    -      }
    -    ],
    -
    -    outbound_rules = [
    -      {
    -        protocol = "tcp",
    -        destinations = {
    -          addresses = ["0.0.0.0/0"]
    -        }
    -      },
    -      {
    -        protocol = "udp",
    -        ports = "53",
    -        destinations = {
    -          addresses = ["0.0.0.0/0"]
    -        }
    -      }
    -    ]
    -  },
    -
    -  # Tags
    -  tags = ["web", "production"],
    -
    -  # User data (startup script)
    -  user_data = "#!/bin/bash\napt-get update\napt-get install -y nginx"
    -}
    -
    -

    Load Balancer Configuration

    -
    digitalocean.LoadBalancer & {
    -  name = "web-lb",
    -  algorithm = "round_robin",  # or "least_connections"
    -  region = "nyc3",
    -
    -  # Forwarding rules
    -  forwarding_rules = [
    -    {
    -      entry_protocol = "http",
    -      entry_port = 80,
    -      target_protocol = "http",
    -      target_port = 80,
    -      certificate_id = null
    -    },
    -    {
    -      entry_protocol = "https",
    -      entry_port = 443,
    -      target_protocol = "http",
    -      target_port = 80,
    -      certificate_id = "cert-id"
    -    }
    -  ],
    -
    -  # Health checks
    -  health_check = {
    -    protocol = "http",
    -    port = 80,
    -    path = "/health",
    -    check_interval_seconds = 10,
    -    response_timeout_seconds = 5,
    -    healthy_threshold = 5,
    -    unhealthy_threshold = 3
    -  },
    -
    -  # Sticky sessions
    -  sticky_sessions = {
    -    type = "cookies",
    -    cookie_name = "LB",
    -    cookie_ttl_seconds = 300
    -  }
    -}
    -
    -

    Volume Configuration

    -
    digitalocean.Volume & {
    -  name = "data-volume",
    -  size = 100,  # GB
    -  region = "nyc3",
    -  description = "Application data volume",
    -  snapshots = true,
    -
    -  # To attach to a Droplet
    -  attachment = {
    -    droplet_id = "droplet-id",
    -    mount_point = "/data"
    -  }
    -}
    -
    -

    Managed Database Configuration

    -
    digitalocean.Database & {
    -  name = "prod-db",
    -  engine = "pg",  # or "mysql", "redis"
    -  version = "14",
    -  size = "db-s-1vcpu-1gb",
    -  region = "nyc3",
    -  num_nodes = 1,  # or 3 for HA
    -
    -  # High availability
    -  multi_az = false,
    -
    -  # Backups
    -  backup_restore = {
    -    backup_created_at = "2024-01-01T00:00:00Z"
    -  }
    -}
    -
    -

    Configuration Examples

    -

    Example 1: Simple Web Server

    -
    let digitalocean = import "../../extensions/providers/digitalocean/nickel/main.ncl" in
    -
    -{
    -  workspace_name = "simple-web",
    -
    -  web_server = digitalocean.Droplet & {
    -    name = "web-01",
    -    region = "nyc3",
    -    size = "s-1vcpu-1gb-25gb",
    -    image = "ubuntu-22-04-x64",
    -    ssh_keys = ["your-ssh-key-id"],
    -
    -    user_data = ''
    -      #!/bin/bash
    -      apt-get update
    -      apt-get install -y nginx
    -      systemctl start nginx
    -      systemctl enable nginx
    -    '',
    -
    -    firewall = {
    -      inbound_rules = [
    -        { protocol = "tcp", ports = "22", sources = { addresses = ["YOUR_IP/32"] } },
    -        { protocol = "tcp", ports = "80", sources = { addresses = ["0.0.0.0/0"] } },
    -        { protocol = "tcp", ports = "443", sources = { addresses = ["0.0.0.0/0"] } }
    -      ]
    -    },
    -
    -    monitoring = true
    -  }
    -}
    -
    -

    Example 2: Web Application with Database

    -
    {
    -  web_tier = digitalocean.Droplet & {
    -    name = "web-server",
    -    region = "nyc3",
    -    size = "s-2vcpu-4gb",
    -    count = 2,
    -
    -    firewall = {
    -      inbound_rules = [
    -        { protocol = "tcp", ports = "22", sources = { addresses = ["0.0.0.0/0"] } },
    -        { protocol = "tcp", ports = "80", sources = { addresses = ["0.0.0.0/0"] } },
    -        { protocol = "tcp", ports = "443", sources = { addresses = ["0.0.0.0/0"] } }
    -      ]
    -    },
    -
    -    tags = ["web", "production"]
    -  },
    -
    -  load_balancer = digitalocean.LoadBalancer & {
    -    name = "web-lb",
    -    region = "nyc3",
    -    algorithm = "round_robin",
    -
    -    forwarding_rules = [
    -      {
    -        entry_protocol = "http",
    -        entry_port = 80,
    -        target_protocol = "http",
    -        target_port = 8080
    -      }
    -    ],
    -
    -    health_check = {
    -      protocol = "http",
    -      port = 8080,
    -      path = "/health",
    -      check_interval_seconds = 10,
    -      response_timeout_seconds = 5
    -    }
    -  },
    -
    -  database = digitalocean.Database & {
    -    name = "app-db",
    -    engine = "pg",
    -    version = "14",
    -    size = "db-s-1vcpu-1gb",
    -    region = "nyc3",
    -    multi_az = true
    -  }
    -}
    -
    -

    Example 3: High-Performance Storage

    -
    {
    -  app_server = digitalocean.Droplet & {
    -    name = "app-with-storage",
    -    region = "nyc3",
    -    size = "s-4vcpu-8gb",
    -
    -    volumes = [
    -      {
    -        size = 500,
    -        name = "app-storage",
    -        filesystem_type = "ext4"
    -      }
    -    ]
    -  },
    -
    -  backup_storage = digitalocean.Volume & {
    -    name = "backup-volume",
    -    size = 1000,
    -    region = "nyc3",
    -    description = "Backup storage for app data"
    -  }
    -}
    -
    -

    Best Practices

    -

    1. Droplet Management

    -

    Instance Sizing

    -
      -
    • Start with smallest viable size (s-1vcpu-1gb)
    • -
    • Monitor CPU/memory usage
    • -
    • Scale vertically for predictable workloads
    • -
    • Use autoscaling with Kubernetes for bursty workloads
    • -
    -

    SSH Key Management

    -
      -
    • Use SSH keys instead of passwords
    • -
    • Store private keys securely
    • -
    • Rotate keys regularly (at least yearly)
    • -
    • Different keys for different environments
    • -
    -

    Monitoring

    -
      -
    • Enable monitoring on all Droplets
    • -
    • Set up alerting for CPU > 80%
    • -
    • Monitor disk usage
    • -
    • Alert on high memory usage
    • -
    -

    2. Firewall Configuration

    -

    Principle of Least Privilege

    -
      -
    • Only allow necessary ports
    • -
    • Specify source IPs when possible
    • -
    • Use SSH key authentication (no passwords)
    • -
    • Block unnecessary outbound traffic
    • -
    -

    Default Rules

    -
    # Minimal firewall for web server
    -inbound_rules = [
    -  { protocol = "tcp", ports = "22", sources = { addresses = ["YOUR_OFFICE_IP/32"] } },
    -  { protocol = "tcp", ports = "80", sources = { addresses = ["0.0.0.0/0"] } },
    -  { protocol = "tcp", ports = "443", sources = { addresses = ["0.0.0.0/0"] } }
    -],
    -
    -outbound_rules = [
    -  { protocol = "tcp", destinations = { addresses = ["0.0.0.0/0"] } },
    -  { protocol = "udp", ports = "53", destinations = { addresses = ["0.0.0.0/0"] } }
    -]
    -
    -

    3. Database Best Practices

    -

    High Availability

    -
      -
    • Use 3-node clusters for production
    • -
    • Enable automated backups (retain for 30 days)
    • -
    • Test backup restore procedures
    • -
    • Use read replicas for scaling reads
    • -
    -

    Connection Pooling

    -
      -
    • Enable PgBouncer for PostgreSQL
    • -
    • Set pool size based on app connections
    • -
    • Monitor connection count
    • -
    -

    Backup Strategy

    -
      -
    • Daily automated backups (DigitalOcean manages)
    • -
    • Export critical data to Spaces weekly
    • -
    • Test restore procedures monthly
    • -
    • Keep backups for minimum 30 days
    • -
    -

    4. Volume Management

    -

    Data Persistence

    -
      -
    • Use volumes for stateful data
    • -
    • Don’t store critical data on Droplet root volume
    • -
    • Enable automatic snapshots
    • -
    • Document mount points
    • -
    -

    Capacity Planning

    -
      -
    • Monitor volume usage
    • -
    • Expand volumes as needed (no downtime)
    • -
    • Delete old snapshots to save costs
    • -
    -

    5. Load Balancer Configuration

    -

    Health Checks

    -
      -
    • Set appropriate health check paths
    • -
    • Conservative intervals (10-30 seconds)
    • -
    • Longer timeout to avoid false positives
    • -
    • Multiple healthy thresholds
    • -
    -

    Sticky Sessions

    -
      -
    • Use if application requires session affinity
    • -
    • Set appropriate TTL (300-3600 seconds)
    • -
    • Monitor for imbalanced traffic
    • -
    -

    6. Cost Optimization

    -

    Droplet Sizing

    -
      -
    • Right-size instances to actual needs
    • -
    • Use snapshots to create custom images
    • -
    • Destroy unused Droplets
    • -
    -

    Reserved Droplets

    -
      -
    • Pre-pay for predictable workloads
    • -
    • 25-30% savings vs hourly
    • -
    -

    Object Storage

    -
      -
    • Use lifecycle policies to delete old data
    • -
    • Compress data before uploading
    • -
    • Use CDN for frequent access (reduces egress)
    • -
    -

    Troubleshooting

    -

    Issue: Droplet Not Accessible

    -

    Symptoms: Cannot SSH to Droplet, connection timeout

    -

    Diagnosis:

    -
      -
    1. Verify Droplet status in DigitalOcean Control Panel
    2. -
    3. Check firewall rules allow port 22 from your IP
    4. -
    5. Verify SSH key is loaded in SSH agent: ssh-add -l
    6. -
    7. Check Droplet has public IP assigned
    8. -
    -

    Solution:

    -
    # Add to firewall
    -doctl compute firewall add-rules firewall-id \
    -  --inbound-rules="protocol:tcp,ports:22,sources:addresses:YOUR_IP"
    -
    -# Test SSH
    -ssh -v -i ~/.ssh/key.pem root@DROPLET_IP
    -
    -# Or use VNC console in Control Panel
    -
    -

    Issue: Volume Not Mounting

    -

    Symptoms: Volume created but not accessible, mount fails

    -

    Diagnosis:

    -
    # Check volume attachment
    -doctl compute volume list
    -
    -# On Droplet, check block devices
    -lsblk
    -
    -# Check filesystem
    -sudo file -s /dev/sdb
    -
    -

    Solution:

    -
    # Format volume (only first time)
    -sudo mkfs.ext4 /dev/sdb
    -
    -# Create mount point
    -sudo mkdir -p /data
    -
    -# Mount volume
    -sudo mount /dev/sdb /data
    -
    -# Make permanent by editing /etc/fstab
    -echo '/dev/sdb /data ext4 defaults,nofail,discard 0 0' | sudo tee -a /etc/fstab
    -
    -

    Issue: Load Balancer Health Checks Failing

    -

    Symptoms: Backends marked unhealthy, traffic not flowing

    -

    Diagnosis:

    -
    # Test health check endpoint manually
    -curl -i http://BACKEND_IP:8080/health
    -
    -# Check backend logs
    -ssh backend-server
    -tail -f /var/log/app.log
    -
    -

    Solution:

    -
      -
    • Verify endpoint returns HTTP 200
    • -
    • Check backend firewall allows load balancer IPs
    • -
    • Adjust health check timing (increase timeout)
    • -
    • Verify backend service is running
    • -
    -

    Issue: Database Connection Issues

    -

    Symptoms: Cannot connect to managed database

    -

    Diagnosis:

    -
    # Test connectivity from Droplet
    -psql -h db-host.db.ondigitalocean.com -U admin -d defaultdb
    -
    -# Check firewall
    -doctl compute firewall list-rules firewall-id
    -
    -

    Solution:

    -
      -
    • Add Droplet to database’s trusted sources
    • -
    • Verify connection string (host, port, username)
    • -
    • Check database is accepting connections
    • -
    • For 3-node cluster, use connection pool endpoint
    • -
    -

    Summary

    -

    DigitalOcean provides a simple, transparent platform ideal for developers and small teams. Its key advantages are:

    -

    ✓ Simple pricing and transparent costs -✓ Excellent documentation -✓ Good performance for typical workloads -✓ Managed services (databases, Kubernetes) -✓ Global presence -✓ Developer-friendly interface

    -

    Start small with a single Droplet and expand to managed services as your application grows.

    -

    For more information, visit: DigitalOcean Documentation

    -

    Hetzner Provider Guide

    -

    This guide covers using Hetzner Cloud as a provider in the provisioning system. Hetzner is renowned for competitive pricing, powerful infrastructure, -and outstanding performance, making it ideal for cost-conscious teams and performance-critical workloads.

    -

    Table of Contents

    - -

    Overview

    -

    Hetzner Cloud provides European cloud infrastructure with exceptional value. Key characteristics:

    -
      -
    • Best Price/Performance: Lower cost than AWS, competitive with DigitalOcean
    • -
    • European Focus: Primary datacenter in Germany with compliance emphasis
    • -
    • Powerful Hardware: Modern CPUs, NVMe storage, 10Gbps networking
    • -
    • Flexible Billing: Hourly or monthly, no long-term contracts
    • -
    • API-First: Comprehensive RESTful API for automation
    • -
    -

    Hetzner Pricing Model

    -

    Hetzner uses hourly billing with generous monthly rates (30.4 days):

    -
      -
    • Cloud Servers: €0.003-0.072/hour (~€3-200/month depending on size)
    • -
    • Volumes: €0.026/GB/month
    • -
    • Data Transfer: €0.12/GB outbound (generous included traffic)
    • -
    • Floating IP: Free (1 per server)
    • -
    -

    Price Comparison (2 vCPU, 4 GB RAM)

    -
    - - - - -
    ProviderMonthlyHourlyNotes
    Hetzner CX31€6.90€0.003Best value
    DigitalOcean$24$0.03573.5x more expensive
    AWS t3.medium$60+$0.0896On-demand pricing
    UpCloud$15$0.0223Mid-range
    -
    -

    Supported Resources

    -
    - - - - - - - - - -
    ResourceProduct NameStatus
    ComputeCloud Servers✓ Full support
    Block StorageVolumes✓ Full support
    Object StorageObject Storage✓ Full support
    Load BalancerLoad Balancer✓ Full support
    NetworkvSwitch/Network✓ Full support
    FirewallFirewall✓ Full support
    DNS✓ Via Hetzner DNS
    Bare MetalDedicated Servers✓ Available
    Floating IPFloating IP✓ Full support
    -
    -

    Why Hetzner

    -

    When to Choose Hetzner

    -

    Hetzner is ideal for:

    -
      -
    • Cost-Conscious Teams: 50-75% cheaper than AWS
    • -
    • European Operations: Primary EU presence
    • -
    • Predictable Workloads: Good for sustained compute
    • -
    • Performance-Critical: Modern hardware, 10Gbps networking
    • -
    • Self-Managed Services: Full control over infrastructure
    • -
    • Bulk Computing: Good pricing for 10-100+ servers
    • -
    -

    Hetzner is NOT ideal for:

    -
      -
    • Managed Services: Limited compared to AWS/DigitalOcean
    • -
    • Global Distribution: Limited regions (mainly EU + US)
    • -
    • Windows Workloads: Limited Windows support
    • -
    • Complex Compliance: Fewer certifications than AWS
    • -
    • Hands-Off Operations: Need to manage own infrastructure
    • -
    -

    Cost Advantages

    -

    Total Cost of Ownership Comparison (5 servers, 100 GB storage):

    -
    - - - -
    ProviderComputeStorageData TransferMonthly
    Hetzner€34.50€2.60Included€37.10
    DigitalOcean$120$10Included$130
    AWS$300$100$450$850
    -
    -

    Hetzner is 3.5x cheaper than DigitalOcean and 23x cheaper than AWS for this scenario.

    -

    Setup and Configuration

    -

    Prerequisites

    -
      -
    • Hetzner Cloud account at Hetzner Console
    • -
    • API token from Cloud Console
    • -
    • SSH key uploaded to Hetzner
    • -
    • hcloud CLI installed (optional but recommended)
    • -
    • Provisioning system with Hetzner provider plugin
    • -
    -

    Step 1: Create Hetzner API Token

    -
      -
    1. Log in to Hetzner Cloud Console
    2. -
    3. Go to Projects > Your Project > Security > API Tokens
    4. -
    5. Click Generate Token
    6. -
    7. Name it (for example, “provisioning”)
    8. -
    9. Select Read & Write permission
    10. -
    11. Copy the token immediately (only shown once)
    12. -
    -

    Step 2: Configure Environment Variables

    -
    # Add to ~/.bashrc, ~/.zshrc, or env file
    -export HCLOUD_TOKEN="MC4wNTI1YmE1M2E4YmE0YTQzMTQ..."
    -
    -# Optional: Set default location
    -export HCLOUD_LOCATION="nbg1"
    -
    -

    Step 3: Install hcloud CLI (Optional)

    -
    # macOS
    -brew install hcloud
    -
    -# Linux
    -curl https://github.com/hetznercloud/cli/releases/download/v1.x.x/hcloud-linux-amd64.tar.gz | tar xz
    -sudo mv hcloud /usr/local/bin/
    -
    -# Verify
    -hcloud version
    -
    -

    Step 4: Configure SSH Key

    -
    # Upload your SSH public key
    -hcloud ssh-key create --name "provisioning-key" \
    -  --public-key-from-file ~/.ssh/id_rsa.pub
    -
    -# List keys
    -hcloud ssh-key list
    -
    -

    Step 5: Configure Workspace

    -

    Create or update config.toml in your workspace:

    -
    [providers.hetzner]
    -enabled = true
    -token_env = "HCLOUD_TOKEN"
    -default_location = "nbg1"
    -default_datacenter = "nbg1-dc8"
    -
    -[workspace]
    -provider = "hetzner"
    -region = "nbg1"
    -
    -

    Available Resources

    -

    1. Cloud Servers (Compute)

    -

    Hetzner’s core compute offering with outstanding performance.

    -

    Available Server Types:

    -
    - - - - - - - - -
    TypevCPURAMSSD StorageNetworkMonthly Price
    CX1111 GB25 GB1Gbps€3.29
    CX2124 GB40 GB1Gbps€6.90
    CX3128 GB80 GB1Gbps€13.80
    CX41416 GB160 GB1Gbps€27.60
    CX51832 GB240 GB10Gbps€55.20
    CPX2148 GB80 GB10Gbps€20.90
    CPX31816 GB160 GB10Gbps€41.80
    CPX411632 GB360 GB10Gbps€83.60
    -
    -

    Key Features:

    -
      -
    • NVMe SSD storage
    • -
    • Hourly or monthly billing
    • -
    • Automatic backups
    • -
    • SSH key management
    • -
    • Floating IPs for high availability
    • -
    • Network interfaces for multi-homing
    • -
    • Cloud-init support
    • -
    • IPMI/KVM console access
    • -
    -

    2. Volumes (Block Storage)

    -

    Persistent block storage that can be attached/detached.

    -

    Characteristics:

    -
      -
    • €0.026/GB/month (highly affordable)
    • -
    • SSD-based with good performance
    • -
    • Up to 10 TB capacity
    • -
    • Snapshots for backup
    • -
    • Can attach to multiple servers (read-only)
    • -
    • Automatic snapshots available
    • -
    -

    3. Object Storage

    -

    S3-compatible object storage.

    -

    Characteristics:

    -
      -
    • €0.025/GB/month
    • -
    • S3-compatible API
    • -
    • Versioning and lifecycle policies
    • -
    • Bucket policy support
    • -
    • CORS configuration
    • -
    -

    4. Floating IPs

    -

    Static IP addresses that can be reassigned.

    -

    Characteristics:

    -
      -
    • Free (1 per server, additional €0.50/month)
    • -
    • IPv4 and IPv6 support
    • -
    • Enable high availability and failover
    • -
    • DNS pointing
    • -
    -

    5. Load Balancer

    -

    Layer 4/7 load balancing.

    -

    Available Plans:

    -
      -
    • LB11: €5/month (100 Mbps)
    • -
    • LB21: €10/month (1 Gbps)
    • -
    • LB31: €20/month (10 Gbps)
    • -
    -

    Features:

    -
      -
    • Health checks
    • -
    • SSL/TLS termination
    • -
    • Path/host-based routing
    • -
    • Sticky sessions
    • -
    • Algorithms: round robin, least connections
    • -
    -

    6. Network/vSwitch

    -

    Virtual switching for private networking.

    -

    Characteristics:

    -
      -
    • Private networks between servers
    • -
    • Subnets within networks
    • -
    • Routes and gateways
    • -
    • Firewall integration
    • -
    -

    7. Firewall

    -

    Network firewall rules.

    -

    Features:

    -
      -
    • Per-server or per-network
    • -
    • Stateful filtering
    • -
    • Protocol-specific rules
    • -
    • Source/destination filtering
    • -
    -

    Nickel Schema Reference

    -

    Cloud Server Configuration

    -
    let hetzner = import "../../extensions/providers/hetzner/nickel/main.ncl" in
    -
    -hetzner.Server & {
    -  # Required
    -  name = "my-server",
    -  server_type = "cx21",
    -  image = "ubuntu-22.04",
    -
    -  # Optional
    -  location = "nbg1",  # nbg1, fsn1, hel1, ash
    -  datacenter = "nbg1-dc8",
    -  ssh_keys = ["key-name"],
    -  count = 1,
    -  public_net = {
    -    enable_ipv4 = true,
    -    enable_ipv6 = true
    -  },
    -
    -  # Volumes to attach
    -  volumes = [
    -    {
    -      size = 100,
    -      format = "ext4",
    -      automount = true
    -    }
    -  ],
    -
    -  # Network configuration
    -  networks = [
    -    {
    -      network_name = "private-net",
    -      ip = "10.0.1.5"
    -    }
    -  ],
    -
    -  # Firewall rules
    -  firewall_rules = [
    -    {
    -      direction = "in",
    -      source_ips = ["0.0.0.0/0", "::/0"],
    -      destination_port = "22",
    -      protocol = "tcp"
    -    },
    -    {
    -      direction = "in",
    -      source_ips = ["0.0.0.0/0", "::/0"],
    -      destination_port = "80",
    -      protocol = "tcp"
    -    },
    -    {
    -      direction = "in",
    -      source_ips = ["0.0.0.0/0", "::/0"],
    -      destination_port = "443",
    -      protocol = "tcp"
    -    }
    -  ],
    -
    -  # Labels for organization
    -  labels = {
    -    "environment" = "production",
    -    "application" = "web"
    -  },
    -
    -  # Startup script
    -  user_data = "#!/bin/bash\napt-get update\napt-get install -y nginx"
    -}
    -
    -

    Volume Configuration

    -
    hetzner.Volume & {
    -  name = "data-volume",
    -  size = 100,  # GB
    -  location = "nbg1",
    -  automount = true,
    -  format = "ext4",
    -
    -  # Attach to server
    -  attachment = {
    -    server = "server-name",
    -    mount_point = "/data"
    -  }
    -}
    -
    -

    Load Balancer Configuration

    -
    hetzner.LoadBalancer & {
    -  name = "web-lb",
    -  load_balancer_type = "lb11",
    -  network_zone = "eu-central",
    -  location = "nbg1",
    -
    -  # Services (backend targets)
    -  services = [
    -    {
    -      protocol = "http",
    -      listen_port = 80,
    -      destination_port = 8080,
    -      health_check = {
    -        protocol = "http",
    -        port = 8080,
    -        interval = 15,
    -        timeout = 10,
    -        unhealthy_threshold = 3
    -      },
    -      http = {
    -        sticky_sessions = true,
    -        http_only = true,
    -        certificates = []
    -      }
    -    }
    -  ]
    -}
    -
    -

    Firewall Configuration

    -
    hetzner.Firewall & {
    -  name = "web-firewall",
    -  labels = { "env" = "prod" },
    -
    -  rules = [
    -    # Allow SSH from management network
    -    {
    -      direction = "in",
    -      source_ips = ["203.0.113.0/24"],
    -      destination_port = "22",
    -      protocol = "tcp"
    -    },
    -    # Allow HTTP/HTTPS from anywhere
    -    {
    -      direction = "in",
    -      source_ips = ["0.0.0.0/0", "::/0"],
    -      destination_port = "80",
    -      protocol = "tcp"
    -    },
    -    {
    -      direction = "in",
    -      source_ips = ["0.0.0.0/0", "::/0"],
    -      destination_port = "443",
    -      protocol = "tcp"
    -    },
    -    # Allow all outbound
    -    {
    -      direction = "out",
    -      destination_ips = ["0.0.0.0/0", "::/0"],
    -      protocol = "esp"
    -    }
    -  ]
    -}
    -
    -

    Configuration Examples

    -

    Example 1: Single Server Web Server

    -
    let hetzner = import "../../extensions/providers/hetzner/nickel/main.ncl" in
    -
    -{
    -  workspace_name = "simple-web",
    -
    -  web_server = hetzner.Server & {
    -    name = "web-01",
    -    server_type = "cx21",
    -    image = "ubuntu-22.04",
    -    location = "nbg1",
    -    ssh_keys = ["provisioning"],
    -
    -    user_data = ''
    -      #!/bin/bash
    -      apt-get update
    -      apt-get install -y nginx
    -      systemctl start nginx
    -      systemctl enable nginx
    -    '',
    -
    -    firewall_rules = [
    -      { direction = "in", source_ips = ["0.0.0.0/0"], destination_port = "22", protocol = "tcp" },
    -      { direction = "in", source_ips = ["0.0.0.0/0"], destination_port = "80", protocol = "tcp" },
    -      { direction = "in", source_ips = ["0.0.0.0/0"], destination_port = "443", protocol = "tcp" }
    -    ],
    -
    -    labels = { "service" = "web" }
    -  }
    -}
    -
    -

    Example 2: Web Application with Load Balancer and Storage

    -
    {
    -  # Backend servers
    -  app_servers = hetzner.Server & {
    -    name = "app",
    -    server_type = "cx31",
    -    image = "ubuntu-22.04",
    -    location = "nbg1",
    -    count = 3,
    -    ssh_keys = ["provisioning"],
    -
    -    volumes = [
    -      {
    -        size = 100,
    -        format = "ext4",
    -        automount = true
    -      }
    -    ],
    -
    -    firewall_rules = [
    -      { direction = "in", source_ips = ["0.0.0.0/0"], destination_port = "22", protocol = "tcp" },
    -      { direction = "in", source_ips = ["0.0.0.0/0"], destination_port = "8080", protocol = "tcp" }
    -    ],
    -
    -    labels = { "tier" = "application" }
    -  },
    -
    -  # Load balancer
    -  lb = hetzner.LoadBalancer & {
    -    name = "web-lb",
    -    load_balancer_type = "lb11",
    -    location = "nbg1",
    -
    -    services = [
    -      {
    -        protocol = "http",
    -        listen_port = 80,
    -        destination_port = 8080,
    -        health_check = {
    -          protocol = "http",
    -          port = 8080,
    -          interval = 15
    -        }
    -      }
    -    ]
    -  },
    -
    -  # Persistent storage
    -  shared_storage = hetzner.Volume & {
    -    name = "shared-data",
    -    size = 500,
    -    location = "nbg1",
    -    automount = false,
    -    format = "ext4"
    -  }
    -}
    -
    -

    Example 3: High-Performance Compute Cluster

    -
    {
    -  # Compute nodes with 10Gbps networking
    -  compute_nodes = hetzner.Server & {
    -    name = "compute",
    -    server_type = "cpx41",  # 16 vCPU, 32 GB, 10Gbps
    -    image = "ubuntu-22.04",
    -    location = "nbg1",
    -    count = 5,
    -
    -    volumes = [
    -      {
    -        size = 500,
    -        format = "ext4",
    -        automount = true
    -      }
    -    ],
    -
    -    labels = { "tier" = "compute" }
    -  },
    -
    -  # Storage node
    -  storage = hetzner.Server & {
    -    name = "storage",
    -    server_type = "cx41",
    -    image = "ubuntu-22.04",
    -    location = "nbg1",
    -
    -    volumes = [
    -      {
    -        size = 2000,
    -        format = "ext4",
    -        automount = true
    -      }
    -    ],
    -
    -    labels = { "tier" = "storage" }
    -  },
    -
    -  # High-capacity volume for data
    -  data_volume = hetzner.Volume & {
    -    name = "compute-data",
    -    size = 5000,
    -    location = "nbg1"
    -  }
    -}
    -
    -

    Best Practices

    -

    1. Server Selection and Sizing

    -

    Performance Tiers:

    -
      -
    • -

      CX Series (Standard): Best value for most workloads

      -
        -
      • CX21: Default choice for 2-4 GB workloads
      • -
      • CX41: Good mid-range option
      • -
      -
    • -
    • -

      CPX Series (ARM-based CPU-optimized): Better for CPU-intensive

      -
        -
      • CPX21: Outstanding value at €20.90/month
      • -
      • CPX31: Good for compute workloads
      • -
      -
    • -
    • -

      CCX Series (AMD EPYC): High-performance options

      -
    • -
    -

    Selection Criteria:

    -
      -
    • Start with CX21 (€6.90/month) for testing
    • -
    • Scale to CPX21 (€20.90/month) for CPU-bound workloads
    • -
    • Use CX31+ (€13.80+) for balanced workloads with data
    • -
    -

    2. Network Architecture

    -

    High Availability:

    -
    # Use Floating IPs for failover
    -floating_ip = hetzner.FloatingIP & {
    -  name = "web-ip",
    -  ip_type = "ipv4",
    -  location = "nbg1"
    -}
    -
    -# Attach to primary server, reassign on failure
    -attachment = {
    -  server = "primary-server"
    -}
    -
    -

    Private Networking:

    -
    # Create private network for internal communication
    -private_network = hetzner.Network & {
    -  name = "private",
    -  ip_range = "10.0.0.0/8",
    -  labels = { "env" = "prod" }
    -}
    -
    -

    3. Storage Strategy

    -

    Volume Sizing:

    -
      -
    • Estimate storage needs: app + data + logs + backups
    • -
    • Add 20% buffer for growth
    • -
    • Monitor usage monthly
    • -
    -

    Backup Strategy:

    -
      -
    • Enable automatic snapshots
    • -
    • Regular manual snapshots for important data
    • -
    • Test restore procedures
    • -
    • Keep snapshots for minimum 30 days
    • -
    -

    4. Firewall Configuration

    -

    Principle of Least Privilege:

    -
    # Only open necessary ports
    -firewall_rules = [
    -  # SSH from management IP only
    -  { direction = "in", source_ips = ["203.0.113.1/32"], destination_port = "22", protocol = "tcp" },
    -
    -  # HTTP/HTTPS from anywhere
    -  { direction = "in", source_ips = ["0.0.0.0/0", "::/0"], destination_port = "80", protocol = "tcp" },
    -  { direction = "in", source_ips = ["0.0.0.0/0", "::/0"], destination_port = "443", protocol = "tcp" },
    -
    -  # Database replication (internal only)
    -  { direction = "in", source_ips = ["10.0.0.0/8"], destination_port = "5432", protocol = "tcp" }
    -]
    -
    -

    5. Monitoring and Health Checks

    -

    Enable Monitoring:

    -
    hcloud server update <server-id> --enable-rescue
    -
    -

    Health Check Patterns:

    -
      -
    • HTTP endpoint returning 200
    • -
    • Custom health check scripts
    • -
    • Regular resource verification
    • -
    -

    6. Cost Optimization

    -

    Reserved Servers (Pre-pay for 12 months):

    -
      -
    • 25% discount vs hourly
    • -
    • Good for predictable workloads
    • -
    -

    Spot Pricing (Coming):

    -
      -
    • Watch for additional discounts
    • -
    • Off-peak capacity
    • -
    -

    Resource Cleanup:

    -
      -
    • Delete unused volumes
    • -
    • Remove old snapshots
    • -
    • Consolidate small servers
    • -
    -

    Troubleshooting

    -

    Issue: Cannot Connect to Server

    -

    Symptoms: SSH timeout or connection refused

    -

    Diagnosis:

    -
    # Check server status
    -hcloud server list
    -
    -# Verify firewall allows port 22
    -hcloud firewall describe firewall-name
    -
    -# Check if server has public IPv4
    -hcloud server describe server-name
    -
    -

    Solution:

    -
    # Update firewall to allow SSH from your IP
    -hcloud firewall add-rules firewall-id \
    -  --rules "direction=in protocol=tcp source_ips=YOUR_IP/32 destination_port=22"
    -
    -# Or reset SSH using rescue mode via console
    -hcloud server request-console server-id
    -
    -

    Issue: Volume Attachment Failed

    -

    Symptoms: Volume created but cannot attach, mount fails

    -

    Diagnosis:

    -
    # Check volume status
    -hcloud volume list
    -
    -# Check server has available attachment slot
    -hcloud server describe server-name
    -
    -

    Solution:

    -
    # Format volume (first time only)
    -sudo mkfs.ext4 /dev/sdb
    -
    -# Mount manually
    -sudo mkdir -p /data
    -sudo mount /dev/sdb /data
    -
    -# Make persistent
    -echo '/dev/sdb /data ext4 defaults,nofail 0 0' | sudo tee -a /etc/fstab
    -sudo mount -a
    -
    -

    Issue: High Data Transfer Costs

    -

    Symptoms: Unexpected egress charges

    -

    Diagnosis:

    -
    # Check server network traffic
    -sar -n DEV 1 100
    -
    -# Monitor connection patterns
    -netstat -an | grep ESTABLISHED | wc -l
    -
    -

    Solution:

    -
      -
    • Use Hetzner Object Storage for static files
    • -
    • Cache content locally
    • -
    • Optimize data transfer patterns
    • -
    • Consider using Content Delivery Network
    • -
    -

    Issue: Load Balancer Not Routing Traffic

    -

    Symptoms: LB created but backends not receiving traffic

    -

    Diagnosis:

    -
    # Check LB status
    -hcloud load-balancer describe lb-name
    -
    -# Test backend directly
    -curl -H "Host: example.com" http://backend-ip:8080/health
    -
    -

    Solution:

    -
      -
    • Ensure backends have firewall allowing LB traffic
    • -
    • Verify health check endpoint works
    • -
    • Check backend service is running
    • -
    • Review health check configuration
    • -
    -

    Summary

    -

    Hetzner provides exceptional value with modern infrastructure:

    -

    ✓ Best price/performance ratio (50%+ cheaper than DigitalOcean) -✓ Excellent European presence -✓ Powerful hardware (NVMe, 10Gbps networking) -✓ Flexible deployment options -✓ Great API and CLI tools

    -

    Start with CX21 servers (€6.90/month) and scale based on needs.

    -

    For more information, visit: Hetzner Cloud Documentation

    -

    Multi-Provider Web App Workspace

    -

    Multi-Region High Availability Workspace

    -

    Cost-Optimized Multi-Provider Workspace

    -

    Quick Reference Master Index

    -

    This directory contains consolidated quick reference guides organized by topic.

    -

    Available Quick References

    - -

    Topic-Specific Guides with Embedded Quick References

    -

    Security:

    -
      -
    • Authentication Quick Reference - See ../security/authentication-layer-guide.md
    • -
    • Config Encryption Quick Reference - See ../security/config-encryption-guide.md
    • -
    -

    Infrastructure:

    -
      -
    • Dynamic Secrets Guide - See ../infrastructure/dynamic-secrets-guide.md
    • -
    • Mode System Guide - See ../infrastructure/mode-system-guide.md
    • -
    -
    -

    Using Quick References

    -

    Quick references are condensed versions of full guides, optimized for:

    -
      -
    • Fast lookup of common commands
    • -
    • Copy-paste ready examples
    • -
    • Quick command reference while working
    • -
    • At-a-glance feature comparison tables
    • -
    -

    For deeper explanations, see the full guides in their respective folders.

    -

    Platform Operations Cheatsheet

    -

    Quick reference for daily operations, deployments, and troubleshooting

    -
    -

    Mode Selection (One Command)

    -
    # Development/Testing
    -export VAULT_MODE=solo REGISTRY_MODE=solo RAG_MODE=solo AI_SERVICE_MODE=solo DAEMON_MODE=solo
    -
    -# Team Environment
    -export VAULT_MODE=multiuser REGISTRY_MODE=multiuser RAG_MODE=multiuser AI_SERVICE_MODE=multiuser DAEMON_MODE=multiuser
    -
    -# CI/CD Pipelines
    -export VAULT_MODE=cicd REGISTRY_MODE=cicd RAG_MODE=cicd AI_SERVICE_MODE=cicd DAEMON_MODE=cicd
    -
    -# Production HA
    -export VAULT_MODE=enterprise REGISTRY_MODE=enterprise RAG_MODE=enterprise AI_SERVICE_MODE=enterprise DAEMON_MODE=enterprise
    -
    -
    -

    Service Ports & Endpoints

    -
    - - - - - - - - -
    ServicePortEndpointHealth Check
    Vault8200http://localhost:8200curl http://localhost:8200/health
    Registry8081http://localhost:8081curl http://localhost:8081/health
    RAG8083http://localhost:8083curl http://localhost:8083/health
    AI Service8082http://localhost:8082curl http://localhost:8082/health
    Orchestrator9090http://localhost:9090curl http://localhost:9090/health
    Control Center8080http://localhost:8080curl http://localhost:8080/health
    MCP Server8084http://localhost:8084curl http://localhost:8084/health
    Installer8085http://localhost:8085curl http://localhost:8085/health
    -
    -
    -

    Service Startup (Order Matters)

    -
    # Build everything first
    -cargo build --release
    -
    -# Then start in dependency order:
    -# 1. Infrastructure
    -cargo run --release -p vault-service &
    -sleep 2
    -
    -# 2. Configuration & Extensions
    -cargo run --release -p extension-registry &
    -sleep 2
    -
    -# 3. AI/RAG Layer
    -cargo run --release -p provisioning-rag &
    -cargo run --release -p ai-service &
    -sleep 2
    -
    -# 4. Orchestration
    -cargo run --release -p orchestrator &
    -cargo run --release -p control-center &
    -cargo run --release -p mcp-server &
    -sleep 2
    -
    -# 5. Background Operations
    -cargo run --release -p provisioning-daemon &
    -
    -# 6. Optional: Installer
    -cargo run --release -p installer &
    -
    -
    -

    Quick Checks (All Services)

    -
    # Check all services running
    -pgrep -a cargo | grep "release -p"
    -
    -# All health endpoints (fast)
    -for port in 8200 8081 8083 8082 9090 8080 8084 8085; do
    -  echo "Port $port: $(curl -s http://localhost:$port/health | jq -r .status 2>/dev/null || echo 'DOWN')"
    -done
    -
    -# Check all listening ports
    -ss -tlnp | grep -E "8200|8081|8083|8082|9090|8080|8084|8085"
    -
    -# Show PIDs of all services
    -ps aux | grep "cargo run --release" | grep -v grep
    -
    -
    -

    Configuration Management

    -

    View Config Files

    -
    # List all available schemas
    -ls -la provisioning/schemas/platform/schemas/
    -
    -# View specific service schema
    -cat provisioning/schemas/platform/schemas/vault-service.ncl
    -
    -# Check schema syntax
    -nickel typecheck provisioning/schemas/platform/schemas/vault-service.ncl
    -
    -

    Apply Config Changes

    -
    # 1. Update schema or defaults
    -vim provisioning/schemas/platform/schemas/vault-service.ncl
    -# Or update defaults:
    -vim provisioning/schemas/platform/defaults/vault-service-defaults.ncl
    -
    -# 2. Validate
    -nickel typecheck provisioning/schemas/platform/schemas/vault-service.ncl
    -
    -# 3. Re-generate runtime configs (local, private)
    -./provisioning/.typedialog/platform/scripts/generate-configs.nu vault-service multiuser
    -
    -# 4. Restart service (graceful)
    -pkill -SIGTERM vault-service
    -sleep 2
    -export VAULT_MODE=multiuser
    -cargo run --release -p vault-service &
    -
    -# 5. Verify loaded
    -curl http://localhost:8200/api/config | jq .
    -
    -
    -

    Service Control

    -

    Stop Services

    -
    # Stop all gracefully
    -pkill -SIGTERM -f "cargo run --release"
    -
    -# Wait for shutdown
    -sleep 5
    -
    -# Verify all stopped
    -pgrep -f "cargo run --release" || echo "All stopped"
    -
    -# Force kill if needed
    -pkill -9 -f "cargo run --release"
    -
    -

    Restart Services

    -
    # Single service
    -pkill -SIGTERM vault-service && sleep 2 && cargo run --release -p vault-service &
    -
    -# All services
    -pkill -SIGTERM -f "cargo run --release"
    -sleep 5
    -cargo build --release
    -# Then restart using startup commands above
    -
    -

    Check Logs

    -
    # Follow service logs (if using journalctl)
    -journalctl -fu provisioning-vault
    -journalctl -fu provisioning-orchestrator
    -
    -# Or tail application logs
    -tail -f /var/log/provisioning/*.log
    -
    -# Filter errors
    -grep -i error /var/log/provisioning/*.log
    -
    -
    -

    Database Management

    -

    SurrealDB (Multiuser/Enterprise)

    -
    # Check SurrealDB status
    -curl -s http://surrealdb:8000/health | jq .
    -
    -# Connect to SurrealDB
    -surreal sql --endpoint http://surrealdb:8000 --username root --password root
    -
    -# Run query
    -surreal sql --endpoint http://surrealdb:8000 --username root --password root \
    -  --query "SELECT * FROM services"
    -
    -# Backup database
    -surreal export --endpoint http://surrealdb:8000 \
    -  --username root --password root > backup.sql
    -
    -# Restore database
    -surreal import --endpoint http://surrealdb:8000 \
    -  --username root --password root < backup.sql
    -
    -

    Etcd (Enterprise HA)

    -
    # Check Etcd cluster health
    -etcdctl --endpoints=http://etcd:2379 endpoint health
    -
    -# List members
    -etcdctl --endpoints=http://etcd:2379 member list
    -
    -# Get key from Etcd
    -etcdctl --endpoints=http://etcd:2379 get /provisioning/config
    -
    -# Set key in Etcd
    -etcdctl --endpoints=http://etcd:2379 put /provisioning/config "value"
    -
    -# Backup Etcd
    -etcdctl --endpoints=http://etcd:2379 snapshot save backup.db
    -
    -# Restore Etcd from snapshot
    -etcdctl --endpoints=http://etcd:2379 snapshot restore backup.db
    -
    -
    -

    Environment Variable Overrides

    -

    Override Individual Settings

    -
    # Vault overrides
    -export VAULT_SERVER_URL=http://vault-custom:8200
    -export VAULT_STORAGE_BACKEND=etcd
    -export VAULT_TLS_VERIFY=true
    -
    -# Registry overrides
    -export REGISTRY_SERVER_PORT=9081
    -export REGISTRY_SERVER_WORKERS=8
    -export REGISTRY_GITEA_URL=http://gitea:3000
    -export REGISTRY_OCI_REGISTRY=registry.local:5000
    -
    -# RAG overrides
    -export RAG_ENABLED=true
    -export RAG_EMBEDDINGS_PROVIDER=openai
    -export RAG_EMBEDDINGS_API_KEY=sk-xxx
    -export RAG_LLM_PROVIDER=anthropic
    -
    -# AI Service overrides
    -export AI_SERVICE_SERVER_PORT=9082
    -export AI_SERVICE_RAG_ENABLED=true
    -export AI_SERVICE_MCP_ENABLED=false
    -export AI_SERVICE_DAG_MAX_CONCURRENT_TASKS=50
    -
    -# Daemon overrides
    -export DAEMON_POLL_INTERVAL=30
    -export DAEMON_MAX_WORKERS=8
    -export DAEMON_LOGGING_LEVEL=info
    -
    -
    -

    Health & Status Checks

    -

    Quick Status (30 seconds)

    -
    # Test all services with visual status
    -curl -s http://localhost:8200/health && echo "✓ Vault" || echo "✗ Vault"
    -curl -s http://localhost:8081/health && echo "✓ Registry" || echo "✗ Registry"
    -curl -s http://localhost:8083/health && echo "✓ RAG" || echo "✗ RAG"
    -curl -s http://localhost:8082/health && echo "✓ AI Service" || echo "✗ AI Service"
    -curl -s http://localhost:9090/health && echo "✓ Orchestrator" || echo "✗ Orchestrator"
    -curl -s http://localhost:8080/health && echo "✓ Control Center" || echo "✗ Control Center"
    -
    -

    Detailed Status

    -
    # Orchestrator cluster status
    -curl -s http://localhost:9090/api/v1/cluster/status | jq .
    -
    -# Service integration check
    -curl -s http://localhost:9090/api/v1/services | jq .
    -
    -# Queue status
    -curl -s http://localhost:9090/api/v1/queue/status | jq .
    -
    -# Worker status
    -curl -s http://localhost:9090/api/v1/workers | jq .
    -
    -# Recent tasks (last 10)
    -curl -s http://localhost:9090/api/v1/tasks?limit=10 | jq .
    -
    -
    -

    Performance & Monitoring

    -

    System Resources

    -
    # Memory usage
    -free -h
    -
    -# Disk usage
    +

    Check specific paths:

    +
    # Check data directory
     df -h /var/lib/provisioning
     
    -# CPU load
    -top -bn1 | head -5
    -
    -# Network connections count
    -ss -s
    -
    -# Count established connections
    -netstat -an | grep ESTABLISHED | wc -l
    -
    -# Watch resources in real-time
    -watch -n 1 'free -h && echo "---" && df -h'
    +# Check I/O performance
    +iostat -x 1 5
     
    -

    Service Performance

    -
    # Monitor service memory usage
    -ps aux | grep "cargo run" | awk '{print $2, $6}' | while read pid mem; do
    -  echo "$pid: $(bc <<< "$mem / 1024")MB"
    -done
    +

    Network Health

    +
    # Check network connectivity
    +provisioning health network
     
    -# Monitor request latency (Orchestrator)
    -curl -s http://localhost:9090/api/v1/metrics/latency | jq .
    +# Test external connectivity
    +provisioning health network --external
     
    -# Monitor error rate
    -curl -s http://localhost:9090/api/v1/metrics/errors | jq .
    +# Test provider connectivity
    +provisioning health network --provider upcloud
     
    -
    -

    Troubleshooting Quick Fixes

    -

    Service Won’t Start

    -
    # Check port in use
    -lsof -i :8200
    -ss -tlnp | grep 8200
    +

    Network health checks:

    +
      +
    • Internal service-to-service connectivity
    • +
    • DNS resolution
    • +
    • External API reachability (cloud providers)
    • +
    • Network latency and packet loss
    • +
    • Firewall rules validation
    • +
    +

    Resource Monitoring

    +

    CPU Health

    +
    # Check CPU utilization
    +provisioning health cpu
     
    -# Kill process using port
    -pkill -9 -f "vault-service"
    +# Per-service CPU usage
    +provisioning platform metrics --metric cpu_usage
     
    -# Start with verbose logging
    -RUST_LOG=debug cargo run -p vault-service 2>&1 | head -50
    -
    -# Verify schema exists
    -nickel typecheck provisioning/schemas/platform/schemas/vault-service.ncl
    -
    -# Check mode defaults
    -ls -la provisioning/schemas/platform/defaults/deployment/$VAULT_MODE-defaults.ncl
    +# Alert if CPU > 90% for 5 minutes
     
    -

    High Memory Usage

    -
    # Identify top memory consumers
    -ps aux --sort=-%mem | head -10
    +

    Monitor CPU load:

    +
    # System load average
    +uptime
     
    -# Reduce worker count for affected service
    -export VAULT_SERVER_WORKERS=2
    -pkill -SIGTERM vault-service
    -sleep 2
    -cargo run --release -p vault-service &
    -
    -# Run memory analysis (if valgrind available)
    -valgrind --leak-check=full target/release/vault-service
    +# Per-process CPU
    +top -b -n 1 | grep provisioning
     
    -

    Database Connection Error

    -
    # Test database connectivity
    -curl http://surrealdb:8000/health
    -etcdctl --endpoints=http://etcd:2379 endpoint health
    +

    Memory Health

    +
    # Check memory utilization
    +provisioning health memory
     
    -# Update connection string
    -export SURREALDB_URL=ws://surrealdb:8000
    -export ETCD_ENDPOINTS=http://etcd:2379
    +# Memory breakdown by service
    +provisioning platform metrics --metric memory_usage
     
    -# Restart service with new config
    -pkill vault-service
    -sleep 2
    -cargo run --release -p vault-service &
    -
    -# Check logs for connection errors
    -grep -i "connection" /var/log/provisioning/*.log
    +# Detect memory leaks
    +provisioning health memory --leak-detection
     
    -

    Services Not Communicating

    -
    # Test inter-service connectivity
    -curl http://localhost:8200/health
    -curl http://localhost:8081/health
    -curl -H "X-Service: vault" http://localhost:9090/api/v1/health
    +

    Memory metrics:

    +
    # Available memory
    +free -h
     
    -# Check DNS resolution (if using hostnames)
    -nslookup vault.internal
    -dig vault.internal
    -
    -# Add to /etc/hosts if DNS fails
    -echo "127.0.0.1 vault.internal" >> /etc/hosts
    +# Per-service memory
    +ps aux | grep provisioning | awk '{sum+=$6} END {print sum/1024 " MB"}'
     
    -
    -

    Emergency Procedures

    -

    Full Service Recovery

    -
    # 1. Stop everything
    -pkill -9 -f "cargo run"
    +

    Disk Health

    +
    # Check disk health
    +provisioning health disk
     
    -# 2. Backup current data
    -tar -czf /backup/provisioning-$(date +%s).tar.gz /var/lib/provisioning/
    +# SMART status (if available)
    +sudo smartctl -H /dev/sda
    +
    +

    Automated Health Monitoring

    +

    Health Check Service

    +

    Enable continuous health monitoring:

    +
    # Start health monitor
    +provisioning health monitor --interval 30
     
    -# 3. Clean slate (solo mode only)
    -rm -rf /tmp/provisioning-solo
    +# Monitor with alerts
    +provisioning health monitor --interval 30 --alert-email [ops@example.com](mailto:ops@example.com)
     
    -# 4. Restart services
    -export VAULT_MODE=solo
    -cargo build --release
    -cargo run --release -p vault-service &
    -sleep 2
    -cargo run --release -p extension-registry &
    +# Monitor specific components
    +provisioning health monitor --components orchestrator,database --interval 10
    +
    +

    Systemd Health Monitoring

    +

    Systemd watchdog for automatic restart on failure:

    +
    # /etc/systemd/system/provisioning-orchestrator.service
    +[Service]
    +Type=notify
    +WatchdogSec=30
    +Restart=on-failure
    +RestartSec=10
    +StartLimitIntervalSec=300
    +StartLimitBurst=5
    +
    +

    Service sends periodic health status:

    +
    // Rust service code
    +sd_notify::notify(true, &[NotifyState::Watchdog])?;
    +

    Health Dashboards

    +

    Grafana Health Dashboard

    +

    Import platform health dashboard:

    +
    provisioning monitoring install-dashboard --name platform-health
    +
    +

    Dashboard panels:

    +
      +
    • Service status indicators
    • +
    • Resource utilization gauges
    • +
    • Error rate graphs
    • +
    • Latency histograms
    • +
    • Workflow success rate
    • +
    • Database connection pool
    • +
    +

    Access: http://localhost:3000/d/platform-health

    +

    CLI Health Dashboard

    +

    Real-time health monitoring in terminal:

    +
    # Interactive health dashboard
    +provisioning health dashboard
    +
    +# Auto-refresh every 5 seconds
    +provisioning health dashboard --refresh 5
    +
    +

    Health Alerts

    +

    Prometheus Alert Rules

    +
    # Platform health alerts
    +groups:
    +  - name: platform_health
    +    rules:
    +      - alert: ServiceUnhealthy
    +        expr: up{job=~"provisioning-.*"} == 0
    +        for: 1m
    +        labels:
    +          severity: critical
    +        annotations:
    +          summary: "Service is unhealthy"
    +
    +      - alert: HighMemoryUsage
    +        expr: process_resident_memory_bytes > 4e9
    +        for: 5m
    +        labels:
    +          severity: warning
    +
    +      - alert: DatabaseConnectionPoolExhausted
    +        expr: database_connection_pool_active / database_connection_pool_max > 0.9
    +        for: 2m
    +        labels:
    +          severity: critical
    +
    +

    Health Check Notifications

    +

    Configure health check notifications:

    +
    # /etc/provisioning/health.toml
    +[notifications]
    +enabled = true
    +
    +[notifications.email]
    +enabled = true
    +smtp_server = "smtp.example.com"
    +from = "[health@provisioning.example.com](mailto:health@provisioning.example.com)"
    +to = ["[ops@example.com](mailto:ops@example.com)"]
    +
    +[notifications.slack]
    +enabled = true
    +webhook_url = " [https://hooks.slack.com/services/..."](https://hooks.slack.com/services/...")
    +channel = "#provisioning-health"
    +
    +[notifications.pagerduty]
    +enabled = true
    +service_key = "..."
    +
    +

    Dependency Health

    +

    External Service Health

    +

    Check health of dependencies:

    +
    # Check cloud provider API
    +provisioning health dependency upcloud
    +
    +# Check vault service
    +provisioning health dependency vault
    +
    +# Check all dependencies
    +provisioning health dependency --all
    +
    +

    Dependency health includes:

    +
      +
    • API reachability
    • +
    • Authentication validity
    • +
    • API quota/rate limits
    • +
    • Service degradation status
    • +
    +

    Third-party Service Monitoring

    +

    Monitor integrated services:

    +
    # Kubernetes cluster health (if managing K8s)
    +provisioning health kubernetes
    +
    +# Database replication health
    +provisioning health database --replication
    +
    +# Secret store health
    +provisioning health secrets
    +
    +

    Health Metrics

    +

    Key metrics tracked for health monitoring:

    +

    Service Metrics

    +
    provisioning_service_up{service="orchestrator"} 1
    +provisioning_service_health_status{service="orchestrator"} 1
    +provisioning_service_uptime_seconds{service="orchestrator"} 432000
    +
    +

    Resource Metrics

    +
    provisioning_cpu_usage_percent 45
    +provisioning_memory_usage_bytes 2.5e9
    +provisioning_disk_usage_percent{mount="/var/lib/provisioning"} 45
    +provisioning_network_errors_total 0
    +
    +

    Performance Metrics

    +
    provisioning_api_latency_p50_ms 25
    +provisioning_api_latency_p95_ms 85
    +provisioning_api_latency_p99_ms 150
    +provisioning_workflow_duration_seconds 45
    +
    +

    Health Best Practices

    +
      +
    • Monitor all critical services continuously
    • +
    • Set appropriate alert thresholds
    • +
    • Test alert notifications regularly
    • +
    • Maintain health check runbooks
    • +
    • Review health metrics weekly
    • +
    • Establish health baselines
    • +
    • Automate remediation where possible
    • +
    • Document health status definitions
    • +
    • Integrate health checks with CI/CD
    • +
    • Monitor upstream dependencies
    • +
    +

    Troubleshooting Unhealthy State

    +

    When health check fails:

    +
    # 1. Identify unhealthy component
    +provisioning health check --detailed
    +
    +# 2. View component logs
    +journalctl -u provisioning-<component> -n 100
    +
    +# 3. Check resource availability
    +provisioning health resources
    +
    +# 4. Restart unhealthy service
    +sudo systemctl restart provisioning-<component>
     
     # 5. Verify recovery
    -curl http://localhost:8200/health
    -curl http://localhost:8081/health
    +provisioning health check
    +
    +# 6. Review recent changes
    +git log --since="1 day ago" -- /etc/provisioning/
     
    -

    Rollback to Previous Configuration

    -
    # 1. Stop affected service
    -pkill -SIGTERM vault-service
    -
    -# 2. Restore previous schema from version control
    -git checkout HEAD~1 -- provisioning/schemas/platform/schemas/vault-service.ncl
    -git checkout HEAD~1 -- provisioning/schemas/platform/defaults/vault-service-defaults.ncl
    -
    -# 3. Re-generate runtime config
    -./provisioning/.typedialog/platform/scripts/generate-configs.nu vault-service solo
    -
    -# 4. Restart with restored config
    -export VAULT_MODE=solo
    -sleep 2
    -cargo run --release -p vault-service &
    -
    -# 5. Verify restored state
    -curl http://localhost:8200/health
    -curl http://localhost:8200/api/config | jq .
    -
    -

    Data Recovery

    -
    # Restore SurrealDB from backup
    -surreal import --endpoint http://surrealdb:8000 \
    -  --username root --password root < /backup/surreal-20260105.sql
    -
    -# Restore Etcd from snapshot
    -etcdctl --endpoints=http://etcd:2379 snapshot restore /backup/etcd-20260105.db
    -
    -# Restore filesystem data (solo mode)
    -cp -r /backup/vault-data/* /tmp/provisioning-solo/vault/
    -chmod -R 755 /tmp/provisioning-solo/vault/
    -
    -
    -

    File Locations

    -
    # Configuration files (PUBLIC - version controlled)
    -provisioning/schemas/platform/                   # Nickel schemas & defaults
    -provisioning/.typedialog/platform/               # Forms & generation scripts
    -
    -# Configuration files (PRIVATE - gitignored)
    -provisioning/config/runtime/                     # Actual deployment configs
    -
    -# Build artifacts
    -target/release/vault-service
    -target/release/extension-registry
    -target/release/provisioning-rag
    -target/release/ai-service
    -target/release/orchestrator
    -target/release/control-center
    -target/release/provisioning-daemon
    -
    -# Logs (if configured)
    -/var/log/provisioning/
    -/tmp/provisioning-solo/logs/
    -
    -# Data directories
    -/var/lib/provisioning/      # Production data
    -/tmp/provisioning-solo/     # Solo mode data
    -/mnt/provisioning-data/     # Shared storage (multiuser)
    -
    -# Backups
    -/mnt/provisioning-backups/  # Automated backups
    -/backup/                    # Manual backups
    -
    -
    -

    Mode Quick Reference Matrix

    -
    - - - - - - - - + + +

    + Provisioning Logo +

    +

    + Provisioning +

    +

    Security System

    +

    Enterprise-grade security infrastructure with 12 integrated components providing +authentication, authorization, encryption, and compliance.

    +

    Overview

    +

    The Provisioning platform security system delivers comprehensive protection across all layers of +the infrastructure automation platform. Built for enterprise deployments, it provides defense-in-depth +through multiple security controls working together.

    +

    Security Architecture

    +

    The security system is organized into 12 core components:

    +
    AspectSoloMultiuserCICDEnterprise
    Workers2-44-68-1216-32
    StorageFilesystemSurrealDBMemoryEtcd+Replicas
    Startup2-5 min3-8 min1-2 min5-15 min
    DataEphemeralPersistentNoneReplicated
    TLSNoOptionalNoYes
    HANoNoNoYes
    Machines12-413+
    LoggingDebugInfoWarnInfo+Audit
    + + + + + + + + + + + +
    ComponentPurposeKey Features
    AuthenticationUser identity verificationJWT tokens, session management, multi-provider auth
    AuthorizationAccess control enforcementCedar policy engine, RBAC, fine-grained permissions
    MFAMulti-factor authenticationTOTP, WebAuthn/FIDO2, backup codes
    Audit LoggingComprehensive audit trails7-year retention, 5 export formats, compliance reporting
    KMSKey management5 KMS backends, envelope encryption, key rotation
    Secrets ManagementSecure secret storageSecretumVault integration, SOPS/Age, dynamic secrets
    EncryptionData protectionAt-rest and in-transit encryption, AES-256-GCM
    Secure CommunicationNetwork securityTLS/mTLS, certificate management, secure channels
    Certificate ManagementPKI operationsCA management, certificate issuance, rotation
    ComplianceRegulatory adherenceSOC2, GDPR, HIPAA, policy enforcement
    Security TestingValidation framework350+ tests, vulnerability scanning, penetration testing
    Break-GlassEmergency accessMulti-party approval, audit trails, time-limited access
    -
    -

    Common Command Patterns

    -

    Deploy Mode Change

    -
    # Migrate solo to multiuser
    -pkill -SIGTERM -f "cargo run"
    -sleep 5
    -tar -czf backup-solo.tar.gz /var/lib/provisioning/
    -export VAULT_MODE=multiuser REGISTRY_MODE=multiuser
    -cargo run --release -p vault-service &
    -sleep 2
    -cargo run --release -p extension-registry &
    -
    -

    Restart Single Service Without Downtime

    -
    # For load-balanced deployments:
    -# 1. Remove from load balancer
    -# 2. Graceful shutdown
    -pkill -SIGTERM vault-service
    -# 3. Wait for connections to drain
    -sleep 10
    -# 4. Restart service
    -cargo run --release -p vault-service &
    -# 5. Health check
    -curl http://localhost:8200/health
    -# 6. Return to load balancer
    -
    -

    Scale Workers for Load

    -
    # Increase workers when under load
    -export VAULT_SERVER_WORKERS=16
    -pkill -SIGTERM vault-service
    -sleep 2
    -cargo run --release -p vault-service &
    -
    -# Alternative: Edit schema/defaults
    -vim provisioning/schemas/platform/schemas/vault-service.ncl
    -# Or: vim provisioning/schemas/platform/defaults/vault-service-defaults.ncl
    -# Change: server.workers = 16, then re-generate and restart
    -./provisioning/.typedialog/platform/scripts/generate-configs.nu vault-service enterprise
    -pkill -SIGTERM vault-service
    -sleep 2
    -cargo run --release -p vault-service &
    -
    -
    -

    Diagnostic Bundle

    -
    # Generate complete diagnostics for support
    -echo "=== Processes ===" && pgrep -a cargo
    -echo "=== Listening Ports ===" && ss -tlnp
    -echo "=== System Resources ===" && free -h && df -h
    -echo "=== Schema Info ===" && nickel typecheck provisioning/schemas/platform/schemas/vault-service.ncl
    -echo "=== Active Env Vars ===" && env | grep -E "VAULT_|REGISTRY_|RAG_|AI_SERVICE_"
    -echo "=== Service Health ===" && for port in 8200 8081 8083 8082 9090 8080; do
    -  curl -s http://localhost:$port/health || echo "Port $port DOWN"
    -done
    -
    -# Package diagnostics for support ticket
    -tar -czf diagnostics-$(date +%Y%m%d-%H%M%S).tar.gz \
    -  /var/log/provisioning/ \
    -  provisioning/schemas/platform/ \
    -  provisioning/.typedialog/platform/ \
    -  <(ps aux) \
    -  <(env | grep -E "VAULT_|REGISTRY_|RAG_")
    -
    -
    -

    Essential References

    +

    Security Layers

    +

    Layer 1: Identity and Access

    +

    + Authentication Flow JWT OAuth MFA Token Refresh Session +

      -
    • Full Deployment Guide: provisioning/docs/src/operations/deployment-guide.md
    • -
    • Service Management: provisioning/docs/src/operations/service-management-guide.md
    • -
    • Config Guide: provisioning/docs/src/development/typedialog-platform-config-guide.md
    • -
    • Troubleshooting: provisioning/docs/src/operations/troubleshooting-guide.md
    • -
    • Platform Status: Check .coder/2026-01-05-phase13-19-completion.md for latest platform info
    • +
    • Authentication: Verify user identity with JWT tokens and Argon2id password hashing
    -
    -

    Last Updated: 2026-01-05 -Version: 1.0.0 -Status: Production Ready ✅

    -

    RAG System - Quick Reference Guide

    -

    Last Updated: 2025-11-06 -Status: Production Ready | 22/22 tests passing | 0 warnings

    -
    -

    📦 What You Have

    -

    Complete RAG System

    +

    + Authorization Cedar Policy Engine RBAC Permit Deny Evaluation +

      -
    • ✅ Document ingestion (Markdown, Nickel, Nushell)
    • -
    • ✅ Vector embeddings (OpenAI + local ONNX fallback)
    • -
    • ✅ SurrealDB vector storage with HNSW
    • -
    • ✅ RAG agent with Claude API
    • -
    • ✅ MCP server tools (ready for integration)
    • -
    • ✅ 22/22 tests passing
    • -
    • ✅ Zero compiler warnings
    • -
    • ✅ ~2,500 lines of production code
    • +
    • Authorization: Enforce access control with Cedar policies and RBAC
    • +
    • MFA: Add second factor with TOTP or FIDO2 hardware keys
    -

    Key Files

    -
    provisioning/platform/rag/src/
    -├── agent.rs          - RAG orchestration
    -├── llm.rs            - Claude API client
    -├── retrieval.rs      - Vector search
    -├── db.rs             - SurrealDB integration
    -├── ingestion.rs      - Document pipeline
    -├── embeddings.rs     - Vector generation
    -└── ... (5 more modules)
    +

    Layer 2: Data Protection

    +

    + Encryption Layers At-Rest In-Transit Post-Quantum Secrets Management +

    +
      +
    • Encryption: Protect data at rest with AES-256-GCM and in transit with TLS 1.3
    • +
    • Secrets Management: Store secrets securely in SecretumVault with automatic rotation
    • +
    • KMS: Manage encryption keys with envelope encryption across 5 backend options
    • +
    +

    Layer 3: Network Security

    +
      +
    • Secure Communication: Enforce TLS/mTLS for all service-to-service communication
    • +
    • Certificate Management: Automate certificate lifecycle with cert-manager integration
    • +
    • Network Policies: Control traffic flow with Kubernetes NetworkPolicies
    • +
    +

    Layer 4: Compliance and Monitoring

    +
      +
    • Audit Logging: Record all security events with 7-year retention
    • +
    • Compliance: Validate against SOC2, GDPR, and HIPAA frameworks
    • +
    • Security Testing: Continuous validation with automated security test suite
    • +
    +

    Performance Characteristics

    +
      +
    • Authentication Overhead: Less than 20ms per request with JWT verification
    • +
    • Authorization Decision: Less than 10ms with Cedar policy evaluation
    • +
    • Encryption Operations: Less than 5ms with KMS-backed envelope encryption
    • +
    • Audit Logging: Asynchronous with zero blocking on critical path
    • +
    • MFA Verification: Less than 100ms for TOTP, less than 500ms for WebAuthn
    • +
    +

    Security Standards

    +

    The security system adheres to industry standards and best practices:

    +
      +
    • OWASP Top 10: Protection against common web vulnerabilities
    • +
    • NIST Cybersecurity Framework: Aligned with identify, protect, detect, respond, recover
    • +
    • Zero Trust Architecture: Never trust, always verify principle
    • +
    • Defense in Depth: Multiple layers of security controls
    • +
    • Least Privilege: Minimal access rights for users and services
    • +
    • Secure by Default: Security controls enabled out of the box
    • +
    +

    Component Integration

    +

    All security components work together as a cohesive system:

    +
    ┌─────────────────────────────────────────────────────────────┐
    +│                    User Request                             │
    +└──────────────────────┬──────────────────────────────────────┘
    +                       │
    +                       ▼
    +┌─────────────────────────────────────────────────────────────┐
    +│  Authentication (JWT + Session)                             │
    +│  ↓                                                           │
    +│  Authorization (Cedar Policies)                             │
    +│  ↓                                                           │
    +│  MFA Verification (if required)                             │
    +└──────────────────────┬──────────────────────────────────────┘
    +                       │
    +                       ▼
    +┌─────────────────────────────────────────────────────────────┐
    +│  Audit Logging (Record all actions)                         │
    +└──────────────────────┬──────────────────────────────────────┘
    +                       │
    +                       ▼
    +┌─────────────────────────────────────────────────────────────┐
    +│  Secure Communication (TLS/mTLS)                            │
    +│  ↓                                                           │
    +│  Data Access (Encrypted with KMS)                           │
    +│  ↓                                                           │
    +│  Secrets Retrieved (SecretumVault)                          │
    +└──────────────────────┬──────────────────────────────────────┘
    +                       │
    +                       ▼
    +┌─────────────────────────────────────────────────────────────┐
    +│  Compliance Validation (SOC2/GDPR checks)                   │
    +└──────────────────────┬──────────────────────────────────────┘
    +                       │
    +                       ▼
    +┌─────────────────────────────────────────────────────────────┐
    +│                    Response                                 │
    +└─────────────────────────────────────────────────────────────┘
     
    +

    Security Configuration

    +

    Security settings are managed through hierarchical configuration:

    +
    # Security defaults in config/security.toml
    +[security]
    +auth_enabled = true
    +mfa_required = true
    +audit_enabled = true
    +encryption_at_rest = true
    +tls_min_version = "1.3"
    +
    +[security.jwt]
    +algorithm = "RS256"
    +access_token_ttl = 900        # 15 minutes
    +refresh_token_ttl = 604800    # 7 days
    +
    +[security.mfa]
    +totp_enabled = true
    +webauthn_enabled = true
    +backup_codes_count = 10
    +
    +[security.kms]
    +backend = "secretumvault"
    +envelope_encryption = true
    +key_rotation_days = 90
    +
    +[security.audit]
    +retention_days = 2555         # 7 years
    +export_formats = ["json", "csv", "parquet", "sqlite", "syslog"]
    +
    +[security.compliance]
    +frameworks = ["soc2", "gdpr", "hipaa"]
    +policy_enforcement = "strict"
    +
    +

    Quick Start

    +

    Enable security system for your deployment:

    +
    # Enable all security features
    +provisioning config set security.enabled true
    +
    +# Configure authentication
    +provisioning config set security.auth.jwt_algorithm RS256
    +provisioning config set security.auth.mfa_required true
    +
    +# Set up SecretumVault integration
    +provisioning config set security.secrets.backend secretumvault
    +provisioning config set security.secrets.url  [http://localhost:8200](http://localhost:8200)
    +
    +# Enable audit logging
    +provisioning config set security.audit.enabled true
    +provisioning config set security.audit.retention_days 2555
    +
    +# Configure compliance framework
    +provisioning config set security.compliance.frameworks soc2,gdpr
    +
    +# Verify security configuration
    +provisioning security validate
    +
    +

    Documentation Structure

    +

    This security documentation is organized into 12 detailed guides:

    +
      +
    1. Authentication - JWT token-based authentication and session management
    2. +
    3. Authorization - Cedar policy engine and RBAC access control
    4. +
    5. Multi-Factor Authentication - TOTP and WebAuthn/FIDO2 implementation
    6. +
    7. Audit Logging - Comprehensive audit trails and compliance reporting
    8. +
    9. Key Management Service - Encryption key management and rotation
    10. +
    11. Secrets Management - SecretumVault and SOPS/Age integration
    12. +
    13. Encryption - At-rest and in-transit data protection
    14. +
    15. Secure Communication - TLS/mTLS and network security
    16. +
    17. Certificate Management - PKI and certificate lifecycle
    18. +
    19. Compliance - SOC2, GDPR, HIPAA frameworks
    20. +
    21. Security Testing - Test suite and vulnerability scanning
    22. +
    23. Break-Glass Procedures - Emergency access and recovery
    24. +
    +

    Security Metrics

    +

    The security system tracks key metrics for monitoring and reporting:

    +
      +
    • Authentication Success Rate: Percentage of successful login attempts
    • +
    • MFA Adoption Rate: Percentage of users with MFA enabled
    • +
    • Policy Violations: Count of authorization denials
    • +
    • Audit Event Rate: Events logged per second
    • +
    • Secret Rotation Compliance: Percentage of secrets rotated within policy
    • +
    • Certificate Expiration: Days until certificate expiration
    • +
    • Compliance Score: Overall compliance posture percentage
    • +
    • Security Test Pass Rate: Percentage of security tests passing
    • +
    +

    Best Practices

    +

    Follow these security best practices:

    +
      +
    1. Enable MFA for all users: Require second factor for all accounts
    2. +
    3. Rotate secrets regularly: Automate secret rotation every 90 days
    4. +
    5. Monitor audit logs: Review security events daily
    6. +
    7. Test security controls: Run security test suite before deployments
    8. +
    9. Keep certificates current: Automate certificate renewal 30 days before expiration
    10. +
    11. Review policies regularly: Audit Cedar policies quarterly
    12. +
    13. Limit break-glass access: Require multi-party approval for emergency access
    14. +
    15. Encrypt all data: Enable encryption at rest and in transit
    16. +
    17. Follow least privilege: Grant minimal required permissions
    18. +
    19. Validate compliance: Run compliance checks before production deployments
    20. +
    +

    Getting Help

    +

    For security issues and questions:

    +
      +
    • Security Documentation: Complete guides in this security section
    • +
    • CLI Help: provisioning security help
    • +
    • Security Validation: provisioning security validate
    • +
    • Audit Query: provisioning security audit query
    • +
    • Compliance Check: provisioning security compliance check
    • +
    +

    Security Updates

    +

    The security system is continuously updated to address emerging threats and vulnerabilities. Subscribe to security advisories and apply updates promptly.


    -

    🚀 Quick Start

    -

    Build & Test

    -
    cd /Users/Akasha/project-provisioning/provisioning/platform
    -cargo test -p provisioning-rag
    +

    Next Steps:

    + +

    Authentication

    +

    JWT token-based authentication with session management, login flows, and multi-provider support.

    +

    Overview

    +

    The authentication system verifies user identity through JWT (JSON Web Token) tokens with RS256 +signatures and Argon2id password hashing. It provides secure session management, token refresh +capabilities, and support for multiple authentication providers.

    +

    Architecture

    +

    Authentication Flow

    +
    ┌──────────┐                ┌──────────────┐                ┌────────────┐
    +│  Client  │                │  Auth Service│                │  Database  │
    +└────┬─────┘                └──────┬───────┘                └─────┬──────┘
    +     │                             │                              │
    +     │  POST /auth/login           │                              │
    +     │  {username, password}       │                              │
    +     │────────────────────────────>│                              │
    +     │                             │                              │
    +     │                             │  Find user by username       │
    +     │                             │─────────────────────────────>│
    +     │                             │<─────────────────────────────│
    +     │                             │  User record                 │
    +     │                             │                              │
    +     │                             │  Verify password (Argon2id)  │
    +     │                             │                              │
    +     │                             │  Create session              │
    +     │                             │─────────────────────────────>│
    +     │                             │<─────────────────────────────│
    +     │                             │                              │
    +     │                             │  Generate JWT token pair     │
    +     │                             │                              │
    +     │  {access_token, refresh}    │                              │
    +     │<────────────────────────────│                              │
    +     │                             │                              │
     
    -

    Run Example

    -
    cargo run --example rag_agent
    -
    -

    Check Tests

    -
    cargo test -p provisioning-rag --lib
    -# Result: test result: ok. 22 passed; 0 failed
    -
    -
    -

    📚 Documentation Files

    -
    - - - - - - +

    Components

    +
    FilePurpose
    PHASE5_CLAUDE_INTEGRATION_SUMMARY.mdClaude API details
    PHASE6_MCP_INTEGRATION_SUMMARY.mdMCP integration guide
    RAG_SYSTEM_COMPLETE_SUMMARY.mdOverall architecture
    RAG_SYSTEM_STATUS_SUMMARY.mdCurrent status & metrics
    PHASE7_ADVANCED_RAG_FEATURES_PLAN.mdFuture roadmap
    RAG_IMPLEMENTATION_COMPLETE.mdFinal status report
    + + + + +
    ComponentPurposeTechnology
    AuthServiceCore authentication logicRust service in control-center
    JwtServiceToken generation and verificationRS256 algorithm with jsonwebtoken crate
    SessionManagerSession lifecycle managementDatabase-backed session storage
    PasswordHasherPassword hashing and verificationArgon2id with configurable parameters
    UserServiceUser account managementCRUD operations with role assignment
    -
    -

    ⚙️ Configuration

    -

    Environment Variables

    -
    # Required for Claude integration
    -export ANTHROPIC_API_KEY="sk-..."
    -
    -# Optional for OpenAI embeddings
    -export OPENAI_API_KEY="sk-..."
    -
    -

    SurrealDB

    -
      -
    • Default: In-memory for testing
    • -
    • Production: Network mode with persistence
    • -
    -

    Model

    -
      -
    • Default: claude-opus-4-1
    • -
    • Customizable via configuration
    • -
    -
    -

    🎯 Key Capabilities

    -

    1. Ask Questions

    -
    let response = agent.ask("How do I deploy?").await?;
    -// Returns: answer + sources + confidence
    - -
    let results = retriever.search("deployment", Some(5)).await?;
    -// Returns: top-5 similar documents
    -

    3. Workspace Awareness

    -
    let context = workspace.enrich_query("deploy");
    -// Automatically includes: taskservs, providers, infrastructure
    -

    4. MCP Integration

    -
      -
    • Tools: rag_answer_question, semantic_search_rag, rag_system_status
    • -
    • Ready when MCP server re-enabled
    • -
    -
    -

    📊 Performance

    -
    - - - - - -
    MetricValue
    Query Time (P95)450 ms
    Throughput100+ qps
    Cost$0.008/query
    Memory~200 MB
    Test Pass Rate100%
    -
    -
    -

    ✅ What’s Working

    -
      -
    • ✅ Multi-format document chunking
    • -
    • ✅ Vector embedding generation
    • -
    • ✅ Semantic similarity search
    • -
    • ✅ RAG question answering
    • -
    • ✅ Claude API integration
    • -
    • ✅ Workspace context enrichment
    • -
    • ✅ Error handling & fallbacks
    • -
    • ✅ Comprehensive testing
    • -
    • ✅ MCP tool scaffolding
    • -
    • ✅ Production-ready code quality
    • -
    -
    -

    🔧 What’s Not Implemented (Phase 7)

    -

    Coming soon (next phase):

    -
      -
    • Response caching (70% hit rate planned)
    • -
    • Token streaming (better UX)
    • -
    • Function calling (Claude invokes tools)
    • -
    • Hybrid search (vector + keyword)
    • -
    • Multi-turn conversations
    • -
    • Query optimization
    • -
    -
    -

    🎯 Next Steps

    -

    This Week

    -
      -
    1. Review status & documentation
    2. -
    3. Get feedback on Phase 7 priorities
    4. -
    5. Set up monitoring infrastructure
    6. -
    -

    Next Week (Phase 7a)

    -
      -
    1. Implement response caching
    2. -
    3. Add streaming responses
    4. -
    5. Deploy Prometheus metrics
    6. -
    -

    Weeks 3-4 (Phase 7b)

    -
      -
    1. Implement function calling
    2. -
    3. Add hybrid search
    4. -
    5. Support conversations
    6. -
    -
    -

    📞 How to Use

    -

    As a Library

    -
    use provisioning_rag::{RagAgent, DbConnection, RetrieverEngine};
    -
    -// Initialize
    -let db = DbConnection::new(config).await?;
    -let retriever = RetrieverEngine::new(config, db, embeddings).await?;
    -let agent = RagAgent::new(retriever, context, model)?;
    -
    -// Ask questions
    -let response = agent.ask("question").await?;
    -

    Via MCP Server (When Enabled)

    -
    POST /tools/rag_answer_question
    -{
    -  "question": "How do I deploy?"
    +

    JWT Token Structure

    +

    Access Token

    +

    Short-lived token for API authentication (default: 15 minutes).

    +
    {
    +  "header": {
    +    "alg": "RS256",
    +    "typ": "JWT"
    +  },
    +  "payload": {
    +    "sub": "550e8400-e29b-41d4-a716-446655440000",
    +    "email": "[user@example.com](mailto:user@example.com)",
    +    "username": "alice",
    +    "roles": ["user", "developer"],
    +    "session_id": "sess_abc123",
    +    "mfa_verified": true,
    +    "permissions_hash": "sha256:abc123...",
    +    "iat": 1704067200,
    +    "exp": 1704068100,
    +    "iss": "provisioning-platform",
    +    "aud": "api.provisioning.example.com"
    +  }
     }
     
    -

    From CLI (via example)

    -
    cargo run --example rag_agent
    +

    Refresh Token

    +

    Long-lived token for obtaining new access tokens (default: 7 days).

    +
    {
    +  "header": {
    +    "alg": "RS256",
    +    "typ": "JWT"
    +  },
    +  "payload": {
    +    "sub": "550e8400-e29b-41d4-a716-446655440000",
    +    "session_id": "sess_abc123",
    +    "token_type": "refresh",
    +    "iat": 1704067200,
    +    "exp": 1704672000,
    +    "iss": "provisioning-platform"
    +  }
    +}
     
    -
    -

    🔗 Integration Points

    -

    Current

    +

    Password Security

    +

    Argon2id Configuration

    +

    Password hashing uses Argon2id with security-hardened parameters:

    +
    // Default Argon2id parameters
    +argon2::Params {
    +    m_cost: 65536,      // 64 MB memory
    +    t_cost: 3,          // 3 iterations
    +    p_cost: 4,          // 4 parallelism
    +    output_len: 32      // 32 byte hash
    +}
    +

    Password Requirements

    +

    Default password policy enforces:

      -
    • Claude API ✅ (Anthropic)
    • -
    • SurrealDB ✅ (Vector store)
    • -
    • OpenAI ✅ (Embeddings)
    • -
    • Local ONNX ✅ (Fallback)
    • +
    • Minimum 12 characters
    • +
    • At least one uppercase letter
    • +
    • At least one lowercase letter
    • +
    • At least one digit
    • +
    • At least one special character
    • +
    • Not in common password list
    • +
    • Not similar to username or email
    -

    Future (Phase 7+)

    -
      -
    • Prometheus (metrics)
    • -
    • Streaming API
    • -
    • Function calling framework
    • -
    • Hybrid search engine
    • -
    -
    -

    🚨 Known Issues

    -

    None - System is production ready

    -
    -

    📈 Metrics

    -

    Code Quality

    -
      -
    • Tests: 22/22 passing
    • -
    • Warnings: 0
    • -
    • Coverage: >90%
    • -
    • Type Safety: Complete
    • -
    -

    Performance

    -
      -
    • Latency P95: 450 ms
    • -
    • Throughput: 100+ qps
    • -
    • Cost: $0.008/query
    • -
    • Memory: ~200 MB
    • -
    -
    -

    💡 Tips

    -

    For Development

    +

    Session Management

    +

    Session Lifecycle

      -
    1. Add tests alongside code
    2. -
    3. Use cargo test frequently
    4. -
    5. Check cargo doc --open for API
    6. -
    7. Run clippy: cargo clippy
    8. +
    9. Creation: New session created on successful login
    10. +
    11. Active: Session tracked with last activity timestamp
    12. +
    13. Refresh: Session extended on token refresh
    14. +
    15. Expiration: Session expires after inactivity timeout
    16. +
    17. Revocation: Manual logout or security event terminates session
    -

    For Deployment

    +

    Session Storage

    +

    Sessions stored in database with:

    +
    pub struct Session {
    +    pub session_id: Uuid,
    +    pub user_id: Uuid,
    +    pub created_at: DateTime<Utc>,
    +    pub expires_at: DateTime<Utc>,
    +    pub last_activity: DateTime<Utc>,
    +    pub ip_address: Option<String>,
    +    pub user_agent: Option<String>,
    +    pub is_active: bool,
    +}
    +

    Session Tracking

    +

    Track multiple concurrent sessions per user:

    +
    # List active sessions for user
    +provisioning security sessions list --user alice
    +
    +# Revoke specific session
    +provisioning security sessions revoke --session-id sess_abc123
    +
    +# Revoke all sessions except current
    +provisioning security sessions revoke-all --except-current
    +
    +

    Login Flows

    +

    Standard Login

    +

    Basic username/password authentication:

    +
    # CLI login
    +provisioning auth login --username alice --password <password>
    +
    +# API login
    +curl -X POST  [https://api.provisioning.example.com/auth/login](https://api.provisioning.example.com/auth/login) \
    +  -H "Content-Type: application/json" \
    +  -d '{
    +    "username_or_email": "alice",
    +    "password": "SecurePassword123!",
    +    "client_info": {
    +      "ip_address": "192.168.1.100",
    +      "user_agent": "provisioning-cli/1.0"
    +    }
    +  }'
    +
    +

    Response:

    +
    {
    +  "access_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9...",
    +  "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9...",
    +  "token_type": "Bearer",
    +  "expires_in": 900,
    +  "user": {
    +    "user_id": "550e8400-e29b-41d4-a716-446655440000",
    +    "username": "alice",
    +    "email": "[alice@example.com](mailto:alice@example.com)",
    +    "roles": ["user", "developer"]
    +  }
    +}
    +
    +

    MFA Login

    +

    Two-phase authentication with MFA:

    +
    # Phase 1: Initial authentication
    +provisioning auth login --username alice --password <password>
    +
    +# Response indicates MFA required
    +# {
    +#   "mfa_required": true,
    +#   "mfa_token": "temp_token_abc123",
    +#   "available_methods": ["totp", "webauthn"]
    +# }
    +
    +# Phase 2: MFA verification
    +provisioning auth mfa-verify --mfa-token temp_token_abc123 --code 123456
    +
    +

    SSO Login

    +

    Single Sign-On with external providers:

    +
    # Initiate SSO flow
    +provisioning auth sso --provider okta
    +
    +# Or with SAML
    +provisioning auth sso --provider azure-ad --protocol saml
    +
    +

    Token Refresh

    +

    Automatic Refresh

    +

    Client libraries automatically refresh tokens before expiration:

    +
    // Automatic token refresh in Rust client
    +let client = ProvisioningClient::new()
    +    .with_auto_refresh(true)
    +    .build()?;
    +
    +// Tokens refreshed transparently
    +client.server().list().await?;
    +

    Manual Refresh

    +

    Explicit token refresh when needed:

    +
    # CLI token refresh
    +provisioning auth refresh
    +
    +# API token refresh
    +curl -X POST  [https://api.provisioning.example.com/auth/refresh](https://api.provisioning.example.com/auth/refresh) \
    +  -H "Content-Type: application/json" \
    +  -d '{
    +    "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9..."
    +  }'
    +
    +

    Response:

    +
    {
    +  "access_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9...",
    +  "token_type": "Bearer",
    +  "expires_in": 900
    +}
    +
    +

    Multi-Provider Authentication

    +

    Supported Providers

    +
    + + + + + +
    ProviderTypeConfiguration
    LocalUsername/passwordBuilt-in user database
    LDAPDirectory serviceActive Directory, OpenLDAP
    SAMLSSOOkta, Azure AD, OneLogin
    OIDCOAuth2/OpenIDGoogle, GitHub, Auth0
    mTLSCertificateClient certificate authentication
    +
    +

    Provider Configuration

    +
    [auth.providers.ldap]
    +enabled = true
    +server = "ldap://ldap.example.com"
    +base_dn = "dc=example,dc=com"
    +bind_dn = "cn=admin,dc=example,dc=com"
    +user_filter = "(uid={username})"
    +
    +[auth.providers.saml]
    +enabled = true
    +entity_id = " [https://provisioning.example.com"](https://provisioning.example.com")
    +sso_url = " [https://okta.example.com/sso/saml"](https://okta.example.com/sso/saml")
    +certificate_path = "/etc/provisioning/saml-cert.pem"
    +
    +[auth.providers.oidc]
    +enabled = true
    +issuer = " [https://accounts.google.com"](https://accounts.google.com")
    +client_id = "client_id_here"
    +client_secret = "client_secret_here"
    +redirect_uri = " [https://provisioning.example.com/auth/callback"](https://provisioning.example.com/auth/callback")
    +
    +

    Token Validation

    +

    JWT Verification

    +

    All API requests validate JWT tokens:

    +
    // Middleware validates JWT on every request
    +pub async fn jwt_auth_middleware(
    +    headers: HeaderMap,
    +    State(jwt_service): State<Arc<JwtService>>,
    +    mut request: Request,
    +    next: Next,
    +) -> Result<Response, AuthError> {
    +    // Extract token from Authorization header
    +    let token = extract_bearer_token(&headers)?;
    +
    +    // Verify signature and claims
    +    let claims = jwt_service.verify_access_token(&token)?;
    +
    +    // Check expiration
    +    if claims.exp < Utc::now().timestamp() {
    +        return Err(AuthError::TokenExpired);
    +    }
    +
    +    // Inject user context into request
    +    request.extensions_mut().insert(claims);
    +
    +    Ok(next.run(request).await)
    +}
    +

    Token Revocation

    +

    Revoke tokens on security events:

    +
    # Revoke all tokens for user
    +provisioning security tokens revoke-user --user alice
    +
    +# Revoke specific token
    +provisioning security tokens revoke --token-id token_abc123
    +
    +# Check token status
    +provisioning security tokens status --token eyJhbGci...
    +
    +

    Security Hardening

    +

    Configuration

    +

    Secure authentication settings:

    +
    [security.auth]
    +# JWT settings
    +jwt_algorithm = "RS256"
    +jwt_issuer = "provisioning-platform"
    +access_token_ttl = 900           # 15 minutes
    +refresh_token_ttl = 604800       # 7 days
    +token_leeway = 30                # 30 seconds clock skew
    +
    +# Password policy
    +password_min_length = 12
    +password_require_uppercase = true
    +password_require_lowercase = true
    +password_require_digit = true
    +password_require_special = true
    +password_check_common = true
    +
    +# Session settings
    +session_timeout = 1800           # 30 minutes inactivity
    +max_sessions_per_user = 5
    +remember_me_duration = 2592000   # 30 days
    +
    +# Security controls
    +enforce_mfa = true
    +allow_password_reset = true
    +lockout_after_attempts = 5
    +lockout_duration = 900           # 15 minutes
    +
    +

    Best Practices

      -
    1. Set API keys first
    2. -
    3. Test with examples
    4. -
    5. Monitor via metrics
    6. -
    7. Setup log aggregation
    8. +
    9. Use strong passwords: Enforce password policy with minimum 12 characters
    10. +
    11. Enable MFA: Require second factor for all users
    12. +
    13. Rotate keys regularly: Update JWT signing keys every 90 days
    14. +
    15. Monitor failed attempts: Alert on suspicious login patterns
    16. +
    17. Limit session duration: Use short access token TTL with refresh tokens
    18. +
    19. Secure token storage: Store tokens securely, never in local storage
    20. +
    21. Validate on every request: Always verify JWT signature and expiration
    22. +
    23. Use HTTPS only: Never transmit tokens over unencrypted connections
    -

    For Debugging

    -
      -
    1. Enable debug logging: RUST_LOG=debug
    2. -
    3. Check test examples
    4. -
    5. Review error types in error.rs
    6. -
    7. Use cargo expand for macros
    8. -
    -
    -

    📚 Learning Resources

    -
      -
    1. Module Documentation: cargo doc --open
    2. -
    3. Example Code: examples/rag_agent.rs
    4. -
    5. Tests: Tests in each module
    6. -
    7. Architecture: RAG_SYSTEM_COMPLETE_SUMMARY.md
    8. -
    9. Integration: PHASE6_MCP_INTEGRATION_SUMMARY.md
    10. -
    -
    -

    🎓 Architecture Overview

    -
    User Question
    -    ↓
    -Query Enrichment (Workspace context)
    -    ↓
    -Vector Search (HNSW in SurrealDB)
    -    ↓
    -Context Building (Retrieved documents)
    -    ↓
    -Claude API Call
    -    ↓
    -Answer Generation
    -    ↓
    -Return with Sources & Confidence
    +

    CLI Integration

    +

    Login and Session Management

    +
    # Login with credentials
    +provisioning auth login --username alice
    +
    +# Login with MFA
    +provisioning auth login --username alice --mfa
    +
    +# Check authentication status
    +provisioning auth status
    +
    +# Logout (revoke session)
    +provisioning auth logout
    +
    +# List active sessions
    +provisioning security sessions list
    +
    +# Refresh token
    +provisioning auth refresh
     
    -
    -

    🔐 Security

    +

    Token Management

    +
    # Show current token
    +provisioning auth token show
    +
    +# Validate token
    +provisioning auth token validate
    +
    +# Decode token (without verification)
    +provisioning auth token decode
    +
    +# Revoke token
    +provisioning auth token revoke
    +
    +

    API Reference

    +

    Endpoints

    +
    + + + + + + + + +
    EndpointMethodPurpose
    /auth/loginPOSTAuthenticate with credentials
    /auth/refreshPOSTRefresh access token
    /auth/logoutPOSTRevoke session and tokens
    /auth/verifyPOSTVerify MFA code
    /auth/sessionsGETList active sessions
    /auth/sessions/:idDELETERevoke specific session
    /auth/password-resetPOSTInitiate password reset
    /auth/password-changePOSTChange password
    +
    +

    Troubleshooting

    +

    Common Issues

    +

    Token expired errors:

    +
    # Refresh token
    +provisioning auth refresh
    +
    +# Or re-login
    +provisioning auth login
    +
    +

    Invalid signature:

    +
    # Check JWT configuration
    +provisioning config get security.auth.jwt_algorithm
    +
    +# Verify public key is correct
    +provisioning security keys verify
    +
    +

    MFA verification failures:

    +
    # Check time sync (TOTP requires accurate time)
    +ntpdate -q pool.ntp.org
    +
    +# Re-sync MFA device
    +provisioning auth mfa-setup --resync
    +
    +

    Session not found:

    +
    # Clear local session and re-login
    +provisioning auth logout
    +provisioning auth login
    +
    +

    Monitoring

    +

    Metrics

    +

    Track authentication metrics:

      -
    • ✅ API keys via environment
    • -
    • ✅ No hardcoded secrets
    • -
    • ✅ Input validation
    • -
    • ✅ Graceful error handling
    • -
    • ✅ No unsafe code
    • -
    • ✅ Type-safe throughout
    • +
    • Login success rate
    • +
    • Failed login attempts per user
    • +
    • Average session duration
    • +
    • Token refresh rate
    • +
    • MFA verification success rate
    • +
    • Active sessions count
    • +
    +

    Alerts

    +

    Configure alerts for security events:

    +
      +
    • Multiple failed login attempts
    • +
    • Login from new location
    • +
    • Unusual authentication patterns
    • +
    • Session hijacking attempts
    • +
    • Token tampering detected

    -

    📞 Support

    +

    Next Steps:

      -
    • Code Issues: Check test examples
    • -
    • Integration: See PHASE6 docs
    • -
    • Architecture: See COMPLETE_SUMMARY.md
    • -
    • API Details: Run cargo doc --open
    • -
    • Examples: See examples/rag_agent.rs
    • +
    • Configure Authorization with Cedar policies
    • +
    • Enable Multi-Factor Authentication
    • +
    • Set up Audit Logging for authentication events
    -
    -

    Status: 🟢 Production Ready -Last Verified: 2025-11-06 -All Tests: ✅ Passing -Next Phase: 🔵 Phase 7 (Ready to start)

    -

    Justfile Recipes - Quick Reference

    -

    Authentication (auth.just)

    -
    # Login & Logout
    -just auth-login <user>              # Login to platform
    -just auth-logout                    # Logout current session
    -just whoami                         # Show current user status
    -
    -# MFA Setup
    -just mfa-enroll-totp                # Enroll in TOTP MFA
    -just mfa-enroll-webauthn            # Enroll in WebAuthn MFA
    -just mfa-verify <code>              # Verify MFA code
    -
    -# Sessions
    -just auth-sessions                  # List active sessions
    -just auth-revoke-session <id>       # Revoke specific session
    -just auth-revoke-all                # Revoke all other sessions
    -
    -# Workflows
    -just auth-login-prod <user>         # Production login (MFA required)
    -just auth-quick                     # Quick re-authentication
    -
    -# Help
    -just auth-help                      # Complete authentication guide
    -
    -

    KMS (kms.just)

    -
    # Encryption
    -just kms-encrypt <file>             # Encrypt file with RustyVault
    -just kms-decrypt <file>             # Decrypt file
    -just encrypt-config <file>          # Encrypt configuration file
    -
    -# Backends
    -just kms-backends                   # List available backends
    -just kms-test-all                   # Test all backends
    -just kms-switch-backend <backend>   # Change default backend
    -
    -# Key Management
    -just kms-generate-key               # Generate AES256 key
    -just kms-list-keys                  # List encryption keys
    -just kms-rotate-key <id>            # Rotate key
    -
    -# Bulk Operations
    -just encrypt-env-files [dir]        # Encrypt all .env files
    -just encrypt-configs [dir]          # Encrypt all configs
    -just decrypt-all-files <dir>        # Decrypt all .enc files
    -
    -# Workflows
    -just kms-setup                      # Setup KMS for project
    -just quick-encrypt <file>           # Fast encrypt
    -just quick-decrypt <file>           # Fast decrypt
    -
    -# Help
    -just kms-help                       # Complete KMS guide
    -
    -

    Orchestrator (orchestrator.just)

    -
    # Status
    -just orch-status                    # Show orchestrator status
    -just orch-health                    # Health check
    -just orch-info                      # Detailed information
    -
    -# Tasks
    -just orch-tasks                     # List all tasks
    -just orch-tasks-running             # Show running tasks
    -just orch-tasks-failed              # Show failed tasks
    -just orch-task-cancel <id>          # Cancel task
    -just orch-task-retry <id>           # Retry failed task
    -
    -# Workflows
    -just workflow-list                  # List all workflows
    -just workflow-status <id>           # Show workflow status
    -just workflow-monitor <id>          # Monitor real-time
    -just workflow-logs <id>             # Show logs
    -
    -# Batch Operations
    -just batch-submit <file>            # Submit batch workflow
    -just batch-monitor <id>             # Monitor batch progress
    -just batch-rollback <id>            # Rollback batch
    -just batch-cancel <id>              # Cancel batch
    -
    -# Validation
    -just orch-validate <file>           # Validate KCL workflow
    -just workflow-dry-run <file>        # Simulate execution
    -
    -# Cleanup
    -just workflow-cleanup               # Clean completed workflows
    -just workflow-cleanup-old <days>    # Clean old workflows
    -just workflow-cleanup-failed        # Clean failed workflows
    -
    -# Quick Workflows
    -just quick-server-create <infra>    # Quick server creation
    -just quick-taskserv-install <t> <i> # Quick taskserv install
    -just quick-cluster-deploy <c> <i>   # Quick cluster deploy
    -
    -# Help
    -just orch-help                      # Complete orchestrator guide
    -
    -

    Plugin Testing

    -
    just test-plugins                   # Test all plugins
    -just test-plugin-auth               # Test auth plugin
    -just test-plugin-kms                # Test KMS plugin
    -just test-plugin-orch               # Test orchestrator plugin
    -just list-plugins                   # List installed plugins
    -
    -

    Common Workflows

    -

    Complete Authentication Setup

    -
    just auth-login alice
    -just mfa-enroll-totp
    -just auth-status
    -
    -

    Production Deployment Workflow

    -
    # Login with MFA
    -just auth-login-prod alice
    -
    -# Encrypt sensitive configs
    -just encrypt-config prod/secrets.yaml
    -just encrypt-env-files ./config
    -
    -# Submit batch workflow
    -just batch-submit workflows/deploy-prod.ncl
    -just batch-monitor <workflow-id>
    -
    -

    KMS Setup and Testing

    -
    # Setup KMS
    -just kms-setup
    -
    -# Test all backends
    -just kms-test-all
    -
    -# Encrypt project configs
    -just encrypt-configs config/
    -
    -

    Monitoring Operations

    -
    # Check orchestrator health
    -just orch-health
    -
    -# Monitor running tasks
    -just orch-tasks-running
    -
    -# View workflow logs
    -just workflow-logs <workflow-id>
    -
    -# Check metrics
    -just orch-metrics
    -
    -

    Cleanup Operations

    -
    # Cleanup old workflows
    -just workflow-cleanup-old 30
    -
    -# Cleanup failed workflows
    -just workflow-cleanup-failed
    -
    -# Decrypt all files for migration
    -just decrypt-all-files ./encrypted
    -
    -

    Tips

    -
      -
    1. -

      Help is Built-in: Every module has a help recipe

      +

      Authorization

      +

      Multi-Factor Authentication

      +

      Audit Logging

      +

      KMS Guide

      +

      Secrets Management

      +

      SecretumVault Integration Guide

      +

      SecretumVault is a post-quantum cryptography (PQC) secure vault system integrated with +Provisioning’s vault-service. It provides quantum-resistant encryption for sensitive credentials +and infrastructure secrets.

      +

      Overview

      +

      SecretumVault combines:

        -
      • just auth-help
      • -
      • just kms-help
      • -
      • just orch-help
      • +
      • Post-Quantum Cryptography: Algorithms resistant to quantum computer attacks
      • +
      • Hardware Acceleration: Optional FPGA acceleration for performance
      • +
      • Distributed Architecture: Multi-node secure storage
      • +
      • Compliance: FIPS 140-3 ready, NIST standards
      -
    2. -
    3. -

      Tab Completion: Use just --list to see all available recipes

      -
    4. -
    5. -

      Dry-Run: Use just -n <recipe> to see what would be executed

      -
    6. -
    7. -

      Shortcuts: Many recipes have short aliases

      +

      Architecture

      +

      Integration Points

      +
      Provisioning
      +    ├─ CLI (Nushell)
      +    │   └─ nu_plugin_secretumvault
      +    │
      +    ├─ vault-service (Rust)
      +    │   ├─ secretumvault backend
      +    │   ├─ rustyvault compatibility
      +    │   └─ SOPS + Age integration
      +    │
      +    └─ Control Center
      +        └─ Secret management UI
      +
      +

      Cryptographic Stack

      +
      User Secret
      +    ↓
      +KDF (Key Derivation Function)
      +    ├─ Argon2id (password-based)
      +    └─ HKDF (key-based)
      +    ↓
      +PQC Encryption Layer
      +    ├─ CRYSTALS-Kyber (key encapsulation)
      +    ├─ Falcon (signature)
      +    ├─ SPHINCS+ (backup signature)
      +    └─ Hybrid: PQC + Classical (AES-256)
      +    ↓
      +Authenticated Encryption
      +    ├─ ChaCha20-Poly1305
      +    └─ AES-256-GCM
      +    ↓
      +Secure Storage
      +    ├─ Local vault
      +    ├─ SurrealDB
      +    └─ Hardware module (optional)
      +
      +

      Installation

      +

      Install SecretumVault

      +
      # Install via provisioning
      +provisioning install secretumvault
      +
      +# Or manual installation
      +cd /Users/Akasha/Development/secretumvault
      +cargo install --path .
      +
      +# Verify installation
      +secretumvault --version
      +
      +

      Install Nushell Plugin

      +
      # Install plugin
      +provisioning install nu-plugin-secretumvault
      +
      +# Reload Nushell
      +nu -c "plugin add nu_plugin_secretumvault"
      +
      +# Verify
      +nu -c "secretumvault-plugin version"
      +
      +

      Configuration

      +

      Environment Setup

      +
      # Set vault location
      +export SECRETUMVAULT_HOME=~/.secretumvault
      +
      +# Set encryption algorithm
      +export SECRETUMVAULT_CIPHER=kyber-aes  # kyber-aes, falcon-aes, hybrid
      +
      +# Set key derivation
      +export SECRETUMVAULT_KDF=argon2id      # argon2id, pbkdf2
      +
      +# Enable hardware acceleration (optional)
      +export SECRETUMVAULT_HW_ACCEL=enabled
      +
      +

      Configuration File

      +
      # ~/.secretumvault/config.yaml
      +vault:
      +  storage_backend: surrealdb          # local, surrealdb, redis
      +  encryption_cipher: kyber-aes        # kyber-aes, falcon-aes, hybrid
      +  key_derivation: argon2id            # argon2id, pbkdf2
      +
      +  # Argon2id parameters (password strength)
      +  kdf:
      +    memory: 65536                     # KB
      +    iterations: 3
      +    parallelism: 4
      +
      +  # Encryption parameters
      +  encryption:
      +    key_length: 256                   # bits
      +    nonce_length: 12                  # bytes
      +    auth_tag_length: 16               # bytes
      +
      +# Database backend (if using SurrealDB)
      +database:
      +  url: "surrealdb://localhost:8000"
      +  namespace: "provisioning"
      +  database: "secrets"
      +
      +# Hardware acceleration (optional)
      +hardware:
      +  use_fpga: false
      +  fpga_device: "/dev/fpga0"
      +
      +# Backup configuration
      +backup:
      +  enabled: true
      +  interval: 24                        # hours
      +  retention: 30                       # days
      +  encrypt_backup: true
      +  backup_path: ~/.secretumvault/backups
      +
      +# Access logging
      +audit:
      +  enabled: true
      +  log_file: ~/.secretumvault/audit.log
      +  log_level: info
      +  rotate_logs: true
      +  retention_days: 365
      +
      +# Master key management
      +master_key:
      +  protection: none                    # none, tpm, hsm, hardware-module
      +  rotation_enabled: true
      +  rotation_interval: 90               # days
      +
      +

      Usage

      +

      Command Line Interface

      +
      # Create master key
      +secretumvault init
      +
      +# Add secret
      +secretumvault secret add \
      +  --name database-password \
      +  --value "supersecret" \
      +  --metadata "type=database,app=api"
      +
      +# Retrieve secret
      +secretumvault secret get database-password
      +
      +# List secrets
      +secretumvault secret list
      +
      +# Delete secret
      +secretumvault secret delete database-password
      +
      +# Rotate key
      +secretumvault key rotate
      +
      +# Backup vault
      +secretumvault backup create --output vault-backup.enc
      +
      +# Restore vault
      +secretumvault backup restore vault-backup.enc
      +
      +

      Nushell Integration

      +
      # Load SecretumVault plugin
      +plugin add nu_plugin_secretumvault
      +
      +# Add secret from Nushell
      +let password = "mypassword"
      +secretumvault-plugin store "app-secret" $password
      +
      +# Retrieve secret
      +let db_pass = (secretumvault-plugin retrieve "database-password")
      +
      +# List all secrets
      +secretumvault-plugin list
      +
      +# Delete secret
      +secretumvault-plugin delete "old-secret"
      +
      +# Rotate key
      +secretumvault-plugin rotate-key
      +
      +

      Provisioning Integration

      +
      # Configure vault-service to use SecretumVault
      +provisioning config set security.vault.backend secretumvault
      +
      +# Enable in form prefill
      +provisioning setup profile --use-secretumvault
      +
      +# Manage secrets via CLI
      +provisioning vault add \
      +  --name aws-access-key \
      +  --value "AKIAIOSFODNN7EXAMPLE" \
      +  --metadata "provider=aws,env=production"
      +
      +# Use secret in infrastructure
      +provisioning ai "Create AWS resources using secret aws-access-key"
      +
      +

      Post-Quantum Cryptography

      +

      Algorithms Supported

      +
      + + + + + +
      AlgorithmTypeNIST StatusPerformance
      CRYSTALS-KyberKEMFinalistFast
      FalconSignatureFinalistMedium
      SPHINCS+Hash-based SignatureFinalistSlower
      AES-256Hybrid (Classical)StandardVery fast
      ChaCha20Stream CipherAlternativeFast
      +
      + +

      SecretumVault uses hybrid encryption by default:

      +
      Secret Input
      +    ↓
      +Key Material: Classical (AES-256) + PQC (Kyber)
      +    ├─ Generate AES key
      +    ├─ Generate Kyber keypair
      +    └─ Encapsulate using Kyber
      +    ↓
      +Encrypt with both algorithms
      +    ├─ AES-256-GCM encryption
      +    └─ Kyber encapsulation (public key cryptography)
      +    ↓
      +Both keys required to decrypt
      +    ├─ If quantum computer breaks Kyber → AES still secure
      +    └─ If breakthrough in AES → Kyber still secure
      +    ↓
      +Encrypted Secret Stored
      +
      +

      Advantages:

        -
      • just whoami = just auth-status
      • +
      • Protection against quantum computers (PQC)
      • +
      • Protection against classical attacks (AES-256)
      • +
      • Compatible with both current and future threats
      • +
      • No single point of failure
      +

      Key Rotation Strategy

      +
      # Manual key rotation
      +secretumvault key rotate --algorithm kyber-aes
      +
      +# Scheduled rotation (every 90 days)
      +secretumvault key rotate --schedule 90d
      +
      +# Emergency rotation
      +secretumvault key rotate --emergency --force
      +
      +

      Security Features

      +

      Authentication

      +
      # Master key authentication
      +secretumvault auth login
      +
      +# MFA for sensitive operations
      +secretumvault auth mfa enable --method totp
      +
      +# Biometric unlock (supported platforms)
      +secretumvault auth enable-biometric
      +
      +

      Access Control

      +
      # Set vault permissions
      +secretumvault acl set database-password \
      +  --read "api-service,backup-service" \
      +  --write "admin" \
      +  --delete "admin"
      +
      +# View access logs
      +secretumvault audit log --secret database-password
      +
      +

      Audit Logging

      +

      Every operation is logged:

      +
      # View audit log
      +secretumvault audit log --since 24h
      +
      +# Export audit log
      +secretumvault audit export --format json > audit.json
      +
      +# Monitor real-time
      +secretumvault audit monitor
      +
      +

      Sample Log Entry:

      +
      {
      +  "timestamp": "2026-01-16T01:47:00Z",
      +  "operation": "secret_retrieve",
      +  "secret": "database-password",
      +  "user": "api-service",
      +  "status": "success",
      +  "ip_address": "127.0.0.1",
      +  "device_id": "device-123"
      +}
      +
      +

      Disaster Recovery

      +

      Backup Procedures

      +
      # Create encrypted backup
      +secretumvault backup create \
      +  --output /secure/vault-backup.enc \
      +  --compression gzip
      +
      +# Verify backup integrity
      +secretumvault backup verify /secure/vault-backup.enc
      +
      +# Restore from backup
      +secretumvault backup restore \
      +  --input /secure/vault-backup.enc \
      +  --verify-checksum
      +
      +

      Recovery Key

      +
      # Generate recovery key (for emergencies)
      +secretumvault recovery-key generate \
      +  --threshold 3 \
      +  --shares 5
      +
      +# Share recovery shards
      +# Share with 5 trusted people, need 3 to recover
      +
      +# Recover using shards
      +secretumvault recovery-key restore \
      +  --shard1 /secure/shard1.key \
      +  --shard2 /secure/shard2.key \
      +  --shard3 /secure/shard3.key
      +
      +

      Performance

      +

      Benchmark Results

      +
      + + + + + +
      OperationTimeAlgorithm
      Store secret50-100msKyber-AES
      Retrieve secret30-50msKyber-AES
      Key rotation200-500msKyber-AES
      Backup 1000 secrets2-3 secondsKyber-AES
      Restore from backup3-5 secondsKyber-AES
      +
      +

      Hardware Acceleration

      +

      With FPGA acceleration:

      +
      + + + +
      OperationNativeFPGASpeedup
      Store secret75ms15ms5x
      Key rotation350ms50ms7x
      Backup 10002.5s0.4s6x
      +
      +

      Troubleshooting

      +

      Cannot Initialize Vault

      +
      # Check permissions
      +ls -la ~/.secretumvault
      +
      +# Clear corrupted state
      +rm ~/.secretumvault/state.lock
      +
      +# Reinitialize
      +secretumvault init --force
      +
      +

      Slow Performance

      +
      # Check algorithm
      +secretumvault config get encryption.cipher
      +
      +# Switch to faster algorithm
      +export SECRETUMVAULT_CIPHER=kyber-aes
      +
      +# Enable hardware acceleration
      +export SECRETUMVAULT_HW_ACCEL=enabled
      +
      +

      Master Key Lost

      +
      # Use recovery key (if available)
      +secretumvault recovery-key restore \
      +  --shard1 ... --shard2 ... --shard3 ...
      +
      +# If no recovery key exists, vault is unrecoverable
      +# Use recent backup instead
      +secretumvault backup restore vault-backup.enc
      +
      +

      Compliance & Standards

      +

      Certifications

      +
        +
      • NIST PQC Standards: CRYSTALS-Kyber, Falcon, SPHINCS+
      • +
      • FIPS 140-3 Ready: Cryptographic module certification path
      • +
      • NIST SP 800-175B: Post-quantum cryptography guidance
      • +
      • EU Cyber Resilience Act: PQC readiness
      • +
      +

      Export Controls

      +

      SecretumVault is subject to cryptography export controls in some jurisdictions. Ensure compliance with local regulations.

      + + +

      Encryption

      +

      Secure Communication

      +

      Certificate Management

      +

      Compliance

      +

      Security Testing

      +

      + Provisioning Logo +

      +

      + Provisioning +

      +

      Development

      +

      Comprehensive guides for developers building extensions, custom providers, plugins, and +integrations on the Provisioning platform.

      +

      Overview

      +

      Provisioning is designed to be extended and customized for specific infrastructure needs. This section provides everything needed to:

      +
        +
      • Build custom cloud providers interfacing with any infrastructure platform via the Provider SDK
      • +
      • Create custom detectors for domain-specific infrastructure analysis and anomaly detection
      • +
      • Develop task services for specialized infrastructure operations beyond built-in services
      • +
      • Write Nushell plugins for high-performance scripting extensions
      • +
      • Integrate external systems via REST APIs and the MCP (Model Context Protocol)
      • +
      • Understand platform internals for daemon architecture, caching, and performance optimization
      • +
      +

      The platform uses modern Rust with async/await, Nushell for scripting, and +Nickel for configuration - all with production-ready code examples.

      +

      Development Guides

      +

      Extension Development

      +
        +
      • +

        Extension Development - Framework for +extensions (providers, task services, plugins, clusters) with type-safety

      • -

        Error Handling: Destructive operations require confirmation

        +

        Custom Provider Development - Build +cloud providers with async Rust, credentials, state, error recovery, testing

      • -

        Composition: Combine recipes for complex workflows

        -
        just auth-login alice && just orch-health && just workflow-list
        -
        +

        Custom Task Services - Specialized service +development for infrastructure operations

        +
      • +
      • +

        Custom Detector Development - Cost, +compliance, performance, security risk detection

        +
      • +
      • +

        Plugin Development - Nushell plugins for +high-performance scripting with FFI bindings

      • -
    -

    Recipe Count

    -
      -
    • Auth: 29 recipes
    • -
    • KMS: 38 recipes
    • -
    • Orchestrator: 56 recipes
    • -
    • Total: 123 recipes
    -

    Documentation

    +

    Platform Internals

      -
    • Full authentication guide: just auth-help
    • -
    • Full KMS guide: just kms-help
    • -
    • Full orchestrator guide: just orch-help
    • -
    • Security system: docs/architecture/adr-009-security-system-complete.md
    • +
    • Provisioning Daemon Internals - +TCP server, connection pooling, caching, metrics, shutdown, 50x speedup
    -
    -

    Quick Start: just helpjust auth-helpjust auth-login <user>just mfa-enroll-totp

    -

    OCI Registry Quick Reference

    -

    Version: 1.0.0 | Date: 2025-10-06

    -
    -

    Prerequisites

    -
    # Install OCI tool (choose one)
    -brew install oras        # Recommended
    -brew install skopeo      # Alternative
    -go install github.com/google/go-containerregistry/cmd/crane@latest  # Alternative
    +

    Integration and APIs

    +
      +
    • +

      API Guide - REST API integration with authentication, +pagination, error handling, rate limiting

      +
    • +
    • +

      Build System - Cargo configuration, feature flags, +dependencies, cross-platform compilation

      +
    • +
    • +

      Testing - Unit, integration, property-based testing, +benchmarking, CI/CD patterns

      +
    • +
    +

    Community

    +
      +
    • Contributing - Guidelines, standards, review +process, licensing
    • +
    +

    Quick Start Paths

    +

    I want to build a custom provider

    +

    Start with Custom Provider Development - includes +template, credential patterns, error handling, tests, and publishing workflow.

    +

    I want to create custom detectors

    +

    See Custom Detector Development - covers analysis frameworks, state tracking, testing, and marketplace distribution.

    +

    I want to extend with Nushell

    +

    Read Plugin Development - FFI bindings, type safety, performance optimization, and integration patterns.

    +

    I want to understand system performance

    +

    Study Provisioning Daemon Internals - architecture, caching strategy, connection pooling, metrics collection.

    +

    I want to integrate external systems

    +

    Check API Guide - REST endpoints, authentication, webhooks, and integration patterns.

    +

    Technology Stack

    +
      +
    • Language: Rust (async/await with Tokio), Nushell (scripting)
    • +
    • Configuration: Nickel (type-safe) + TOML (generated)
    • +
    • Testing: Unit tests, integration tests, property-based tests
    • +
    • Performance: Prometheus metrics, connection pooling, LRU caching
    • +
    • Security: Post-quantum cryptography, type-safety, secure defaults
    • +
    +

    Development Environment

    +

    All development builds with:

    +
    cargo build --release
    +cargo test --all
    +cargo clippy -- -D warnings
     
    -
    -

    Quick Start (5 Minutes)

    -
    # 1. Start local OCI registry
    -provisioning oci-registry start
    -
    -# 2. Login to registry
    -provisioning oci login localhost:5000
    -
    -# 3. Pull an extension
    -provisioning oci pull kubernetes:1.28.0
    -
    -# 4. List available extensions
    -provisioning oci list
    -
    -# 5. Configure workspace to use OCI
    -# Edit: workspace/config/provisioning.yaml
    -# Add OCI dependency configuration
    +
    +
      +
    • For architecture insights → See provisioning/docs/src/architecture/
    • +
    • For API details → See provisioning/docs/src/api-reference/
    • +
    • For examples → See provisioning/docs/src/examples/
    • +
    • For deployment → See provisioning/docs/src/operations/
    • +
    +

    Extension Development

    +

    Creating custom extensions to add providers, task services, and clusters to the Provisioning platform.

    +

    Extension Overview

    +

    Extensions are modular components that extend platform capabilities:

    +
    + + + + +
    Extension TypePurposeImplementationComplexity
    ProvidersCloud infrastructure backendsNushell scripts + Nickel schemasModerate
    Task ServicesInfrastructure componentsNushell installation scriptsSimple
    ClustersComplete deploymentsNickel schemas + orchestrationModerate
    WorkflowsAutomation templatesNickel workflow definitionsSimple
    +
    +

    Extension Structure

    +

    Standard extension directory layout:

    +
    provisioning/extensions/<type>/<name>/
    +├── nickel/
    +│   ├── schema.ncl      # Nickel type definitions
    +│   ├── defaults.ncl    # Default configuration
    +│   └── validation.ncl  # Validation rules
    +├── scripts/
    +│   ├── install.nu      # Installation script
    +│   ├── uninstall.nu    # Removal script
    +│   └── validate.nu     # Validation script
    +├── templates/
    +│   └── config.template # Configuration templates
    +├── tests/
    +│   └── test_*.nu       # Test scripts
    +├── docs/
    +│   └── README.md       # Documentation
    +└── metadata.toml       # Extension metadata
     
    -
    -

    Common Commands

    -

    Extension Discovery

    -
    # List all extensions
    -provisioning oci list
    +

    Extension Metadata

    +

    Every extension requires metadata.toml:

    +
    # metadata.toml
    +[extension]
    +name = "my-provider"
    +type = "provider"
    +version = "1.0.0"
    +description = "Custom cloud provider"
    +author = "Your Name <[email@example.com](mailto:email@example.com)>"
    +license = "MIT"
     
    -# Search for extensions
    -provisioning oci search kubernetes
    +[dependencies]
    +nushell = ">=0.109.0"
    +nickel = ">=1.15.1"
     
    -# Show available versions
    -provisioning oci tags kubernetes
    +[dependencies.extensions]
    +# Other extensions this depends on
    +base-provider = "1.0.0"
     
    -# Inspect extension details
    -provisioning oci inspect kubernetes:1.28.0
    +[capabilities]
    +create_server = true
    +delete_server = true
    +create_network = true
    +
    +[configuration]
    +required_fields = ["api_key", "region"]
    +optional_fields = ["timeout", "retry_attempts"]
     
    -

    Extension Installation

    -
    # Pull specific version
    -provisioning oci pull kubernetes:1.28.0
    -
    -# Pull to custom location
    -provisioning oci pull redis:7.0.0 --destination /path/to/extensions
    -
    -# Pull from custom registry
    -provisioning oci pull postgres:15.0 \
    -  --registry harbor.company.com \
    -  --namespace provisioning-extensions
    +

    Creating a Provider Extension

    +

    Providers implement cloud infrastructure backends.

    +

    Provider Structure

    +
    provisioning/extensions/providers/my-provider/
    +├── nickel/
    +│   ├── schema.ncl
    +│   ├── server.ncl
    +│   └── network.ncl
    +├── scripts/
    +│   ├── create_server.nu
    +│   ├── delete_server.nu
    +│   ├── list_servers.nu
    +│   └── validate.nu
    +├── templates/
    +│   └── server.template
    +├── tests/
    +│   └── test_provider.nu
    +└── metadata.toml
     
    -

    Extension Publishing

    -
    # Login (one-time)
    -provisioning oci login localhost:5000
    +

    Provider Schema (Nickel)

    +
    # nickel/schema.ncl
    +{
    +  Provider = {
    +    name | String,
    +    api_key | String,
    +    region | String,
    +    timeout | default = 30 | Number,
     
    -# Package extension
    -provisioning oci package ./extensions/taskservs/redis
    +    server_config = {
    +      default_plan | default = "medium" | String,
    +      allowed_plans | Array String,
    +    },
    +  },
    +
    +  Server = {
    +    name | String,
    +    plan | String,
    +    zone | String,
    +    hostname | String,
    +    tags | default = [] | Array String,
    +  },
    +}
    +
    +

    Provider Implementation (Nushell)

    +
    # scripts/create_server.nu
    +#!/usr/bin/env nu
    +
    +# Create server using provider API
    +export def main [
    +    config: record  # Provider configuration
    +    server: record  # Server specification
    +] {
    +    # Validate configuration
    +    validate-config $config
    +
    +    # Construct API request
    +    let request = {
    +        name: $server.name
    +        plan: $server.plan
    +        zone: $server.zone
    +    }
    +
    +    # Call provider API
    +    let response = http post $"($config.api_endpoint)/servers" {
    +        headers: {
    +            Authorization: $"Bearer ($config.api_key)"
    +        }
    +        body: ($request | to json)
    +    }
    +
    +    # Return server details
    +    $response | from json
    +}
    +
    +# Validate provider configuration
    +def validate-config [config: record] {
    +    if ($config.api_key | is-empty) {
    +        error make {msg: "api_key is required"}
    +    }
    +
    +    if ($config.region | is-empty) {
    +        error make {msg: "region is required"}
    +    }
    +}
    +
    +

    Provider Interface Contract

    +

    All providers must implement:

    +
    # Required operations
    +create_server    # Create new server
    +delete_server    # Delete existing server
    +get_server       # Get server details
    +list_servers     # List all servers
    +server_status    # Check server status
    +
    +# Optional operations
    +create_network   # Create network
    +delete_network   # Delete network
    +attach_storage   # Attach storage volume
    +create_snapshot  # Create server snapshot
    +
    +

    Creating a Task Service Extension

    +

    Task services are installable infrastructure components.

    +

    Task Service Structure

    +
    provisioning/extensions/taskservs/my-service/
    +├── nickel/
    +│   ├── schema.ncl
    +│   └── defaults.ncl
    +├── scripts/
    +│   ├── install.nu
    +│   ├── uninstall.nu
    +│   ├── health.nu
    +│   └── validate.nu
    +├── templates/
    +│   ├── config.yaml.template
    +│   └── systemd.service.template
    +├── tests/
    +│   └── test_service.nu
    +├── docs/
    +│   └── README.md
    +└── metadata.toml
    +
    +

    Task Service Metadata

    +
    # metadata.toml
    +[extension]
    +name = "my-service"
    +type = "taskserv"
    +version = "2.1.0"
    +description = "Custom infrastructure service"
    +
    +[dependencies.taskservs]
    +# Task services this depends on
    +containerd = ">=1.7.0"
    +kubernetes = ">=1.28.0"
    +
    +[installation]
    +requires_root = true
    +platforms = ["linux"]
    +architectures = ["x86_64", "aarch64"]
    +
    +[health_check]
    +enabled = true
    +endpoint = " [http://localhost:8000/health"](http://localhost:8000/health")
    +interval = 30
    +timeout = 5
    +
    +

    Task Service Installation Script

    +
    # scripts/install.nu
    +#!/usr/bin/env nu
    +
    +export def main [
    +    config: record  # Service configuration
    +    server: record  # Target server details
    +] {
    +    print "Installing my-service..."
    +
    +    # Download binaries
    +    let version = $config.version? | default "latest"
    +    download-binary $version
    +
    +    # Install systemd service
    +    install-systemd-service $config
    +
    +    # Configure service
    +    generate-config $config
    +
    +    # Start service
    +    start-service
    +
    +    # Verify installation
    +    verify-installation
    +
    +    print "Installation complete"
    +}
    +
    +def download-binary [version: string] {
    +    let url = $" [https://github.com/org/my-service/releases/download/($versio](https://github.com/org/my-service/releases/download/($versio)n)/my-service"
    +    http get $url | save /usr/local/bin/my-service
    +    chmod +x /usr/local/bin/my-service
    +}
    +
    +def install-systemd-service [config: record] {
    +    let template = open ../templates/systemd.service.template
    +    let rendered = $template | str replace --all "{{VERSION}}" $config.version
    +    $rendered | save /etc/systemd/system/my-service.service
    +    systemctl daemon-reload
    +}
    +
    +def start-service [] {
    +    systemctl enable my-service
    +    systemctl start my-service
    +}
    +
    +def verify-installation [] {
    +    let status = systemctl is-active my-service
    +    if $status != "active" {
    +        error make {msg: "Service failed to start"}
    +    }
    +
    +    # Health check
    +    sleep 5sec
    +    let health = http get  [http://localhost:8000/health](http://localhost:8000/health)
    +    if $health.status != "healthy" {
    +        error make {msg: "Health check failed"}
    +    }
    +}
    +
    +

    Creating a Cluster Extension

    +

    Clusters combine servers and task services into complete deployments.

    +

    Cluster Schema

    +
    # nickel/schema.ncl
    +{
    +  Cluster = {
    +    metadata = {
    +      name | String,
    +      provider | String,
    +      environment | default = "production" | String,
    +    },
    +
    +    infrastructure = {
    +      servers | Array {
    +        name | String,
    +        role | | [ "control", "worker", "storage" | ],
    +        plan | String,
    +      },
    +    },
    +
    +    services = {
    +      taskservs | Array String,
    +      order | default = [] | Array String,
    +    },
    +
    +    networking = {
    +      private_network | default = true | Bool,
    +      cidr | default = "10.0.0.0/16" | String,
    +    },
    +  },
    +}
    +
    +

    Cluster Definition Example

    +
    # clusters/kubernetes-ha.ncl
    +{
    +  metadata.name = "k8s-ha-cluster",
    +  metadata.provider = "upcloud",
    +
    +  infrastructure.servers = [
    +    {name = "control-01", role = "control", plan = "large"},
    +    {name = "control-02", role = "control", plan = "large"},
    +    {name = "control-03", role = "control", plan = "large"},
    +    {name = "worker-01", role = "worker", plan = "xlarge"},
    +    {name = "worker-02", role = "worker", plan = "xlarge"},
    +  ],
    +
    +  services.taskservs = ["containerd", "etcd", "kubernetes", "cilium"],
    +  services.order = ["containerd", "etcd", "kubernetes", "cilium"],
    +
    +  networking.private_network = true,
    +  networking.cidr = "10.100.0.0/16",
    +}
    +
    +

    Extension Testing

    +

    Test Structure

    +
    # tests/test_provider.nu
    +use std assert
    +
    +# Test provider configuration validation
    +export def test_validate_config [] {
    +    let valid_config = {
    +        api_key: "test-key"
    +        region: "us-east-1"
    +    }
    +
    +    let result = validate-config $valid_config
    +    assert equal $result.valid true
    +}
    +
    +# Test server creation
    +export def test_create_server [] {
    +    let config = load-test-config
    +    let server_spec = {
    +        name: "test-server"
    +        plan: "medium"
    +        zone: "us-east-1a"
    +    }
    +
    +    let result = create-server $config $server_spec
    +    assert equal $result.status "created"
    +}
    +
    +# Run all tests
    +export def main [] {
    +    test_validate_config
    +    test_create_server
    +    print "All tests passed"
    +}
    +
    +

    Run tests:

    +
    # Test extension
    +provisioning extension test my-provider
    +
    +# Test specific component
    +nu tests/test_provider.nu
    +
    +

    Extension Packaging

    +

    OCI Registry Publishing

    +

    Package and publish extension:

    +
    # Build extension package
    +provisioning extension build my-provider
    +
    +# Validate package
    +provisioning extension validate my-provider-1.0.0.tar.gz
     
     # Publish to registry
    -provisioning oci push ./extensions/taskservs/redis redis 1.0.0
    -
    -# Verify publication
    -provisioning oci tags redis
    +provisioning extension publish my-provider-1.0.0.tar.gz \
    +  --registry registry.example.com
     
    -

    Dependency Management

    -
    # Resolve all dependencies
    -provisioning dep resolve
    -
    -# Check for updates
    -provisioning dep check-updates
    -
    -# Update specific extension
    -provisioning dep update kubernetes
    -
    -# Show dependency tree
    -provisioning dep tree kubernetes
    -
    -# Validate dependencies
    -provisioning dep validate
    +

    Package structure:

    +
    my-provider-1.0.0.tar.gz
    +├── metadata.toml
    +├── nickel/
    +├── scripts/
    +├── templates/
    +├── tests/
    +├── docs/
    +└── manifest.json
     
    -
    -

    Configuration Templates

    -

    Workspace OCI Configuration

    -

    File: workspace/config/provisioning.yaml

    -
    dependencies:
    -  extensions:
    -    source_type: "oci"
    +

    Extension Installation

    +

    Install extension from registry:

    +
    # Install from OCI registry
    +provisioning extension install my-provider --version 1.0.0
     
    -    oci:
    -      registry: "localhost:5000"
    -      namespace: "provisioning-extensions"
    -      tls_enabled: false
    -      auth_token_path: "~/.provisioning/tokens/oci"
    +# Install from local file
    +provisioning extension install ./my-provider-1.0.0.tar.gz
     
    -    modules:
    -      providers:
    -        - "oci://localhost:5000/provisioning-extensions/aws:2.0.0"
    +# List installed extensions
    +provisioning extension list
     
    -      taskservs:
    -        - "oci://localhost:5000/provisioning-extensions/kubernetes:1.28.0"
    -        - "oci://localhost:5000/provisioning-extensions/containerd:1.7.0"
    +# Update extension
    +provisioning extension update my-provider --version 1.1.0
     
    -      clusters:
    -        - "oci://localhost:5000/provisioning-extensions/buildkit:0.12.0"
    +# Uninstall extension
    +provisioning extension uninstall my-provider
     
    -

    Extension Manifest

    -

    File: extensions/{type}/{name}/manifest.yaml

    -
    name: redis
    -type: taskserv
    -version: 1.0.0
    -description: Redis in-memory data store
    -author: Your Name
    -license: MIT
    -
    -dependencies:
    -  os: ">=1.0.0"
    -
    -tags:
    -  - database
    -  - cache
    -
    -platforms:
    -  - linux/amd64
    -
    -min_provisioning_version: "3.0.0"
    -
    -
    -

    Extension Development Workflow

    -
    # 1. Create extension
    -provisioning generate extension taskserv redis
    -
    -# 2. Develop extension
    -# Edit files in extensions/taskservs/redis/
    -
    -# 3. Test locally
    -provisioning module load taskserv workspace_dev redis --source local
    -provisioning taskserv create redis --infra test --check
    -
    -# 4. Validate structure
    -provisioning oci package validate ./extensions/taskservs/redis
    -
    -# 5. Package
    -provisioning oci package ./extensions/taskservs/redis
    -
    -# 6. Publish
    -provisioning oci push ./extensions/taskservs/redis redis 1.0.0
    -
    -# 7. Verify
    -provisioning oci inspect redis:1.0.0
    -
    -
    -

    Registry Management

    -

    Local Registry (Development)

    -
    # Start
    -provisioning oci-registry start
    -
    -# Stop
    -provisioning oci-registry stop
    -
    -# Status
    -provisioning oci-registry status
    -
    -# Endpoint: localhost:5000
    -# Storage: ~/.provisioning/oci-registry/
    -
    -

    Remote Registry (Production)

    -
    # Login to Harbor
    -provisioning oci login harbor.company.com --username admin
    -
    -# Configure in workspace
    -# Edit workspace/config/provisioning.yaml:
    -# dependencies:
    -#   registry:
    -#     oci:
    -#       endpoint: "https://harbor.company.com"
    -#       tls_enabled: true
    -
    -
    -

    Migration from Monorepo

    -
    # 1. Dry-run migration (preview)
    -provisioning migrate-to-oci workspace_dev --dry-run
    -
    -# 2. Migrate with publishing
    -provisioning migrate-to-oci workspace_dev --publish
    -
    -# 3. Validate migration
    -provisioning validate-migration workspace_dev
    -
    -# 4. Generate report
    -provisioning migration-report workspace_dev
    -
    -# 5. Rollback if needed
    -provisioning rollback-migration workspace_dev
    -
    -
    -

    Troubleshooting

    -

    Registry Not Running

    -
    # Check if registry is running
    -curl http://localhost:5000/v2/_catalog
    -
    -# Start if not running
    -provisioning oci-registry start
    -
    -

    Authentication Failed

    -
    # Login again
    -provisioning oci login localhost:5000
    -
    -# Or use token file
    -echo "your-token" > ~/.provisioning/tokens/oci
    -
    -

    Extension Not Found

    -
    # Check registry connection
    -provisioning oci config
    -
    -# List available extensions
    -provisioning oci list
    -
    -# Check namespace
    -provisioning oci list --namespace provisioning-extensions
    -
    -

    Dependency Resolution Failed

    -
    # Validate dependencies
    -provisioning dep validate
    -
    -# Show dependency tree
    -provisioning dep tree kubernetes
    -
    -# Check for updates
    -provisioning dep check-updates
    -
    -
    -

    Best Practices

    -

    Versioning

    -

    DO: Use semantic versioning (MAJOR.MINOR.PATCH)

    -
    version: 1.2.3
    -
    -

    DON’T: Use arbitrary versions

    -
    version: latest  # Unpredictable
    -
    -

    Dependencies

    -

    DO: Specify version constraints

    -
    dependencies:
    -  containerd: ">=1.7.0"
    -  etcd: "^3.5.0"
    -
    -

    DON’T: Use wildcards

    -
    dependencies:
    -  containerd: "*"  # Too permissive
    -
    -

    Security

    -

    DO:

    +

    Best Practices

      -
    • Use TLS for production registries
    • -
    • Rotate authentication tokens
    • -
    • Scan for vulnerabilities
    • +
    • Follow naming conventions: lowercase with hyphens
    • +
    • Version extensions semantically (semver)
    • +
    • Document all configuration options
    • +
    • Provide comprehensive tests
    • +
    • Include usage examples in docs
    • +
    • Validate input parameters
    • +
    • Handle errors gracefully
    • +
    • Log important operations
    • +
    • Support idempotent operations
    • +
    • Keep dependencies minimal
    -

    DON’T:

    + -
    -

    Common Patterns

    -

    Pull and Install

    -
    # Pull extension
    -provisioning oci pull kubernetes:1.28.0
    +

    Provider Development

    +

    Implementing custom cloud provider integrations for the Provisioning platform.

    +

    Provider Architecture

    +

    Providers abstract cloud infrastructure APIs through a unified interface, allowing infrastructure definitions to be portable across clouds.

    +

    Provider Interface

    +

    All providers must implement these core operations:

    +
    # Server lifecycle
    +create_server     # Provision new server
    +delete_server     # Remove server
    +get_server        # Fetch server details
    +list_servers      # List all servers
    +update_server     # Modify server configuration
    +server_status     # Get current state
     
    -# Resolve dependencies (auto-installs)
    -provisioning dep resolve
    +# Network operations (optional)
    +create_network    # Create private network
    +delete_network    # Remove network
    +attach_network    # Attach server to network
     
    -# Use extension
    -provisioning taskserv create kubernetes
    +# Storage operations (optional)
    +attach_volume     # Attach storage volume
    +detach_volume     # Detach storage volume
    +create_snapshot   # Snapshot server disk
     
    -

    Update Extensions

    -
    # Check for updates
    -provisioning dep check-updates
    +

    Provider Template

    +

    Use the official provider template:

    +
    # Generate provider scaffolding
    +provisioning generate provider --name my-cloud --template standard
     
    -# Update specific extension
    -provisioning dep update kubernetes
    +# Creates:
    +# extensions/providers/my-cloud/
    +# ├── nickel/
    +# │   ├── schema.ncl
    +# │   ├── server.ncl
    +# │   └── network.ncl
    +# ├── scripts/
    +# │   ├── create_server.nu
    +# │   ├── delete_server.nu
    +# │   └── list_servers.nu
    +# └── metadata.toml
    +
    +

    Provider Schema (Nickel)

    +

    Define provider configuration schema:

    +
    # nickel/schema.ncl
    +{
    +  ProviderConfig = {
    +    name | String,
    +    api_endpoint | String,
    +    api_key | String,
    +    region | String,
    +    timeout | default = 30 | Number,
    +    retry_attempts | default = 3 | Number,
     
    -# Update all
    -provisioning dep resolve --update
    -
    -

    Copy Between Registries

    -
    # Copy from local to production
    -provisioning oci copy \
    -  localhost:5000/provisioning-extensions/kubernetes:1.28.0 \
    -  harbor.company.com/provisioning/kubernetes:1.28.0
    -
    -

    Publish Multiple Extensions

    -
    # Publish all taskservs
    -for dir in (ls extensions/taskservs); do
    -  provisioning oci push $dir.name $dir.name 1.0.0
    -done
    -
    -
    -

    Environment Variables

    -
    # Override registry
    -export PROVISIONING_OCI_REGISTRY="harbor.company.com"
    +    plans = {
    +      small  = {cpu = 2, memory = 4096, disk = 25},
    +      medium = {cpu = 4, memory = 8192, disk = 50},
    +      large  = {cpu = 8, memory = 16384, disk = 100},
    +    },
     
    -# Override namespace
    -export PROVISIONING_OCI_NAMESPACE="my-extensions"
    +    regions | Array String,
    +  },
     
    -# Set auth token
    -export PROVISIONING_OCI_TOKEN="your-token-here"
    +  ServerSpec = {
    +    name | String,
    +    plan | String,
    +    zone | String,
    +    image | default = "ubuntu-24.04" | String,
    +    ssh_keys | Array String,
    +    user_data | default = "" | String,
    +  },
    +}
     
    -
    -

    File Locations

    -
    ~/.provisioning/
    -├── oci-cache/              # OCI artifact cache
    -├── oci-registry/           # Local Zot registry data
    -└── tokens/
    -    └── oci                 # OCI auth token
    +

    Implementing Server Creation

    +

    Create server implementation:

    +
    # scripts/create_server.nu
    +#!/usr/bin/env nu
     
    -workspace/
    -├── config/
    -│   └── provisioning.yaml   # OCI configuration
    -└── extensions/             # Installed extensions
    -    ├── providers/
    -    ├── taskservs/
    -    └── clusters/
    +export def main [
    +    config: record,  # Provider configuration
    +    spec: record     # Server specification
    +]: nothing -> record {
    +    # Validate inputs
    +    validate-provider-config $config
    +    validate-server-spec $spec
    +
    +    # Map plan to provider-specific values
    +    let plan = get-plan-details $config $spec.plan
    +
    +    # Construct API request
    +    let request = {
    +        hostname: $spec.name
    +        plan: $plan.name
    +        zone: $spec.zone
    +        storage_devices: [{
    +            action: "create"
    +            storage: $plan.disk
    +            title: "root"
    +        }]
    +        login: {
    +            user: "root"
    +            keys: $spec.ssh_keys
    +        }
    +    }
    +
    +    # Call provider API with retry logic
    +    let server = retry-api-call | { |
    +        http post $"($config.api_endpoint)/server" {
    +            headers: {Authorization: $"Bearer ($config.api_key)"}
    +            body: ($request | to json)
    +        } | from json
    +    } $config.retry_attempts
    +
    +    # Wait for server to be ready
    +    wait-for-server-ready $config $server.uuid
    +
    +    # Return server details
    +    {
    +        id: $server.uuid
    +        name: $server.hostname
    +        ip_address: $server.ip_addresses.0.address
    +        status: "running"
    +        provider: $config.name
    +    }
    +}
    +
    +def validate-provider-config [config: record] {
    +    if ($config.api_key | is-empty) {
    +        error make {msg: "API key required"}
    +    }
    +    if ($config.region | is-empty) {
    +        error make {msg: "Region required"}
    +    }
    +}
    +
    +def get-plan-details [config: record, plan_name: string]: nothing -> record {
    +    $config.plans | get $plan_name
    +}
    +
    +def retry-api-call [operation: closure, max_attempts: int]: nothing -> any {
    +    mut attempt = 1
    +    mut last_error = null
    +
    +    while $attempt <= $max_attempts {
    +        try {
    +            return (do $operation)
    +        } catch | { err |
    +            $last_error = $err
    +            if $attempt < $max_attempts {
    +                sleep (1sec * $attempt)  # Exponential backoff
    +                $attempt = $attempt + 1
    +            }
    +        }
    +    }
    +
    +    error make {msg: $"API call failed after ($max_attempts) attempts: ($last_error)"}
    +}
    +
    +def wait-for-server-ready [config: record, server_id: string] {
    +    mut ready = false
    +    mut attempts = 0
    +    let max_wait = 120  # 2 minutes
    +
    +    while not $ready and $attempts < $max_wait {
    +        let status = http get $"($config.api_endpoint)/server/($server_id)" {
    +            headers: {Authorization: $"Bearer ($config.api_key)"}
    +        } | from json
    +
    +        if $status.state == "started" {
    +            $ready = true
    +        } else {
    +            sleep 1sec
    +            $attempts = $attempts + 1
    +        }
    +    }
    +
    +    if not $ready {
    +        error make {msg: "Server failed to start within timeout"}
    +    }
    +}
     
    -
    - +

    Provider Testing

    +

    Comprehensive provider testing:

    +
    # tests/test_provider.nu
    +use std assert
    +
    +export def test_create_server [] {
    +    # Mock provider config
    +    let config = {
    +        name: "test-cloud"
    +        api_endpoint: " [http://localhost:8080"](http://localhost:8080")
    +        api_key: "test-key"
    +        region: "test-region"
    +        plans: {
    +            small: {cpu: 2, memory: 4096, disk: 25}
    +        }
    +    }
    +
    +    # Mock server spec
    +    let spec = {
    +        name: "test-server"
    +        plan: "small"
    +        zone: "test-zone"
    +        ssh_keys: ["ssh-rsa AAAA..."]
    +    }
    +
    +    # Test server creation
    +    let server = create-server $config $spec
    +
    +    assert ($server.id != null)
    +    assert ($server.name == "test-server")
    +    assert ($server.status == "running")
    +}
    +
    +export def test_list_servers [] {
    +    let config = load-test-config
    +    let servers = list-servers $config
    +
    +    assert ($servers | length) > 0
    +}
    +
    +export def main [] {
    +    print "Running provider tests..."
    +    test_create_server
    +    test_list_servers
    +    print "All tests passed!"
    +}
    +
    +

    Error Handling

    +

    Robust error handling for provider operations:

    +
    # Handle API errors gracefully
    +def handle-api-error [error: record]: nothing -> record {
    +    match $error.status {
    +        401 => {error make {msg: "Authentication failed - check API key"}}
    +        403 => {error make {msg: "Permission denied - insufficient privileges"}}
    +        404 => {error make {msg: "Resource not found"}}
    +        429 => {error make {msg: "Rate limit exceeded - retry later"}}
    +        500 => {error make {msg: "Provider API error - contact support"}}
    +        _   => {error make {msg: $"Unknown error: ($error.message)"}}
    +    }
    +}
    +
    +

    Provider Best Practices

      -
    • OCI Registry Guide - Complete user guide
    • -
    • Multi-Repo Architecture - Architecture details
    • -
    • Implementation Summary - Technical details
    • +
    • Implement idempotent operations where possible
    • +
    • Handle rate limiting with exponential backoff
    • +
    • Validate all inputs before API calls
    • +
    • Log all API requests and responses (without secrets)
    • +
    • Use connection pooling for better performance
    • +
    • Cache provider capabilities and quotas
    • +
    • Implement proper timeout handling
    • +
    • Return consistent error messages
    • +
    • Test against provider sandbox/staging environment
    • +
    • Version provider schemas carefully
    -
    -

    Quick Help: provisioning oci --help | provisioning dep --help

    -

    Sudo Password Handling - Quick Reference

    -

    When Sudo is Required

    -

    Sudo password is needed when fix_local_hosts: true in your server configuration. This modifies:

    +
      -
    • /etc/hosts - Maps server hostnames to IP addresses
    • -
    • ~/.ssh/config - Adds SSH connection shortcuts
    • +
    • Extension Development - Extension basics
    • +
    • API Guide - REST API patterns
    • +
    • Testing - Testing strategies
    -

    Quick Solutions

    -

    ✅ Best: Cache Credentials First

    -
    sudo -v && provisioning -c server create
    -
    -

    Credentials cached for 5 minutes, no prompts during operation.

    -

    ✅ Alternative: Disable Host Fixing

    -
    # In your settings.ncl or server config
    -fix_local_hosts = false
    -
    -

    No sudo required, manual /etc/hosts management.

    -

    ✅ Manual: Enter Password When Prompted

    -
    provisioning -c server create
    -# Enter password when prompted
    -# Or press CTRL-C to cancel
    -
    -

    CTRL-C Handling

    -

    CTRL-C Behavior

    -

    IMPORTANT: Pressing CTRL-C at the sudo password prompt will interrupt the entire operation due to how Unix signals work. This is expected -behavior and cannot be caught by Nushell.

    -

    When you press CTRL-C at the password prompt:

    -
    Password: [CTRL-C]
    -
    -Error: nu::shell::error
    -  × Operation interrupted
    -
    -

    Why this happens: SIGINT (CTRL-C) is sent to the entire process group, including Nushell itself. The signal propagates before exit code handling -can occur.

    -

    Graceful Handling (Non-CTRL-C Cancellation)

    -

    The system does handle these cases gracefully:

    -

    No password provided (just press Enter):

    -
    Password: [Enter]
    -
    -⚠ Operation cancelled - sudo password required but not provided
    -ℹ Run 'sudo -v' first to cache credentials, or run without --fix-local-hosts
    -
    -

    Wrong password 3 times:

    -
    Password: [wrong]
    -Password: [wrong]
    -Password: [wrong]
    -
    -⚠ Operation cancelled - sudo password required but not provided
    -ℹ Run 'sudo -v' first to cache credentials, or run without --fix-local-hosts
    -
    - -

    To avoid password prompts entirely:

    -
    # Best: Pre-cache credentials (lasts 5 minutes)
    -sudo -v && provisioning -c server create
    -
    -# Alternative: Disable host modification
    -# Set fix_local_hosts = false in your server config
    -
    -

    Common Commands

    -
    # Cache sudo for 5 minutes
    -sudo -v
    -
    -# Check if cached
    -sudo -n true && echo "Cached" || echo "Not cached"
    -
    -# Create alias for convenience
    -alias prvng='sudo -v && provisioning'
    -
    -# Use the alias
    -prvng -c server create
    -
    -

    Troubleshooting

    -
    - - - - - +

    Plugin Development

    +

    Developing Nushell plugins for performance-critical operations in the Provisioning platform.

    +

    Plugin Overview

    +

    Nushell plugins provide 10-50x performance improvement over HTTP APIs through native Rust implementations.

    +

    Available Plugins

    +
    IssueSolution
    “Password required” errorRun sudo -v first
    CTRL-C doesn’t work cleanlyUpdate to latest version
    Too many password promptsSet fix_local_hosts = false
    Sudo not availableMust disable fix_local_hosts
    Wrong password 3 timesRun sudo -k to reset, then sudo -v
    + + +
    PluginPurposePerformance GainLanguage
    nu_plugin_authAuthentication and OS keyring5x fasterRust
    nu_plugin_kmsKMS encryption operations10x fasterRust
    nu_plugin_orchestratorOrchestrator queries30x fasterRust
    -

    Environment-Specific Settings

    -

    Development (Local)

    -
    fix_local_hosts = true  # Convenient for local testing
    +

    Plugin Architecture

    +

    Plugins communicate with Nushell via MessagePack protocol:

    +
    Nushell ←→ MessagePack ←→ Plugin Process
    +  ↓                           ↓
    +Script                    Native Rust
     
    -

    CI/CD (Automation)

    -
    fix_local_hosts = false  # No interactive prompts
    +

    Creating a Plugin

    +

    Plugin Template

    +

    Generate plugin scaffold:

    +
    # Create new plugin
    +cargo new --lib nu_plugin_myfeature
    +cd nu_plugin_myfeature
     
    -

    Production (Servers)

    -
    fix_local_hosts = false  # Managed by configuration management
    +

    Add dependencies to Cargo.toml:

    +
    [package]
    +name = "nu_plugin_myfeature"
    +version = "0.1.0"
    +edition = "2021"
    +
    +[dependencies]
    +nu-plugin = "0.109.0"
    +nu-protocol = "0.109.0"
    +serde = {version = "1.0", features = ["derive"]}
     
    -

    What fix_local_hosts Does

    -

    When enabled:

    +

    Plugin Implementation

    +

    Implement plugin interface:

    +
    // src/lib.rs
    +use nu_plugin::{EvaluatedCall, LabeledError, Plugin};
    +use nu_protocol::{Category, PluginSignature, SyntaxShape, Type, Value};
    +
    +pub struct MyFeaturePlugin;
    +
    +impl Plugin for MyFeaturePlugin {
    +    fn signature(&self) -> Vec<PluginSignature> {
    +        vec![
    +            PluginSignature::build("my-feature")
    +                .usage("Perform my feature operation")
    +                .required("input", SyntaxShape::String, "input value")
    +                .input_output_type(Type::String, Type::String)
    +                .category(Category::Custom("provisioning".into())),
    +        ]
    +    }
    +
    +    fn run(
    +        &mut self,
    +        name: &str,
    +        call: &EvaluatedCall,
    +        input: &Value,
    +    ) -> Result<Value, LabeledError> {
    +        match name {
    +            "my-feature" => self.my_feature(call, input),
    +            _ => Err(LabeledError {
    +                label: "Unknown command".into(),
    +                msg: format!("Unknown command: {}", name),
    +                span: None,
    +            }),
    +        }
    +    }
    +}
    +
    +impl MyFeaturePlugin {
    +    fn my_feature(&self, call: &EvaluatedCall, _input: &Value) -> Result<Value, LabeledError> {
    +        let input: String = call.req(0)?;
    +
    +        // Perform operation
    +        let result = perform_operation(&input);
    +
    +        Ok(Value::string(result, call.head))
    +    }
    +}
    +
    +fn perform_operation(input: &str) -> String {
    +    // Your implementation here
    +    format!("Processed: {}", input)
    +}
    +
    +// Plugin entry point
    +fn main() {
    +    nu_plugin::serve_plugin(&mut MyFeaturePlugin, nu_plugin::MsgPackSerializer {})
    +}
    +

    Building Plugin

    +
    # Build release version
    +cargo build --release
    +
    +# Install plugin
    +nu -c 'plugin add target/release/nu_plugin_myfeature'
    +nu -c 'plugin use myfeature'
    +
    +# Test plugin
    +nu -c 'my-feature "test input"'
    +
    +

    Plugin Performance Optimization

    +

    Benchmarking

    +
    use std::time::Instant;
    +
    +pub fn benchmark_operation() {
    +    let start = Instant::now();
    +
    +    // Operation to benchmark
    +    perform_expensive_operation();
    +
    +    let duration = start.elapsed();
    +    eprintln!("Operation took: {:?}", duration);
    +}
    +

    Caching

    +

    Implement caching for expensive operations:

    +
    use std::collections::HashMap;
    +use std::sync::{Arc, Mutex};
    +
    +pub struct CachedPlugin {
    +    cache: Arc<Mutex<HashMap<String, String>>>,
    +}
    +
    +impl CachedPlugin {
    +    fn get_or_compute(&self, key: &str) -> String {
    +        let mut cache = self.cache.lock().unwrap();
    +
    +        if let Some(value) = cache.get(key) {
    +            return value.clone();
    +        }
    +
    +        let value = expensive_computation(key);
    +        cache.insert(key.to_string(), value.clone());
    +        value
    +    }
    +}
    +

    Testing Plugins

    +

    Unit Tests

    +
    #[cfg(test)]
    +mod tests {
    +    use super::*;
    +    use nu_protocol::{Span, Value};
    +
    +    #[test]
    +    fn test_my_feature() {
    +        let plugin = MyFeaturePlugin;
    +        let input = Value::string("test", Span::test_data());
    +        let result = plugin.my_feature(&mock_call(), &input).unwrap();
    +
    +        assert_eq!(result.as_string().unwrap(), "Processed: test");
    +    }
    +
    +    fn mock_call() -> EvaluatedCall {
    +        // Mock EvaluatedCall for testing
    +        todo!()
    +    }
    +}
    +

    Integration Tests

    +
    # tests/test_plugin.nu
    +use std assert
    +
    +def test_plugin_functionality [] {
    +    let result = my-feature "test input"
    +    assert equal $result "Processed: test input"
    +}
    +
    +def main [] {
    +    test_plugin_functionality
    +    print "Plugin tests passed"
    +}
    +
    +

    Plugin Best Practices

    +
      +
    • Keep plugin logic focused and single-purpose
    • +
    • Minimize dependencies to reduce binary size
    • +
    • Use async operations for I/O-bound tasks
    • +
    • Implement proper error handling
    • +
    • Document all plugin commands
    • +
    • Version plugins with semantic versioning
    • +
    • Provide fallback to HTTP API if plugin unavailable
    • +
    • Cache expensive computations
    • +
    • Profile and benchmark performance improvements
    • +
    + + +

    API Integration Guide

    +

    Integrate third-party APIs with Provisioning infrastructure.

    +

    API Client Development

    +

    Create clients for external APIs:

    +
    // src/api_client.rs
    +use reqwest::Client;
    +
    +pub struct ApiClient {
    +    endpoint: String,
    +    api_key: String,
    +    client: Client,
    +}
    +
    +impl ApiClient {
    +    pub async fn call(&self, path: &str) -> Result<Response> {
    +        let url = format!("{}{}", self.endpoint, path);
    +        self.client
    +            .get(&url)
    +            .bearer_auth(&self.api_key)
    +            .send()
    +            .await
    +    }
    +}
    +

    Webhook Integration

    +

    Handle webhooks from external systems:

    +
    #[post("/webhooks/{service}")]
    +pub async fn handle_webhook(path: web::Path<String>, body: web::Bytes) -> impl Responder {
    +    let service = path.into_inner();
    +    match service.as_str() {
    +        "github" => handle_github_webhook(&body),
    +        "stripe" => handle_stripe_webhook(&body),
    +        _ => HttpResponse::NotFound().finish(),
    +    }
    +}
    +

    Error Handling

    +

    Robust error handling for API calls with retries:

    +
    pub async fn call_api_with_retry(
    +    client: &ApiClient,
    +    path: &str,
    +    max_retries: u32,
    +) -> Result<Response> {
    +    for attempt in 0..max_retries {
    +        match client.call(path).await {
    +            Ok(response) => return Ok(response),
    +            Err(e) if attempt < max_retries - 1 => {
    +                let delay = Duration::from_secs(2_u64.pow(attempt));
    +                tokio::time::sleep(delay).await;
    +            }
    +            Err(e) => return Err(e),
    +        }
    +    }
    +    Err(ApiError::MaxRetriesExceeded.into())
    +}
    + + +

    Build System

    +

    Building, testing, and packaging the Provisioning platform and extensions with Cargo, Just, and Nickel.

    +

    Build Tools

    +
    + + + + +
    ToolPurposeVersion Required
    CargoRust compilation and testingLatest stable
    JustTask runner for common operationsLatest
    NickelSchema validation and type checking1.15.1+
    NushellScript execution and testing0.109.0+
    +
    +

    Building Platform Services

    +

    Build All Services

    +
    # Build all Rust services in release mode
    +cd provisioning/platform
    +cargo build --release --workspace
    +
    +# Or using just task runner
    +just build-platform
    +
    +

    Binary outputs in target/release/:

    +
      +
    • provisioning-orchestrator
    • +
    • provisioning-control-center
    • +
    • provisioning-vault-service
    • +
    • provisioning-installer
    • +
    +

    Build Individual Service

    +
    # Orchestrator service
    +cd provisioning/platform/crates/orchestrator
    +cargo build --release
    +
    +# Control Center service
    +cd provisioning/platform/crates/control-center
    +cargo build --release
    +
    +# Development build (faster compilation)
    +cargo build
    +
    +

    Testing

    +

    Run All Tests

    +
    # Rust unit and integration tests
    +cargo test --workspace
    +
    +# Nushell script tests
    +just test-nushell
    +
    +# Complete test suite
    +just test-all
    +
    +

    Test Specific Component

    +
    # Test orchestrator crate
    +cargo test -p provisioning-orchestrator
    +
    +# Test with output visible
    +cargo test -p provisioning-orchestrator -- --nocapture
    +
    +# Test specific function
    +cargo test -p provisioning-orchestrator test_workflow_creation
    +
    +# Run tests matching pattern
    +cargo test workflow
    +
    +

    Security Tests

    +
    # Run 350+ security test cases
    +cargo test -p security --test '*'
    +
    +# Specific security component
    +cargo test -p security authentication
    +cargo test -p security authorization
    +cargo test -p security kms
    +
    +

    Code Quality

    +

    Formatting

    +
    # Format all Rust code
    +cargo fmt --all
    +
    +# Check formatting without modifying
    +cargo fmt --all -- --check
    +
    +# Format Nickel schemas
    +nickel fmt provisioning/schemas/**/*.ncl
    +
    +

    Linting

    +
    # Run Clippy linter
    +cargo clippy --all -- -D warnings
    +
    +# Auto-fix Clippy warnings
    +cargo clippy --all --fix
    +
    +# Clippy with all features enabled
    +cargo clippy --all --all-features -- -D warnings
    +
    +

    Nickel Validation

    +
    # Type check Nickel schemas
    +nickel typecheck provisioning/schemas/main.ncl
    +
    +# Evaluate schema
    +nickel eval provisioning/schemas/main.ncl
    +
    +# Format Nickel files
    +nickel fmt provisioning/schemas/**/*.ncl
    +
    +

    Continuous Integration

    +

    The platform uses automated CI workflows for quality assurance.

    +

    GitHub Actions Pipeline

    +

    Key CI jobs:

    +
    1. Rust Build and Test
    +   - cargo build --release --workspace
    +   - cargo test --workspace
    +   - cargo clippy --all -- -D warnings
    +
    +2. Nushell Validation
    +   - nu --check core/cli/provisioning
    +   - Run Nushell test suite
    +
    +3. Nickel Schema Validation
    +   - nickel typecheck schemas/main.ncl
    +   - Validate all schema files
    +
    +4. Security Tests
    +   - Run 350+ security test cases
    +   - Vulnerability scanning
    +
    +5. Documentation Build
    +   - mdbook build docs
    +   - Markdown linting
    +
    +

    Packaging and Distribution

    +

    Create Release Package

    +
    # Build optimized binaries
    +cargo build --release --workspace
    +
    +# Strip debug symbols (reduce binary size)
    +strip target/release/provisioning-orchestrator
    +strip target/release/provisioning-control-center
    +
    +# Create distribution archive
    +just package
    +
    +

    Package Structure

    +
    provisioning-5.0.0-linux-x86_64.tar.gz
    +├── bin/
    +│   ├── provisioning                    # Main CLI
    +│   ├── provisioning-orchestrator       # Orchestrator service
    +│   ├── provisioning-control-center     # Control Center
    +│   ├── provisioning-vault-service      # Vault service
    +│   └── provisioning-installer          # Platform installer
    +├── lib/
    +│   └── nulib/                          # Nushell libraries
    +├── schemas/                            # Nickel schemas
    +├── config/
    +│   └── config.defaults.toml            # Default configuration
    +├── systemd/
    +│   └── *.service                       # Systemd unit files
    +└── README.md
    +
    +

    Cross-Platform Builds

    +

    Supported Targets

    +
    # Linux x86_64 (primary platform)
    +cargo build --release --target x86_64-unknown-linux-gnu
    +
    +# Linux ARM64 (Raspberry Pi, cloud ARM instances)
    +cargo build --release --target aarch64-unknown-linux-gnu
    +
    +# macOS x86_64
    +cargo build --release --target x86_64-apple-darwin
    +
    +# macOS ARM64 (Apple Silicon)
    +cargo build --release --target aarch64-apple-darwin
    +
    +

    Cross-Compilation Setup

    +
    # Add target architectures
    +rustup target add x86_64-unknown-linux-gnu
    +rustup target add aarch64-unknown-linux-gnu
    +
    +# Install cross-compilation tool
    +cargo install cross
    +
    +# Cross-compile with Docker
    +cross build --release --target aarch64-unknown-linux-gnu
    +
    +

    Just Task Runner

    +

    Common build tasks in justfile:

    +
    # Build all components
    +build-all: build-platform build-plugins
    +
    +# Build platform services
    +build-platform:
    +    cd platform && cargo build --release --workspace
    +
    +# Run all tests
    +test: test-rust test-nushell test-integration
    +
    +# Test Rust code
    +test-rust:
    +    cargo test --workspace
    +
    +# Test Nushell scripts
    +test-nushell:
    +    nu scripts/test/test_all.nu
    +
    +# Format all code
    +fmt:
    +    cargo fmt --all
    +    nickel fmt schemas/**/*.ncl
    +
    +# Lint all code
    +lint:
    +    cargo clippy --all -- -D warnings
    +    nickel typecheck schemas/main.ncl
    +
    +# Create release package
    +package:
    +    ./scripts/package.nu
    +
    +# Clean build artifacts
    +clean:
    +    cargo clean
    +    rm -rf target/
    +
    +

    Usage examples:

    +
    just build-all     # Build everything
    +just test          # Run all tests
    +just fmt           # Format code
    +just lint          # Run linters
    +just package       # Create distribution
    +just clean         # Remove artifacts
    +
    +

    Performance Optimization

    +

    Release Builds

    +
    # Cargo.toml
    +[profile.release]
    +opt-level = 3              # Maximum optimization
    +lto = "fat"                # Link-time optimization
    +codegen-units = 1          # Better optimization, slower compile
    +strip = true               # Strip debug symbols
    +panic = "abort"            # Smaller binary size
    +
    +

    Build Time Optimization

    +
    # Cargo.toml
    +[profile.dev]
    +opt-level = 1              # Basic optimization
    +incremental = true         # Faster recompilation
    +
    +

    Speed up compilation:

    +
    # Use faster linker (Linux)
    +sudo apt install lld
    +export RUSTFLAGS="-C link-arg=-fuse-ld=lld"
    +
    +# Parallel compilation
    +cargo build -j 8
    +
    +# Use cargo-watch for auto-rebuild
    +cargo install cargo-watch
    +cargo watch -x build
    +
    +

    Development Workflow

    + +
    # 1. Start development
    +just clean
    +just build-all
    +
    +# 2. Make changes to code
    +
    +# 3. Test changes quickly
    +cargo check                # Fast syntax check
    +cargo test <specific-test> # Test specific functionality
    +
    +# 4. Full validation before commit
    +just fmt
    +just lint
    +just test
    +
    +# 5. Create package for testing
    +just package
    +
    +

    Hot Reload Development

    +
    # Auto-rebuild on file changes
    +cargo watch -x build
    +
    +# Auto-test on changes
    +cargo watch -x test
    +
    +# Run service with auto-reload
    +cargo watch -x 'run --bin provisioning-orchestrator'
    +
    +

    Debugging Builds

    +

    Debug Information

    +
    # Build with full debug info
    +cargo build
    +
    +# Build with debug info in release mode
    +cargo build --release --profile release-with-debug
    +
    +# Run with backtraces
    +RUST_BACKTRACE=1 cargo run
    +RUST_BACKTRACE=full cargo run
    +
    +

    Build Verbosity

    +
    # Verbose build output
    +cargo build -vv
    +
    +# Show build commands
    +cargo build -vvv
    +
    +# Show timing information
    +cargo build --timings
    +
    +

    Dependency Tree

    +
    # View dependency tree
    +cargo tree
    +
    +# Duplicate dependencies
    +cargo tree --duplicates
    +
    +# Build graph visualization
    +cargo depgraph | dot -Tpng > deps.png
    +
    +

    Best Practices

    +
      +
    • Always run just test before committing
    • +
    • Use cargo fmt and cargo clippy for code quality
    • +
    • Test on multiple platforms before release
    • +
    • Strip binaries for production distributions
    • +
    • Version binaries with semantic versioning
    • +
    • Cache dependencies in CI/CD
    • +
    • Use release profile for production builds
    • +
    • Document build requirements in README
    • +
    • Automate common tasks with Just
    • +
    • Keep build times reasonable (<5 min)
    • +
    +

    Troubleshooting

    +

    Common Build Issues

    +

    Compilation fails with linker error:

    +
    # Install build dependencies
    +sudo apt install build-essential pkg-config libssl-dev
    +
    +

    Out of memory during build:

    +
    # Reduce parallel jobs
    +cargo build -j 2
    +
    +# Use more swap space
    +sudo fallocate -l 8G /swapfile
    +sudo mkswap /swapfile
    +sudo swapon /swapfile
    +
    +

    Clippy warnings:

    +
    # Fix automatically where possible
    +cargo clippy --all --fix
    +
    +# Allow specific lints temporarily
    +#[allow(clippy::too_many_arguments)]
    +
    + +
      +
    • Testing - Testing strategies and procedures
    • +
    • Contributing - Contribution guidelines including build requirements
    • +
    +

    Testing

    +

    Comprehensive testing strategies for the Provisioning platform including unit tests, integration tests, and 350+ security tests.

    +

    Testing Overview

    +

    The platform maintains extensive test coverage across multiple test types:

    +
    + + + + +
    Test TypeCountCoverage TargetAverage Runtime
    Unit Tests200+Core logic 80%+< 5 seconds
    Integration Tests100+Component integration 60%+< 30 seconds
    Security Tests350+Security components 100%< 60 seconds
    End-to-End Tests50+Full workflows< 5 minutes
    +
    +

    Running Tests

    +

    All Tests

    +
    # Run complete test suite
    +cargo test --workspace
    +
    +# With output visible
    +cargo test --workspace -- --nocapture
    +
    +# Parallel execution with 8 threads
    +cargo test --workspace --jobs 8
    +
    +# Include ignored tests
    +cargo test --workspace -- --ignored
    +
    +

    Test by Category

    +
    # Unit tests only (--lib)
    +cargo test --lib
    +
    +# Integration tests only (--test)
    +cargo test --test '*'
    +
    +# Documentation tests
    +cargo test --doc
    +
    +# Security test suite
    +cargo test -p security --test '*'
    +
    +

    Test Specific Component

    +
    # Test orchestrator crate
    +cargo test -p provisioning-orchestrator
    +
    +# Test control center
    +cargo test -p provisioning-control-center
    +
    +# Test specific module
    +cargo test -p provisioning-orchestrator workflows::
    +
    +# Test specific function
    +cargo test -p provisioning-orchestrator test_workflow_creation
    +
    +

    Unit Testing

    +

    Unit tests verify individual functions and modules in isolation.

    +

    Rust Unit Tests

    +
    // src/workflows.rs
    +#[cfg(test)]
    +mod tests {
    +    use super::*;
    +
    +    #[test]
    +    fn test_create_workflow() {
    +        let config = WorkflowConfig {
    +            name: "test-workflow".into(),
    +            tasks: vec![],
    +        };
    +
    +        let workflow = Workflow::new(config);
    +
    +        assert_eq!(workflow.name(), "test-workflow");
    +        assert_eq!(workflow.status(), WorkflowStatus::Pending);
    +    }
    +
    +    #[test]
    +    fn test_workflow_execution() {
    +        let mut workflow = create_test_workflow();
    +
    +        let result = workflow.execute();
    +
    +        assert!(result.is_ok());
    +        assert_eq!(workflow.status(), WorkflowStatus::Completed);
    +    }
    +
    +    #[test]
    +    #[should_panic(expected = "Invalid workflow")]
    +    fn test_invalid_workflow() {
    +        Workflow::new(invalid_config());
    +    }
    +}
    +

    Nushell Unit Tests

    +
    # tests/test_provider.nu
    +use std assert
    +
    +export def test_validate_config [] {
    +    let config = {api_key: "test-key", region: "us-east-1"}
    +    let result = validate-config $config
    +    assert equal $result.valid true
    +}
    +
    +export def test_create_server [] {
    +    let spec = {name: "test-server", plan: "medium"}
    +    let server = create-server test-config $spec
    +    assert ($server.id != null)
    +}
    +
    +export def main [] {
    +    test_validate_config
    +    test_create_server
    +    print "All tests passed"
    +}
    +
    +

    Run Nushell tests:

    +
    nu tests/test_provider.nu
    +
    +

    Integration Testing

    +

    Integration tests verify components work together correctly.

    +

    Service Integration Tests

    +
    // tests/orchestrator_integration.rs
    +use provisioning_orchestrator::Orchestrator;
    +use provisioning_database::Database;
    +
    +#[tokio::test]
    +async fn test_workflow_persistence() {
    +    let db = Database::new_test().await;
    +    let orchestrator = Orchestrator::new(db.clone());
    +
    +    let workflow_id = orchestrator.create_workflow(test_config()).await.unwrap();
    +
    +    // Verify workflow persisted to database
    +    let workflow = db.get_workflow(&workflow_id).await.unwrap();
    +    assert_eq!(workflow.id, workflow_id);
    +}
    +
    +#[tokio::test]
    +async fn test_api_integration() {
    +    let app = create_test_app().await;
    +
    +    let response = app
    +        .post("/api/v1/workflows")
    +        .json(&test_workflow())
    +        .send()
    +        .await
    +        .unwrap();
    +
    +    assert_eq!(response.status(), 201);
    +}
    +

    Test Containers

    +

    Use Docker containers for realistic integration testing:

    +
    use testcontainers::*;
    +
    +#[tokio::test]
    +async fn test_with_database() {
    +    let docker = clients::Cli::default();
    +    let postgres = docker.run(images::postgres::Postgres::default());
    +
    +    let db_url = format!(
    +        "postgres://postgres@localhost:{}/test",
    +        postgres.get_host_port_ipv4(5432)
    +    );
    +
    +    // Run tests against real database
    +    let db = Database::connect(&db_url).await.unwrap();
    +    // Test database operations...
    +}
    +

    Security Testing

    +

    Comprehensive security testing with 350+ test cases covering all security components.

    +

    Authentication Tests

    +
    #[tokio::test]
    +async fn test_jwt_verification() {
    +    let auth = AuthService::new();
    +
    +    let token = auth.generate_token("user123").unwrap();
    +    let claims = auth.verify_token(&token).unwrap();
    +
    +    assert_eq!(claims.sub, "user123");
    +}
    +
    +#[tokio::test]
    +async fn test_invalid_token() {
    +    let auth = AuthService::new();
    +    let result = auth.verify_token("invalid.token.here");
    +    assert!(result.is_err());
    +}
    +
    +#[tokio::test]
    +async fn test_token_expiration() {
    +    let auth = AuthService::new();
    +    let token = create_expired_token();
    +    let result = auth.verify_token(&token);
    +    assert!(matches!(result, Err(AuthError::TokenExpired)));
    +}
    +

    Authorization Tests

    +
    #[tokio::test]
    +async fn test_rbac_enforcement() {
    +    let authz = AuthorizationService::new();
    +
    +    let decision = authz.authorize(
    +        "user:user123",
    +        "workflow:create",
    +        "resource:my-cluster"
    +    ).await;
    +
    +    assert_eq!(decision, Decision::Allow);
    +}
    +
    +#[tokio::test]
    +async fn test_policy_denial() {
    +    let authz = AuthorizationService::new();
    +
    +    let decision = authz.authorize(
    +        "user:guest",
    +        "server:delete",
    +        "resource:prod-server"
    +    ).await;
    +
    +    assert_eq!(decision, Decision::Deny);
    +}
    +

    Encryption Tests

    +
    #[tokio::test]
    +async fn test_kms_encryption() {
    +    let kms = KmsService::new();
    +
    +    let plaintext = b"secret data";
    +    let ciphertext = kms.encrypt(plaintext).await.unwrap();
    +    let decrypted = kms.decrypt(&ciphertext).await.unwrap();
    +
    +    assert_eq!(plaintext, decrypted.as_slice());
    +}
    +
    +#[tokio::test]
    +async fn test_encryption_performance() {
    +    let kms = KmsService::new();
    +    let plaintext = vec![0u8; 1024]; // 1KB
    +
    +    let start = Instant::now();
    +    kms.encrypt(&plaintext).await.unwrap();
    +    let duration = start.elapsed();
    +
    +    // KMS encryption should complete in < 10ms
    +    assert!(duration < Duration::from_millis(10));
    +}
    +

    End-to-End Testing

    +

    Complete workflow testing from start to finish.

    +

    Full Workflow Tests

    +
    #[tokio::test]
    +async fn test_complete_workflow() {
    +    let platform = Platform::start_test_instance().await;
    +
    +    // Create infrastructure
    +    let cluster_id = platform
    +        .create_cluster(test_cluster_config())
    +        .await
    +        .unwrap();
    +
    +    // Wait for completion (5 minute timeout)
    +    platform
    +        .wait_for_cluster(&cluster_id, Duration::from_secs(300))
    +        .await;
    +
    +    // Verify cluster health
    +    let health = platform.check_cluster_health(&cluster_id).await;
    +    assert!(health.is_healthy());
    +
    +    // Cleanup
    +    platform.delete_cluster(&cluster_id).await.unwrap();
    +}
    +

    Test Fixtures

    +

    Shared test data and utilities.

    +

    Common Test Fixtures

    +
    // tests/fixtures/mod.rs
    +pub fn test_workflow_config() -> WorkflowConfig {
    +    WorkflowConfig {
    +        name: "test-workflow".into(),
    +        tasks: vec![
    +            Task::new("task1", TaskType::CreateServer),
    +            Task::new("task2", TaskType::InstallService),
    +        ],
    +    }
    +}
    +
    +pub fn test_server_spec() -> ServerSpec {
    +    ServerSpec {
    +        name: "test-server".into(),
    +        plan: "medium".into(),
    +        zone: "us-east-1a".into(),
    +        image: "ubuntu-24.04".into(),
    +    }
    +}
    +

    Mocking

    +

    Mock external dependencies for isolated testing.

    +

    Mock External Services

    +
    use mockall::*;
    +
    +#[automock]
    +trait CloudProvider {
    +    async fn create_server(&self, spec: &ServerSpec) -> Result<Server>;
    +}
    +
    +#[tokio::test]
    +async fn test_with_mock_provider() {
    +    let mut mock_provider = MockCloudProvider::new();
    +
    +    mock_provider
    +        .expect_create_server()
    +        .returning| ( | _ Ok(test_server()));
    +
    +    let result = mock_provider.create_server(&test_spec()).await;
    +    assert!(result.is_ok());
    +}
    +

    Test Coverage

    +

    Measure and maintain code coverage.

    +

    Generate Coverage Report

    +
    # Install tarpaulin
    +cargo install cargo-tarpaulin
    +
    +# Generate HTML coverage report
    +cargo tarpaulin --out Html --output-dir coverage
    +
    +# Generate multiple formats
    +cargo tarpaulin --out Html --out Xml --out Json
    +
    +# View coverage
    +open coverage/index.html
    +
    +

    Coverage Goals

    +
      +
    • Unit tests: Minimum 80% code coverage
    • +
    • Integration tests: Minimum 60% component coverage
    • +
    • Critical paths: 100% coverage required
    • +
    • Security components: 100% coverage required
    • +
    +

    Performance Testing

    +

    Benchmark critical operations.

    +

    Benchmark Tests

    +
    use criterion::{black_box, criterion_group, criterion_main, Criterion};
    +
    +fn benchmark_workflow_creation(c: &mut Criterion) {
    +    c.bench_function("create_workflow", | | b {
    +        b.iter| ( | {
    +            Workflow::new(black_box(test_config()))
    +        })
    +    });
    +}
    +
    +fn benchmark_database_query(c: &mut Criterion) {
    +    c.bench_function("query_workflows", | | b {
    +        b.iter| ( | {
    +            db.query_workflows(black_box(&filter))
    +        })
    +    });
    +}
    +
    +criterion_group!(benches, benchmark_workflow_creation, benchmark_database_query);
    +criterion_main!(benches);
    +

    Run benchmarks:

    +
    cargo bench
    +
    +

    Test Best Practices

    +
      +
    • Write tests before or alongside code (TDD approach)
    • +
    • Keep tests focused and isolated
    • +
    • Use descriptive test names that explain what is tested
    • +
    • Clean up test resources (databases, files, containers)
    • +
    • Mock external dependencies to avoid flaky tests
    • +
    • Test both success and error conditions
    • +
    • Maintain shared test fixtures for consistency
    • +
    • Run tests in CI/CD pipeline
    • +
    • Monitor test execution time (fail if too slow)
    • +
    • Refactor tests alongside production code
    • +
    +

    Continuous Testing

    +

    Watch Mode

    +

    Auto-run tests on code changes:

    +
    # Install cargo-watch
    +cargo install cargo-watch
    +
    +# Watch and run tests
    +cargo watch -x test
    +
    +# Watch specific package
    +cargo watch -x 'test -p provisioning-orchestrator'
    +
    +

    Pre-Commit Testing

    +

    Run tests automatically before commits:

    +
    # Install pre-commit hooks
    +pre-commit install
    +
    +# Runs on every commit:
    +# - cargo test
    +# - cargo clippy
    +# - cargo fmt --check
    +
    + + +

    Contributing

    +

    Guidelines for contributing to the Provisioning platform including setup, workflow, and best practices.

    +

    Getting Started

    +

    Prerequisites

    +

    Install required development tools:

    +
    # Rust toolchain (latest stable)
    +curl --proto '=https' --tlsv1.2 -sSf  [https://sh.rustup.rs](https://sh.rustup.rs) | sh
    +
    +# Nushell shell
    +brew install nushell
    +
    +# Nickel configuration language
    +brew install nickel
    +
    +# Just task runner
    +brew install just
    +
    +# Additional development tools
    +cargo install cargo-watch cargo-tarpaulin cargo-audit
    +
    +

    Development Workflow

    +

    Follow these guidelines for all code changes and ensure adherence to the project’s technical standards.

      -
    1. Removes old hostname entries from /etc/hosts
    2. -
    3. Adds new hostname → IP mapping to /etc/hosts
    4. -
    5. Adds SSH config entry to ~/.ssh/config
    6. -
    7. Removes old SSH host keys for the hostname
    8. +
    9. Read applicable language guidelines
    10. +
    11. Create feature branch from main
    12. +
    13. Make changes following project standards
    14. +
    15. Write or update tests
    16. +
    17. Run full test suite and linting
    18. +
    19. Create pull request with clear description
    -

    When disabled:

    +

    Code Style Guidelines

    +

    Rust Code

    +

    Rust code guidelines:

      -
    • You manually manage /etc/hosts entries
    • -
    • You manually manage ~/.ssh/config entries
    • -
    • SSH to servers using IP addresses instead of hostnames
    • +
    • Use idiomatic Rust patterns
    • +
    • No unwrap() in production code
    • +
    • Comprehensive error handling with custom error types
    • +
    • Format with cargo fmt
    • +
    • Pass cargo clippy -- -D warnings with zero warnings
    • +
    • Add inline documentation for public APIs
    -

    Security Note

    -

    The provisioning tool never stores or caches your sudo password. It only:

    +

    Nushell Scripts

    +

    Nushell code guidelines:

      -
    • Checks if sudo credentials are already cached (via sudo -n true)
    • -
    • Detects when sudo fails due to missing credentials
    • -
    • Provides helpful error messages and exit cleanly
    • +
    • Use structured data pipelines
    • +
    • Avoid external command dependencies where possible
    • +
    • Handle errors gracefully with try-catch
    • +
    • Document functions with comments
    • +
    • Use type annotations for clarity
    -

    Your sudo password timeout is controlled by the system’s sudoers configuration (default: 5 minutes).

    -

    Configuration Validation Guide

    -

    Overview

    -

    The new configuration system includes comprehensive schema validation to catch errors early and ensure configuration correctness.

    -

    Schema Validation Features

    -

    1. Required Fields Validation

    -

    Ensures all required fields are present:

    -
    # Schema definition
    -[required]
    -fields = ["name", "version", "enabled"]
    +

    Nickel Schemas

    +

    Nickel configuration guidelines:

    +
      +
    • Define clear type constraints
    • +
    • Use lazy evaluation appropriately
    • +
    • Provide default values where sensible
    • +
    • Document schema fields
    • +
    • Validate schemas with nickel typecheck
    • +
    +

    Testing Requirements

    +

    All contributions must include appropriate tests:

    +

    Required Tests

    +
      +
    • Unit tests for all new functions
    • +
    • Integration tests for component interactions
    • +
    • Security tests for security-related changes
    • +
    • Documentation tests for code examples
    • +
    +

    Running Tests

    +
    # Run all tests
    +just test
     
    -# Valid config
    -name = "my-service"
    -version = "1.0.0"
    -enabled = true
    +# Run specific test suite
    +cargo test -p provisioning-orchestrator
     
    -# Invalid - missing 'enabled'
    -name = "my-service"
    -version = "1.0.0"
    -# Error: Required field missing: enabled
    +# Run with coverage
    +cargo tarpaulin --out Html
     
    -

    2. Type Validation

    -

    Validates field types:

    -
    # Schema
    -[fields.port]
    -type = "int"
    +

    Test Coverage Requirements

    +
      +
    • Unit tests: Minimum 80% code coverage
    • +
    • Critical paths: 100% coverage
    • +
    • Security components: 100% coverage
    • +
    +

    Documentation

    +

    Required Documentation

    +

    All code changes must include:

    +
      +
    • Inline code documentation for public APIs
    • +
    • Updated README if adding new components
    • +
    • Examples showing usage
    • +
    • Migration guide for breaking changes
    • +
    +

    Documentation Standards

    +

    Documentation standards:

    +
      +
    • Use Markdown for all documentation
    • +
    • Code blocks must specify language
    • +
    • Keep lines ≤150 characters
    • +
    • No bare URLs (use markdown links)
    • +
    • Test all code examples
    • +
    +

    Commit Message Format

    +

    Use conventional commit format:

    +
    <type>(<scope>): <subject>
     
    -[fields.name]
    -type = "string"
    +<body>
     
    -[fields.enabled]
    -type = "bool"
    -
    -# Valid
    -port = 8080
    -name = "orchestrator"
    -enabled = true
    -
    -# Invalid - wrong type
    -port = "8080"  # Error: Expected int, got string
    +<footer>
     
    -

    3. Enum Validation

    -

    Restricts values to predefined set:

    -
    # Schema
    -[fields.environment]
    -type = "string"
    -enum = ["dev", "staging", "prod"]
    +

    Types:

    +
      +
    • feat: New feature
    • +
    • fix: Bug fix
    • +
    • docs: Documentation changes
    • +
    • test: Adding or updating tests
    • +
    • refactor: Code refactoring
    • +
    • perf: Performance improvements
    • +
    • chore: Maintenance tasks
    • +
    +

    Example:

    +
    feat(orchestrator): add workflow retry mechanism
     
    -# Valid
    -environment = "prod"
    +- Implement exponential backoff strategy
    +- Add max retry configuration option
    +- Update workflow state tracking
     
    -# Invalid
    -environment = "production"  # Error: Must be one of: dev, staging, prod
    +Closes #123
     
    -

    4. Range Validation

    -

    Validates numeric ranges:

    -
    # Schema
    -[fields.port]
    -type = "int"
    -min = 1024
    -max = 65535
    +

    Pull Request Process

    +

    Before Creating PR

    +
      +
    1. Update your branch with latest main
    2. +
    3. Run full test suite: just test
    4. +
    5. Run linters: just lint
    6. +
    7. Format code: just fmt
    8. +
    9. Build successfully: just build-all
    10. +
    +

    PR Description Template

    +
    ## Description
    +Brief description of changes and motivation
     
    -# Valid
    -port = 8080
    +## Type of Change
    +- [ ] Bug fix (non-breaking change fixing an issue)
    +- [ ] New feature (non-breaking change adding functionality)
    +- [ ] Breaking change (fix or feature causing existing functionality to change)
    +- [ ] Documentation update
     
    -# Invalid - below minimum
    -port = 80  # Error: Must be >= 1024
    +## Testing
    +- [ ] Unit tests added or updated
    +- [ ] Integration tests pass
    +- [ ] Manual testing completed
    +- [ ] Test coverage maintained or improved
     
    -# Invalid - above maximum
    -port = 70000  # Error: Must be <= 65535
    +## Checklist
    +- [ ] Code follows project style guidelines
    +- [ ] Self-review completed
    +- [ ] Documentation updated
    +- [ ] No new compiler warnings
    +- [ ] Tested on relevant platforms
    +
    +## Related Issues
    +Closes #<issue-number>
     
    -

    5. Pattern Validation

    -

    Validates string patterns using regex:

    -
    # Schema
    -[fields.email]
    -type = "string"
    -pattern = "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"
    -
    -# Valid
    -email = "admin@example.com"
    -
    -# Invalid
    -email = "not-an-email"  # Error: Does not match pattern
    +

    Code Review

    +

    All PRs require code review before merging. Reviewers check:

    +
      +
    • Correctness and quality of implementation
    • +
    • Test coverage and quality
    • +
    • Documentation completeness
    • +
    • Adherence to style guidelines
    • +
    • Security implications
    • +
    • Performance considerations
    • +
    • Breaking changes properly documented
    • +
    +

    Development Best Practices

    +

    Code Quality

    +
      +
    • Write self-documenting code with clear naming
    • +
    • Keep functions focused and single-purpose
    • +
    • Avoid premature optimization
    • +
    • Use meaningful variable and function names
    • +
    • Comment complex logic, not obvious code
    • +
    +

    Error Handling

    +
      +
    • Use custom error types, not strings
    • +
    • Provide context in error messages
    • +
    • Handle errors at appropriate level
    • +
    • Log errors with sufficient detail
    • +
    • Never ignore errors silently
    • +
    +

    Performance

    +
      +
    • Profile before optimizing
    • +
    • Use appropriate data structures
    • +
    • Minimize allocations in hot paths
    • +
    • Consider async for I/O-bound operations
    • +
    • Benchmark performance-critical code
    • +
    +

    Security

    +
      +
    • Validate all inputs
    • +
    • Never log sensitive data
    • +
    • Use constant-time comparisons for secrets
    • +
    • Follow principle of least privilege
    • +
    • Review security guidelines for security-related changes
    • +
    +

    Getting Help

    +

    Need assistance with contributions?

    +
      +
    1. Check existing documentation in docs/
    2. +
    3. Search for similar closed issues and PRs
    4. +
    5. Ask questions in GitHub Discussions
    6. +
    7. Reach out to maintainers
    8. +
    +

    Recognition

    +

    Contributors are recognized in:

    +
      +
    • CONTRIBUTORS.md file
    • +
    • Release notes for significant contributions
    • +
    • Project documentation acknowledgments
    • +
    +

    Thank you for contributing to the Provisioning platform!

    +

    + Provisioning Logo +

    +

    + Provisioning +

    +

    API Reference

    +

    Complete API documentation for the Provisioning platform, including REST endpoints, CLI +commands, and library interfaces.

    +

    Available APIs

    +

    The Provisioning platform provides multiple API surfaces for different use cases and integration patterns.

    +

    REST API

    +

    HTTP-based APIs for external integration and programmatic access.

    + +

    Command-Line Interface

    +

    Native CLI for interactive and scripted operations.

    + +

    Nushell Libraries

    +

    Internal library APIs for extension development and customization.

    + +

    API Categories

    +

    Infrastructure Management

    +

    Manage cloud resources, servers, and infrastructure components.

    +

    REST Endpoints:

    +
      +
    • Server Management - Create, delete, update, list servers
    • +
    • Provider Integration - Cloud provider operations
    • +
    • Network Configuration - Network, firewall, routing
    • +
    +

    CLI Commands:

    +
      +
    • provisioning server - Server lifecycle operations
    • +
    • provisioning provider - Provider configuration
    • +
    • provisioning infrastructure - Infrastructure queries
    • +
    +

    Service Orchestration

    +

    Deploy and manage infrastructure services and clusters.

    +

    REST Endpoints:

    +
      +
    • Task Service Deployment - Install, remove, update services
    • +
    • Cluster Management - Cluster lifecycle operations
    • +
    • Dependency Resolution - Automatic dependency handling
    • +
    +

    CLI Commands:

    +
      +
    • provisioning taskserv - Task service operations
    • +
    • provisioning cluster - Cluster management
    • +
    • provisioning workflow - Workflow execution
    • +
    +

    Workflow Automation

    +

    Execute batch operations and complex workflows.

    +

    REST Endpoints:

    +
      +
    • Workflow Submission - Submit and track workflows
    • +
    • Task Status - Real-time task monitoring
    • +
    • Checkpoint Recovery - Resume interrupted workflows
    • +
    +

    CLI Commands:

    +
      +
    • provisioning batch - Batch workflow operations
    • +
    • provisioning workflow - Workflow management
    • +
    • provisioning orchestrator - Orchestrator control
    • +
    +

    Configuration Management

    +

    Manage configuration across hierarchical layers.

    +

    REST Endpoints:

    +
      +
    • Configuration Retrieval - Get active configuration
    • +
    • Validation - Validate configuration files
    • +
    • Schema Queries - Query configuration schemas
    • +
    +

    CLI Commands:

    +
      +
    • provisioning config - Configuration operations
    • +
    • provisioning validate - Validation commands
    • +
    • provisioning schema - Schema management
    • +
    +

    Security & Authentication

    +

    Manage authentication, authorization, secrets, and encryption.

    +

    REST Endpoints:

    +
      +
    • Authentication - Login, token management, MFA
    • +
    • Authorization - Policy evaluation, permissions
    • +
    • Secrets Management - Secret storage and retrieval
    • +
    • KMS Operations - Key management and encryption
    • +
    • Audit Logging - Security event tracking
    • +
    +

    CLI Commands:

    +
      +
    • provisioning auth - Authentication operations
    • +
    • provisioning vault - Secret management
    • +
    • provisioning kms - Key management
    • +
    • provisioning audit - Audit log queries
    • +
    +

    Platform Services

    +

    Control platform components and system health.

    +

    REST Endpoints:

    +
      +
    • Service Health - Health checks and status
    • +
    • Service Control - Start, stop, restart services
    • +
    • Configuration - Service configuration management
    • +
    • Monitoring - Metrics and performance data
    • +
    +

    CLI Commands:

    +
      +
    • provisioning platform - Platform management
    • +
    • provisioning service - Service control
    • +
    • provisioning health - Health monitoring
    • +
    +

    API Conventions

    +

    REST API Standards

    +

    All REST endpoints follow consistent conventions:

    +

    Authentication:

    +
    Authorization: Bearer <jwt-token>
     
    -

    6. Deprecated Fields

    -

    Warns about deprecated configuration:

    -
    # Schema
    -[deprecated]
    -fields = ["old_field"]
    -
    -[deprecated_replacements]
    -old_field = "new_field"
    -
    -# Config using deprecated field
    -old_field = "value"  # Warning: old_field is deprecated. Use new_field instead.
    +

    Request Format:

    +
    Content-Type: application/json
     
    -

    Using Schema Validator

    -

    Command Line

    -
    # Validate workspace config
    -provisioning workspace config validate
    +

    Response Format:

    +
    {
    +  "status": "succes| s error",
    +  "data": { ... },
    +  "message": "Human-readable message",
    +  "timestamp": "2026-01-16T10:30:00Z"
    +}
    +
    +

    Error Responses:

    +
    {
    +  "status": "error",
    +  "error": {
    +    "code": "ERR_CODE",
    +    "message": "Error description",
    +    "details": { ... }
    +  },
    +  "timestamp": "2026-01-16T10:30:00Z"
    +}
    +
    +

    CLI Command Patterns

    +

    All CLI commands follow consistent patterns:

    +

    Common Flags:

    +
      +
    • --yes - Skip confirmation prompts
    • +
    • --check - Dry-run mode, show what would happen
    • +
    • --wait - Wait for operation completion
    • +
    • --format jso| n yam| l table - Output format
    • +
    • --verbose - Detailed output
    • +
    • --quiet - Minimal output
    • +
    +

    Command Structure:

    +
    provisioning <domain> <action> <resource> [flags]
    +
    +

    Examples:

    +
    provisioning server create web-01 --plan medium --yes
    +provisioning taskserv install kubernetes --cluster prod
    +provisioning workflow submit deploy.ncl --wait
    +
    +

    Library Function Signatures

    +

    Nushell library functions follow consistent signatures:

    +

    Parameter Order:

    +
      +
    1. Required positional parameters
    2. +
    3. Optional positional parameters
    4. +
    5. Named parameters (flags)
    6. +
    +

    Return Values:

    +
      +
    • Success: Returns data structure (record, table, list)
    • +
    • Error: Throws error with structured message
    • +
    +

    Example:

    +
    def create-server [
    +  name: string           # Required: server name
    +  --plan: string = "medium"  # Optional: server plan
    +  --wait                 # Optional: wait flag
    +] {
    +  # Implementation
    +}
    +
    +

    API Versioning

    +

    The Provisioning platform uses semantic versioning for APIs:

    +
      +
    • Major version - Breaking changes to API contracts
    • +
    • Minor version - Backwards-compatible additions
    • +
    • Patch version - Backwards-compatible bug fixes
    • +
    +

    Current API Version: v1.0.0

    +

    Version Compatibility:

    +
      +
    • REST API includes version in URL: /api/v1/servers
    • +
    • CLI maintains backwards compatibility across minor versions
    • +
    • Libraries use semantic import versioning
    • +
    +

    Rate Limiting

    +

    REST API endpoints implement rate limiting to ensure platform stability:

    +
      +
    • Default Limit: 100 requests per minute per API key
    • +
    • Burst Limit: 20 requests per second
    • +
    • Headers: Rate limit information in response headers
    • +
    +
    X-RateLimit-Limit: 100
    +X-RateLimit-Remaining: 95
    +X-RateLimit-Reset: 1642334400
    +
    +

    Authentication

    +

    All APIs require authentication except public health endpoints.

    +

    Supported Methods:

    +
      +
    • JWT Tokens - Primary authentication method
    • +
    • API Keys - For service-to-service integration
    • +
    • MFA - Multi-factor authentication for sensitive operations
    • +
    +

    Token Management:

    +
    # Login and obtain token
    +provisioning auth login --user admin
     
    -# Validate provider config
    +# Use token in requests
    +curl -H "Authorization: Bearer $TOKEN"  [https://api/v1/servers](https://api/v1/servers)
    +
    +

    See Authentication Guide for complete details.

    +

    API Discovery

    +

    Discover available APIs programmatically:

    +

    REST API:

    +
    # Get API specification (OpenAPI)
    +curl  [https://api/v1/openapi.json](https://api/v1/openapi.json)
    +
    +

    CLI:

    +
    # List all commands
    +provisioning help --all
    +
    +# Get command details
    +provisioning server help
    +
    +

    Libraries:

    +
    # List available modules
    +use lib_provisioning *
    +$nu.scope.commands | where is_custom
    +
    +

    Next Steps

    + + + +

    REST API Reference

    +

    Complete HTTP API documentation for the Provisioning platform covering 83+ endpoints across 9 platform services.

    +

    Base URL

    +
     [https://api.provisioning.local/api/v1](https://api.provisioning.local/api/v1)
    +
    +

    All endpoints are prefixed with /api/v1 for version compatibility.

    +

    Authentication

    +

    All API requests require authentication using JWT Bearer tokens:

    +
    Authorization: Bearer <your-jwt-token>
    +
    +

    Obtain tokens via the Authentication API endpoints.

    +

    Common Response Format

    +

    All responses follow a consistent structure:

    +

    Success Response:

    +
    {
    +  "status": "success",
    +  "data": { ... },
    +  "message": "Operation completed successfully",
    +  "timestamp": "2026-01-16T10:30:00Z"
    +}
    +
    +

    Error Response:

    +
    {
    +  "status": "error",
    +  "error": {
    +    "code": "ERR_CODE",
    +    "message": "Human-readable error message",
    +    "details": { ... }
    +  },
    +  "timestamp": "2026-01-16T10:30:00Z"
    +}
    +
    +

    HTTP Status Codes

    +
    + + + + + + + + + + + + +
    CodeMeaningUsage
    200OKSuccessful GET, PUT, PATCH requests
    201CreatedSuccessful POST request creating resource
    202AcceptedAsync operation accepted, check status
    204No ContentSuccessful DELETE request
    400Bad RequestInvalid request parameters
    401UnauthorizedMissing or invalid authentication
    403ForbiddenValid auth but insufficient permissions
    404Not FoundResource does not exist
    409ConflictResource conflict (duplicate name, etc.)
    429Too Many RequestsRate limit exceeded
    500Internal Server ErrorServer error
    503Service UnavailableService temporarily unavailable
    +
    +

    API Services

    +

    The platform exposes 9 distinct services with REST APIs:

    +
      +
    1. Orchestrator - Workflow execution and task management
    2. +
    3. Control Center - Platform management and monitoring
    4. +
    5. Extension Registry - Extension distribution
    6. +
    7. Auth Service - Authentication and identity
    8. +
    9. Vault Service - Secrets management
    10. +
    11. KMS Service - Key management and encryption
    12. +
    13. Audit Service - Audit logging and compliance
    14. +
    15. Policy Service - Authorization policies
    16. +
    17. Gateway Service - API gateway and routing
    18. +
    +

    Orchestrator API

    +

    Workflow execution, task scheduling, and state management.

    +

    Base Path: /api/v1/orchestrator

    +

    Submit Workflow

    +

    Submit a new workflow for execution.

    +

    Endpoint: POST /workflows

    +

    Request:

    +
    {
    +  "name": "deploy-cluster",
    +  "type": "cluster",
    +  "operations": [
    +    {
    +      "id": "create-servers",
    +      "type": "server",
    +      "action": "create",
    +      "params": {
    +        "infra": "my-cluster.ncl"
    +      }
    +    },
    +    {
    +      "id": "install-k8s",
    +      "type": "taskserv",
    +      "action": "install",
    +      "params": {
    +        "name": "kubernetes"
    +      },
    +      "dependencies": ["create-servers"]
    +    }
    +  ],
    +  "priority": "normal",
    +  "checkpoint_enabled": true
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "workflow_id": "wf-20260116-abc123",
    +    "state": "queued",
    +    "created_at": "2026-01-16T10:30:00Z"
    +  }
    +}
    +
    +

    Get Workflow Status

    +

    Retrieve workflow execution status.

    +

    Endpoint: GET /workflows/{workflow_id}

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "workflow_id": "wf-20260116-abc123",
    +    "name": "deploy-cluster",
    +    "state": "running",
    +    "progress": {
    +      "total_tasks": 2,
    +      "completed": 1,
    +      "failed": 0,
    +      "running": 1
    +    },
    +    "current_task": {
    +      "id": "install-k8s",
    +      "state": "running",
    +      "started_at": "2026-01-16T10:32:00Z"
    +    },
    +    "created_at": "2026-01-16T10:30:00Z",
    +    "updated_at": "2026-01-16T10:32:15Z"
    +  }
    +}
    +
    +

    List Workflows

    +

    List all workflows with optional filtering.

    +

    Endpoint: GET /workflows

    +

    Query Parameters:

    +
      +
    • state (optional) - Filter by state: queue| d runnin| g complete| d failed
    • +
    • limit (optional) - Maximum results (default: 50, max: 100)
    • +
    • offset (optional) - Pagination offset
    • +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "workflows": [
    +      {
    +        "workflow_id": "wf-20260116-abc123",
    +        "name": "deploy-cluster",
    +        "state": "running",
    +        "created_at": "2026-01-16T10:30:00Z"
    +      }
    +    ],
    +    "total": 1,
    +    "limit": 50,
    +    "offset": 0
    +  }
    +}
    +
    +

    Cancel Workflow

    +

    Cancel a running workflow.

    +

    Endpoint: POST /workflows/{workflow_id}/cancel

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "workflow_id": "wf-20260116-abc123",
    +    "state": "cancelled",
    +    "cancelled_at": "2026-01-16T10:35:00Z"
    +  }
    +}
    +
    +

    Get Task Logs

    +

    Retrieve logs for a specific task in a workflow.

    +

    Endpoint: GET /workflows/{workflow_id}/tasks/{task_id}/logs

    +

    Query Parameters:

    +
      +
    • lines (optional) - Number of lines (default: 100)
    • +
    • follow (optional) - Stream logs (SSE)
    • +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "task_id": "install-k8s",
    +    "logs": [
    +      {
    +        "timestamp": "2026-01-16T10:32:00Z",
    +        "level": "info",
    +        "message": "Starting Kubernetes installation"
    +      },
    +      {
    +        "timestamp": "2026-01-16T10:32:15Z",
    +        "level": "info",
    +        "message": "Downloading Kubernetes binaries"
    +      }
    +    ]
    +  }
    +}
    +
    +

    Resume Workflow

    +

    Resume a failed workflow from checkpoint.

    +

    Endpoint: POST /workflows/{workflow_id}/resume

    +

    Request:

    +
    {
    +  "from_checkpoint": "create-servers",
    +  "skip_failed": false
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "workflow_id": "wf-20260116-abc123",
    +    "state": "running",
    +    "resumed_at": "2026-01-16T10:40:00Z"
    +  }
    +}
    +
    +

    Control Center API

    +

    Platform management, service control, and monitoring.

    +

    Base Path: /api/v1/control-center

    +

    List Services

    +

    List all platform services and their status.

    +

    Endpoint: GET /services

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "services": [
    +      {
    +        "name": "orchestrator",
    +        "state": "running",
    +        "health": "healthy",
    +        "uptime": 86400,
    +        "version": "1.0.0"
    +      },
    +      {
    +        "name": "control-center",
    +        "state": "running",
    +        "health": "healthy",
    +        "uptime": 86400,
    +        "version": "1.0.0"
    +      }
    +    ]
    +  }
    +}
    +
    +

    Get Service Health

    +

    Check health status of a specific service.

    +

    Endpoint: GET /services/{service_name}/health

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "service": "orchestrator",
    +    "health": "healthy",
    +    "checks": {
    +      "api": "pass",
    +      "database": "pass",
    +      "storage": "pass"
    +    },
    +    "timestamp": "2026-01-16T10:30:00Z"
    +  }
    +}
    +
    +

    Start Service

    +

    Start a stopped platform service.

    +

    Endpoint: POST /services/{service_name}/start

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "service": "orchestrator",
    +    "state": "starting",
    +    "message": "Service start initiated"
    +  }
    +}
    +
    +

    Stop Service

    +

    Gracefully stop a running service.

    +

    Endpoint: POST /services/{service_name}/stop

    +

    Request:

    +
    {
    +  "force": false,
    +  "timeout": 30
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "service": "orchestrator",
    +    "state": "stopped",
    +    "message": "Service stopped gracefully"
    +  }
    +}
    +
    +

    Restart Service

    +

    Restart a platform service.

    +

    Endpoint: POST /services/{service_name}/restart

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "service": "orchestrator",
    +    "state": "restarting",
    +    "message": "Service restart initiated"
    +  }
    +}
    +
    +

    Get Service Configuration

    +

    Retrieve service configuration.

    +

    Endpoint: GET /services/{service_name}/config

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "service": "orchestrator",
    +    "config": {
    +      "port": 8080,
    +      "max_workers": 10,
    +      "checkpoint_enabled": true
    +    }
    +  }
    +}
    +
    +

    Update Service Configuration

    +

    Update service configuration (requires restart).

    +

    Endpoint: PUT /services/{service_name}/config

    +

    Request:

    +
    {
    +  "config": {
    +    "max_workers": 20
    +  },
    +  "restart": true
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "service": "orchestrator",
    +    "config_updated": true,
    +    "restart_required": true
    +  }
    +}
    +
    +

    Get Platform Metrics

    +

    Retrieve platform-wide metrics.

    +

    Endpoint: GET /metrics

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "platform": {
    +      "uptime": 86400,
    +      "version": "1.0.0"
    +    },
    +    "resources": {
    +      "cpu_usage": 45.2,
    +      "memory_usage": 62.8,
    +      "disk_usage": 38.1
    +    },
    +    "workflows": {
    +      "total": 150,
    +      "running": 5,
    +      "queued": 2,
    +      "failed": 3
    +    },
    +    "timestamp": "2026-01-16T10:30:00Z"
    +  }
    +}
    +
    +

    Extension Registry API

    +

    Extension distribution, versioning, and discovery.

    +

    Base Path: /api/v1/registry

    +

    List Extensions

    +

    List available extensions.

    +

    Endpoint: GET /extensions

    +

    Query Parameters:

    +
      +
    • type (optional) - Filter by type: provide| r taskser| v cluste| r workflow
    • +
    • search (optional) - Search by name or description
    • +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "extensions": [
    +      {
    +        "name": "kubernetes",
    +        "type": "taskserv",
    +        "version": "1.29.0",
    +        "description": "Kubernetes orchestration platform",
    +        "dependencies": ["containerd", "etcd"]
    +      }
    +    ],
    +    "total": 1
    +  }
    +}
    +
    +

    Get Extension Details

    +

    Get detailed information about an extension.

    +

    Endpoint: GET /extensions/{extension_name}

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "name": "kubernetes",
    +    "type": "taskserv",
    +    "version": "1.29.0",
    +    "description": "Kubernetes orchestration platform",
    +    "dependencies": ["containerd", "etcd"],
    +    "versions": ["1.29.0", "1.28.5", "1.27.10"],
    +    "metadata": {
    +      "author": "Provisioning Team",
    +      "license": "Apache-2.0",
    +      "homepage": " [https://kubernetes.io"](https://kubernetes.io")
    +    }
    +  }
    +}
    +
    +

    Download Extension

    +

    Download an extension package.

    +

    Endpoint: GET /extensions/{extension_name}/download

    +

    Query Parameters:

    +
      +
    • version (optional) - Specific version (default: latest)
    • +
    +

    Response: Binary OCI image blob

    +

    Publish Extension

    +

    Publish a new extension or version.

    +

    Endpoint: POST /extensions

    +

    Request: Multipart form data with OCI image

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "name": "kubernetes",
    +    "version": "1.29.0",
    +    "published_at": "2026-01-16T10:30:00Z"
    +  }
    +}
    +
    +

    Auth Service API

    +

    Authentication, identity management, and MFA.

    +

    Base Path: /api/v1/auth

    +

    Login

    +

    Authenticate user and obtain JWT token.

    +

    Endpoint: POST /login

    +

    Request:

    +
    {
    +  "username": "admin",
    +  "password": "secure-password"
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9...",
    +    "refresh_token": "refresh-token-abc123",
    +    "expires_in": 3600,
    +    "user": {
    +      "id": "user-123",
    +      "username": "admin",
    +      "roles": ["admin"]
    +    }
    +  }
    +}
    +
    +

    MFA Challenge

    +

    Request MFA challenge for two-factor authentication.

    +

    Endpoint: POST /mfa/challenge

    +

    Request:

    +
    {
    +  "username": "admin",
    +  "password": "secure-password"
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "challenge_id": "challenge-abc123",
    +    "methods": ["totp", "webauthn"],
    +    "expires_in": 300
    +  }
    +}
    +
    +

    MFA Verify

    +

    Verify MFA code and complete authentication.

    +

    Endpoint: POST /mfa/verify

    +

    Request:

    +
    {
    +  "challenge_id": "challenge-abc123",
    +  "method": "totp",
    +  "code": "123456"
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9...",
    +    "refresh_token": "refresh-token-abc123",
    +    "expires_in": 3600
    +  }
    +}
    +
    +

    Refresh Token

    +

    Obtain new access token using refresh token.

    +

    Endpoint: POST /refresh

    +

    Request:

    +
    {
    +  "refresh_token": "refresh-token-abc123"
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9...",
    +    "expires_in": 3600
    +  }
    +}
    +
    +

    Logout

    +

    Invalidate current session and tokens.

    +

    Endpoint: POST /logout

    +

    Request:

    +
    {
    +  "refresh_token": "refresh-token-abc123"
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "message": "Logged out successfully"
    +}
    +
    +

    Create User

    +

    Create a new user account (admin only).

    +

    Endpoint: POST /users

    +

    Request:

    +
    {
    +  "username": "developer",
    +  "email": "[dev@example.com](mailto:dev@example.com)",
    +  "password": "secure-password",
    +  "roles": ["developer"]
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "user_id": "user-456",
    +    "username": "developer",
    +    "created_at": "2026-01-16T10:30:00Z"
    +  }
    +}
    +
    +

    List Users

    +

    List all users (admin only).

    +

    Endpoint: GET /users

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "users": [
    +      {
    +        "user_id": "user-123",
    +        "username": "admin",
    +        "email": "[admin@example.com](mailto:admin@example.com)",
    +        "roles": ["admin"],
    +        "created_at": "2026-01-01T00:00:00Z"
    +      }
    +    ],
    +    "total": 1
    +  }
    +}
    +
    +

    Vault Service API

    +

    Secrets management and dynamic credentials.

    +

    Base Path: /api/v1/vault

    +

    Store Secret

    +

    Store a new secret.

    +

    Endpoint: POST /secrets

    +

    Request:

    +
    {
    +  "path": "database/postgres/password",
    +  "data": {
    +    "username": "dbuser",
    +    "password": "db-password"
    +  },
    +  "metadata": {
    +    "description": "PostgreSQL credentials"
    +  }
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "path": "database/postgres/password",
    +    "version": 1,
    +    "created_at": "2026-01-16T10:30:00Z"
    +  }
    +}
    +
    +

    Retrieve Secret

    +

    Retrieve a stored secret.

    +

    Endpoint: GET /secrets/{path}

    +

    Query Parameters:

    +
      +
    • version (optional) - Specific version (default: latest)
    • +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "path": "database/postgres/password",
    +    "version": 1,
    +    "data": {
    +      "username": "dbuser",
    +      "password": "db-password"
    +    },
    +    "metadata": {
    +      "description": "PostgreSQL credentials"
    +    },
    +    "created_at": "2026-01-16T10:30:00Z"
    +  }
    +}
    +
    +

    List Secrets

    +

    List all secret paths.

    +

    Endpoint: GET /secrets

    +

    Query Parameters:

    +
      +
    • prefix (optional) - Filter by path prefix
    • +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "secrets": [
    +      {
    +        "path": "database/postgres/password",
    +        "versions": 1,
    +        "updated_at": "2026-01-16T10:30:00Z"
    +      }
    +    ],
    +    "total": 1
    +  }
    +}
    +
    +

    Delete Secret

    +

    Delete a secret (soft delete, preserves versions).

    +

    Endpoint: DELETE /secrets/{path}

    +

    Response:

    +
    {
    +  "status": "success",
    +  "message": "Secret deleted successfully"
    +}
    +
    +

    Generate Dynamic Credentials

    +

    Generate temporary credentials for supported backends.

    +

    Endpoint: POST /dynamic/{backend}/generate

    +

    Request:

    +
    {
    +  "role": "readonly",
    +  "ttl": 3600
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "credentials": {
    +      "username": "v-readonly-abc123",
    +      "password": "temporary-password"
    +    },
    +    "ttl": 3600,
    +    "expires_at": "2026-01-16T11:30:00Z"
    +  }
    +}
    +
    +

    KMS Service API

    +

    Key management, encryption, and decryption.

    +

    Base Path: /api/v1/kms

    +

    Encrypt Data

    +

    Encrypt data using a managed key.

    +

    Endpoint: POST /encrypt

    +

    Request:

    +
    {
    +  "key_id": "master-key-01",
    +  "plaintext": "sensitive data",
    +  "context": {
    +    "purpose": "config-encryption"
    +  }
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "ciphertext": "AQICAHh...",
    +    "key_id": "master-key-01"
    +  }
    +}
    +
    +

    Decrypt Data

    +

    Decrypt previously encrypted data.

    +

    Endpoint: POST /decrypt

    +

    Request:

    +
    {
    +  "ciphertext": "AQICAHh...",
    +  "context": {
    +    "purpose": "config-encryption"
    +  }
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "plaintext": "sensitive data",
    +    "key_id": "master-key-01"
    +  }
    +}
    +
    +

    Create Key

    +

    Create a new encryption key.

    +

    Endpoint: POST /keys

    +

    Request:

    +
    {
    +  "key_id": "app-key-01",
    +  "algorithm": "AES-256-GCM",
    +  "metadata": {
    +    "description": "Application encryption key"
    +  }
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "key_id": "app-key-01",
    +    "algorithm": "AES-256-GCM",
    +    "created_at": "2026-01-16T10:30:00Z"
    +  }
    +}
    +
    +

    List Keys

    +

    List all encryption keys.

    +

    Endpoint: GET /keys

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "keys": [
    +      {
    +        "key_id": "master-key-01",
    +        "algorithm": "AES-256-GCM",
    +        "state": "enabled",
    +        "created_at": "2026-01-01T00:00:00Z"
    +      }
    +    ],
    +    "total": 1
    +  }
    +}
    +
    +

    Rotate Key

    +

    Rotate an encryption key.

    +

    Endpoint: POST /keys/{key_id}/rotate

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "key_id": "master-key-01",
    +    "version": 2,
    +    "rotated_at": "2026-01-16T10:30:00Z"
    +  }
    +}
    +
    +

    Audit Service API

    +

    Audit logging, compliance tracking, and event queries.

    +

    Base Path: /api/v1/audit

    +

    Query Audit Logs

    +

    Query audit events with filtering.

    +

    Endpoint: GET /logs

    +

    Query Parameters:

    +
      +
    • user (optional) - Filter by user ID
    • +
    • action (optional) - Filter by action type
    • +
    • resource (optional) - Filter by resource type
    • +
    • start_time (optional) - Start timestamp
    • +
    • end_time (optional) - End timestamp
    • +
    • limit (optional) - Maximum results (default: 100)
    • +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "events": [
    +      {
    +        "event_id": "evt-abc123",
    +        "timestamp": "2026-01-16T10:30:00Z",
    +        "user": "admin",
    +        "action": "workflow.submit",
    +        "resource": "wf-20260116-abc123",
    +        "result": "success",
    +        "metadata": {
    +          "workflow_name": "deploy-cluster"
    +        }
    +      }
    +    ],
    +    "total": 1
    +  }
    +}
    +
    +

    Export Audit Logs

    +

    Export audit logs in various formats.

    +

    Endpoint: GET /export

    +

    Query Parameters:

    +
      +
    • format - Export format: jso| n cs| v syslo| g ce| f splunk
    • +
    • start_time - Start timestamp
    • +
    • end_time - End timestamp
    • +
    +

    Response: File download in requested format

    +

    Get Compliance Report

    +

    Generate compliance report for specific period.

    +

    Endpoint: GET /compliance

    +

    Query Parameters:

    +
      +
    • standard - Compliance standard: gdp| r soc| 2 iso27001
    • +
    • start_time - Report start time
    • +
    • end_time - Report end time
    • +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "standard": "soc2",
    +    "period": {
    +      "start": "2026-01-01T00:00:00Z",
    +      "end": "2026-01-16T23:59:59Z"
    +    },
    +    "controls": [
    +      {
    +        "control_id": "CC6.1",
    +        "status": "compliant",
    +        "evidence_count": 150
    +      }
    +    ],
    +    "summary": {
    +      "total_controls": 10,
    +      "compliant": 9,
    +      "non_compliant": 1
    +    }
    +  }
    +}
    +
    +

    Policy Service API

    +

    Authorization policy management (Cedar policies).

    +

    Base Path: /api/v1/policy

    +

    Evaluate Policy

    +

    Evaluate authorization request against policies.

    +

    Endpoint: POST /evaluate

    +

    Request:

    +
    {
    +  "principal": "User::\"admin\"",
    +  "action": "Action::\"workflow.submit\"",
    +  "resource": "Workflow::\"deploy-cluster\"",
    +  "context": {
    +    "time": "2026-01-16T10:30:00Z"
    +  }
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "decision": "allow",
    +    "policies": ["admin-full-access"],
    +    "diagnostics": {
    +      "reason": "User has admin role"
    +    }
    +  }
    +}
    +
    +

    Create Policy

    +

    Create a new authorization policy.

    +

    Endpoint: POST /policies

    +

    Request:

    +
    {
    +  "policy_id": "developer-read-only",
    +  "content": "permit(principal in Role::\"developer\", action == Action::\"read\", resource);",
    +  "description": "Developers have read-only access"
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "policy_id": "developer-read-only",
    +    "created_at": "2026-01-16T10:30:00Z"
    +  }
    +}
    +
    +

    List Policies

    +

    List all authorization policies.

    +

    Endpoint: GET /policies

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "policies": [
    +      {
    +        "policy_id": "admin-full-access",
    +        "description": "Admins have full access",
    +        "created_at": "2026-01-01T00:00:00Z"
    +      }
    +    ],
    +    "total": 1
    +  }
    +}
    +
    +

    Update Policy

    +

    Update an existing policy (hot reload).

    +

    Endpoint: PUT /policies/{policy_id}

    +

    Request:

    +
    {
    +  "content": "permit(principal in Role::\"developer\", action == Action::\"read\", resource);"
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "policy_id": "developer-read-only",
    +    "updated_at": "2026-01-16T10:30:00Z",
    +    "reloaded": true
    +  }
    +}
    +
    +

    Delete Policy

    +

    Delete an authorization policy.

    +

    Endpoint: DELETE /policies/{policy_id}

    +

    Response:

    +
    {
    +  "status": "success",
    +  "message": "Policy deleted successfully"
    +}
    +
    +

    Gateway Service API

    +

    API gateway, routing, and rate limiting.

    +

    Base Path: /api/v1/gateway

    +

    Get Route Configuration

    +

    Retrieve current routing configuration.

    +

    Endpoint: GET /routes

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "routes": [
    +      {
    +        "path": "/api/v1/orchestrator/*",
    +        "target": " [http://orchestrator:8080",](http://orchestrator:8080",)
    +        "methods": ["GET", "POST", "PUT", "DELETE"],
    +        "auth_required": true
    +      }
    +    ]
    +  }
    +}
    +
    +

    Update Routes

    +

    Update gateway routing (hot reload).

    +

    Endpoint: PUT /routes

    +

    Request:

    +
    {
    +  "routes": [
    +    {
    +      "path": "/api/v1/custom/*",
    +      "target": " [http://custom-service:9000",](http://custom-service:9000",)
    +      "methods": ["GET", "POST"],
    +      "auth_required": true
    +    }
    +  ]
    +}
    +
    +

    Response:

    +
    {
    +  "status": "success",
    +  "message": "Routes updated successfully"
    +}
    +
    +

    Get Rate Limits

    +

    Retrieve rate limiting configuration.

    +

    Endpoint: GET /rate-limits

    +

    Response:

    +
    {
    +  "status": "success",
    +  "data": {
    +    "global": {
    +      "requests_per_minute": 100,
    +      "burst": 20
    +    },
    +    "per_user": {
    +      "requests_per_minute": 60,
    +      "burst": 10
    +    }
    +  }
    +}
    +
    +

    Error Codes

    +

    Common error codes returned by the API:

    +
    + + + + + + + + + + + +
    CodeDescription
    ERR_AUTH_INVALIDInvalid authentication credentials
    ERR_AUTH_EXPIREDToken expired
    ERR_AUTH_MFA_REQUIREDMFA verification required
    ERR_FORBIDDENInsufficient permissions
    ERR_NOT_FOUNDResource not found
    ERR_CONFLICTResource conflict
    ERR_VALIDATIONInvalid request parameters
    ERR_RATE_LIMITRate limit exceeded
    ERR_WORKFLOW_FAILEDWorkflow execution failed
    ERR_SERVICE_UNAVAILABLEService temporarily unavailable
    ERR_INTERNALInternal server error
    +
    +

    Rate Limiting Headers

    +

    All responses include rate limiting headers:

    +
    X-RateLimit-Limit: 100
    +X-RateLimit-Remaining: 95
    +X-RateLimit-Reset: 1642334400
    +X-RateLimit-Retry-After: 60
    +
    +

    Pagination

    +

    List endpoints support pagination using offset-based pagination:

    +

    Request:

    +
    GET /api/v1/workflows?limit=50&offset=100
    +
    +

    Response includes:

    +
    {
    +  "data": { ... },
    +  "total": 500,
    +  "limit": 50,
    +  "offset": 100,
    +  "has_more": true
    +}
    +
    +

    Webhooks

    +

    Platform supports webhook notifications for async operations:

    +

    Webhook Payload:

    +
    {
    +  "event": "workflow.completed",
    +  "timestamp": "2026-01-16T10:30:00Z",
    +  "data": {
    +    "workflow_id": "wf-20260116-abc123",
    +    "state": "completed"
    +  },
    +  "signature": "sha256=abc123..."
    +}
    +
    +

    Configure webhooks via Control Center API.

    + + +

    CLI Commands Reference

    +

    Complete command-line interface documentation for the Provisioning platform covering 111+ commands across 11 domain modules.

    +

    Command Structure

    +

    All commands follow the pattern:

    +
    provisioning <domain> <action> [resource] [flags]
    +
    +

    Common Flags (available on most commands):

    +
      +
    • --yes - Skip confirmation prompts (auto-yes)
    • +
    • --check - Dry-run mode, show what would happen without executing
    • +
    • --wait - Wait for async operations to complete
    • +
    • --format <jso| n yam| l table> - Output format (default: table)
    • +
    • --verbose - Detailed output with debug information
    • +
    • --quiet - Minimal output, errors only
    • +
    • --help - Show command help
    • +
    +

    Quick Reference

    +

    Shortcuts - Single-letter aliases for common domains:

    +
    provisioning s = provisioning server
    +provisioning t = provisioning taskserv
    +provisioning c = provisioning cluster
    +provisioning w = provisioning workspace
    +provisioning cfg = provisioning config
    +provisioning b = provisioning batch
    +
    +

    Help Navigation - Bi-directional help system:

    +
    provisioning help server = provisioning server help
    +provisioning help ws = provisioning workspace help
    +
    +

    Domain Modules

    +

    The CLI is organized into 11 domain modules:

    +
      +
    1. Infrastructure - Server, provider, network management
    2. +
    3. Orchestration - Workflow, batch, task execution
    4. +
    5. Configuration - Config validation and management
    6. +
    7. Workspace - Multi-workspace operations
    8. +
    9. Development - Extensions and customization
    10. +
    11. Utilities - Tools and helpers
    12. +
    13. Generation - Schema and config generation
    14. +
    15. Authentication - Auth, MFA, users
    16. +
    17. Security - Vault, KMS, audit, policies
    18. +
    19. Platform - Service control and monitoring
    20. +
    21. Guides - Interactive documentation
    22. +
    +

    Infrastructure Commands

    +

    Manage cloud infrastructure, servers, and resources.

    +

    Server Commands

    +

    provisioning server create [NAME]

    +

    Create a new server or servers from infrastructure configuration.

    +

    Flags:

    +
      +
    • --infra <file> - Nickel infrastructure file
    • +
    • --plan <size> - Server plan (small/medium/large/xlarge)
    • +
    • --provider <name> - Cloud provider (upcloud/aws/local)
    • +
    • --zone <name> - Availability zone
    • +
    • --ssh-key <path> - SSH public key path
    • +
    • --tags <key=value> - Server tags (repeatable)
    • +
    • --yes - Skip confirmation
    • +
    • --check - Dry-run mode
    • +
    • --wait - Wait for server creation
    • +
    +

    Examples:

    +
    # Create server from infrastructure file
    +provisioning server create --infra my-cluster.ncl --yes --wait
    +
    +# Create single server interactively
    +provisioning server create web-01 --plan medium --provider upcloud
    +
    +# Check what would be created (dry-run)
    +provisioning server create --infra cluster.ncl --check
    +
    +

    provisioning server delete [NAM| E ID]

    +

    Delete servers.

    +

    Flags:

    +
      +
    • --all - Delete all servers in current infrastructure
    • +
    • --force - Force deletion without cleanup
    • +
    • --yes - Skip confirmation
    • +
    +

    Examples:

    +
    # Delete specific server
    +provisioning server delete web-01 --yes
    +
    +# Delete all servers
    +provisioning server delete --all --yes
    +
    +

    provisioning server list

    +

    List all servers in the current workspace.

    +

    Flags:

    +
      +
    • --provider <name> - Filter by provider
    • +
    • --state <state> - Filter by state (running/stopped/error)
    • +
    • --format <format> - Output format
    • +
    +

    Examples:

    +
    # List all servers
    +provisioning server list
    +
    +# List only running servers
    +provisioning server list --state running --format json
    +
    +

    provisioning server status [NAM| E ID]

    +

    Get detailed server status.

    +

    Examples:

    +
    provisioning server status web-01
    +provisioning server status --all
    +
    +

    provisioning server ssh [NAM| E ID]

    +

    SSH into a server.

    +

    Examples:

    +
    provisioning server ssh web-01
    +provisioning server ssh web-01 -- "systemctl status kubelet"
    +
    +

    Provider Commands

    +

    provisioning provider list

    +

    List available cloud providers.

    +

    provisioning provider validate <NAME>

    +

    Validate provider configuration and credentials.

    +

    Examples:

    +
    provisioning provider validate upcloud
     provisioning provider validate aws
    -
    -# Validate platform service config
    -provisioning platform validate orchestrator
    -
    -# Validate with detailed output
    -provisioning workspace config validate --verbose
     
    -

    Programmatic Usage

    -
    use provisioning/core/nulib/lib_provisioning/config/schema_validator.nu *
    -
    -# Load config
    -let config = (open ~/workspaces/my-project/config/provisioning.yaml | from yaml)
    -
    -# Validate against schema
    -let result = (validate-workspace-config $config)
    -
    -# Check results
    -if $result.valid {
    -  print "✅ Configuration is valid"
    -} else {
    -  print "❌ Configuration has errors:"
    -  for error in $result.errors {
    -    print $"  • ($error.message)"
    -  }
    -}
    -
    -# Display warnings
    -if ($result.warnings | length) > 0 {
    -  print "⚠️  Warnings:"
    -  for warning in $result.warnings {
    -    print $"  • ($warning.message)"
    -  }
    -}
    +

    provisioning provider zones <NAME>

    +

    List available zones for a provider.

    +

    Examples:

    +
    provisioning provider zones upcloud
    +provisioning provider zones aws --region us-east-1
     
    -

    Pretty Print Results

    -
    # Validate and print formatted results
    -let result = (validate-workspace-config $config)
    -print-validation-results $result
    -
    -

    Schema Examples

    -

    Workspace Schema

    -

    File: /Users/Akasha/project-provisioning/provisioning/config/workspace.schema.toml

    -
    [required]
    -fields = ["workspace", "paths"]
    -
    -[fields.workspace]
    -type = "record"
    -
    -[fields.workspace.name]
    -type = "string"
    -pattern = "^[a-z][a-z0-9-]*$"
    -
    -[fields.workspace.version]
    -type = "string"
    -pattern = "^\\d+\\.\\d+\\.\\d+$"
    -
    -[fields.paths]
    -type = "record"
    -
    -[fields.paths.base]
    -type = "string"
    -
    -[fields.paths.infra]
    -type = "string"
    -
    -[fields.debug]
    -type = "record"
    -
    -[fields.debug.enabled]
    -type = "bool"
    -
    -[fields.debug.log_level]
    -type = "string"
    -enum = ["debug", "info", "warn", "error"]
    -
    -

    Provider Schema (AWS)

    -

    File: /Users/Akasha/project-provisioning/provisioning/extensions/providers/aws/config.schema.toml

    -
    [required]
    -fields = ["provider", "credentials"]
    -
    -[fields.provider]
    -type = "record"
    -
    -[fields.provider.name]
    -type = "string"
    -enum = ["aws"]
    -
    -[fields.provider.region]
    -type = "string"
    -pattern = "^[a-z]{2}-[a-z]+-\\d+$"
    -
    -[fields.provider.enabled]
    -type = "bool"
    -
    -[fields.credentials]
    -type = "record"
    -
    -[fields.credentials.type]
    -type = "string"
    -enum = ["environment", "file", "iam_role"]
    -
    -[fields.compute]
    -type = "record"
    -
    -[fields.compute.default_instance_type]
    -type = "string"
    -
    -[fields.compute.default_ami]
    -type = "string"
    -pattern = "^ami-[a-f0-9]{8,17}$"
    -
    -[fields.network]
    -type = "record"
    -
    -[fields.network.vpc_id]
    -type = "string"
    -pattern = "^vpc-[a-f0-9]{8,17}$"
    -
    -[fields.network.subnet_id]
    -type = "string"
    -pattern = "^subnet-[a-f0-9]{8,17}$"
    -
    -[deprecated]
    -fields = ["old_region_field"]
    -
    -[deprecated_replacements]
    -old_region_field = "provider.region"
    -
    -

    Platform Service Schema (Orchestrator)

    -

    File: /Users/Akasha/project-provisioning/provisioning/platform/orchestrator/config.schema.toml

    -
    [required]
    -fields = ["service", "server"]
    -
    -[fields.service]
    -type = "record"
    -
    -[fields.service.name]
    -type = "string"
    -enum = ["orchestrator"]
    -
    -[fields.service.enabled]
    -type = "bool"
    -
    -[fields.server]
    -type = "record"
    -
    -[fields.server.host]
    -type = "string"
    -
    -[fields.server.port]
    -type = "int"
    -min = 1024
    -max = 65535
    -
    -[fields.workers]
    -type = "int"
    -min = 1
    -max = 32
    -
    -[fields.queue]
    -type = "record"
    -
    -[fields.queue.max_size]
    -type = "int"
    -min = 100
    -max = 10000
    -
    -[fields.queue.storage_path]
    -type = "string"
    -
    -

    KMS Service Schema

    -

    File: /Users/Akasha/project-provisioning/provisioning/core/services/kms/config.schema.toml

    -
    [required]
    -fields = ["kms", "encryption"]
    -
    -[fields.kms]
    -type = "record"
    -
    -[fields.kms.enabled]
    -type = "bool"
    -
    -[fields.kms.provider]
    -type = "string"
    -enum = ["aws_kms", "gcp_kms", "azure_kv", "vault", "local"]
    -
    -[fields.encryption]
    -type = "record"
    -
    -[fields.encryption.algorithm]
    -type = "string"
    -enum = ["AES-256-GCM", "ChaCha20-Poly1305"]
    -
    -[fields.encryption.key_rotation_days]
    -type = "int"
    -min = 30
    -max = 365
    -
    -[fields.vault]
    -type = "record"
    -
    -[fields.vault.address]
    -type = "string"
    -pattern = "^https?://.*$"
    -
    -[fields.vault.token_path]
    -type = "string"
    -
    -[deprecated]
    -fields = ["old_kms_type"]
    -
    -[deprecated_replacements]
    -old_kms_type = "kms.provider"
    -
    -

    Validation Workflow

    -

    1. Development

    -
    # Create new config
    -vim ~/workspaces/dev/config/provisioning.yaml
    -
    -# Validate immediately
    -provisioning workspace config validate
    -
    -# Fix errors and revalidate
    -vim ~/workspaces/dev/config/provisioning.yaml
    -provisioning workspace config validate
    -
    -

    2. CI/CD Pipeline

    -
    # GitLab CI
    -validate-config:
    -  stage: validate
    -  script:
    -    - provisioning workspace config validate
    -    - provisioning provider validate aws
    -    - provisioning provider validate upcloud
    -    - provisioning platform validate orchestrator
    -  only:
    -    changes:
    -      - "*/config/**/*"
    -
    -

    3. Pre-Deployment

    -
    # Validate all configurations before deployment
    -provisioning workspace config validate --verbose
    -provisioning provider validate --all
    -provisioning platform validate --all
    -
    -# If valid, proceed with deployment
    -if [[ $? -eq 0 ]]; then
    -  provisioning deploy --workspace production
    -fi
    -
    -

    Error Messages

    -

    Clear Error Format

    -
    ❌ Validation failed
    -
    -Errors:
    -  • Required field missing: workspace.name
    -  • Field port type mismatch: expected int, got string
    -  • Field environment must be one of: dev, staging, prod
    -  • Field port must be >= 1024
    -  • Field email does not match pattern: ^[a-zA-Z0-9._%+-]+@.*$
    -
    -⚠️  Warnings:
    -  • Field old_field is deprecated. Use new_field instead.
    -
    -

    Error Details

    -

    Each error includes:

    +

    Orchestration Commands

    +

    Execute workflows, batch operations, and manage tasks.

    +

    Workflow Commands

    +

    provisioning workflow submit <FILE>

    +

    Submit a workflow for execution.

    +

    Flags:

      -
    • field: Which field has the error
    • -
    • type: Error type (missing_required, type_mismatch, invalid_enum, etc.)
    • -
    • message: Human-readable description
    • -
    • Additional context: Expected values, patterns, ranges
    • +
    • --priority <level> - Priority (low/normal/high/critical)
    • +
    • --checkpoint - Enable checkpoint recovery
    • +
    • --wait - Wait for completion
    -

    Common Validation Patterns

    -

    Pattern 1: Hostname Validation

    -
    [fields.hostname]
    -type = "string"
    -pattern = "^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$"
    -
    -

    Pattern 2: Email Validation

    -
    [fields.email]
    -type = "string"
    -pattern = "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"
    -
    -

    Pattern 3: Semantic Version

    -
    [fields.version]
    -type = "string"
    -pattern = "^\\d+\\.\\d+\\.\\d+(-[a-zA-Z0-9]+)?$"
    -
    -

    Pattern 4: URL Validation

    -
    [fields.url]
    -type = "string"
    -pattern = "^https?://[a-zA-Z0-9.-]+(:[0-9]+)?(/.*)?$"
    -
    -

    Pattern 5: IPv4 Address

    -
    [fields.ip_address]
    -type = "string"
    -pattern = "^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$"
    -
    -

    Pattern 6: AWS Resource ID

    -
    [fields.instance_id]
    -type = "string"
    -pattern = "^i-[a-f0-9]{8,17}$"
    +

    Examples:

    +
    # Submit workflow and wait
    +provisioning workflow submit deploy.ncl --wait
     
    -[fields.ami_id]
    -type = "string"
    -pattern = "^ami-[a-f0-9]{8,17}$"
    +# Submit with high priority
    +provisioning workflow submit urgent.ncl --priority high
    +
    +

    provisioning workflow status <ID>

    +

    Get workflow execution status.

    +

    Examples:

    +
    provisioning workflow status wf-20260116-abc123
    +
    +

    provisioning workflow list

    +

    List workflows.

    +

    Flags:

    +
      +
    • --state <state> - Filter by state (queued/running/completed/failed)
    • +
    • --limit <num> - Maximum results
    • +
    +

    Examples:

    +
    # List running workflows
    +provisioning workflow list --state running
     
    -[fields.vpc_id]
    -type = "string"
    -pattern = "^vpc-[a-f0-9]{8,17}$"
    +# List failed workflows
    +provisioning workflow list --state failed --format json
     
    -

    Testing Validation

    -

    Unit Tests

    -
    # Run validation test suite
    -nu provisioning/tests/config_validation_tests.nu
    +

    provisioning workflow cancel <ID>

    +

    Cancel a running workflow.

    +

    Examples:

    +
    provisioning workflow cancel wf-20260116-abc123 --yes
     
    -

    Integration Tests

    -
    # Test with real configs
    -provisioning test validate --workspace dev
    -provisioning test validate --workspace staging
    -provisioning test validate --workspace prod
    -
    -

    Custom Validation

    -
    # Create custom validation function
    -def validate-custom-config [config: record] {
    -  let result = (validate-workspace-config $config)
    +

    provisioning workflow resume <ID>

    +

    Resume a failed workflow from checkpoint.

    +

    Flags:

    +
      +
    • --from <checkpoint> - Resume from specific checkpoint
    • +
    • --skip-failed - Skip failed tasks
    • +
    +

    Examples:

    +
    # Resume from last checkpoint
    +provisioning workflow resume wf-20260116-abc123
     
    -  # Add custom business logic validation
    -  if ($config.workspace.name | str starts-with "prod") {
    -    if not $config.debug.enabled == false {
    -      $result.errors = ($result.errors | append {
    -        field: "debug.enabled"
    -        type: "custom"
    -        message: "Debug must be disabled in production"
    -      })
    +# Resume from specific checkpoint
    +provisioning workflow resume wf-20260116-abc123 --from create-servers
    +
    +

    provisioning workflow logs <ID>

    +

    View workflow logs.

    +

    Flags:

    +
      +
    • --task <id> - Show logs for specific task
    • +
    • --follow - Stream logs in real-time
    • +
    • --lines <num> - Number of lines (default: 100)
    • +
    +

    Examples:

    +
    # View all workflow logs
    +provisioning workflow logs wf-20260116-abc123
    +
    +# Follow logs in real-time
    +provisioning workflow logs wf-20260116-abc123 --follow
    +
    +# View specific task logs
    +provisioning workflow logs wf-20260116-abc123 --task install-k8s
    +
    +

    Batch Commands

    +

    provisioning batch submit <FILE>

    +

    Submit a batch workflow with multiple operations.

    +

    Flags:

    +
      +
    • --parallel <num> - Maximum parallel operations
    • +
    • --wait - Wait for completion
    • +
    +

    Examples:

    +
    # Submit batch workflow
    +provisioning batch submit multi-region.ncl --parallel 3 --wait
    +
    +

    provisioning batch status <ID>

    +

    Get batch workflow status with progress.

    +

    provisioning batch monitor <ID>

    +

    Monitor batch execution in real-time.

    +

    Configuration Commands

    +

    Validate and manage configuration.

    +

    provisioning config validate

    +

    Validate current configuration.

    +

    Flags:

    +
      +
    • --infra <file> - Specific infrastructure file
    • +
    • --all - Validate all configuration files
    • +
    +

    Examples:

    +
    # Validate workspace configuration
    +provisioning config validate
    +
    +# Validate specific infrastructure
    +provisioning config validate --infra cluster.ncl
    +
    +

    provisioning config show

    +

    Display effective configuration.

    +

    Flags:

    +
      +
    • --key <path> - Show specific config value
    • +
    • --format <format> - Output format
    • +
    +

    Examples:

    +
    # Show all configuration
    +provisioning config show
    +
    +# Show specific value
    +provisioning config show --key paths.base
    +
    +# Export as JSON
    +provisioning config show --format json > config.json
    +
    +

    provisioning config reload

    +

    Reload configuration from files.

    +

    provisioning config diff

    +

    Show configuration differences between environments.

    +

    Flags:

    +
      +
    • --from <env> - Source environment
    • +
    • --to <env> - Target environment
    • +
    +

    Workspace Commands

    +

    Manage isolated workspaces.

    +

    provisioning workspace init <NAME>

    +

    Initialize a new workspace.

    +

    Flags:

    +
      +
    • --template <name> - Workspace template
    • +
    • --path <path> - Custom workspace path
    • +
    +

    Examples:

    +
    # Create workspace from default template
    +provisioning workspace init my-project
    +
    +# Create from template
    +provisioning workspace init prod --template production
    +
    +

    provisioning workspace switch <NAME>

    +

    Switch to a different workspace.

    +

    Examples:

    +
    provisioning workspace switch production
    +provisioning workspace switch dev
    +
    +

    provisioning workspace list

    +

    List all workspaces.

    +

    Flags:

    +
      +
    • --format <format> - Output format
    • +
    +

    Examples:

    +
    provisioning workspace list
    +provisioning workspace list --format json
    +
    +

    provisioning workspace current

    +

    Show current active workspace.

    +

    provisioning workspace delete <NAME>

    +

    Delete a workspace.

    +

    Flags:

    +
      +
    • --force - Force deletion without cleanup
    • +
    • --yes - Skip confirmation
    • +
    +

    Development Commands

    +

    Develop custom extensions.

    +

    provisioning extension create <TYPE> <NAME>

    +

    Create a new extension.

    +

    Types: provider, taskserv, cluster, workflow

    +

    Flags:

    +
      +
    • --template <name> - Extension template
    • +
    +

    Examples:

    +
    # Create new task service
    +provisioning extension create taskserv my-service
    +
    +# Create new provider
    +provisioning extension create provider my-cloud --template basic
    +
    +

    provisioning extension validate <PATH>

    +

    Validate extension structure and configuration.

    +

    provisioning extension package <PATH>

    +

    Package extension for distribution (OCI format).

    +

    Flags:

    +
      +
    • --version <version> - Extension version
    • +
    • --output <path> - Output file path
    • +
    +

    Examples:

    +
    provisioning extension package ./my-service --version 1.0.0
    +
    +

    provisioning extension install <NAM| E PATH>

    +

    Install an extension from registry or file.

    +

    Examples:

    +
    # Install from registry
    +provisioning extension install kubernetes
    +
    +# Install from local file
    +provisioning extension install ./my-service.tar.gz
    +
    +

    provisioning extension list

    +

    List installed extensions.

    +

    Flags:

    +
      +
    • --type <type> - Filter by type
    • +
    • --available - Show available (not installed)
    • +
    +

    Utility Commands

    +

    Helper commands and tools.

    +

    provisioning version

    +

    Show platform version information.

    +

    Flags:

    +
      +
    • --check - Check for updates
    • +
    +

    Examples:

    +
    provisioning version
    +provisioning version --check
    +
    +

    provisioning health

    +

    Check platform health.

    +

    Flags:

    +
      +
    • --service <name> - Check specific service
    • +
    +

    Examples:

    +
    # Check all services
    +provisioning health
    +
    +# Check specific service
    +provisioning health --service orchestrator
    +
    +

    provisioning diagnostics

    +

    Run platform diagnostics.

    +

    Flags:

    +
      +
    • --output <path> - Save diagnostic report
    • +
    +

    Examples:

    +
    provisioning diagnostics --output diagnostics.json
    +
    +

    provisioning setup versions

    +

    Generate versions file from Nickel schemas.

    +

    Examples:

    +
    # Generate /provisioning/core/versions file
    +provisioning setup versions
    +
    +# Use in shell scripts
    +source /provisioning/core/versions
    +echo "Nushell version: $NU_VERSION"
    +
    +

    Generation Commands

    +

    Generate schemas, configurations, and infrastructure code.

    +

    provisioning generate config <TYPE>

    +

    Generate configuration templates.

    +

    Types: workspace, infrastructure, provider

    +

    Flags:

    +
      +
    • --output <path> - Output file path
    • +
    • --format <format> - Output format (nickel/yaml/toml)
    • +
    +

    Examples:

    +
    # Generate workspace config
    +provisioning generate config workspace --output config.ncl
    +
    +# Generate infrastructure template
    +provisioning generate config infrastructure --format nickel
    +
    +

    provisioning generate schema <NAME>

    +

    Generate Nickel schema from existing configuration.

    +

    provisioning generate docs

    +

    Generate documentation from schemas.

    +

    Authentication Commands

    +

    Manage authentication and user accounts.

    +

    provisioning auth login

    +

    Authenticate to the platform.

    +

    Flags:

    +
      +
    • --user <username> - Username
    • +
    • --password <password> - Password (prompt if not provided)
    • +
    • --mfa <code> - MFA code
    • +
    +

    Examples:

    +
    # Interactive login
    +provisioning auth login --user admin
    +
    +# Login with MFA
    +provisioning auth login --user admin --mfa 123456
    +
    +

    provisioning auth logout

    +

    Logout and invalidate tokens.

    +

    provisioning auth token

    +

    Display or refresh authentication token.

    +

    Flags:

    +
      +
    • --refresh - Refresh the token
    • +
    +

    provisioning auth user create <USERNAME>

    +

    Create a new user (admin only).

    +

    Flags:

    +
      +
    • --email <email> - User email
    • +
    • --roles <roles> - Comma-separated roles
    • +
    +

    Examples:

    +
    provisioning auth user create developer --email [dev@example.com](mailto:dev@example.com) --roles developer,operator
    +
    +

    provisioning auth user list

    +

    List all users (admin only).

    +

    provisioning auth user delete <USERNAME>

    +

    Delete a user (admin only).

    +

    Security Commands

    +

    Manage secrets, encryption, audit logs, and policies.

    +

    Vault Commands

    +

    provisioning vault store <PATH>

    +

    Store a secret.

    +

    Flags:

    +
      +
    • --value <value> - Secret value
    • +
    • --file <path> - Read value from file
    • +
    +

    Examples:

    +
    # Store secret interactively
    +provisioning vault store database/postgres/password
    +
    +# Store from value
    +provisioning vault store api/key --value "secret-value"
    +
    +# Store from file
    +provisioning vault store ssh/key --file ~/.ssh/id_rsa
    +
    +

    provisioning vault get <PATH>

    +

    Retrieve a secret.

    +

    Flags:

    +
      +
    • --version <num> - Specific version
    • +
    • --output <path> - Save to file
    • +
    +

    Examples:

    +
    # Get latest secret
    +provisioning vault get database/postgres/password
    +
    +# Get specific version
    +provisioning vault get database/postgres/password --version 2
    +
    +

    provisioning vault list

    +

    List all secret paths.

    +

    Flags:

    +
      +
    • --prefix <prefix> - Filter by path prefix
    • +
    +

    provisioning vault delete <PATH>

    +

    Delete a secret.

    +

    KMS Commands

    +

    provisioning kms encrypt <FILE>

    +

    Encrypt a file or data.

    +

    Flags:

    +
      +
    • --key <id> - Key ID
    • +
    • --output <path> - Output file
    • +
    +

    Examples:

    +
    # Encrypt file
    +provisioning kms encrypt config.yaml --key master-key --output config.enc
    +
    +# Encrypt string
    +echo "sensitive data" | provisioning kms encrypt --key master-key
    +
    +

    provisioning kms decrypt <FILE>

    +

    Decrypt encrypted data.

    +

    Flags:

    +
      +
    • --output <path> - Output file
    • +
    +

    provisioning kms create-key <ID>

    +

    Create a new encryption key.

    +

    Flags:

    +
      +
    • --algorithm <algo> - Algorithm (default: AES-256-GCM)
    • +
    +

    provisioning kms list-keys

    +

    List all encryption keys.

    +

    provisioning kms rotate-key <ID>

    +

    Rotate an encryption key.

    +

    Audit Commands

    +

    provisioning audit query

    +

    Query audit logs.

    +

    Flags:

    +
      +
    • --user <user> - Filter by user
    • +
    • --action <action> - Filter by action
    • +
    • --resource <resource> - Filter by resource
    • +
    • --start <time> - Start time
    • +
    • --end <time> - End time
    • +
    • --limit <num> - Maximum results
    • +
    +

    Examples:

    +
    # Query recent audit logs
    +provisioning audit query --limit 100
    +
    +# Query specific user actions
    +provisioning audit query --user admin --action workflow.submit
    +
    +# Query time range
    +provisioning audit query --start "2026-01-15" --end "2026-01-16"
    +
    +

    provisioning audit export

    +

    Export audit logs.

    +

    Flags:

    +
      +
    • --format <format> - Export format (json/csv/syslog/cef/splunk)
    • +
    • --start <time> - Start time
    • +
    • --end <time> - End time
    • +
    • --output <path> - Output file
    • +
    +

    Examples:

    +
    # Export as JSON
    +provisioning audit export --format json --output audit.json
    +
    +# Export last 7 days as CSV
    +provisioning audit export --format csv --start "7 days ago" --output audit.csv
    +
    +

    provisioning audit compliance

    +

    Generate compliance report.

    +

    Flags:

    +
      +
    • --standard <standard> - Compliance standard (gdpr/soc2/iso27001)
    • +
    • --start <time> - Report start time
    • +
    • --end <time> - Report end time
    • +
    +

    Policy Commands

    +

    provisioning policy create <ID>

    +

    Create an authorization policy.

    +

    Flags:

    +
      +
    • --content <cedar> - Cedar policy content
    • +
    • --file <path> - Load from file
    • +
    • --description <text> - Policy description
    • +
    +

    Examples:

    +
    # Create from file
    +provisioning policy create developer-read --file policies/read-only.cedar
    +
    +# Create inline
    +provisioning policy create admin-full --content "permit(principal in Role::\"admin\", action, resource);"
    +
    +

    provisioning policy list

    +

    List all authorization policies.

    +

    provisioning policy evaluate

    +

    Evaluate a policy decision.

    +

    Flags:

    +
      +
    • --principal <entity> - Principal entity
    • +
    • --action <action> - Action
    • +
    • --resource <resource> - Resource
    • +
    +

    Examples:

    +
    provisioning policy evaluate \
    +  --principal "User::\"admin\"" \
    +  --action "Action::\"workflow.submit\"" \
    +  --resource "Workflow::\"deploy\""
    +
    +

    provisioning policy update <ID>

    +

    Update an existing policy (hot reload).

    +

    provisioning policy delete <ID>

    +

    Delete an authorization policy.

    +

    Platform Commands

    +

    Control platform services.

    +

    provisioning platform service list

    +

    List all platform services and status.

    +

    provisioning platform service start <NAME>

    +

    Start a platform service.

    +

    Examples:

    +
    provisioning platform service start orchestrator
    +
    +

    provisioning platform service stop <NAME>

    +

    Stop a platform service.

    +

    Flags:

    +
      +
    • --force - Force stop without graceful shutdown
    • +
    • --timeout <seconds> - Graceful shutdown timeout
    • +
    +

    provisioning platform service restart <NAME>

    +

    Restart a platform service.

    +

    provisioning platform service health <NAME>

    +

    Check service health.

    +

    provisioning platform metrics

    +

    Display platform-wide metrics.

    +

    Flags:

    +
      +
    • --watch - Continuously update metrics
    • +
    +

    Guides Commands

    +

    Access interactive guides and documentation.

    +

    provisioning guide from-scratch

    +

    Complete walkthrough from installation to first deployment.

    +

    provisioning guide update

    +

    Guide for updating the platform.

    +

    provisioning guide customize

    +

    Guide for customizing extensions.

    +

    provisioning sc

    +

    Quick reference shortcut guide (fastest).

    +

    provisioning help [COMMAND]

    +

    Display help for any command.

    +

    Examples:

    +
    # General help
    +provisioning help
    +
    +# Command-specific help
    +provisioning help server create
    +provisioning server create --help  # Same result
    +
    +

    Task Service Commands

    +

    provisioning taskserv install <NAME>

    +

    Install a task service on servers.

    +

    Flags:

    +
      +
    • --cluster <name> - Target cluster
    • +
    • --version <version> - Specific version
    • +
    • --servers <names> - Target servers (comma-separated)
    • +
    • --wait - Wait for installation
    • +
    • --yes - Skip confirmation
    • +
    +

    Examples:

    +
    # Install Kubernetes on cluster
    +provisioning taskserv install kubernetes --cluster prod --wait
    +
    +# Install specific version
    +provisioning taskserv install kubernetes --version 1.29.0
    +
    +# Install on specific servers
    +provisioning taskserv install containerd --servers web-01,web-02
    +
    +

    provisioning taskserv remove <NAME>

    +

    Remove a task service.

    +

    Flags:

    +
      +
    • --cluster <name> - Target cluster
    • +
    • --purge - Remove all data
    • +
    • --yes - Skip confirmation
    • +
    +

    provisioning taskserv list

    +

    List installed task services.

    +

    Flags:

    +
      +
    • --available - Show available (not installed) services
    • +
    +

    provisioning taskserv status <NAME>

    +

    Get task service status.

    +

    Examples:

    +
    provisioning taskserv status kubernetes
    +
    +

    Cluster Commands

    +

    provisioning cluster create <NAME>

    +

    Create a complete cluster from configuration.

    +

    Flags:

    +
      +
    • --infra <file> - Nickel infrastructure file
    • +
    • --type <type> - Cluster type (kubernetes/etcd/postgres)
    • +
    • --wait - Wait for creation
    • +
    • --yes - Skip confirmation
    • +
    • --check - Dry-run mode
    • +
    +

    Examples:

    +
    # Create Kubernetes cluster
    +provisioning cluster create prod-k8s --infra k8s-cluster.ncl --wait
    +
    +# Check what would be created
    +provisioning cluster create staging --infra staging.ncl --check
    +
    +

    provisioning cluster delete <NAME>

    +

    Delete a cluster and all resources.

    +

    Flags:

    +
      +
    • --keep-data - Preserve data volumes
    • +
    • --yes - Skip confirmation
    • +
    +

    provisioning cluster list

    +

    List all clusters.

    +

    provisioning cluster status <NAME>

    +

    Get detailed cluster status.

    +

    Examples:

    +
    provisioning cluster status prod-k8s
    +
    +

    provisioning cluster scale <NAME>

    +

    Scale cluster nodes.

    +

    Flags:

    +
      +
    • --workers <num> - Number of worker nodes
    • +
    • --control-plane <num> - Number of control plane nodes
    • +
    +

    Examples:

    +
    # Scale workers to 5 nodes
    +provisioning cluster scale prod-k8s --workers 5
    +
    +

    Test Commands

    +

    provisioning test quick <TASKSERV>

    +

    Quick test of a task service in container.

    +

    Examples:

    +
    provisioning test quick kubernetes
    +provisioning test quick postgres
    +
    +

    provisioning test topology load <NAME>

    +

    Load a test topology template.

    +

    provisioning test env create

    +

    Create a test environment.

    +

    Flags:

    +
      +
    • --topology <name> - Topology template
    • +
    • --services <names> - Services to install
    • +
    +

    provisioning test env list

    +

    List active test environments.

    +

    provisioning test env cleanup <ID>

    +

    Cleanup a test environment.

    +

    Environment Variables

    +

    The CLI respects these environment variables:

    +
      +
    • PROVISIONING_WORKSPACE - Override active workspace
    • +
    • PROVISIONING_CONFIG - Custom config file path
    • +
    • PROVISIONING_LOG_LEVEL - Log level (debug/info/warn/error)
    • +
    • PROVISIONING_API_URL - API endpoint URL
    • +
    • PROVISIONING_TOKEN - Auth token (overrides login)
    • +
    +

    Exit Codes

    +
    + + + + + + + + + +
    CodeMeaning
    0Success
    1General error
    2Invalid usage
    3Configuration error
    4Authentication error
    5Permission denied
    6Resource not found
    7Operation failed
    8Timeout
    +
    +

    Shell Completion

    +

    Generate shell completion scripts:

    +
    # Bash
    +provisioning completion bash > /etc/bash_completion.d/provisioning
    +
    +# Zsh
    +provisioning completion zsh > ~/.zsh/completion/_provisioning
    +
    +# Fish
    +provisioning completion fish > ~/.config/fish/completions/provisioning.fish
    +
    + + +

    Nushell Libraries

    +

    Orchestrator API

    +

    Control Center API

    +

    Examples

    +

    + Provisioning Logo +

    +

    + Provisioning +

    +

    Architecture

    +

    Deep dive into Provisioning platform architecture, design principles, and +architectural decisions that shape the system.

    +

    Overview

    +

    The Provisioning platform uses modular, microservice-based architecture for +enterprise infrastructure as code across multiple clouds. This section +documents foundational architectural decisions and system design that enable:

    +
      +
    • Multi-cloud orchestration across AWS, UpCloud, Hetzner, Kubernetes, and on-premise systems
    • +
    • Workspace-first organization with complete infrastructure isolation and multi-tenancy support
    • +
    • Type-safe configuration using Nickel language as source of truth
    • +
    • Autonomous operations through intelligent detectors and automated incident response
    • +
    • Post-quantum security with hybrid encryption protecting against future threats
    • +
    +

    Architecture Documentation

    +

    System Understanding

    +

    + System Architecture Overview with 12 Microservices +

    +
      +
    • +

      System Overview - Platform architecture with 12 +microservices, 80+ CLI commands, multi-tenancy model, cloud integration

      +
    • +
    • +

      Design Principles - Configuration-driven design, +workspace isolation, type-safety mandates, autonomous operations, security-first

      +
    • +
    • +

      Component Architecture - 12 microservices: +Orchestrator, Control-Center, Vault-Service, Extension-Registry, AI-Service, +Detector, RAG, MCP-Server, KMS, Platform-Config, Service-Clients

      +
    • +
    • +

      Integration Patterns - REST APIs, async +message queues, event-driven workflows, service discovery, state management

      +
    • +
    +

    + Microservices Communication Patterns REST Async Events +

    +

    Architectural Decisions

    +
      +
    • Architecture Decision Records (ADRs) - 10 decisions: +modular CLI, workspace-first design, Nickel type-safety, microservice +distribution, communication, post-quantum cryptography, encryption, +observability, SLO management, incident automation
    • +
    +

    Key Architectural Patterns

    +

    Modular Design (ADR-001)

    +
      +
    • Decentralized CLI command registration reducing code by 84%
    • +
    • Dynamic command discovery and 80+ keyboard shortcuts
    • +
    • Extensible architecture supporting custom commands
    • +
    +

    Workspace-First Organization (ADR-002)

    +
      +
    • Workspaces as primary organizational unit grouping infrastructure, configs, and state
    • +
    • Complete isolation for multi-tenancy and team collaboration
    • +
    • Local schema and extension customization per workspace
    • +
    +

    Type-Safe Configuration (ADR-003)

    +
      +
    • Nickel language as source of truth for all infrastructure definitions
    • +
    • Mandatory schema validation at parse time (not runtime)
    • +
    • Complete migration from KCL with backward compatibility
    • +
    +

    Distributed Microservices (ADR-004)

    +
      +
    • 12 specialized microservices handling specific domains
    • +
    • Independent scaling and deployment per service
    • +
    • Service communication via REST + async queues
    • +
    +

    Security Architecture (ADR-006 & ADR-007)

    +
      +
    • Post-quantum cryptography with CRYSTALS-Kyber hybrid encryption
    • +
    • Multi-layer encryption: at-rest (KMS), in-transit (TLS 1.3), field-level, end-to-end
    • +
    • Centralized secrets management via SecretumVault
    • +
    +

    Observability & Resilience (ADR-008, ADR-009, ADR-010)

    +
      +
    • Unified observability: Prometheus metrics, ELK logging, Jaeger tracing
    • +
    • SLO-driven operations with error budget enforcement
    • +
    • Autonomous incident detection and self-healing
    • +
    + +
      +
    • For implementation details → See provisioning/docs/src/features/
    • +
    • For API documentation → See provisioning/docs/src/api-reference/
    • +
    • For deployment guides → See provisioning/docs/src/operations/
    • +
    • For security details → See provisioning/docs/src/security/
    • +
    • For development → See provisioning/docs/src/development/
    • +
    +

    System Overview

    +

    Complete architecture of the Provisioning Infrastructure Automation Platform.

    +

    Architecture Layers

    +

    Provisioning uses a 5-layer modular architecture:

    +
    ┌─────────────────────────────────────────────────────────────┐
    +│ User Interface Layer                                        │
    +│ • CLI (provisioning command)  • Web Control Center (UI)     │
    +│ • REST API  • MCP Server (AI) • Batch Scheduler             │
    +└──────────────────────────┬──────────────────────────────────┘
    +                           ↓
    +┌─────────────────────────────────────────────────────────────┐
    +│ Core Engine Layer (provisioning/core/)                      │
    +│ • 211-line CLI dispatcher (84% code reduction)              │
    +│ • 476+ configuration accessors (hierarchical)               │
    +│ • Provider abstraction (multi-cloud support)                │
    +│ • Workspace management system                               │
    +│ • Infrastructure validation (54+ Nushell libraries)         │
    +│ • Secrets management (SOPS + Age integration)               │
    +└──────────────────────────┬──────────────────────────────────┘
    +                           ↓
    +┌─────────────────────────────────────────────────────────────┐
    +│ Orchestration Layer (provisioning/platform/)                │
    +│ • Hybrid Orchestrator (Rust + Nushell)                      │
    +│ • Workflow execution with checkpoints                       │
    +│ • Dependency resolver & task scheduler                      │
    +│ • File-based persistence                                    │
    +│ • REST API endpoints (83+)                                  │
    +│ • State management (SurrealDB)                              │
    +└──────────────────────────┬──────────────────────────────────┘
    +                           ↓
    +┌─────────────────────────────────────────────────────────────┐
    +│ Extension Layer (provisioning/extensions/)                  │
    +│ • Cloud Providers (UpCloud, AWS, Hetzner, Local)            │
    +│ • Task Services (50+ services in 18 categories)             │
    +│ • Clusters (9 pre-built cluster templates)                  │
    +│ • Batch Workflows (automation templates)                    │
    +│ • Nushell Plugins (10-50x performance gains)                │
    +└──────────────────────────┬──────────────────────────────────┘
    +                           ↓
    +┌─────────────────────────────────────────────────────────────┐
    +│ Infrastructure Layer                                        │
    +│ • Cloud Resources (servers, networks, storage)              │
    +│ • Running Services (Kubernetes, databases, etc.)            │
    +│ • State Persistence (SurrealDB, file storage)               │
    +│ • Monitoring & Logging (Prometheus, Loki)                   │
    +└─────────────────────────────────────────────────────────────┘
    +
    +

    Core System Components

    +

    1. CLI Layer (provisioning/core/cli/)

    +

    Entry Point: provisioning/core/cli/provisioning

    +
      +
    • Bash wrapper (210 lines) - Minimal bootstrap
    • +
    • Routes commands to Nushell dispatcher
    • +
    • Loads environment and validates workspace
    • +
    • Handles error reporting
    • +
    +

    Key Features:

    +
      +
    • Single entry point
    • +
    • Pluggable architecture
    • +
    • Support for 111+ commands
    • +
    • 80+ shortcuts for productivity
    • +
    +

    2. Core Engine (provisioning/core/nulib/)

    +

    Structure: 54 Nushell libraries organized by function

    +

    Main Components:

    +

    Configuration Management (lib_provisioning/config/)

    +
      +
    • Hierarchical loading: 5-layer precedence system
    • +
    • 476+ accessors: Type-safe configuration access
    • +
    • Variable interpolation: Template expansion
    • +
    • TOML merging: Environment-specific overrides
    • +
    • Validation: Schema enforcement
    • +
    +

    Provider Abstraction (lib_provisioning/providers/)

    +
      +
    • Multi-cloud support: UpCloud, AWS, Hetzner, Local
    • +
    • Unified interface: Single API for all providers
    • +
    • Dynamic loading: Load providers on-demand
    • +
    • Credential management: Encrypted credential handling
    • +
    • State tracking: Provider-specific state persistence
    • +
    +

    Workspace Management (lib_provisioning/workspace/)

    +
      +
    • Workspace registry: Track all workspaces
    • +
    • Switching: Atomic workspace transitions
    • +
    • Isolation: Independent state per workspace
    • +
    • Configuration loading: Workspace-specific overrides
    • +
    • Extensions: Inherit from platform extensions
    • +
    +

    Infrastructure Validation (lib_provisioning/infra_validator/)

    +
      +
    • Schema validation: Nickel contract checking
    • +
    • Constraint enforcement: Business rule validation
    • +
    • Dependency analysis: Infrastructure dependency graph
    • +
    • Type checking: Static type validation
    • +
    • Error reporting: Detailed error messages with suggestions
    • +
    +

    Secrets Management (lib_provisioning/secrets/)

    +
      +
    • SOPS integration: Mozilla SOPS for encryption
    • +
    • Age encryption: Modern symmetric encryption
    • +
    • KMS backends: Cosmian, AWS KMS, local
    • +
    • Credential injection: Runtime variable substitution
    • +
    • Audit logging: Track secret access
    • +
    +

    Command Utilities (lib_provisioning/cmd/)

    +
      +
    • SSH operations: Remote command execution
    • +
    • Batch operations: Parallel command execution
    • +
    • Error handling: Structured error reporting
    • +
    • Logging: Comprehensive operation logging
    • +
    • Retry logic: Automatic retry with backoff
    • +
    +

    3. Orchestration Engine (provisioning/platform/)

    +

    Technology: Rust + Nushell hybrid

    +

    12 Microservices (Rust crates):

    +
    + + + + + + + + + + + + +
    ServicePurposeKey Features
    orchestratorWorkflow executionScheduler, file persistence, REST API
    control-centerAPI gateway + authRBAC, Cedar policies, audit logging
    control-center-uiWeb dashboardInfrastructure view, config management
    mcp-serverAI integrationModel Context Protocol, auto-completion
    vault-serviceSecrets storageEncryption, KMS, credential injection
    extension-registryOCI registryExtension distribution, versioning
    ai-serviceLLM featuresPrompt optimization, context awareness
    detectorAnomaly detectionHealth monitoring, pattern recognition
    ragKnowledge retrievalDocument embedding, semantic search
    provisioning-daemonBackground serviceEvent monitoring, task scheduling
    platform-configConfig managementSchema validation, environment handling
    service-clientsAPI clientsSDK for platform services, cloud APIs
    +
    +

    Detailed Services:

    +

    Orchestrator (crates/orchestrator/)

    +
      +
    • High-performance scheduler: Rust core
    • +
    • File-based persistence: Durable queue
    • +
    • Workflow execution: Dependency-aware scheduling
    • +
    • Checkpoint recovery: Resume from failures
    • +
    • Parallel execution: Multi-task handling
    • +
    • State management: Track job status
    • +
    • REST API: 9 core endpoints
    • +
    • Port: 9090 (health check endpoint)
    • +
    +

    Control Center (crates/control-center/)

    +
      +
    • Authorization engine: Cedar policy enforcement
    • +
    • RBAC system: Role-based access control
    • +
    • Audit logging: Complete audit trail
    • +
    • API gateway: REST API for all operations
    • +
    • System configuration: Central configuration management
    • +
    • Health monitoring: Real-time system status
    • +
    +

    Control Center UI (crates/control-center-ui/)

    +
      +
    • Web dashboard: Real-time infrastructure view
    • +
    • Workflow visualization: Batch job monitoring
    • +
    • Configuration management: Web-based configuration
    • +
    • Resource explorer: Browse infrastructure
    • +
    • Audit viewer: Security audit trail
    • +
    +

    MCP Server (crates/mcp-server/)

    +
      +
    • AI integration: Model Context Protocol support
    • +
    • Natural language: Parse infrastructure requests
    • +
    • Auto-completion: Intelligent configuration suggestions
    • +
    • 7 settings tools: Configuration management via LLM
    • +
    • Context-aware: Understand workspace context
    • +
    +

    Vault Service (crates/vault-service/)

    +
      +
    • Secrets backend: Encrypted credential storage
    • +
    • KMS integration: Key Management System support
    • +
    • SOPS + Age: SOPS encryption backend
    • +
    • Credential injection: Secure credential delivery
    • +
    • Audit logging: Secret access tracking
    • +
    +

    Extension Registry (crates/extension-registry/)

    +
      +
    • OCI distribution: Container image distribution
    • +
    • Extension packaging: Provider/taskserv distribution
    • +
    • Version management: Semantic versioning
    • +
    • Registry API: Content addressable storage
    • +
    +

    AI Service (crates/ai-service/)

    +
      +
    • LLM integration: Large Language Model support
    • +
    • Prompt optimization: Infrastructure request parsing
    • +
    • Context awareness: Workspace context enrichment
    • +
    • Response generation: Configuration suggestions
    • +
    +

    Detector (crates/detector/)

    +
      +
    • Anomaly detection: System health monitoring
    • +
    • Pattern recognition: Infrastructure issue identification
    • +
    • Alert generation: Alerting system integration
    • +
    • Real-time monitoring: Continuous surveillance
    • +
    +

    Platform Config (crates/platform-config/)

    +
      +
    • Configuration management: Centralized config loading
    • +
    • Schema validation: Configuration validation
    • +
    • Environment handling: Multi-environment support
    • +
    • Default settings: System-wide defaults
    • +
    +

    Provisioning Daemon (crates/provisioning-daemon/)

    +
      +
    • Background service: Continuous operation
    • +
    • Event monitoring: System event handling
    • +
    • Task scheduling: Background job execution
    • +
    • State synchronization: Infrastructure state sync
    • +
    +

    RAG Service (crates/rag/)

    +
      +
    • Retrieval Augmented Generation: Knowledge base integration
    • +
    • Document embedding: Semantic search
    • +
    • Context retrieval: Intelligent response context
    • +
    • Knowledge synthesis: Answer generation
    • +
    +

    Service Clients (crates/service-clients/)

    +
      +
    • API clients: Client SDK for platform services
    • +
    • Cloud providers: Multi-cloud provider SDKs
    • +
    • Request handling: HTTP/RPC client utilities
    • +
    • Connection pooling: Efficient resource management
    • +
    +

    4. Extensions (provisioning/extensions/)

    +

    Modular infrastructure components:

    +

    Providers (5 cloud providers)

    +
      +
    • UpCloud - Primary European cloud
    • +
    • AWS - Amazon Web Services
    • +
    • Hetzner - Baremetal & cloud servers
    • +
    • Local - Development environment
    • +
    • Demo - Testing & mocking
    • +
    +

    Each provider includes:

    +
      +
    • Nickel schemas for configuration
    • +
    • API client implementation
    • +
    • Server creation/deletion logic
    • +
    • Network management
    • +
    • State tracking
    • +
    +

    Task Services (50+ services in 18 categories)

    +
    + + + + + + + + + +
    CategoryServicesPurpose
    Container Runtimecontainerd, crio, podman, crun, youki, runcContainer execution
    Kuberneteskubernetes, etcd, coredns, cilium, flannel, calicoOrchestration
    Storagerook-ceph, local-storage, mayastor, external-nfsData persistence
    Databasespostgres, redis, mysql, mongodbData management
    Networkingip-aliases, proxy, resolv, kmsNetwork services
    Securitywebhook, kms, oras, radicleSecurity services
    Observabilityprometheus, grafana, loki, jaegerMonitoring & logging
    Developmentgitea, coder, desktop, buildkitDeveloper tools
    Hypervisorkvm, qemu, libvirtVirtualization
    +
    +

    Clusters (9 pre-built templates)

    +
      +
    • web - Web service cluster (nginx + postgres)
    • +
    • oci-reg - Container registry
    • +
    • git - Git hosting (Gitea)
    • +
    • buildkit - Build infrastructure
    • +
    • k8s-ha - HA Kubernetes (3 control planes)
    • +
    • postgresql - HA PostgreSQL cluster
    • +
    • cicd-argocd - GitOps CI/CD
    • +
    • cicd-tekton - Tekton pipelines
    • +
    +

    5. Infrastructure Layer

    +

    What Provisioning Manages:

    +
      +
    • Cloud Resources: VMs, networks, storage
    • +
    • Services: Kubernetes, databases, monitoring
    • +
    • Applications: Web services, APIs, tools
    • +
    • State: Configuration, data, logs
    • +
    • Monitoring: Metrics, traces, logs
    • +
    +

    Configuration System

    +

    Hierarchical 5-Layer System:

    +
    Precedence (High → Low):
    +
    +1. Runtime Arguments   (CLI flags: --provider upcloud)
    +   ↓
    +2. Environment Variables (PROVISIONING_PROVIDER=aws)
    +   ↓
    +3. Workspace Config    (~workspace/config/provisioning.yaml)
    +   ↓
    +4. Environment Defaults (workspace/config/prod-defaults.toml)
    +   ↓
    +5. System Defaults     (~/.config/provisioning/ + platform defaults)
    +
    +

    Configuration Languages:

    +
    + + + + +
    FormatPurposeValidationEditability
    NickelInfrastructure source✅ Type-safe, contractsDirect
    TOMLSettings, defaultsSchema validationDirect
    YAMLUser config, metadataSchema validationDirect
    JSONExported configsSchema validationGenerated
    +
    +

    Key Features:

    +
      +
    • Lazy evaluation
    • +
    • Recursive merging
    • +
    • Variable interpolation
    • +
    • Constraint checking
    • +
    • Automatic validation
    • +
    +

    State Management

    +

    SurrealDB Graph Database:

    +

    Stores complex infrastructure relationships:

    +
    Nodes:
    +- Servers (compute)
    +- Networks (connectivity)
    +- Storage (persistence)
    +- Services (software)
    +- Workflows (automation)
    +
    +Edges:
    +- Server → Network (connected)
    +- Server → Storage (mounted)
    +- Service → Server (running on)
    +- Workflow → Dependency (depends on)
    +
    +

    File-Based Persistence:

    +

    For orchestrator queue and checkpoints:

    +
    ~/.provisioning/
    +├── state/              # Infrastructure state
    +├── checkpoints/        # Workflow checkpoints
    +├── queue/              # Orchestrator queue
    +└── logs/               # Operational logs
    +
    +

    Security Architecture

    +

    4-Layer Security Model:

    +
    + + + + +
    LayerComponentsFeatures
    AuthenticationJWT, sessions, MFA2FA, TOTP, WebAuthn
    AuthorizationCedar policies, RBACFine-grained permissions
    EncryptionAES-256-GCM, TLSAt-rest & in-transit
    AuditLogging, compliance7-year retention
    +
    +

    Security Services:

    +
      +
    • JWT token validation
    • +
    • Argon2id password hashing
    • +
    • Multi-factor authentication
    • +
    • Cedar policy enforcement
    • +
    • Encrypted credential storage
    • +
    • KMS integration (5 backends)
    • +
    • Audit logging (5 export formats)
    • +
    • Compliance checking (SOC2, GDPR, HIPAA)
    • +
    +

    Performance Characteristics

    +

    Modular CLI (84% code reduction):

    +
      +
    • Main CLI: 211 lines (vs. 1,329 before)
    • +
    • Command discovery: O(1) dispatcher
    • +
    • Lazy loading: Commands loaded on-demand
    • +
    • Caching: Configuration cached after first load
    • +
    +

    Orchestrator Performance:

    +
      +
    • Dependency resolution: O(n log n) topological sort
    • +
    • Parallel execution: Configurable task limit
    • +
    • Checkpoint recovery: Resume from failure point
    • +
    • Memory efficient: File-based queue
    • +
    +

    Provider Operations:

    +
      +
    • Batch creation: Parallel server provisioning
    • +
    • Bulk operations: Multi-resource transactions
    • +
    • State tracking: Efficient state queries
    • +
    • Rollback: Atomic operation reversal
    • +
    +

    Nushell Plugins (10-50x speedup):

    +
      +
    • Compiled Rust extensions
    • +
    • Direct native code execution
    • +
    • Zero-copy data passing
    • +
    • Async I/O support
    • +
    +

    Deployment Modes

    +

    Three Operational Modes:

    +
    + + + +
    ModeInteractionConfigurationRollbackUse Case
    Interactive TUIRatatui UIManual inputAutomaticDevelopment
    Headless CLICommand-lineScript-drivenManualAutomation
    Unattended CI/CDNon-interactiveConfiguration fileAutomaticCI/CD pipelines
    +
    +

    Technology Stack

    +
    + + + + + + + +
    ComponentTechnologyWhy
    IaC LanguageNickelType-safe, lazy evaluation, contracts
    ScriptingNushell 0.109+Structured data pipelines
    PerformanceRustZero-cost abstractions, memory safety
    StateSurrealDBGraph database for relationships
    EncryptionSOPS + AgeIndustry-standard encryption
    SecurityCedar + JWTPolicy enforcement + tokens
    OrchestrationCustomSpecialized for infrastructure workflows
    +
    +

    File Organization

    +
    provisioning/
    +├── core/                       # CLI engine (Nushell)
    +│   ├── cli/provisioning       # Main entry point
    +│   ├── nulib/                 # 54 core libraries
    +│   ├── plugins/               # Nushell plugins (Rust)
    +│   └── scripts/               # Utility scripts
    +│
    +├── platform/                   # Microservices (Rust)
    +│   ├── crates/                # 12 microservices
    +│   │   ├── orchestrator/      # Workflow scheduler
    +│   │   ├── control-center/    # API gateway + auth
    +│   │   ├── control-center-ui/ # Web dashboard
    +│   │   ├── mcp-server/        # AI integration
    +│   │   ├── vault-service/     # Secrets backend
    +│   │   ├── extension-registry/ # OCI registry
    +│   │   ├── ai-service/        # LLM features
    +│   │   ├── detector/          # Anomaly detection
    +│   │   ├── rag/               # Knowledge retrieval
    +│   │   ├── provisioning-daemon/ # Background service
    +│   │   ├── platform-config/   # Config management
    +│   │   └── service-clients/   # API clients
    +│   └── Cargo.toml             # Rust workspace
    +│
    +├── extensions/                # Extensible components
    +│   ├── providers/             # Cloud providers (5)
    +│   ├── taskservs/             # Task services (50+)
    +│   ├── clusters/              # Cluster templates (9)
    +│   └── workflows/             # Automation templates
    +│
    +├── schemas/                   # Nickel schemas
    +│   ├── main.ncl              # Entry point
    +│   ├── config/               # Configuration schemas
    +│   ├── infrastructure/       # Infrastructure schemas
    +│   ├── operations/           # Operational schemas
    +│   └── [other schemas]       # Additional schemas
    +│
    +├── config/                    # System configuration
    +│   └── config.defaults.toml  # Default settings
    +│
    +├── bootstrap/                 # Installation
    +│   ├── install.sh            # Bash bootstrap
    +│   └── install.nu            # Nushell installer
    +│
    +├── docs/                      # Product documentation
    +│   └── src/                  # mdBook source
    +│
    +└── README.md                  # Project overview
    +
    +

    Component Interaction

    +

    Typical Workflow:

    +
    User Input
    +   ↓
    +CLI Dispatcher (provisioning/core/cli/provisioning)
    +   ↓
    +Nushell Handler (provisioning/core/nulib/commands/)
    +   ↓
    +Configuration Loading (lib_provisioning/config/)
    +   ↓
    +Provider Selection (lib_provisioning/providers/)
    +   ↓
    +Validation (lib_provisioning/infra_validator/)
    +   ↓
    +Orchestrator Queue (provisioning/platform/orchestrator/)
    +   ↓
    +Task Execution (provider + task service)
    +   ↓
    +State Update (SurrealDB / file storage)
    +   ↓
    +Audit Logging (security system)
    +   ↓
    +User Feedback
    +
    +

    Scalability

    +

    Provisioning scales from:

    +
      +
    • Solo: 2 CPU cores, 4GB RAM (single instance)
    • +
    • MultiUser: 4-8 CPU cores, 8GB RAM (small team)
    • +
    • CICD: 8+ CPU cores, 16GB RAM (enterprise)
    • +
    • Enterprise: Multi-node Kubernetes (unlimited)
    • +
    +

    Bottlenecks & Solutions:

    +
    + + + + +
    ComponentBottleneckSolution
    OrchestratorTask queuePartition by workspace
    StateSurrealDBHorizontal scaling
    ProvidersAPI rate limitsExponential backoff
    StorageDisk I/OSSD + caching
    +
    +

    Integration Points

    +

    Provisioning integrates with:

    +
      +
    • Kubernetes API - Cluster management
    • +
    • Cloud Provider APIs - Resource provisioning
    • +
    • SOPS + Age - Secrets encryption
    • +
    • Prometheus - Metrics collection
    • +
    • Cedar - Policy enforcement
    • +
    • SurrealDB - State persistence
    • +
    • MCP - AI integration
    • +
    • KMS - Key management (Cosmian, AWS, local)
    • +
    +

    Reliability Features

    +

    Fault Tolerance:

    +
      +
    • Checkpoint recovery - Resume from failure
    • +
    • Automatic rollback - Revert failed operations
    • +
    • Retry logic - Exponential backoff
    • +
    • Health checks - Continuous monitoring
    • +
    • Backup & restore - Data protection
    • +
    +

    High Availability:

    +
      +
    • Multi-node orchestrator
    • +
    • Database replication
    • +
    • Service redundancy
    • +
    • Load balancing
    • +
    • Failover automation
    • +
    + + +

    Design Principles

    +

    Core principles guiding Provisioning architecture and development.

    +

    1. Workspace-First Design

    +

    Principle: Workspaces are the default organizational unit for ALL infrastructure work.

    +

    Why:

    +
      +
    • Explicit project isolation
    • +
    • Prevent accidental cross-project modifications
    • +
    • Independent credential management
    • +
    • Clear configuration boundaries
    • +
    • Team collaboration enablement
    • +
    +

    Application:

    +
      +
    • Every workspace has independent state
    • +
    • Workspace switching is atomic
    • +
    • Configuration per workspace
    • +
    • Extensions inherited from platform
    • +
    +

    Code Example:

    +
    # Workspace-enforced workflow
    +provisioning workspace init my-project
    +provisioning workspace switch my-project
    +
    +# This command requires active workspace
    +provisioning server create --name web-01
    +
    +

    Impact: All commands validate active workspace before execution.

    +
    +

    2. Type-Safety Mandatory

    +

    Principle: ALL configurations MUST be type-safe. Validation is NEVER optional.

    +

    Why:

    +
      +
    • Catch errors at configuration time
    • +
    • Prevent runtime failures
    • +
    • Enable IDE support (LSP)
    • +
    • Enforce consistency
    • +
    • Reduce deployment risk
    • +
    +

    Application:

    +
      +
    • Nickel is source of truth (NOT TOML)
    • +
    • Type contracts on ALL schemas
    • +
    • Gradual typing not allowed
    • +
    • Validation in ALL profiles (dev, prod, cicd)
    • +
    • Static analysis before deployment
    • +
    +

    Code Example:

    +
    # Type-safe infrastructure definition
    +{
    +  name : String = "server-01"
    +  plan : | [ 'small, 'medium, 'large | ] = 'medium
    +  zone : String = "de-fra1"
    +  backup_enabled : Bool = false
    +} | ServerContract
    +
    +

    Impact: Type errors caught before infrastructure changes.

    +
    +

    3. Configuration-Driven, Never Hardcoded

    +

    Principle: Configuration is the source of truth. Hardcoded values are forbidden.

    +

    Why:

    +
      +
    • Enable environment-specific behavior
    • +
    • Support multiple deployment modes
    • +
    • Allow runtime reconfiguration
    • +
    • Audit configuration changes
    • +
    • Team collaboration
    • +
    +

    Application:

    +
      +
    • 5-layer configuration hierarchy
    • +
    • 476+ configuration accessors
    • +
    • Variable interpolation
    • +
    • Environment-specific overrides
    • +
    • Schema validation
    • +
    +

    Code Example:

    +
    # Configuration drives behavior
    +provisioning server create --plan $(config.server.default_plan)
    +
    +# Environment-specific configs
    +PROVISIONING_ENV=prod provisioning server create
    +
    +

    Forbidden:

    +
    # ❌ WRONG - Hardcoded values
    +let server_plan = "medium"
    +
    +# ✅ RIGHT - Configuration-driven
    +let server_plan = (config.server.plan)
    +
    +

    Impact: Single codebase supports all environments.

    +
    +

    4. Multi-Cloud Abstraction

    +

    Principle: Provider-agnostic interfaces enable multi-cloud deployments.

    +

    Why:

    +
      +
    • Avoid vendor lock-in
    • +
    • Reuse infrastructure code
    • +
    • Support multiple cloud strategies
    • +
    • Easy provider switching
    • +
    +

    Application:

    +
      +
    • Unified provider interface
    • +
    • Abstract resource definitions
    • +
    • Provider-specific implementation
    • +
    • Automatic provider selection
    • +
    +

    Code Example:

    +
    # Provider-agnostic configuration
    +{
    +  servers = [
    +    {
    +      name = "web-01"
    +      plan = "medium"      # Abstract plan size
    +      provider = "upcloud" # Swappable provider
    +    }
    +  ]
    +}
    +
    +

    Impact: Same Nickel schema deploys to UpCloud, AWS, or Hetzner.

    +
    +

    5. Modular, Extensible Architecture

    +

    Principle: Components are loosely coupled, independently deployable.

    +

    Why:

    +
      +
    • Easy to add features
    • +
    • Support custom extensions
    • +
    • Avoid monolithic growth
    • +
    • Enable community contributions
    • +
    • Flexible deployment options
    • +
    +

    Application:

    +
      +
    • 54 core Nushell libraries
    • +
    • 111+ CLI commands in 7 domains
    • +
    • 50+ task services
    • +
    • 5 cloud providers
    • +
    • 9 cluster templates
    • +
    • Pluggable provider interface
    • +
    +

    Impact: Add features without modifying core system.

    +
    +

    6. Hybrid Rust + Nushell

    +

    Principle: Rust for performance-critical components, Nushell for orchestration.

    +

    Why:

    +
      +
    • Rust: Type safety, zero-cost abstractions, performance
    • +
    • Nushell: Structured data, productivity, easy automation
    • +
    • Hybrid: Best of both worlds
    • +
    +

    Application:

    +
      +
    • Core CLI: Bash wrapper → Nushell dispatcher
    • +
    • Orchestrator: Rust scheduler + Nushell task execution
    • +
    • Libraries: Nushell for business logic
    • +
    • Performance: Rust plugins for 10-50x speedup
    • +
    +

    Impact: Fast, type-safe, productive infrastructure automation.

    +
    +

    7. State Management via Graph Database

    +

    Principle: Infrastructure relationships tracked via SurrealDB graph.

    +

    Why:

    +
      +
    • Model complex infrastructure relationships
    • +
    • Query relationships efficiently
    • +
    • Track dependencies
    • +
    • Support rollback via state history
    • +
    • Audit trail
    • +
    +

    Application:

    +
      +
    • SurrealDB for relationship queries
    • +
    • File-based persistence for queue
    • +
    • Event-driven state updates
    • +
    • Checkpoint-based recovery
    • +
    +

    Example Relationships:

    +
    Server → Network (connected to)
    +Server → Storage (mounts)
    +Cluster → Service (runs)
    +Workflow → Dependency (depends on)
    +
    +

    Impact: Complex infrastructure relationships handled gracefully.

    +
    +

    8. Security-First Design

    +

    Principle: Security is built-in, not bolted-on.

    +

    Why:

    +
      +
    • Enterprise compliance
    • +
    • Data protection
    • +
    • Access control
    • +
    • Audit trails
    • +
    • Threat detection
    • +
    +

    Application:

    +
      +
    • 4-layer security model (auth, authz, encryption, audit)
    • +
    • JWT authentication
    • +
    • Cedar policy enforcement
    • +
    • AES-256-GCM encryption
    • +
    • 7-year audit retention
    • +
    • MFA support (TOTP, WebAuthn)
    • +
    +

    Impact: Enterprise-grade security by default.

    +
    +

    9. Progressive Disclosure

    +

    Principle: Simple for common cases, powerful for advanced use cases.

    +

    Why:

    +
      +
    • Low barrier to entry
    • +
    • Professional productivity
    • +
    • Advanced features available
    • +
    • Avoid overwhelming users
    • +
    • Gradual learning curve
    • +
    +

    Application:

    +
      +
    • Simple: Interactive TUI installer
    • +
    • Productive: CLI with 80+ shortcuts
    • +
    • Powerful: Batch workflows, policies
    • +
    • Advanced: Custom extensions, hooks
    • +
    +

    Impact: All skill levels supported.

    +
    +

    10. Fail-Fast, Recover Gracefully

    +

    Principle: Detect issues early, provide recovery mechanisms.

    +

    Why:

    +
      +
    • Prevent invalid deployments
    • +
    • Enable safe recovery
    • +
    • Minimize blast radius
    • +
    • Audit failures for learning
    • +
    +

    Application:

    +
      +
    • Validation before execution
    • +
    • Checkpoint-based recovery
    • +
    • Automatic rollback on failure
    • +
    • Detailed error messages
    • +
    • Retry with exponential backoff
    • +
    +

    Code Example:

    +
    # Validate before deployment
    +provisioning validate config --strict
    +
    +# Dry-run to check impact
    +provisioning --check server create
    +
    +# Safe rollback on failure
    +provisioning workflow rollback --to-checkpoint
    +
    +

    Impact: Safe infrastructure changes with confidence.

    +
    +

    11. Observable & Auditable

    +

    Principle: All operations traceable, all changes auditable.

    +

    Why:

    +
      +
    • Compliance & regulation
    • +
    • Troubleshooting
    • +
    • Security investigation
    • +
    • Team accountability
    • +
    • Historical analysis
    • +
    +

    Application:

    +
      +
    • Comprehensive audit logging
    • +
    • 5 export formats (JSON, YAML, CSV, syslog, CloudWatch)
    • +
    • Structured log entries
    • +
    • Operation tracing
    • +
    • Resource change tracking
    • +
    +

    Impact: Complete visibility into infrastructure changes.

    +
    +

    12. No Shortcuts on Reliability

    +

    Principle: Reliability features are standard, not optional.

    +

    Why:

    +
      +
    • Production requirements
    • +
    • Minimize downtime
    • +
    • Data protection
    • +
    • Business continuity
    • +
    • Trust & confidence
    • +
    +

    Application:

    +
      +
    • Checkpoint recovery
    • +
    • Automatic rollback
    • +
    • Health monitoring
    • +
    • Backup & restore
    • +
    • Multi-node deployment
    • +
    • Service redundancy
    • +
    +

    Impact: Enterprise-grade reliability standard.

    +
    +

    Architectural Decision Records (ADRs)

    +

    Key decisions documenting rationale:

    +
    + + + + + + +
    ADRDecisionRationale
    ADR-011Nickel MigrationType-safety over KCL flexibility
    ADR-010Config Strategy5-layer hierarchy over flat config
    ADR-009SurrealDBGraph relationships over relational
    ADR-008Modular CLI80+ shortcuts over verbose commands
    ADR-007Workspace-FirstIsolation over global state
    ADR-006Hybrid ArchitectureRust + Nushell for best of both
    +
    +
    +

    Design Trade-offs

    +
    + + + + + + +
    DecisionGainCost
    Type-SafetyFewer errorsLearning curve
    Config HierarchyFlexibilityComplexity
    Workspace IsolationSafetyDuplication
    Modular CLIDiscoverabilityNo single command
    SurrealDBRelationshipsResource overhead
    Validation StrictSafetyFast iteration friction
    +
    +
    + + +

    Component Architecture

    +

    Detailed architecture of each major Provisioning component.

    +

    Core Components Map

    +
    User Interface
    +  ├─ CLI (Nushell dispatcher)
    +  ├─ Web Dashboard (Control Center UI)
    +  ├─ REST API (Control Center)
    +  └─ MCP Server (AI Integration)
    +       ↓
    +Core Engine (54 Nushell libraries)
    +  ├─ Configuration Management
    +  ├─ Provider Abstraction
    +  ├─ Workspace Management
    +  ├─ Infrastructure Validation
    +  ├─ Secrets Management
    +  └─ Command Utilities
    +       ↓
    +Platform Services (12 Rust microservices)
    +  ├─ Orchestrator (Workflow execution)
    +  ├─ Control Center (API + Auth)
    +  ├─ Control Center UI (Web dashboard)
    +  ├─ MCP Server (AI integration)
    +  ├─ Vault Service (Secrets backend)
    +  ├─ Extension Registry (OCI distribution)
    +  ├─ AI Service (LLM features)
    +  ├─ Detector (Anomaly detection)
    +  ├─ RAG (Knowledge retrieval)
    +  ├─ Provisioning Daemon (Background service)
    +  ├─ Platform Config (Configuration management)
    +  └─ Service Clients (API clients)
    +       ↓
    +Extensions (Modular infrastructure)
    +  ├─ Providers (5 cloud providers)
    +  ├─ Task Services (50+ services)
    +  ├─ Clusters (9 templates)
    +  └─ Workflows (Automation)
    +       ↓
    +Infrastructure (Running resources)
    +  ├─ Cloud Compute
    +  ├─ Networks & Storage
    +  ├─ Services
    +  └─ Monitoring
    +
    +

    1. CLI Layer

    +

    Location: provisioning/core/cli/

    +

    Main Entry Point (provisioning)

    +

    Bash wrapper that:

    +
      +
    1. Detects Nushell installation
    2. +
    3. Loads environment variables
    4. +
    5. Validates workspace requirement
    6. +
    7. Routes command to dispatcher
    8. +
    9. Handles error reporting
    10. +
    +

    Command Dispatcher

    +

    Location: provisioning/core/nulib/main_provisioning/dispatcher.nu

    +

    Supports:

    +
      +
    • 111+ commands across 7 domains
    • +
    • 80+ shortcuts for productivity
    • +
    • Bi-directional help (help workspace / workspace help)
    • +
    • Dynamic loading of command modules
    • +
    +

    2. Core Engine Components

    +

    Configuration Management

    +

    Location: provisioning/core/nulib/lib_provisioning/config/

    +

    Key Features:

    +
      +
    • Load merged configuration from 5 layers
    • +
    • 476+ accessors for config values
    • +
    • Variable interpolation & TOML merging
    • +
    • Schema validation
    • +
    • Configuration caching
    • +
    +

    Provider Abstraction

    +

    Location: provisioning/core/nulib/lib_provisioning/providers/

    +

    Supported Providers (5):

    +
      +
    • UpCloud - Primary European cloud
    • +
    • AWS - Amazon Web Services
    • +
    • Hetzner - Baremetal & cloud
    • +
    • Local - Development environment
    • +
    • Demo - Testing & mocking
    • +
    +

    Features:

    +
      +
    • Unified cloud provider interface
    • +
    • Dynamic provider loading
    • +
    • Credential management
    • +
    • Provider state tracking
    • +
    +

    Workspace Management

    +

    Location: provisioning/core/nulib/lib_provisioning/workspace/

    +

    Responsibilities:

    +
      +
    • Workspace registry tracking
    • +
    • Atomic workspace switching
    • +
    • Configuration isolation
    • +
    • Extension inheritance
    • +
    • State management
    • +
    +

    Workspace Registry:

    +
    workspaces:
    +  active: "my-project"
    +  registry:
    +    my-project:
    +      path: ~/.provisioning/workspaces/workspace_my_project
    +      created: 2026-01-16T10:30:00Z
    +      template: default
    +
    +

    Infrastructure Validation

    +

    Location: provisioning/core/nulib/lib_provisioning/infra_validator/

    +

    Validation Stages:

    +
      +
    1. Syntax check - Valid Nickel syntax
    2. +
    3. Type check - Type correctness
    4. +
    5. Schema check - Matches expected schema
    6. +
    7. Constraint check - Business rule validation
    8. +
    9. Dependency check - Infrastructure dependencies
    10. +
    11. Security check - Security policies
    12. +
    +

    Secrets Management

    +

    Location: provisioning/core/nulib/lib_provisioning/secrets/

    +

    Backends:

    +
      +
    • SOPS + Age (default)
    • +
    • Cosmian KMS (enterprise)
    • +
    • AWS KMS (AWS)
    • +
    • Local KMS (development)
    • +
    +

    3. Platform Services

    +

    Orchestrator

    +

    Location: provisioning/platform/crates/orchestrator/

    +

    Technology: Rust + Nushell

    +

    Key Features:

    +
      +
    • High-performance workflow execution
    • +
    • File-based persistence
    • +
    • Checkpoint recovery
    • +
    • Parallel execution with dependencies
    • +
    • REST API (83+ endpoints)
    • +
    • Priority-based task scheduling
    • +
    +

    State Persistence:

    +
    ~/.provisioning/
    +├── queue/           # Task queue
    +├── checkpoints/     # Workflow checkpoints
    +└── state/           # Infrastructure state
    +
    +

    Control Center

    +

    Location: provisioning/platform/crates/control-center/

    +

    Technology: Rust (Axum)

    +

    Features:

    +
      +
    • JWT authentication
    • +
    • Cedar policy authorization
    • +
    • RBAC system
    • +
    • Audit logging
    • +
    • REST API for all operations
    • +
    +

    Authorization Model:

    +
      +
    • User roles (admin, user, viewer)
    • +
    • Fine-grained permissions
    • +
    • Cedar policy enforcement
    • +
    • Attribute-based access control
    • +
    +

    Control Center UI

    +

    Location: provisioning/platform/crates/control-center-ui/

    +

    Features:

    +
      +
    • Real-time infrastructure view
    • +
    • Workflow visualization
    • +
    • Configuration management
    • +
    • Resource monitoring
    • +
    • Audit log viewer
    • +
    +

    MCP Server

    +

    Location: provisioning/platform/crates/mcp-server/

    +

    Technology: Rust

    +

    Features:

    +
      +
    • AI-powered assistance via MCP
    • +
    • Natural language command parsing
    • +
    • Auto-completion of configurations
    • +
    • 7 configuration tools for LLM
    • +
    • Context-aware recommendations
    • +
    +

    Vault Service

    +

    Location: provisioning/platform/crates/vault-service/

    +

    Features:

    +
      +
    • Encrypted credential storage
    • +
    • KMS integration (5 backends)
    • +
    • SOPS + Age encryption
    • +
    • Secure credential injection
    • +
    • Audit logging for secret access
    • +
    +

    Extension Registry

    +

    Location: provisioning/platform/crates/extension-registry/

    +

    Features:

    +
      +
    • OCI-compliant distribution
    • +
    • Provider/taskserv packaging
    • +
    • Semantic version management
    • +
    • Content addressable storage
    • +
    • Registry API endpoints
    • +
    +

    AI Service

    +

    Location: provisioning/platform/crates/ai-service/

    +

    Features:

    +
      +
    • LLM integration platform
    • +
    • Infrastructure request parsing
    • +
    • Workspace context enrichment
    • +
    • Configuration suggestion generation
    • +
    • Multi-provider LLM support
    • +
    +

    Detector

    +

    Location: provisioning/platform/crates/detector/

    +

    Features:

    +
      +
    • System health monitoring
    • +
    • Anomaly pattern detection
    • +
    • Infrastructure issue identification
    • +
    • Real-time surveillance
    • +
    • Alerting system integration
    • +
    +

    RAG Service

    +

    Location: provisioning/platform/crates/rag/

    +

    Features:

    +
      +
    • Retrieval Augmented Generation
    • +
    • Document semantic embedding
    • +
    • Knowledge base integration
    • +
    • Context-aware answer generation
    • +
    • Multi-source knowledge synthesis
    • +
    +

    Provisioning Daemon

    +

    Location: provisioning/platform/crates/provisioning-daemon/

    +

    Features:

    +
      +
    • Background service operation
    • +
    • System event monitoring
    • +
    • Background job execution
    • +
    • Infrastructure state synchronization
    • +
    • Event-driven architecture
    • +
    +

    Platform Config

    +

    Location: provisioning/platform/crates/platform-config/

    +

    Features:

    +
      +
    • Centralized configuration loading
    • +
    • Schema-based validation
    • +
    • Multi-environment support
    • +
    • System-wide default settings
    • +
    • Configuration hot-reload support
    • +
    +

    Service Clients

    +

    Location: provisioning/platform/crates/service-clients/

    +

    Features:

    +
      +
    • Platform service client SDKs
    • +
    • Cloud provider API clients
    • +
    • HTTP/RPC request handling
    • +
    • Connection pooling and management
    • +
    • Retry logic and error handling
    • +
    +

    4. Extension Components

    +

    Providers

    +

    Location: provisioning/extensions/providers/

    +

    Structure:

    +
    providers/
    +├── upcloud/        # UpCloud provider
    +├── aws/            # AWS provider
    +├── hetzner/        # Hetzner provider
    +├── local/          # Local dev provider
    +├── demo/           # Demo/test provider
    +└── prov_lib/       # Shared utilities
    +
    +

    Provider Interface:

    +
      +
    • Create/delete resources
    • +
    • List resources
    • +
    • Query resource status
    • +
    • Network/storage management
    • +
    • Credential validation
    • +
    +

    Task Services

    +

    Location: provisioning/extensions/taskservs/

    +

    50+ Services in 18 categories:

    +
      +
    • Container runtimes (containerd, podman, crio)
    • +
    • Kubernetes (etcd, coredns, cilium, calico)
    • +
    • Storage (rook-ceph, mayastor, nfs)
    • +
    • Databases (postgres, redis, mongodb)
    • +
    • Networking (ip-aliases, proxy, kms)
    • +
    • Security (webhook, kms, oras)
    • +
    • Observability (prometheus, grafana, loki)
    • +
    • Development (gitea, coder, buildkit)
    • +
    • Hypervisor (kvm, qemu, libvirt)
    • +
    +

    Clusters

    +

    Location: provisioning/extensions/clusters/

    +

    9 Pre-built Templates:

    +
      +
    • web - Web service cluster
    • +
    • oci-reg - Container registry
    • +
    • git - Git hosting (Gitea)
    • +
    • buildkit - Build infrastructure
    • +
    • k8s-ha - HA Kubernetes
    • +
    • postgresql - HA PostgreSQL
    • +
    • cicd-argocd - GitOps CI/CD
    • +
    • cicd-tekton - Tekton pipelines
    • +
    +

    5. Configuration Layer

    +

    Nickel Schemas

    +

    Location: provisioning/schemas/

    +

    Structure (27 directories):

    +
    schemas/
    +├── main.ncl             # Entry point
    +├── lib/                 # Utilities
    +├── config/              # Settings
    +├── infrastructure/      # Servers, networks
    +├── operations/          # Workflows
    +├── deployment/          # Kubernetes
    +├── services/            # Service defs
    +└── versions.ncl         # Tool versions
    +
    +

    3-File Pattern:

    +
      +
    1. contracts.ncl - Type definitions
    2. +
    3. defaults.ncl - Default values
    4. +
    5. main.ncl - Entry point + makers
    6. +
    +

    Component Dependencies

    +
    CLI
    +  ├─ Configuration
    +  ├─ Workspace
    +  ├─ Validation
    +  ├─ Secrets
    +  └─ Providers
    +
    +Providers
    +  └─ Orchestrator
    +
    +Orchestrator
    +  ├─ Task Services
    +  ├─ Control Center
    +  └─ State Manager
    +
    +Control Center
    +  ├─ Authorization
    +  ├─ Audit Logging
    +  └─ State Manager
    +
    +

    Communication Patterns

    +

    Synchronous (Request-Response)

    +
    CLI → Orchestrator → Provider → Cloud API
    +
    +

    Asynchronous (Queue)

    +
    CLI → Orchestrator (queue) → [Background execution]
    +
    +

    Event-Driven

    +
    Provider Event → Orchestrator → State Update
    +                            → Control Center
    +                            → Monitoring
    +
    + + +

    Integration Patterns

    +

    Design patterns for extending and integrating with Provisioning.

    +

    1. Provider Integration Pattern

    +

    Pattern: Add a new cloud provider to Provisioning.

    +

    2. Task Service Integration Pattern

    +

    Pattern: Add infrastructure component.

    +

    3. Cluster Template Pattern

    +

    Pattern: Create pre-configured cluster template.

    +

    4. Batch Workflow Pattern

    +

    Pattern: Create automation workflow for complex operations.

    +

    5. Custom Extension Pattern

    +

    Pattern: Create custom Nushell library.

    +

    6. Authorization Policy Pattern

    +

    Pattern: Define fine-grained access control via Cedar.

    +

    7. Webhook Integration

    +

    Pattern: Trigger Provisioning from external systems.

    +

    8. Monitoring Integration

    +

    Pattern: Export metrics and logs to monitoring systems.

    +

    9. CI/CD Integration

    +

    Pattern: Use Provisioning in automated pipelines.

    +

    10. MCP Tool Integration

    +

    Pattern: Add AI-powered tool via MCP.

    +

    Integration Scenarios

    +

    Multi-Cloud Deployment

    +

    Deploy across UpCloud, AWS, and Hetzner in single workflow.

    +

    GitOps Workflow

    +

    Git changes trigger infrastructure updates via webhooks.

    +

    Self-Service Deployment

    +

    Non-technical users request infrastructure via natural language.

    +

    Best Practices

    +
      +
    1. Use type-safe Nickel schemas
    2. +
    3. Implement proper error handling
    4. +
    5. Log all operations for audit trails
    6. +
    7. Test extensions before production
    8. +
    9. Document configuration & usage
    10. +
    11. Version extensions independently
    12. +
    13. Support backward compatibility
    14. +
    15. Validate inputs & encrypt credentials
    16. +
    + + +

    Architecture Decision Records

    +

    This section contains Architecture Decision Records (ADRs) documenting key architectural decisions and their rationale for the Provisioning platform.

    +

    ADR Index

    +

    Core Architecture Decisions

    + +

    Security and Cryptography

    + +

    Operations and Observability

    + +

    Decision Format

    +

    Each ADR follows this structure:

    +
      +
    • Status: Accepted, Proposed, Deprecated, Superseded
    • +
    • Context: Problem statement and constraints
    • +
    • Decision: The chosen approach
    • +
    • Consequences: Benefits and trade-offs
    • +
    • Alternatives: Other options considered
    • +
    • References: Related ADRs and external docs
    • +
    +

    Rationale for ADRs

    +

    ADRs document the “why” behind architectural choices:

    +
      +
    1. Modular CLI - Scales command set without monolithic registration
    2. +
    3. Workspace-First - Isolates infrastructure and supports multi-tenancy
    4. +
    5. Nickel Source of Truth - Ensures type-safe configuration and prevents runtime errors
    6. +
    7. Microservice Distribution - Enables independent scaling and deployment
    8. +
    9. Communication Protocol - Balances synchronous needs with async event processing
    10. +
    11. Post-Quantum Crypto - Protects against future quantum computing threats
    12. +
    13. Multi-Layer Encryption - Defense in depth against data breaches
    14. +
    15. Observability - Enables rapid troubleshooting and performance analysis
    16. +
    17. SLO Management - Aligns infrastructure quality with business objectives
    18. +
    19. Incident Automation - Reduces MTTR and improves system resilience
    20. +
    +

    Cross-References

    +

    These ADRs interact with:

    +
      +
    • Platform Documentation - See provisioning/docs/src/architecture/
    • +
    • Features - See provisioning/docs/src/features/ for implementation details
    • +
    • Development Guides - See provisioning/docs/src/development/ for extending systems
    • +
    • Security Documentation - See provisioning/docs/src/security/ for compliance details
    • +
    • Operations Guides - See provisioning/docs/src/operations/ for deployment procedures
    • +
    +

    Examples

    +

    Real-world infrastructure as code examples demonstrating Provisioning across +multi-cloud, Kubernetes, security, and operational scenarios.

    +

    Overview

    +

    This section contains production-ready examples showing how to:

    +
      +
    • Deploy infrastructure from basic single-cloud to complex multi-cloud environments
    • +
    • Orchestrate Kubernetes clusters with Provisioning automation
    • +
    • Implement security patterns including encryption, secrets management, and compliance
    • +
    • Build custom workflows for specialized infrastructure operations
    • +
    • Handle disaster recovery with backup strategies and failover procedures
    • +
    • Optimize costs through resource analysis and right-sizing
    • +
    • Migrate legacy systems from traditional infrastructure to cloud-native architectures
    • +
    • Test infrastructure as code with validation, policy checks, and integration tests
    • +
    +

    All examples use Nickel for type-safe configuration and are designed as learning resources and templates for your own deployments.

    +

    Quick Start Examples

    +

    Basic Infrastructure Setup

    +
      +
    • +

      Basic Setup - Single-cloud with networking, +compute, storage - perfect starting point

      +
    • +
    • +

      E-Commerce Platform - Multi-tier +application across AWS and UpCloud with load balancing, databases

      +
    • +
    +

    Multi-Cloud Deployments

    + +

    Operational Examples

    + +

    Advanced Patterns

    + +

    Security and Compliance

    + +

    Cloud Provider Specific

    + +

    Configuration and Migration

    + +

    Example Organization

    +

    Each example follows this structure:

    +
    example-name.md
    +├── Overview - What this example demonstrates
    +├── Prerequisites - Required setup
    +├── Architecture Diagram - Visual representation
    +├── Nickel Configuration - Complete, runnable configuration
    +├── Deployment Steps - Command-by-command instructions
    +├── Verification - How to validate deployment
    +├── Troubleshooting - Common issues and solutions
    +└── Next Steps - How to extend or customize
    +
    +

    Learning Paths

    +

    I’m new to Provisioning

    +
      +
    1. Start with Basic Setup
    2. +
    3. Read Real-World Scenario
    4. +
    5. Try Kubernetes Deployment
    6. +
    +

    I need multi-cloud infrastructure

    +
      +
    1. Review Multi-Cloud Deployment
    2. +
    3. Study Hybrid Cloud Setup
    4. +
    5. Implement Advanced Networking
    6. +
    +

    I need to migrate existing infrastructure

    +
      +
    1. Start with Legacy System Migration
    2. +
    3. Add Terraform Migration if applicable
    4. +
    5. Set up GitOps Deployment
    6. +
    +

    I need enterprise features

    +
      +
    1. Implement Compliance and Audit
    2. +
    3. Set up Disaster Recovery
    4. +
    5. Deploy Cost Governance
    6. +
    7. Configure Secrets Rotation
    8. +
    +

    Copy and Customize

    +

    All examples are self-contained and can be:

    +
      +
    1. Copied into your workspace and adapted
    2. +
    3. Extended with additional resources and customizations
    4. +
    5. Tested using Provisioning’s validation framework
    6. +
    7. Deployed directly via provisioning apply
    8. +
    +

    Use them as templates, learning resources, or reference implementations for your own infrastructure.

    + +
      +
    • Configuration Guide → See provisioning/docs/src/infrastructure/nickel-guide.md
    • +
    • API Reference → See provisioning/docs/src/api-reference/
    • +
    • Development → See provisioning/docs/src/development/
    • +
    • Operations → See provisioning/docs/src/operations/
    • +
    +

    Basic Setup

    +

    Simple infrastructure setup examples for getting started with the Provisioning platform.

    +

    Single Server Deployment

    +

    Deploy a simple web server with UpCloud:

    +
    # workspace/infra/web-server.ncl
    +{
    +  servers = [
    +    {
    +      name = "web-01",
    +      provider = 'upcloud,
    +      plan = 'medium,
    +      zone = "fi-hel1",
    +      storage = [
    +        {size_gb = 50, type = 'ssd}
    +      ]
    +    }
    +  ]
    +}
    +
    +

    Deploy:

    +
    provisioning workspace create basic-web
    +cd basic-web
    +cp ../examples/web-server.ncl infra/
    +
    +provisioning deploy --workspace basic-web --yes
    +
    +

    Three-Tier Application

    +

    Web frontend, application backend, database:

    +
    {
    +  servers = [
    +    {name = "web-01", provider = 'upcloud, plan = 'small, zone = "fi-hel1"},
    +    {name = "app-01", provider = 'upcloud, plan = 'medium, zone = "fi-hel1"},
    +    {name = "db-01", provider = 'upcloud, plan = 'large, zone = "fi-hel1",
    +     storage = [{size_gb = 100, type = 'ssd}]},
    +  ],
    +
    +  task_services = [
    +    {name = "nginx", target = "web-01"},
    +    {name = "nodejs", target = "app-01"},
    +    {name = "postgresql", target = "db-01"},
    +  ]
    +}
    +
    +

    Development Environment

    +

    Local development stack with Docker:

    +
    {
    +  servers = [
    +    {name = "dev-local", provider = 'local, plan = 'medium}
    +  ],
    +
    +  task_services = [
    +    {name = "docker"},
    +    {name = "postgresql"},
    +    {name = "redis"},
    +  ]
    +}
    +
    +

    References

    + +

    Multi-Cloud Examples

    +

    Deploy infrastructure across multiple cloud providers for redundancy and geographic distribution.

    +

    Primary-Backup Configuration

    +

    UpCloud primary in Europe, AWS backup in US:

    +
    {
    +  servers = [
    +    # Primary (UpCloud EU)
    +    {name = "web-eu", provider = 'upcloud, zone = "fi-hel1", plan = 'medium},
    +    {name = "db-eu", provider = 'upcloud, zone = "fi-hel1", plan = 'large},
    +
    +    # Backup (AWS US)
    +    {name = "web-us", provider = 'aws, zone = "us-east-1a", plan = 't3.medium},
    +    {name = "db-us", provider = 'aws, zone = "us-east-1a", plan = 'm5.large},
    +  ],
    +
    +  replication = {
    +    enabled = true,
    +    pairs = [
    +      {primary = "db-eu", standby = "db-us", mode = 'async}
    +    ]
    +  }
    +}
    +
    +

    Geographic Distribution

    +

    Deploy to multiple regions for low latency:

    +
    {
    +  servers = [
    +    {name = "web-eu", provider = 'upcloud, zone = "fi-hel1"},
    +    {name = "web-us", provider = 'aws, zone = "us-west-2a"},
    +    {name = "web-asia", provider = 'aws, zone = "ap-southeast-1a"},
    +  ],
    +
    +  load_balancing = {
    +    global = true,
    +    geo_routing = true
    +  }
    +}
    +
    +

    References

    + +

    Kubernetes Deployment Examples

    +

    Deploy production-ready Kubernetes clusters with the Provisioning platform.

    +

    Basic Kubernetes Cluster

    +

    3-node cluster with Cilium CNI:

    +
    {
    +  task_services = [
    +    {
    +      name = "kubernetes",
    +      config = {
    +        control_plane = {nodes = 3, plan = 'medium},
    +        workers = [{name = "default", nodes = 3, plan = 'large}],
    +        networking = {
    +          cni = 'cilium,
    +          pod_cidr = "10.42.0.0/16",
    +          service_cidr = "10.43.0.0/16"
    +        }
    +      }
    +    }
    +  ]
    +}
    +
    +

    Production Cluster with Storage

    +

    Kubernetes with Rook-Ceph storage:

    +
    {
    +  task_services = [
    +    {
    +      name = "kubernetes",
    +      config = {
    +        control_plane = {nodes = 3, plan = 'medium},
    +        workers = [
    +          {name = "general", nodes = 5, plan = 'large},
    +          {name = "storage", nodes = 3, plan = 'xlarge,
    +           storage = [{size_gb = 500, type = 'ssd}]}
    +        ],
    +        networking = {cni = 'cilium}
    +      }
    +    },
    +    {
    +      name = "rook-ceph",
    +      config = {
    +        storage_nodes = ["storage-0", "storage-1", "storage-2"],
    +        osd_per_device = 1
    +      }
    +    }
    +  ]
    +}
    +
    +

    References

    + +

    Custom Workflow Examples

    +

    Build complex deployment workflows with dependency management and parallel execution.

    +

    Multi-Stage Deployment

    +
    {
    +  workflows = [{
    +    name = "app-deployment",
    +    steps = [
    +      {name = "provision-infrastructure", type = 'provision},
    +      {name = "install-kubernetes", type = 'task, depends_on = ["provision-infrastructure"]},
    +      {name = "deploy-application", type = 'task, depends_on = ["install-kubernetes"]},
    +      {name = "configure-monitoring", type = 'task, depends_on = ["deploy-application"]}
    +    ]
    +  }]
    +}
    +
    +

    Parallel Regional Deployment

    +
    {
    +  workflows = [{
    +    name = "global-rollout",
    +    steps = [
    +      {name = "deploy-eu", type = 'task},
    +      {name = "deploy-us", type = 'task},
    +      {name = "deploy-asia", type = 'task},
    +      {name = "configure-dns", type = 'configure,
    +       depends_on = ["deploy-eu", "deploy-us", "deploy-asia"]}
    +    ]
    +  }]
    +}
    +
    +

    References

    + +

    Security Configuration Examples

    +

    Security configuration examples for authentication, encryption, and secrets management.

    +

    Complete Security Configuration

    +
    {
    +  security = {
    +    authentication = {
    +      enabled = true,
    +      jwt_algorithm = "RS256",
    +      mfa_required = true
    +    },
    +
    +    secrets = {
    +      backend = "secretumvault",
    +      url = " [https://vault.example.com",](https://vault.example.com",)
    +      auto_rotate = true,
    +      rotation_days = 90
    +    },
    +
    +    encryption = {
    +      at_rest = true,
    +      algorithm = "AES-256-GCM",
    +      kms_backend = "secretumvault"
    +    },
    +
    +    audit = {
    +      enabled = true,
    +      retention_days = 2555,
    +      export_format = "json"
    +    }
    +  }
    +}
    +
    +

    SecretumVault Integration

    +
    # Configure SecretumVault
    +provisioning config set security.secrets.backend secretumvault
    +provisioning config set security.secrets.url  [http://localhost:8200](http://localhost:8200)
    +
    +# Store secrets
    +provisioning vault put database/password --value="secret123"
    +
    +# Retrieve secrets
    +provisioning vault get database/password
    +
    +

    Encrypted Infrastructure Configuration

    +
    {
    +  providers.upcloud = {
    +    username = "admin",
    +    password = std.secret "UPCLOUD_PASSWORD"  # Encrypted
    +  },
    +
    +  databases = [{
    +    name = "production-db",
    +    password = std.secret "DB_PASSWORD"  # Encrypted
    +  }]
    +}
    +
    +

    References

    + +

    Troubleshooting

    +

    Systematic problem-solving guides and debugging procedures for diagnosing and resolving issues with the Provisioning platform.

    +

    Overview

    +

    This section helps you:

    +
      +
    • Solve common issues - Database connection errors, authentication failures, deployment failures
    • +
    • Debug problems - Diagnostic tools, log analysis, tracing execution paths
    • +
    • Analyze logs - Log aggregation, filtering, searching, pattern recognition
    • +
    • Understand errors - Error message interpretation and root cause analysis
    • +
    • Get support - Knowledge base, community resources, professional support
    • +
    +

    Organized by problem type and component for quick navigation.

    +

    Troubleshooting Guides

    +

    Quick Problem Solving

    +
      +
    • +

      Common Issues - Authentication failures, +deployment errors, configuration, resource limits, network problems

      +
    • +
    • +

      Debug Guide - Debug logging, verbose output, trace +execution, collect diagnostics, analyze stack traces

      +
    • +
    • +

      Logs Analysis - Find logs, search techniques, +log patterns, interpreting errors, diagnostics

      +
    • +
    +

    Component-Specific Troubleshooting

    +

    Each microservice and component has its own troubleshooting section:

    +
      +
    • Orchestrator Issues - Workflow failures, scheduling problems, state inconsistencies
    • +
    • Control Center Issues - API errors, permission problems, configuration issues
    • +
    • Vault Service Issues - Secret access failures, key rotation problems, authentication errors
    • +
    • Detector Issues - Analysis failures, false positives, configuration problems
    • +
    • Extension Registry Issues - Provider loading, dependency resolution, versioning conflicts
    • +
    +

    Infrastructure and Configuration

    +
      +
    • Configuration Problems - Nickel syntax errors, schema validation failures, type mismatches
    • +
    • Provider Issues - Authentication failures, API limits, resource creation failures
    • +
    • Task Service Failures - Service-specific errors, timeout issues, state management problems
    • +
    • Network Problems - Connectivity issues, DNS resolution, firewall rules, certificate problems
    • +
    +

    Problem Diagnosis Flowchart

    +
    Issue Occurs
    +    ↓
    +Is it an authentication issue? → See [Common Issues](./common-issues.md) - Authentication
    +    ↓ No
    +Is it a deployment failure? → See [Common Issues](./common-issues.md) - Deployment
    +    ↓ No
    +Is it a configuration error? → See [Debug Guide](./debug-guide.md) - Configuration
    +    ↓ No
    +Enable debug logging → See [Debug Guide](./debug-guide.md)
    +    ↓
    +Collect logs and traces → See [Logs Analysis](./logs-analysis.md)
    +    ↓
    +Analyze patterns → Identify root cause
    +    ↓
    +Apply fix or escalate
    +
    +

    Quick Reference: Common Problems

    +
    + + + + + + + + +
    ProblemSolutionGuide
    “Authentication failed”Check credentials, enable MFACommon Issues
    “Permission denied”Verify RBAC policies, check Cedar rulesCommon Issues
    “Deployment failed”Check logs, verify resources, test connectivityDebug Guide
    “Configuration invalid”Validate Nickel schema, check typesCommon Issues
    “Provider unavailable”Check API keys, verify connectivityCommon Issues
    “Resource creation failed”Check resource limits, verify accountDebug Guide
    “Timeout”Increase timeouts, check performanceDebug Guide
    “Database error”Check connections, verify schemaCommon Issues
    +
    +

    Debugging Workflow

    +
      +
    1. Reproduce - Can you consistently reproduce the issue?
    2. +
    3. Enable Debug Logging - Set RUST_LOG=debug and PROVISIONING_LOG_LEVEL=debug
    4. +
    5. Collect Evidence - Logs, configuration, error messages, stack traces
    6. +
    7. Analyze Patterns - Look for errors, warnings, unusual timing
    8. +
    9. Identify Cause - Root cause analysis
    10. +
    11. Test Fix - Verify the fix resolves the issue
    12. +
    13. Prevent Recurrence - Update documentation, add tests
    14. +
    +

    Enable Diagnostic Logging

    +
    # Set log level to debug
    +export RUST_LOG=debug
    +export PROVISIONING_LOG_LEVEL=debug
    +
    +# Collect logs to file
    +provisioning config set logging.file /var/log/provisioning.log
    +provisioning config set logging.level debug
    +
    +# Enable verbose output
    +provisioning --verbose <command>
    +
    +# Run with tracing
    +RUST_BACKTRACE=1 provisioning <command>
    +
    +

    Common Error Codes

    +
    + + + + + + + +
    CodeMeaningAction
    401UnauthorizedCheck authentication credentials
    403ForbiddenCheck authorization policies
    404Not FoundVerify resource exists
    409ConflictResolve state conflicts
    422InvalidVerify configuration schema
    500Internal ErrorCheck server logs
    503Service UnavailableWait for service to recover
    +
    +

    Escalation Paths

    +

    Community Support

    +
      +
    1. Check Common Issues
    2. +
    3. Search community forums
    4. +
    5. Ask on GitHub discussions
    6. +
    +

    Professional Support

    +
      +
    1. Open a support ticket
    2. +
    3. Provide: logs, configuration, reproduction steps
    4. +
    5. Wait for response
    6. +
    +

    Emergency Issues (Security, Data Loss)

    +
      +
    1. Contact security team immediately
    2. +
    3. Provide all evidence
    4. +
    5. Document timeline
    6. +
    +

    Support Resources

    +
      +
    • Documentation → Complete guides in provisioning/docs/src/
    • +
    • GitHub Issues → Community issues and discussions
    • +
    • Slack Community → Real-time community support
    • +
    • Email Supportprofessional@provisioning.io
    • +
    • Chat Support → Available during business hours
    • +
    + +
      +
    • Operations Guide → See provisioning/docs/src/operations/
    • +
    • Architecture → See provisioning/docs/src/architecture/
    • +
    • Features → See provisioning/docs/src/features/
    • +
    • Development → See provisioning/docs/src/development/
    • +
    • Examples → See provisioning/docs/src/examples/
    • +
    +

    Common Issues

    +

    Debug Guide

    +

    Logs Analysis

    +

    Getting Help

    +

    AI & Machine Learning

    +

    Provisioning includes comprehensive AI capabilities for infrastructure automation via natural +language, intelligent configuration suggestions, and anomaly detection.

    +

    Overview

    +

    The AI system consists of three integrated components:

    +
      +
    1. TypeDialog AI Backends - Interactive form intelligence and agent automation
    2. +
    3. AI Service Microservice - Central AI processing and coordination
    4. +
    5. Core AI Libraries - Nushell query processing and LLM integration
    6. +
    +

    Key Capabilities

    +

    Natural Language Infrastructure

    +

    Request infrastructure changes in plain English:

    +
    # Natural language request
    +provisioning ai "Create 3 web servers with load balancing and auto-scaling"
    +
    +# Returns:
    +# - Parsed infrastructure requirements
    +# - Generated Nickel configuration
    +# - Deployment confirmation
    +
    +

    Intelligent Configuration

    +

    AI suggests optimal configurations based on context:

    +
      +
    • Database selection and tuning
    • +
    • Network topology recommendations
    • +
    • Security policy generation
    • +
    • Resource allocation optimization
    • +
    +

    Anomaly Detection

    +

    Continuous monitoring and intelligent alerting:

    +
      +
    • Infrastructure health anomalies
    • +
    • Performance pattern detection
    • +
    • Security issue identification
    • +
    • Predictive alerting
    • +
    +

    Components at a Glance

    +
    + + + + + + +
    ComponentPurposeTechnology
    typedialog-aiForm intelligence & suggestionsHTTP server, SurrealDB
    typedialog-agAI agents & workflow automationType-safe agents, Nickel transpilation
    ai-serviceCentral AI microserviceRust, LLM integration
    ragKnowledge base retrievalSemantic search, embeddings
    mcp-serverModel Context ProtocolAI tool interface
    detectorAnomaly detection systemPattern recognition
    +
    +

    Quick Start

    +

    Enable AI Features

    +
    # Install AI tools
    +provisioning install ai-tools
    +
    +# Configure AI service
    +provisioning ai configure --provider openai --model gpt-4
    +
    +# Test AI capabilities
    +provisioning ai test
    +
    +

    Use Natural Language

    +
    # Simple request
    +provisioning ai "Create a Kubernetes cluster"
    +
    +# Complex request with options
    +provisioning ai "Deploy PostgreSQL HA cluster with replication in AWS, backup to S3"
    +
    +# Get help on AI features
    +provisioning help ai
    +
    +

    Architecture

    +

    The AI system follows a layered architecture:

    +
    ┌─────────────────────────────────┐
    +│  User Interface Layer            │
    +│  • Natural language input        │
    +│  • TypeDialog AI forms           │
    +│  • Chat interface                │
    +└────────────┬────────────────────┘
    +             ↓
    +┌─────────────────────────────────┐
    +│  AI Orchestration Layer          │
    +│  • AI Service (Rust)             │
    +│  • Query processing (Nushell)    │
    +│  • Intent recognition            │
    +└────────────┬────────────────────┘
    +             ↓
    +┌─────────────────────────────────┐
    +│  Knowledge & Processing Layer    │
    +│  • RAG (Retrieval)               │
    +│  • LLM Integration               │
    +│  • MCP Server                    │
    +│  • Detector (anomalies)          │
    +└────────────┬────────────────────┘
    +             ↓
    +┌─────────────────────────────────┐
    +│  Infrastructure Layer            │
    +│  • Nickel configuration          │
    +│  • Deployment execution          │
    +│  • Monitoring & feedback         │
    +└─────────────────────────────────┘
    +
    +

    Topics

    + +

    Configuration

    +

    Environment Variables

    +
    # LLM Provider
    +export PROVISIONING_AI_PROVIDER=openai        # openai, anthropic, local
    +export PROVISIONING_AI_MODEL=gpt-4            # Model identifier
    +export PROVISIONING_AI_API_KEY=sk-...         # API key
    +
    +# AI Service
    +export PROVISIONING_AI_SERVICE_PORT=9091      # AI service port
    +export PROVISIONING_AI_ENABLE_ANOMALY=true    # Enable detector
    +export PROVISIONING_AI_RAG_THRESHOLD=0.75     # Similarity threshold
    +
    +

    Configuration File

    +
    # ~/.config/provisioning/ai.yaml
    +ai:
    +  enabled: true
    +  provider: openai
    +  model: gpt-4
    +  api_key: ${PROVISIONING_AI_API_KEY}
    +
    +  service:
    +    port: 9091
    +    timeout: 30
    +    max_retries: 3
    +
    +  typedialog:
    +    ai_enabled: true
    +    ag_enabled: true
    +    suggestions: true
    +
    +  rag:
    +    enabled: true
    +    similarity_threshold: 0.75
    +    max_results: 5
    +
    +  detector:
    +    enabled: true
    +    update_interval: 60
    +    alert_threshold: 0.8
    +
    +

    Use Cases

    +

    1. Infrastructure from Description

    +

    Describe infrastructure in natural language, get Nickel configuration:

    +
    provisioning ai deploy "
    +  Create a production Kubernetes cluster with:
    +  - 3 control planes
    +  - 5 worker nodes
    +  - HA PostgreSQL (3 nodes)
    +  - Prometheus monitoring
    +  - Encrypted networking
    +"
    +
    +

    2. Configuration Assistance

    +

    Get AI suggestions while filling out forms:

    +
    provisioning setup profile
    +# TypeDialog shows suggestions based on context
    +# Database recommendations based on workload
    +# Security settings optimized for environment
    +
    +

    3. Troubleshooting

    +

    AI analyzes logs and suggests fixes:

    +
    provisioning ai troubleshoot --service orchestrator
    +
    +# Output:
    +# Issue detected: High memory usage
    +# Likely cause: Task queue backlog
    +# Suggestion: Scale orchestrator replicas to 3
    +# Command: provisioning orchestrator scale --replicas 3
    +
    +

    4. Anomaly Detection

    +

    Continuous monitoring with intelligent alerts:

    +
    provisioning ai anomalies --since 1h
    +
    +# Output:
    +# ⚠️  Unusual pattern detected
    +# Time: 2026-01-16T01:47:00Z
    +# Service: control-center
    +# Metric: API response time
    +# Baseline: 45ms → Current: 320ms (+611%)
    +# Likelihood: Query performance regression
    +
    +

    Limitations

    +
      +
    • LLM Dependency: Requires external LLM provider (OpenAI, Anthropic, etc.)
    • +
    • Network Required: Cloud-based LLM providers need internet connectivity
    • +
    • Context Window: Large infrastructures may exceed LLM context limits
    • +
    • Cost: API calls incur per-token charges
    • +
    • Latency: Natural language processing adds response latency (2-5 seconds)
    • +
    +

    Configuration Files

    +

    Key files for AI configuration:

    +
    + + + + + + +
    FilePurpose
    .typedialog/ai.dbAI SurrealDB database (typedialog-ai)
    .typedialog/agent-*.yamlAI agent definitions (typedialog-ag)
    ~/.config/provisioning/ai.yamlUser AI settings
    provisioning/core/versions.nclTypeDialog versions
    core/nulib/lib_provisioning/ai/Core AI libraries
    platform/crates/ai-service/AI service crate
    +
    +

    Performance

    +

    Typical Latencies

    +
    + + + + +
    OperationLatency
    Simple request parsing100-200ms
    LLM inference2-5 seconds
    Configuration generation500ms-1s
    Anomaly detection50-100ms
    +
    +

    Scalability

    +
      +
    • Concurrent requests: 100+ (load balanced)
    • +
    • Query processing: 10,000+ queries/second
    • +
    • RAG similarity search: <50ms for 1M documents
    • +
    • Anomaly detection: Real-time on 1000+ metrics
    • +
    +

    Security

    +

    API Keys

    +
      +
    • Stored encrypted in vault-service
    • +
    • Never logged or persisted in plain text
    • +
    • Rotated automatically (configurable)
    • +
    • Audit trail for all API usage
    • +
    +

    Data Privacy

    +
      +
    • Natural language queries not stored by default
    • +
    • LLM provider agreements (OpenAI terms, etc.)
    • +
    • Local-only RAG option available
    • +
    • GDPR compliance support
    • +
    + + +

    AI Architecture

    +

    Complete system architecture of Provisioning’s AI capabilities, from user interface through infrastructure generation.

    +

    System Overview

    +
    ┌──────────────────────────────────────────────────┐
    +│  User Interface Layer                            │
    +│  • CLI (natural language)                        │
    +│  • TypeDialog AI forms                           │
    +│  • Interactive wizards                           │
    +│  • Web dashboard                                 │
    +└────────────────────┬─────────────────────────────┘
    +                     ↓
    +┌──────────────────────────────────────────────────┐
    +│  Request Processing Layer                        │
    +│  • Intent recognition                            │
    +│  • Entity extraction                             │
    +│  • Context parsing                               │
    +│  • Request validation                            │
    +└────────────────────┬─────────────────────────────┘
    +                     ↓
    +┌──────────────────────────────────────────────────┐
    +│  Knowledge & Retrieval Layer (RAG)              │
    +│  • Document embedding                            │
    +│  • Vector similarity search                      │
    +│  • Keyword matching (BM25)                       │
    +│  • Hybrid ranking                                │
    +└────────────────────┬─────────────────────────────┘
    +                     ↓
    +┌──────────────────────────────────────────────────┐
    +│  LLM Integration Layer                           │
    +│  • MCP tool registration                         │
    +│  • Context augmentation                          │
    +│  • Prompt engineering                            │
    +│  • LLM API calls (OpenAI, Anthropic, etc.)      │
    +└────────────────────┬─────────────────────────────┘
    +                     ↓
    +┌──────────────────────────────────────────────────┐
    +│  Configuration Generation Layer                  │
    +│  • Nickel code generation                        │
    +│  • Schema validation                             │
    +│  • Constraint checking                           │
    +│  • Cost estimation                               │
    +└────────────────────┬─────────────────────────────┘
    +                     ↓
    +┌──────────────────────────────────────────────────┐
    +│  Execution & Feedback Layer                      │
    +│  • DAG planning                                  │
    +│  • Dry-run simulation                            │
    +│  • Deployment execution                          │
    +│  • Performance monitoring                        │
    +└──────────────────────────────────────────────────┘
    +
    +

    Component Architecture

    +

    1. User Interface Layer

    +

    Entry Points:

    +
    Natural Language Input
    +    ├─ CLI: provisioning ai "create kubernetes cluster"
    +    ├─ Interactive: provisioning ai interactive
    +    ├─ Forms: TypeDialog AI-enhanced forms
    +    └─ Web Dashboard: /ai/infrastructure-builder
    +
    +

    Processing:

    +
      +
    • Tokenization and normalization
    • +
    • Command pattern matching
    • +
    • Ambiguity resolution
    • +
    • Confidence scoring
    • +
    +

    2. Intent Recognition

    +
    User Request
    +    ↓
    +Intent Classification
    +    ├─ Create infrastructure (60%)
    +    ├─ Modify configuration (25%)
    +    ├─ Query knowledge (10%)
    +    └─ Troubleshoot issue (5%)
    +    ↓
    +Entity Extraction
    +    ├─ Resource type (server, database, cluster)
    +    ├─ Cloud provider (AWS, UpCloud, Hetzner)
    +    ├─ Count/Scale (3 nodes, 10GB)
    +    ├─ Requirements (HA, encrypted, monitoring)
    +    └─ Constraints (budget, region, environment)
    +    ↓
    +Request Structure
    +
    +

    3. RAG Knowledge Retrieval

    +

    Embedding Process:

    +
    Query: "Create 3 web servers with load balancer"
    +    ↓
    +Embed Query → Vector [0.234, 0.567, 0.891, ...]
    +    ↓
    +Search Relevant Documents
    +    ├─ Vector similarity (semantic)
    +    ├─ BM25 keyword matching (syntactic)
    +    └─ Hybrid ranking
    +    ↓
    +Top Results:
    +    1. "Web Server HA Patterns" (0.94 similarity)
    +    2. "Load Balancing Best Practices" (0.87)
    +    3. "Auto-Scaling Configuration" (0.76)
    +    ↓
    +Extract Context & Augment Prompt
    +
    +

    Knowledge Organization:

    +
    knowledge/
    +├── infrastructure/           (450 docs)
    +│   ├── kubernetes/
    +│   ├── databases/
    +│   ├── networking/
    +│   └── web-services/
    +├── best-practices/          (300 docs)
    +│   ├── high-availability/
    +│   ├── disaster-recovery/
    +│   └── performance/
    +├── providers/               (250 docs)
    +│   ├── aws/
    +│   ├── upcloud/
    +│   └── hetzner/
    +└── security/                (200 docs)
    +    ├── encryption/
    +    ├── authentication/
    +    └── compliance/
    +
    +

    4. LLM Integration (MCP)

    +

    Tool Registration:

    +
    LLM (GPT-4, Claude 3)
    +    ↓
    +MCP Server (provisioning-mcp)
    +    ↓
    +Available Tools:
    +    ├─ create_infrastructure
    +    ├─ analyze_configuration
    +    ├─ generate_policies
    +    ├─ estimate_costs
    +    ├─ check_compatibility
    +    ├─ validate_nickel
    +    ├─ query_knowledge_base
    +    └─ get_recommendations
    +    ↓
    +Tool Execution
    +
    +

    Prompt Engineering Pipeline:

    +
    Base Prompt Template
    +    ↓
    +Add Context (RAG results)
    +    ↓
    +Add Constraints
    +    ├─ Budget limit
    +    ├─ Region restrictions
    +    ├─ Compliance requirements
    +    └─ Performance targets
    +    ↓
    +Add Examples
    +    ├─ Successful deployments
    +    ├─ Error patterns
    +    └─ Best practices
    +    ↓
    +Enhanced Prompt
    +    ↓
    +LLM Inference
    +
    +

    5. Configuration Generation

    +

    Nickel Code Generation:

    +
    LLM Output (structured)
    +    ↓
    +Nickel Template Filling
    +    ├─ Server definitions
    +    ├─ Network configuration
    +    ├─ Storage setup
    +    └─ Monitoring config
    +    ↓
    +Generated Nickel File
    +    ↓
    +Syntax Validation
    +    ↓
    +Schema Validation (Type Checking)
    +    ↓
    +Constraint Verification
    +    ├─ Resource limits
    +    ├─ Budget constraints
    +    ├─ Compliance policies
    +    └─ Provider capabilities
    +    ↓
    +Cost Estimation
    +    ↓
    +Final Configuration
    +
    +

    6. Execution & Feedback

    +

    Deployment Planning:

    +
    Configuration
    +    ↓
    +DAG Generation (Directed Acyclic Graph)
    +    ├─ Task decomposition
    +    ├─ Dependency analysis
    +    ├─ Parallelization
    +    └─ Scheduling
    +    ↓
    +Dry-Run Simulation
    +    ├─ Check resources available
    +    ├─ Validate API access
    +    ├─ Estimate time
    +    └─ Identify risks
    +    ↓
    +Execution with Checkpoints
    +    ├─ Create resources
    +    ├─ Monitor progress
    +    ├─ Collect metrics
    +    └─ Save checkpoints
    +    ↓
    +Post-Deployment
    +    ├─ Verify functionality
    +    ├─ Run health checks
    +    ├─ Collect performance data
    +    └─ Store feedback for future improvements
    +
    +

    Data Flow Examples

    +

    Example 1: Simple Request

    +
    User: "Create 3 web servers with load balancer"
    +    ↓
    +Intent: Create Infrastructure
    +Entities: type=server, count=3, load_balancer=true
    +    ↓
    +RAG Retrieval: "Web Server Patterns", "Load Balancing"
    +    ↓
    +LLM Prompt:
    +"Generate Nickel config for 3 web servers with load balancer.
    +Context: [web server best practices from knowledge base]
    +Constraints: High availability, auto-scaling enabled"
    +    ↓
    +Generated Nickel:
    +{
    +  servers = [
    +    {name = "web-01", cpu = 4, memory = 8},
    +    {name = "web-02", cpu = 4, memory = 8},
    +    {name = "web-03", cpu = 4, memory = 8}
    +  ]
    +  load_balancer = {
    +    type = "application"
    +    health_check = "/health"
    +  }
    +}
    +    ↓
    +Configuration Generated & Validated ✓
    +    ↓
    +User Approval
    +    ↓
    +Deployment
    +
    +

    Example 2: Complex Multi-Cloud Request

    +
    User: "Deploy Kubernetes to AWS, UpCloud, and Hetzner with replication"
    +    ↓
    +Intent: Multi-Cloud Infrastructure
    +Entities: type=kubernetes, providers=[aws, upcloud, hetzner], replicas=3
    +    ↓
    +RAG Retrieval:
    +    - "Multi-Cloud Kubernetes Patterns"
    +    - "Inter-Region Replication"
    +    - "AWS Kubernetes Setup"
    +    - "UpCloud Kubernetes Setup"
    +    - "Hetzner Kubernetes Setup"
    +    ↓
    +LLM Processes:
    +    1. Analyze multi-cloud topology
    +    2. Identify networking requirements
    +    3. Plan data replication strategy
    +    4. Consider regional compliance
    +    ↓
    +Generated Nickel:
    +    - Infrastructure definitions for each provider
    +    - Inter-region networking configuration
    +    - Replication topology
    +    - Failover policies
    +    ↓
    +Cost Breakdown:
    +    AWS: $2,500/month
    +    UpCloud: $1,800/month
    +    Hetzner: $1,500/month
    +    Total: $5,800/month
    +    ↓
    +Compliance Check: EU GDPR ✓, US HIPAA ✓
    +    ↓
    +Ready for Deployment
    +
    +

    Key Technologies

    +

    LLM Providers

    +

    Supported external LLM providers:

    +
    + + + +
    ProviderModelsLatencyCost
    OpenAIGPT-4, GPT-3.52-3s$0.05-0.15/1K tokens
    AnthropicClaude 3 Opus2-4s$0.03-0.015/1K tokens
    Local (Ollama)Llama 2, Mistral5-10sFree
    +
    +

    Vector Databases

    +
      +
    • SurrealDB (default): Embedded vector database with HNSW indexing
    • +
    • Pinecone: Cloud vector database (optional)
    • +
    • Milvus: Open-source vector database (optional)
    • +
    +

    Embedding Models

    +
      +
    • text-embedding-3-small (OpenAI): 1,536 dimensions
    • +
    • text-embedding-3-large (OpenAI): 3,072 dimensions
    • +
    • all-MiniLM-L6-v2 (local): 384 dimensions
    • +
    +

    Performance Characteristics

    +

    Latency Breakdown

    +

    For a typical infrastructure creation request:

    +
    + + + + + + +
    StageLatencyDetails
    Intent Recognition50-100msLocal NLP
    RAG Retrieval50-100msVector search
    LLM Inference2-5sExternal API
    Nickel Generation100-200msTemplate filling
    Validation200-500msType checking
    Total2.5-6 secondsEnd-to-end
    +
    +

    Concurrency

    +
      +
    • Concurrent Requests: 100+ (with load balancing)
    • +
    • RAG QPS: 50+ searches/second
    • +
    • LLM Throughput: 10+ concurrent requests per API key
    • +
    • Memory: 500MB-2GB (depends on cache size)
    • +
    +

    Security Architecture

    +

    Data Protection

    +
    User Input
    +    ↓
    +Input Sanitization
    +    ├─ Remove PII
    +    ├─ Validate constraints
    +    └─ Check permissions
    +    ↓
    +Processing (encrypted in transit)
    +    ├─ TLS 1.3 to LLM provider
    +    ├─ Secrets stored in vault-service
    +    └─ Credentials never logged
    +    ↓
    +Generated Configuration
    +    ├─ Encrypted at rest (AES-256)
    +    ├─ Signed for integrity
    +    └─ Audit trail maintained
    +    ↓
    +Output
    +
    +

    Access Control

    +
      +
    • API key validation
    • +
    • RBAC permission checking
    • +
    • Rate limiting per user/key
    • +
    • Audit logging of all operations
    • +
    +

    Extensibility

    +

    Custom Tools

    +

    Register custom tools with MCP:

    +
    // Custom tool example
    +register_tool("custom-validator", | confi| g {
    +    validate_custom_requirements(&config)
    +});
    +

    Custom RAG Documents

    +

    Add domain-specific knowledge:

    +
    provisioning ai knowledge import \
    +  --source ./custom-docs \
    +  --category infrastructure
    +
    +

    Fine-tuning (Future)

    +
      +
    • Support for fine-tuned LLM models
    • +
    • Custom prompt templates
    • +
    • Organization-specific knowledge bases
    • +
    + + +

    TypeDialog AI & AG Integration

    +

    TypeDialog provides two AI-powered tools for Provisioning: typedialog-ai (configuration assistant) and typedialog-ag (agent automation).

    +

    TypeDialog Components

    +

    typedialog-ai v0.1.0

    +

    AI Assistant - HTTP server backend for intelligent form suggestions and infrastructure recommendations.

    +

    Purpose: Enhance interactive forms with AI-powered suggestions and natural language parsing.

    +

    Architecture:

    +
    TypeDialog Form
    +    ↓
    +typedialog-ai HTTP Server
    +    ↓
    +SurrealDB Backend
    +    ↓
    +LLM Provider (OpenAI, Anthropic, etc.)
    +    ↓
    +Suggestions → Deployed Config
    +
    +

    Key Features:

    +
      +
    • Form Intelligence: Context-aware field suggestions
    • +
    • Database Recommendations: Suggest database type/configuration based on workload
    • +
    • Network Optimization: Generate optimal network topology
    • +
    • Security Policies: AI-generated Cedar policies
    • +
    • Cost Estimation: Predict infrastructure costs
    • +
    +

    Installation:

    +
    # Via provisioning script
    +provisioning install ai-tools
    +
    +# Manual installation
    +wget  [https://github.com/typedialog/typedialog-ai/releases/download/v0.1.0/typedialog-ai-<os>-<arch>](https://github.com/typedialog/typedialog-ai/releases/download/v0.1.0/typedialog-ai-<os>-<arch>)
    +chmod +x typedialog-ai
    +mv typedialog-ai ~/.local/bin/
    +
    +

    Usage:

    +
    # Start AI server
    +typedialog ai serve --db-path ~/.typedialog/ai.db --port 9000
    +
    +# Test connection
    +curl  [http://localhost:9000/health](http://localhost:9000/health)
    +
    +# Get suggestion for database
    +curl -X POST  [http://localhost:9000/suggest/database](http://localhost:9000/suggest/database) \
    +  -H "Content-Type: application/json" \
    +  -d '{"workload": "transactional", "size": "1TB", "replicas": 3}'
    +
    +# Response:
    +# {"suggestion": "PostgreSQL 15 with pgvector", "confidence": 0.92}
    +
    +

    Configuration:

    +
    # ~/.typedialog/ai-config.yaml
    +typedialog-ai:
    +  port: 9000
    +  db_path: ~/.typedialog/ai.db
    +  loglevel: info
    +
    +  llm:
    +    provider: openai              # or: anthropic, local
    +    model: gpt-4
    +    api_key: ${OPENAI_API_KEY}
    +    temperature: 0.7
    +
    +  features:
    +    form_suggestions: true
    +    database_recommendations: true
    +    network_optimization: true
    +    security_policy_generation: true
    +    cost_estimation: true
    +
    +  cache:
    +    enabled: true
    +    ttl: 3600
    +
    +

    Database Schema:

    +
    -- SurrealDB schema for AI suggestions
    +DEFINE TABLE ai_suggestions SCHEMAFULL
    +DEFINE FIELD timestamp ON ai_suggestions TYPE datetime DEFAULT now();
    +DEFINE FIELD context ON ai_suggestions TYPE object;
    +DEFINE FIELD suggestion ON ai_suggestions TYPE string;
    +DEFINE FIELD confidence ON ai_suggestions TYPE float;
    +DEFINE FIELD accepted ON ai_suggestions TYPE bool;
    +
    +DEFINE TABLE ai_models SCHEMAFULL
    +DEFINE FIELD name ON ai_models TYPE string;
    +DEFINE FIELD version ON ai_models TYPE string;
    +DEFINE FIELD provider ON ai_models TYPE string;
    +
    +

    Endpoints:

    +
    + + + + + + + +
    EndpointMethodPurpose
    /healthGETHealth check
    /suggest/databasePOSTDatabase recommendations
    /suggest/networkPOSTNetwork topology
    /suggest/securityPOSTSecurity policies
    /estimate/costPOSTCost estimation
    /parse/natural-languagePOSTParse natural language
    /feedbackPOSTStore suggestion feedback
    +
    +

    typedialog-ag v0.1.0

    +

    AI Agents - Type-safe agents for automation workflows and Nickel transpilation.

    +

    Purpose: Define complex automation workflows using type-safe agent descriptions, then transpile to executable Nickel.

    +

    Architecture:

    +
    Agent Definition (.agent.yaml)
    +    ↓
    +typedialog-ag Type Checker
    +    ↓
    +Agent Execution Plan
    +    ↓
    +Nickel Transpilation
    +    ↓
    +Provisioning Execution
    +
    +

    Key Features:

    +
      +
    • Type-Safe Agents: Strongly-typed agent definitions
    • +
    • Workflow Automation: Chain multiple infrastructure tasks
    • +
    • Nickel Transpilation: Generate Nickel IaC automatically
    • +
    • Agent Orchestration: Parallel and sequential execution
    • +
    • Rollback Support: Automatic rollback on failure
    • +
    +

    Installation:

    +
    # Via provisioning script
    +provisioning install ai-tools
    +
    +# Manual installation
    +wget  [https://github.com/typedialog/typedialog-ag/releases/download/v0.1.0/typedialog-ag-<os>-<arch>](https://github.com/typedialog/typedialog-ag/releases/download/v0.1.0/typedialog-ag-<os>-<arch>)
    +chmod +x typedialog-ag
    +mv typedialog-ag ~/.local/bin/
    +
    +

    Agent Definition Syntax:

    +
    # provisioning/workflows/deploy-k8s.agent.yaml
    +version: "1.0"
    +agent: deploy-k8s
    +description: "Deploy HA Kubernetes cluster with observability stack"
    +
    +types:
    +  CloudProvider:
    +    enum: ["aws", "upcloud", "hetzner"]
    +  NodeConfig:
    +    cpu: int           # 2..64
    +    memory: int        # 4..256 (GB)
    +    disk: int          # 10..1000 (GB)
    +
    +input:
    +  provider: CloudProvider
    +  name: string         # cluster name
    +  nodes: int           # 3..100
    +  node_config: NodeConfig
    +  enable_monitoring: bool = true
    +  enable_backup: bool = true
    +
    +workflow:
    +  - name: validate
    +    task: validate_cluster_config
    +    args:
    +      provider: $input.provider
    +      nodes: $input.nodes
    +      node_config: $input.node_config
    +
    +  - name: create_network
    +    task: create_vpc
    +    depends_on: [validate]
    +    args:
    +      provider: $input.provider
    +      cidr: "10.0.0.0/16"
    +
    +  - name: create_nodes
    +    task: create_nodes
    +    depends_on: [create_network]
    +    parallel: true
    +    args:
    +      provider: $input.provider
    +      count: $input.nodes
    +      config: $input.node_config
    +
    +  - name: install_kubernetes
    +    task: install_kubernetes
    +    depends_on: [create_nodes]
    +    args:
    +      nodes: $create_nodes.output.node_ids
    +      version: "1.28.0"
    +
    +  - name: add_monitoring
    +    task: deploy_observability_stack
    +    depends_on: [install_kubernetes]
    +    when: $input.enable_monitoring
    +    args:
    +      cluster_name: $input.name
    +      storage_class: "ebs"
    +
    +  - name: setup_backup
    +    task: configure_backup
    +    depends_on: [install_kubernetes]
    +    when: $input.enable_backup
    +    args:
    +      cluster_name: $input.name
    +      backup_interval: "daily"
    +
    +output:
    +  cluster_name: string
    +  cluster_id: string
    +  kubeconfig_path: string
    +  monitoring_url: string
    +
    +

    Usage:

    +
    # Type-check agent
    +typedialog ag check deploy-k8s.agent.yaml
    +
    +# Run agent interactively
    +typedialog ag run deploy-k8s.agent.yaml \
    +  --provider upcloud \
    +  --name production-k8s \
    +  --nodes 5 \
    +  --node-config '{"cpu": 8, "memory": 32, "disk": 100}'
    +
    +# Transpile to Nickel
    +typedialog ag transpile deploy-k8s.agent.yaml > deploy-k8s.ncl
    +
    +# Execute generated Nickel
    +provisioning apply deploy-k8s.ncl
    +
    +

    Generated Nickel Output (example):

    +
    {
    +  metadata = {
    +    agent = "deploy-k8s"
    +    version = "1.0"
    +    generated_at = "2026-01-16T01:47:00Z"
    +  }
    +
    +  resources = {
    +    network = {
    +      provider = "upcloud"
    +      vpc = { cidr = "10.0.0.0/16" }
    +    }
    +
    +    compute = {
    +      provider = "upcloud"
    +      nodes = [
    +        { count = 5, cpu = 8, memory = 32, disk = 100 }
    +      ]
    +    }
    +
    +    kubernetes = {
    +      version = "1.28.0"
    +      high_availability = true
    +      monitoring = {
    +        enabled = true
    +        stack = "prometheus-grafana"
    +      }
    +      backup = {
    +        enabled = true
    +        interval = "daily"
    +      }
    +    }
    +  }
    +}
    +
    +

    Agent Features:

    +
    + + + + + + +
    FeaturePurpose
    DependenciesDeclare task ordering (depends_on)
    ParallelismRun independent tasks in parallel
    ConditionalsExecute tasks based on input conditions
    Type SafetyStrong typing on inputs and outputs
    RollbackAutomatic rollback on failure
    LoggingFull execution trace for debugging
    +
    +

    Integration with Provisioning

    +

    Using typedialog-ai in Forms

    +
    # .typedialog/provisioning/form.toml
    +[[elements]]
    +name = "database_type"
    +prompt = "form-database_type-prompt"
    +type = "select"
    +options = ["postgres", "mysql", "mongodb"]
    +
    +# Enable AI suggestions
    +[elements.ai_suggestions]
    +enabled = true
    +context = "workload"
    +provider = "typedialog-ai"
    +endpoint = " [http://localhost:9000/suggest/database"](http://localhost:9000/suggest/database")
    +
    +

    Using typedialog-ag in Workflows

    +
    # Define agent-based workflow
    +provisioning workflow define \
    +  --agent deploy-k8s.agent.yaml \
    +  --name k8s-deployment \
    +  --auto-execute
    +
    +# Run workflow
    +provisioning workflow run k8s-deployment \
    +  --provider upcloud \
    +  --nodes 5
    +
    +

    Performance

    +

    typedialog-ai

    +
      +
    • Suggestion latency: 500ms-2s per suggestion
    • +
    • Database queries: <100ms (cached)
    • +
    • Concurrent users: 50+
    • +
    • SurrealDB storage: <1GB for 10K suggestions
    • +
    +

    typedialog-ag

    +
      +
    • Type checking: <100ms per agent
    • +
    • Transpilation: <500ms to Nickel
    • +
    • Parallel task execution: O(1) overhead
    • +
    • Agent memory: <50MB per agent
    • +
    +

    Configuration

    +

    Enable AI in Provisioning

    +
    # provisioning/config/config.defaults.toml
    +[ai]
    +enabled = true
    +typedialog_ai = true
    +typedialog_ag = true
    +
    +[ai.typedialog]
    +ai_server_url = " [http://localhost:9000"](http://localhost:9000")
    +ag_executable = "typedialog-ag"
    +
    +[ai.form_suggestions]
    +enabled = true
    +providers = ["database", "network", "security"]
    +confidence_threshold = 0.75
    +
    + + +

    AI Service Crate

    +

    The AI Service crate (provisioning/platform/crates/ai-service/) is the central AI processing +microservice for Provisioning. It coordinates LLM integration, knowledge retrieval, and +infrastructure recommendation generation.

    +

    Architecture

    +

    Core Modules

    +

    The AI Service is organized into specialized modules:

    +
    + + + + + + + +
    ModulePurpose
    config.rsConfiguration management and AI service settings
    service.rsMain service logic and request handling
    mcp.rsModel Context Protocol integration for LLM tools
    knowledge.rsKnowledge base management and retrieval
    dag.rsDirected Acyclic Graph for workflow orchestration
    handlers.rsHTTP endpoint handlers
    tool_integration.rsTool registration and execution
    +
    +

    Request Flow

    +
    User Request (natural language)
    +    ↓
    +Handlers (HTTP endpoint)
    +    ↓
    +Intent Recognition (config.rs)
    +    ↓
    +Knowledge Retrieval (knowledge.rs)
    +    ↓
    +MCP Tool Selection (mcp.rs)
    +    ↓
    +LLM Processing (external provider)
    +    ↓
    +DAG Execution Planning (dag.rs)
    +    ↓
    +Infrastructure Generation
    +    ↓
    +Response to User
    +
    +

    Configuration

    +

    Environment Variables

    +
    # LLM Configuration
    +export PROVISIONING_AI_PROVIDER=openai
    +export PROVISIONING_AI_MODEL=gpt-4
    +export PROVISIONING_AI_API_KEY=sk-...
    +
    +# Service Configuration
    +export PROVISIONING_AI_PORT=9091
    +export PROVISIONING_AI_LOG_LEVEL=info
    +export PROVISIONING_AI_TIMEOUT=30
    +
    +# Knowledge Base
    +export PROVISIONING_AI_KNOWLEDGE_PATH=~/.provisioning/knowledge
    +export PROVISIONING_AI_CACHE_TTL=3600
    +
    +# RAG Configuration
    +export PROVISIONING_AI_RAG_ENABLED=true
    +export PROVISIONING_AI_RAG_SIMILARITY_THRESHOLD=0.75
    +
    +

    Configuration File

    +
    # provisioning/config/ai-service.toml
    +[ai_service]
    +port = 9091
    +timeout = 30
    +max_concurrent_requests = 100
    +
    +[llm]
    +provider = "openai"                 # openai, anthropic, local
    +model = "gpt-4"
    +api_key = "${PROVISIONING_AI_API_KEY}"
    +temperature = 0.7
    +max_tokens = 2000
    +
    +[knowledge]
    +enabled = true
    +path = "~/.provisioning/knowledge"
    +cache_ttl = 3600
    +update_interval = 3600
    +
    +[rag]
    +enabled = true
    +similarity_threshold = 0.75
    +max_results = 5
    +embedding_model = "text-embedding-3-small"
    +
    +[dag]
    +max_parallel_tasks = 10
    +timeout_per_task = 60
    +enable_rollback = true
    +
    +[security]
    +validate_inputs = true
    +rate_limit = 1000                   # requests/minute
    +audit_logging = true
    +
    +

    HTTP API

    +

    Endpoints

    +

    Create Infrastructure Request

    +
    POST /v1/infrastructure/create
    +Content-Type: application/json
    +
    +{
    +  "request": "Create 3 web servers with load balancing",
    +  "context": {
    +    "workspace": "production",
    +    "provider": "upcloud",
    +    "environment": "prod"
    +  },
    +  "options": {
    +    "auto_apply": false,
    +    "return_nickel": true,
    +    "validate": true
    +  }
    +}
    +
    +

    Response:

    +
    {
    +  "request_id": "req-12345",
    +  "status": "success",
    +  "infrastructure": {
    +    "servers": [
    +      {"name": "web-01", "cpu": 4, "memory": 8},
    +      {"name": "web-02", "cpu": 4, "memory": 8},
    +      {"name": "web-03", "cpu": 4, "memory": 8}
    +    ],
    +    "load_balancer": {"name": "lb-01", "type": "round-robin"}
    +  },
    +  "nickel_config": "{ servers = [...] }",
    +  "confidence": 0.92,
    +  "notes": ["All servers in same availability zone", "Load balancer configured for health checks"]
    +}
    +
    +

    Analyze Configuration

    +
    POST /v1/configuration/analyze
    +Content-Type: application/json
    +
    +{
    +  "configuration": "{ name = \"server-01\", cpu = 2, memory = 4 }",
    +  "context": {"provider": "upcloud", "environment": "prod"}
    +}
    +
    +

    Response:

    +
    {
    +  "analysis": {
    +    "resources": {
    +      "cpu_score": "low",
    +      "memory_score": "minimal",
    +      "recommendation": "Increase to cpu=4, memory=8 for production"
    +    },
    +    "security": {
    +      "findings": ["No backup configured", "No monitoring"],
    +      "recommendations": ["Enable automated backups", "Deploy monitoring agent"]
    +    },
    +    "cost": {
    +      "estimated_monthly": "$45",
    +      "optimization_potential": "20% cost reduction possible"
    +    }
    +  }
    +}
    +
    +

    Generate Policies

    +
    POST /v1/policies/generate
    +Content-Type: application/json
    +
    +{
    +  "requirements": "Allow developers to create servers but not delete, admins full access",
    +  "format": "cedar"
    +}
    +
    +

    Response:

    +
    {
    +  "policies": [
    +    {
    +      "effect": "permit",
    +      "principal": {"role": "developer"},
    +      "action": "CreateServer",
    +      "resource": "Server::*"
    +    },
    +    {
    +      "effect": "permit",
    +      "principal": {"role": "admin"},
    +      "action": ["CreateServer", "DeleteServer", "ModifyServer"],
    +      "resource": "Server::*"
    +    }
    +  ],
    +  "format": "cedar",
    +  "validation": "valid"
    +}
    +
    +

    Get Suggestions

    +
    GET /v1/suggestions?context=database&workload=transactional&scale=large
    +
    +

    Response:

    +
    {
    +  "suggestions": [
    +    {
    +      "type": "database",
    +      "recommendation": "PostgreSQL 15 with pgvector",
    +      "rationale": "Optimal for transactional workload with vector support",
    +      "confidence": 0.95,
    +      "config": {
    +        "engine": "postgres",
    +        "version": "15",
    +        "extensions": ["pgvector"],
    +        "replicas": 3,
    +        "backup": "daily"
    +      }
    +    }
    +  ]
    +}
    +
    +

    Get Health Status

    +
    GET /v1/health
    +
    +

    Response:

    +
    {
    +  "status": "healthy",
    +  "version": "0.1.0",
    +  "llm": {
    +    "provider": "openai",
    +    "model": "gpt-4",
    +    "available": true
    +  },
    +  "knowledge": {
    +    "documents": 1250,
    +    "last_update": "2026-01-16T01:00:00Z"
    +  },
    +  "rag": {
    +    "enabled": true,
    +    "embeddings": 1250,
    +    "search_latency_ms": 45
    +  },
    +  "uptime_seconds": 86400
    +}
    +
    +

    MCP Tool Integration

    +

    Available Tools

    +

    The AI Service registers tools with the MCP server for LLM access:

    +
    // Tools available to LLM
    +tools = [
    +  "create_infrastructure",
    +  "analyze_configuration",
    +  "generate_policies",
    +  "get_recommendations",
    +  "query_knowledge_base",
    +  "estimate_costs",
    +  "check_compatibility",
    +  "validate_nickel"
    +]
    +

    Tool Definitions

    +
    {
    +  "name": "create_infrastructure",
    +  "description": "Create infrastructure from natural language description",
    +  "parameters": {
    +    "type": "object",
    +    "properties": {
    +      "request": {"type": "string"},
    +      "provider": {"type": "string"},
    +      "context": {"type": "object"}
    +    },
    +    "required": ["request"]
    +  }
    +}
    +
    +

    Knowledge Base

    +

    Structure

    +
    knowledge/
    +├── infrastructure/         # Infrastructure patterns
    +│   ├── kubernetes/
    +│   ├── databases/
    +│   ├── networking/
    +│   └── security/
    +├── patterns/               # Design patterns
    +│   ├── high-availability/
    +│   ├── disaster-recovery/
    +│   └── performance/
    +├── providers/              # Provider-specific docs
    +│   ├── aws/
    +│   ├── upcloud/
    +│   └── hetzner/
    +└── best-practices/         # Best practices
    +    ├── security/
    +    ├── operations/
    +    └── cost-optimization/
    +
    +

    Updating Knowledge

    +
    # Add new knowledge document
    +curl -X POST  [http://localhost:9091/v1/knowledge/add](http://localhost:9091/v1/knowledge/add) \
    +  -H "Content-Type: application/json" \
    +  -d '{
    +    "category": "kubernetes",
    +    "title": "HA Kubernetes Setup",
    +    "content": "..."
    +  }'
    +
    +# Update embeddings
    +curl -X POST  [http://localhost:9091/v1/knowledge/reindex](http://localhost:9091/v1/knowledge/reindex)
    +
    +# Get knowledge status
    +curl  [http://localhost:9091/v1/knowledge/status](http://localhost:9091/v1/knowledge/status)
    +
    +

    DAG Execution

    +

    Workflow Planning

    +

    The AI Service uses DAGs to plan complex infrastructure deployments:

    +
    Validate Config
    +    ├→ Create Network
    +    │   └→ Create Nodes
    +    │       └→ Install Kubernetes
    +    │           ├→ Add Monitoring (optional)
    +    │           └→ Setup Backup (optional)
    +    │
    +    └→ Verify Compatibility
    +        └→ Estimate Costs
    +
    +

    Task Execution

    +
    # Execute DAG workflow
    +curl -X POST  [http://localhost:9091/v1/workflow/execute](http://localhost:9091/v1/workflow/execute) \
    +  -H "Content-Type: application/json" \
    +  -d '{
    +    "dag": {
    +      "tasks": [
    +        {"name": "validate", "action": "validate_config"},
    +        {"name": "network", "action": "create_network", "depends_on": ["validate"]},
    +        {"name": "nodes", "action": "create_nodes", "depends_on": ["network"]}
    +      ]
    +    }
    +  }'
    +
    +

    Performance Characteristics

    +

    Latency

    +
    + + + + + + +
    OperationLatency
    Intent recognition50-100ms
    Knowledge retrieval100-200ms
    LLM inference2-5 seconds
    Nickel generation500ms-1s
    DAG planning100-500ms
    Policy generation1-2 seconds
    +
    +

    Throughput

    +
      +
    • Concurrent requests: 100+
    • +
    • QPS: 50+ requests/second
    • +
    • Knowledge search: <50ms for 1000+ documents
    • +
    +

    Resource Usage

    +
      +
    • Memory: 500MB-2GB (with cache)
    • +
    • CPU: 1-4 cores
    • +
    • Storage: 10GB-50GB (knowledge base)
    • +
    • Network: 10Mbps-100Mbps (LLM requests)
    • +
    +

    Monitoring & Observability

    +

    Metrics

    +
    # Prometheus metrics exposed at /metrics
    +provisioning_ai_requests_total{endpoint="/v1/infrastructure/create"}
    +provisioning_ai_request_duration_seconds{endpoint="/v1/infrastructure/create"}
    +provisioning_ai_llm_tokens{provider="openai", model="gpt-4"}
    +provisioning_ai_knowledge_documents_total
    +provisioning_ai_cache_hit_ratio
    +
    +

    Logging

    +
    # View AI Service logs
    +provisioning logs service ai-service --tail 100
    +
    +# Debug mode
    +PROVISIONING_AI_LOG_LEVEL=debug provisioning service start ai-service
    +
    +

    Troubleshooting

    +

    LLM Connection Issues

    +
    # Test LLM connection
    +curl -X POST  [http://localhost:9091/v1/health](http://localhost:9091/v1/health)
    +
    +# Check configuration
    +provisioning config get ai.llm
    +
    +# View logs
    +provisioning logs service ai-service --filter "llm| \ openai"
    +
    +

    Slow Knowledge Retrieval

    +
    # Check knowledge base status
    +curl  [http://localhost:9091/v1/knowledge/status](http://localhost:9091/v1/knowledge/status)
    +
    +# Reindex embeddings
    +curl -X POST  [http://localhost:9091/v1/knowledge/reindex](http://localhost:9091/v1/knowledge/reindex)
    +
    +# Monitor RAG performance
    +curl  [http://localhost:9091/v1/rag/benchmark](http://localhost:9091/v1/rag/benchmark)
    +
    + + +

    RAG & Knowledge Base

    +

    The RAG (Retrieval Augmented Generation) system enhances AI-generated infrastructure with +domain-specific knowledge. It retrieves relevant documentation, best practices, and patterns to +inform infrastructure recommendations.

    +

    Architecture

    +

    Components

    +
    User Query
    +    ↓
    +Query Embedder (text-embedding-3-small)
    +    ↓
    +Vector Similarity Search (SurrealDB)
    +    ↓
    +Knowledge Retrieval (semantic matching)
    +    ↓
    +Context Augmentation
    +    ↓
    +LLM Processing (with knowledge context)
    +    ↓
    +Infrastructure Recommendation
    +
    +

    Knowledge Flow

    +
    Documentation Input
    +    ↓
    +Document Chunking (512 tokens)
    +    ↓
    +Semantic Embedding
    +    ↓
    +Vector Storage (SurrealDB)
    +    ↓
    +Similarity Indexing
    +    ↓
    +Query Time Retrieval
    +
    +

    Knowledge Base Organization

    +

    Document Categories

    +
    + + + + + + +
    CategoryPurposeExamples
    InfrastructureIaC patterns and templatesKubernetes, databases, networking
    Best PracticesOperational guidelinesHA patterns, disaster recovery
    Provider GuidesCloud provider documentationAWS, UpCloud, Hetzner specifics
    PerformanceOptimization guidelinesResource sizing, caching strategies
    SecuritySecurity hardening guidesEncryption, authentication, compliance
    TroubleshootingCommon issues and solutionsPerformance, deployment, debugging
    +
    +

    Document Structure

    +
    id: "doc-k8s-ha-001"
    +category: "infrastructure"
    +subcategory: "kubernetes"
    +title: "High Availability Kubernetes Cluster Setup"
    +tags: ["kubernetes", "high-availability", "production"]
    +created: "2026-01-10T00:00:00Z"
    +updated: "2026-01-16T00:00:00Z"
    +
    +content: |
    +  # High Availability Kubernetes Cluster
    +
    +  For production Kubernetes deployments, ensure:
    +  - Minimum 3 control planes
    +  - Distributed across availability zones
    +  - etcd with persistent storage
    +  - CNI plugin with network policies
    +
    +embedding: [0.123, 0.456]
    +metadata:
    +  provider: ["aws", "upcloud", "hetzner"]
    +  environment: ["production"]
    +  cost_profile: "medium"
    +
    +

    RAG Retrieval Process

    + +

    When processing a user query, the system:

    +
      +
    1. Embed Query: Convert natural language to vector
    2. +
    3. Search Index: Find similar documents (cosine similarity > threshold)
    4. +
    5. Rank Results: Score by relevance
    6. +
    7. Extract Context: Select top N chunks
    8. +
    9. Augment Prompt: Add context to LLM request
    10. +
    +

    Example:

    +
    User Query: "Create a Kubernetes cluster in AWS with auto-scaling"
    +
    +Vector Embedding: [0.234, 0.567, 0.891]
    +
    +Top Matches:
    +1. "HA Kubernetes Setup" (similarity: 0.94)
    +2. "AWS Auto-Scaling Patterns" (similarity: 0.87)
    +3. "Kubernetes Security Hardening" (similarity: 0.76)
    +
    +Retrieved Context:
    +- Minimum 3 control planes for HA
    +- Use AWS ASGs with cluster autoscaler
    +- Enable Pod Disruption Budgets
    +- Configure network policies
    +
    +LLM Prompt with Context:
    +"Create a Kubernetes cluster with the following context:
    +[...retrieved knowledge...]
    +User request: Create a Kubernetes cluster in AWS with auto-scaling"
    +
    +

    Configuration

    +
    [rag]
    +enabled = true
    +similarity_threshold = 0.75
    +max_results = 5
    +chunk_size = 512
    +chunk_overlap = 50
    +
    +[embeddings]
    +model = "text-embedding-3-small"
    +provider = "openai"
    +cache_embeddings = true
    +
    +[vector_store]
    +backend = "surrealdb"
    +index_type = "hnsw"
    +ef_construction = 400
    +ef_search = 200
    +
    +[retrieval]
    +bm25_weight = 0.3
    +semantic_weight = 0.7
    +date_boost = 0.1
    +
    +

    Managing Knowledge

    +

    Adding Documents

    +

    Via API:

    +
    curl -X POST  [http://localhost:9091/v1/knowledge/add](http://localhost:9091/v1/knowledge/add) \
    +  -H "Content-Type: application/json" \
    +  -d '{
    +    "category": "infrastructure",
    +    "title": "PostgreSQL HA Setup",
    +    "content": "For production PostgreSQL: 3+ replicas, streaming replication",
    +    "tags": ["database", "postgresql", "ha"],
    +    "metadata": {
    +      "provider": ["aws", "upcloud"],
    +      "environment": ["production"]
    +    }
    +  }'
    +
    +

    Batch Import:

    +
    # Import from markdown files
    +provisioning ai knowledge import \
    +  --source ./docs/knowledge \
    +  --category infrastructure \
    +  --auto-tag
    +
    +# Import from existing documentation
    +provisioning ai knowledge import \
    +  --source provisioning/docs/src \
    +  --recursive
    +
    +

    Organizing Knowledge

    +
    # List knowledge documents
    +provisioning ai knowledge list --category infrastructure
    +
    +# Search knowledge base
    +provisioning ai knowledge search "kubernetes high availability"
    +
    +# View document
    +provisioning ai knowledge view doc-k8s-ha-001
    +
    +# Update document
    +provisioning ai knowledge update doc-k8s-ha-001 \
    +  --content "Updated content..." \
    +  --tags "kubernetes,ha,production,v1.28"
    +
    +# Delete document
    +provisioning ai knowledge delete doc-k8s-ha-001
    +
    +

    Reindexing

    +
    # Reindex all documents
    +provisioning ai knowledge reindex --all
    +
    +# Reindex specific category
    +provisioning ai knowledge reindex --category infrastructure
    +
    +# Check indexing status
    +provisioning ai knowledge index-status
    +
    +# Rebuild vector index
    +provisioning ai knowledge rebuild-vectors --model text-embedding-3-small
    +
    +

    Knowledge Query API

    +

    Search Endpoint

    +
    POST /v1/knowledge/search
    +Content-Type: application/json
    +
    +{
    +  "query": "kubernetes cluster setup",
    +  "category": "infrastructure",
    +  "tags": ["kubernetes"],
    +  "limit": 5,
    +  "similarity_threshold": 0.75,
    +  "metadata_filter": {
    +    "provider": ["aws", "upcloud"],
    +    "environment": ["production"]
    +  }
    +}
    +
    +

    Response:

    +
    {
    +  "results": [
    +    {
    +      "id": "doc-k8s-ha-001",
    +      "title": "High Availability Kubernetes Cluster",
    +      "category": "infrastructure",
    +      "similarity": 0.94,
    +      "excerpt": "For production Kubernetes deployments, ensure minimum 3 control planes",
    +      "tags": ["kubernetes", "ha", "production"],
    +      "metadata": {
    +        "provider": ["aws", "upcloud", "hetzner"],
    +        "environment": ["production"]
    +      }
    +    }
    +  ],
    +  "search_time_ms": 45,
    +  "total_matches": 12
    +}
    +
    +

    Knowledge Quality

    +

    Maintenance

    +
    # Check knowledge quality
    +provisioning ai knowledge quality-report
    +
    +# Remove duplicate documents
    +provisioning ai knowledge deduplicate
    +
    +# Fix broken references
    +provisioning ai knowledge validate-refs
    +
    +# Update outdated docs
    +provisioning ai knowledge mark-outdated \
    +  --category infrastructure \
    +  --older-than 180d
    +
    +

    Metrics

    +
    # Knowledge base statistics
    +curl  [http://localhost:9091/v1/knowledge/stats](http://localhost:9091/v1/knowledge/stats)
    +
    +

    Response:

    +
    {
    +  "total_documents": 1250,
    +  "total_chunks": 8432,
    +  "categories": {
    +    "infrastructure": 450,
    +    "security": 200,
    +    "best_practices": 300
    +  },
    +  "embedding_coverage": 0.98,
    +  "indexed_chunks": 8256,
    +  "vector_index_size_mb": 245,
    +  "last_reindex": "2026-01-15T23:00:00Z"
    +}
    +
    + +

    RAG uses hybrid search combining semantic and keyword matching:

    +
    BM25 Score (Keyword Match): 0.7
    +Semantic Score (Vector Similarity): 0.92
    +
    +Hybrid Score = (0.3 × 0.7) + (0.7 × 0.92)
    +             = 0.21 + 0.644
    +             = 0.854
    +
    +Relevance: High ✓
    +
    +

    Configuration

    +
    [hybrid_search]
    +bm25_weight = 0.3
    +semantic_weight = 0.7
    +
    +

    Performance

    +

    Retrieval Latency

    +
    + + + + + +
    OperationLatency
    Embed query (512 tokens)100-200ms
    Vector similarity search20-50ms
    BM25 keyword search10-30ms
    Hybrid ranking5-10ms
    Total retrieval50-100ms
    +
    +

    Vector Index Size

    +
      +
    • Documents: 1000 → 8GB storage
    • +
    • Documents: 10000 → 80GB storage
    • +
    • Search latency: Consistent <50ms regardless of size (with HNSW indexing)
    • +
    +

    Security & Privacy

    +

    Access Control

    +
    # Restrict knowledge access
    +provisioning ai knowledge acl set doc-k8s-ha-001 \
    +  --read "admin,developer" \
    +  --write "admin"
    +
    +# Audit knowledge access
    +provisioning ai knowledge audit --document doc-k8s-ha-001
    +
    +

    Data Protection

    +
      +
    • Sensitive Info: Automatically redacted from queries (API keys, passwords)
    • +
    • Document Encryption: Optional at-rest encryption
    • +
    • Query Logging: Audit trail for compliance
    • +
    +
    [security]
    +redact_patterns = ["password", "api_key", "secret"]
    +encrypt_documents = true
    +audit_queries = true
    +
    + + +

    Natural Language Infrastructure

    +

    Use natural language to describe infrastructure requirements and get automatically generated Nickel configurations and deployment plans.

    +

    Overview

    +

    Natural Language Infrastructure (NLI) allows requesting infrastructure changes in plain English:

    +
    # Instead of writing complex Nickel...
    +provisioning ai "Deploy a 3-node HA PostgreSQL cluster with automatic backups in AWS"
    +
    +# Or interactively...
    +provisioning ai interactive
    +
    +# Interactive mode guides you through requirements
    +
    +

    How It Works

    +

    Request Processing Pipeline

    +
    User Natural Language Input
    +    ↓
    +Intent Recognition
    +    ├─ Extract resource type (server, database, cluster)
    +    ├─ Identify constraints (HA, region, size)
    +    └─ Detect options (monitoring, backup, encryption)
    +    ↓
    +RAG Knowledge Retrieval
    +    ├─ Find similar deployments
    +    ├─ Retrieve best practices
    +    └─ Get provider-specific guidance
    +    ↓
    +LLM Inference (GPT-4, Claude 3)
    +    ├─ Generate Nickel schema
    +    ├─ Calculate resource requirements
    +    └─ Create deployment plan
    +    ↓
    +Configuration Validation
    +    ├─ Type checking via Nickel compiler
    +    ├─ Schema validation
    +    └─ Constraint verification
    +    ↓
    +Infrastructure Deployment
    +    ├─ Dry-run simulation
    +    ├─ Cost estimation
    +    └─ User confirmation
    +    ↓
    +Execution & Monitoring
    +
    +

    Command Usage

    +

    Simple Requests

    +
    # Web servers with load balancing
    +provisioning ai "Create 3 web servers with load balancer"
    +
    +# Database setup
    +provisioning ai "Deploy PostgreSQL with 2 replicas and daily backups"
    +
    +# Kubernetes cluster
    +provisioning ai "Create production Kubernetes cluster with Prometheus monitoring"
    +
    +

    Complex Requests

    +
    # Multi-cloud deployment
    +provisioning ai "
    +  Deploy:
    +  - 3 HA Kubernetes clusters (AWS, UpCloud, Hetzner)
    +  - PostgreSQL 15 with synchronous replication
    +  - Redis cluster for caching
    +  - ELK stack for logging
    +  - Prometheus for monitoring
    +  Constraints:
    +  - Cross-region high availability
    +  - Encrypted inter-region communication
    +  - Auto-scaling based on CPU (70%)
    +"
    +
    +# Disaster recovery setup
    +provisioning ai "
    +  Set up disaster recovery for production environment:
    +  - Active-passive failover to secondary region
    +  - Daily automated backups (30-day retention)
    +  - Monthly DR tests with automated reports
    +  - RTO: 4 hours, RPO: 1 hour
    +  - Test failover every week
    +"
    +
    +

    Interactive Mode

    +
    # Start interactive mode
    +provisioning ai interactive
    +
    +# System asks clarifying questions:
    +# Q: What type of infrastructure? (server, database, cluster, other)
    +# Q: Which cloud provider? (aws, upcloud, hetzner, local)
    +# Q: Production or development?
    +# Q: High availability required?
    +# Q: Expected load? (small, medium, large, enterprise)
    +# Q: Monitoring and logging?
    +# Q: Backup strategy?
    +
    +# Shows generated configuration for approval
    +
    +

    Example: Web Application Deployment

    +

    Request

    +
    provisioning ai "
    +  Deploy a production web application:
    +  - Frontend: 3 nginx servers with auto-scaling
    +  - API: 5 FastAPI instances behind load balancer
    +  - Database: HA PostgreSQL with read replicas
    +  - Cache: Redis cluster
    +  - Monitoring: Prometheus and Grafana
    +  - Logs: Elasticsearch + Kibana
    +  Environment: AWS
    +  Region: us-east-1 (primary), eu-west-1 (DR)
    +  Budget: $5000/month max
    +"
    +
    +

    Generated Configuration

    +

    The system generates:

    +

    1. Nickel Infrastructure Definition:

    +
    {
    +  metadata = {
    +    generated_by = "provisioning-ai"
    +    timestamp = "2026-01-16T01:47:00Z"
    +    confidence = 0.94
    +  }
    +
    +  infrastructure = {
    +    frontend = {
    +      servers = 3
    +      cpu = 4
    +      memory = 8
    +      type = "t3.large"
    +      auto_scaling = {
    +        min = 3
    +        max = 10
    +        target_cpu = 70
    +      }
    +    }
    +
    +    api = {
    +      servers = 5
    +      cpu = 8
    +      memory = 16
    +      type = "t3.xlarge"
    +      load_balancer = {
    +        type = "application"
    +        health_check = "/health"
    +      }
    +    }
    +
    +    database = {
    +      type = "postgresql"
    +      version = "15"
    +      replicas = 2
    +      backup = {
    +        enabled = true
    +        frequency = "daily"
    +        retention_days = 30
    +      }
    +    }
    +
    +    cache = {
    +      type = "redis"
    +      nodes = 3
    +      cluster_mode = true
    +      replication = true
    +    }
    +
    +    monitoring = {
    +      prometheus = true
    +      grafana = true
    +      retention_days = 30
    +    }
    +
    +    logging = {
    +      elasticsearch = true
    +      kibana = true
    +      retention_days = 30
         }
       }
     
    -  $result
    +  provider = "aws"
    +  region_primary = "us-east-1"
    +  region_dr = "eu-west-1"
    +
    +  cost_estimate = {
    +    monthly = "$4850"
    +    breakdown = {
    +      compute = "$2500"
    +      database = "$1200"
    +      cache = "$600"
    +      monitoring = "$400"
    +      networking = "$150"
    +    }
    +  }
     }
     
    -

    Best Practices

    -

    1. Validate Early

    -
    # Validate during development
    -provisioning workspace config validate
    +

    2. Deployment Plan:

    +
    Deployment Plan: Web Application (Production)
     
    -# Don't wait for deployment
    -
    -

    2. Use Strict Schemas

    -
    # Be explicit about types and constraints
    -[fields.port]
    -type = "int"
    -min = 1024
    -max = 65535
    +Phase 1: Network & Storage (2-3 hours)
    +  - Create VPCs in us-east-1 and eu-west-1
    +  - Set up inter-region VPN
    +  - Create EBS volumes for database
    +  - Create EFS for shared storage
     
    -# Don't leave fields unvalidated
    -
    -

    3. Document Patterns

    -
    # Include examples in schema
    -[fields.email]
    -type = "string"
    -pattern = "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"
    -# Example: user@example.com
    -
    -

    4. Handle Deprecation

    -
    # Always provide replacement guidance
    -[deprecated_replacements]
    -old_field = "new_field"  # Clear migration path
    -
    -

    5. Test Schemas

    -
    # Include test cases in comments
    -# Valid: "admin@example.com"
    -# Invalid: "not-an-email"
    -
    -

    Troubleshooting

    -

    Schema File Not Found

    -
    # Error: Schema file not found: /path/to/schema.toml
    +Phase 2: Compute Instances (4-5 hours)
    +  - Launch 3 frontend servers
    +  - Launch 5 API servers
    +  - Create load balancers
    +  - Set up auto-scaling groups
     
    -# Solution: Ensure schema exists
    -ls -la /Users/Akasha/project-provisioning/provisioning/config/*.schema.toml
    -
    -

    Pattern Not Matching

    -
    # Error: Field hostname does not match pattern
    +Phase 3: Databases (3-4 hours)
    +  - Create PostgreSQL primary
    +  - Create read replicas
    +  - Configure replication
    +  - Run initial backup
     
    -# Debug: Test pattern separately
    -echo "my-hostname" | grep -E "^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$"
    -
    -

    Type Mismatch

    -
    # Error: Expected int, got string
    +Phase 4: Cache & Services (2-3 hours)
    +  - Create Redis cluster
    +  - Deploy Prometheus
    +  - Deploy Grafana
    +  - Deploy Elasticsearch/Kibana
     
    -# Check config
    -cat ~/workspaces/dev/config/provisioning.yaml | yq '.server.port'
    -# Output: "8080" (string)
    +Phase 5: Configuration (2-3 hours)
    +  - Configure health checks
    +  - Set up monitoring alerts
    +  - Configure log shipping
    +  - Deploy TLS certificates
     
    -# Fix: Remove quotes
    -vim ~/workspaces/dev/config/provisioning.yaml
    -# Change: port: "8080"
    -# To:     port: 8080
    +Total Estimated Time: 13-18 hours
     
    -

    Additional Resources

    +

    3. Cost Breakdown:

    +
    Monthly Cost Estimate: $4,850
    +
    +Compute               $2,500  (EC2 instances)
    +Database              $1,200  (RDS PostgreSQL)
    +Cache                 $600    (ElastiCache Redis)
    +Monitoring            $400    (CloudWatch + Grafana)
    +Networking            $150    (NAT Gateway, VPN)
    +
    +

    4. Risk Assessment:

    +
    Warnings:
    +- Budget limit reached at $4,850 (max: $5,000)
    +- Cross-region networking latency: 80-100ms
    +- Database failover time: 1-2 minutes
    +
    +Recommendations:
    +- Implement connection pooling in API
    +- Use read replicas for analytics queries
    +- Consider spot instances for non-critical services (30% cost savings)
    +
    +

    Output Formats

    +

    Get Deployment Script

    +
    # Get Bash deployment script
    +provisioning ai "..." --output bash > deploy.sh
    +
    +# Get Nushell script
    +provisioning ai "..." --output nushell > deploy.nu
    +
    +# Get Terraform
    +provisioning ai "..." --output terraform > main.tf
    +
    +# Get Nickel (default)
    +provisioning ai "..." --output nickel > infrastructure.ncl
    +
    +

    Save for Later

    +
    # Save configuration for review
    +provisioning ai "..." --save deployment-plan --review
    +
    +# Deploy from saved plan
    +provisioning apply deployment-plan
    +
    +# Compare with current state
    +provisioning diff deployment-plan
    +
    +

    Configuration

    +

    LLM Provider Selection

    +
    # Use OpenAI (default)
    +export PROVISIONING_AI_PROVIDER=openai
    +export PROVISIONING_AI_MODEL=gpt-4
    +
    +# Use Anthropic
    +export PROVISIONING_AI_PROVIDER=anthropic
    +export PROVISIONING_AI_MODEL=claude-3-opus
    +
    +# Use local model
    +export PROVISIONING_AI_PROVIDER=local
    +export PROVISIONING_AI_MODEL=llama2:70b
    +
    +

    Response Options

    +
    # ~/.config/provisioning/ai.yaml
    +natural_language:
    +  output_format: nickel              # nickel, terraform, bash, nushell
    +  include_cost_estimate: true
    +  include_risk_assessment: true
    +  include_deployment_plan: true
    +  auto_review: false                 # Require approval before deploy
    +  dry_run: true                       # Simulate before execution
    +  confidence_threshold: 0.85          # Reject low-confidence results
    +
    +  style:
    +    verbosity: detailed
    +    include_alternatives: true
    +    explain_reasoning: true
    +
    +

    Advanced Features

    +

    Conditional Infrastructure

    +
    provisioning ai "
    +  Deploy web cluster:
    +  - If environment is production: HA setup with 5 nodes
    +  - If environment is staging: Standard setup with 2 nodes
    +  - If environment is dev: Single node with development tools
    +"
    +
    +

    Cost-Optimized Variants

    +
    # Generate cost-optimized alternative
    +provisioning ai "..." --optimize-for cost
    +
    +# Generate performance-optimized alternative
    +provisioning ai "..." --optimize-for performance
    +
    +# Generate high-availability alternative
    +provisioning ai "..." --optimize-for availability
    +
    +

    Template-Based Generation

    +
    # Use existing templates as base
    +provisioning ai "..." --template kubernetes-ha
    +
    +# List available templates
    +provisioning ai templates list
    +
    +

    Safety & Validation

    +

    Review Before Deploy

    +
    # Generate and review (no auto-execute)
    +provisioning ai "..." --review
    +
    +# Review generated Nickel
    +cat deployment-plan.ncl
    +
    +# Validate configuration
    +provisioning validate deployment-plan.ncl
    +
    +# Dry-run to see what changes
    +provisioning apply --dry-run deployment-plan.ncl
    +
    +# Apply after approval
    +provisioning apply deployment-plan.ncl
    +
    +

    Rollback Support

    +
    # Create deployment with automatic rollback
    +provisioning ai "..." --with-rollback
    +
    +# Manual rollback if issues
    +provisioning workflow rollback --to-checkpoint
    +
    +# View deployment history
    +provisioning history list --type infrastructure
    +
    +

    Limitations

      -
    • Migration Guide
    • -
    • Workspace Guide
    • -
    • Schema Files
    • -
    • Validation Tests
    • +
    • Context Window: Very large infrastructure descriptions may exceed LLM limits
    • +
    • Ambiguity: Unclear requirements may produce suboptimal configurations
    • +
    • Provider Specifics: Some provider-specific features may require manual adjustment
    • +
    • Cost: API calls incur per-token charges
    • +
    • Latency: Processing takes 2-10 seconds depending on complexity
    • +
    + +
@@ -81902,13 +21349,37 @@ vim ~/workspaces/dev/config/provisioning.yaml
+ + + + + + + + diff --git a/docs/book/searchindex.js b/docs/book/searchindex.js index 0ff17de..64cb7bd 100644 --- a/docs/book/searchindex.js +++ b/docs/book/searchindex.js @@ -1 +1 @@ -window.search = JSON.parse('{"doc_urls":["index.html#provisioning-platform-documentation","index.html#quick-navigation","index.html#-getting-started","index.html#-user-guides","index.html#-architecture","index.html#-architecture-decision-records-adrs","index.html#-api-documentation","index.html#-development","index.html#-troubleshooting","index.html#-how-to-guides","index.html#-configuration","index.html#-quick-references","index.html#documentation-structure","index.html#key-concepts","index.html#infrastructure-as-code-iac","index.html#mode-based-architecture","index.html#extension-system","index.html#oci-native-distribution","index.html#documentation-by-role","index.html#for-new-users","index.html#for-developers","index.html#for-operators","index.html#for-architects","index.html#system-capabilities","index.html#-infrastructure-automation","index.html#-workflow-orchestration","index.html#-test-environments","index.html#-mode-based-operation","index.html#-extension-management","index.html#key-achievements","index.html#-batch-workflow-system-v310","index.html#-hybrid-orchestrator-v300","index.html#-configuration-system-v200","index.html#-modular-cli-v320","index.html#-test-environment-service-v340","index.html#-workspace-switching-v205","index.html#technology-stack","index.html#support","index.html#getting-help","index.html#reporting-issues","index.html#contributing","index.html#license","index.html#version-history","getting-started/installation-guide.html#installation-guide","getting-started/installation-guide.html#what-youll-learn","getting-started/installation-guide.html#system-requirements","getting-started/installation-guide.html#operating-system-support","getting-started/installation-guide.html#hardware-requirements","getting-started/installation-guide.html#architecture-support","getting-started/installation-guide.html#prerequisites","getting-started/installation-guide.html#pre-installation-checklist","getting-started/installation-guide.html#installation-methods","getting-started/installation-guide.html#method-1-package-installation-recommended","getting-started/installation-guide.html#method-2-container-installation","getting-started/installation-guide.html#method-3-source-installation","getting-started/installation-guide.html#method-4-manual-installation","getting-started/installation-guide.html#installation-process-details","getting-started/installation-guide.html#what-gets-installed","getting-started/installation-guide.html#post-installation-verification","getting-started/installation-guide.html#basic-verification","getting-started/installation-guide.html#tool-verification","getting-started/installation-guide.html#plugin-verification","getting-started/installation-guide.html#configuration-verification","getting-started/installation-guide.html#environment-setup","getting-started/installation-guide.html#shell-configuration","getting-started/installation-guide.html#configuration-initialization","getting-started/installation-guide.html#first-time-setup","getting-started/installation-guide.html#platform-specific-instructions","getting-started/installation-guide.html#linux-ubuntudebian","getting-started/installation-guide.html#linux-rhelcentosfedora","getting-started/installation-guide.html#macos","getting-started/installation-guide.html#windows-wsl2","getting-started/installation-guide.html#configuration-examples","getting-started/installation-guide.html#basic-configuration","getting-started/installation-guide.html#development-configuration","getting-started/installation-guide.html#upgrade-and-migration","getting-started/installation-guide.html#upgrading-from-previous-version","getting-started/installation-guide.html#migrating-configuration","getting-started/installation-guide.html#troubleshooting-installation-issues","getting-started/installation-guide.html#common-installation-problems","getting-started/installation-guide.html#verification-failures","getting-started/installation-guide.html#getting-help","getting-started/installation-guide.html#next-steps","getting-started/installation-validation-guide.html#installation-validation--bootstrap-guide","getting-started/installation-validation-guide.html#section-1-prerequisites-verification","getting-started/installation-validation-guide.html#step-11-check-system-requirements","getting-started/installation-validation-guide.html#step-12-verify-nushell-installation","getting-started/installation-validation-guide.html#step-13-verify-nickel-installation","getting-started/installation-validation-guide.html#step-14-verify-docker-installation","getting-started/installation-validation-guide.html#step-15-check-provisioning-binary","getting-started/installation-validation-guide.html#prerequisites-checklist","getting-started/installation-validation-guide.html#section-2-bootstrap-installation","getting-started/installation-validation-guide.html#step-21-navigate-to-project-root","getting-started/installation-validation-guide.html#step-22-run-bootstrap-script","getting-started/installation-validation-guide.html#bootstrap-output","getting-started/installation-validation-guide.html#what-bootstrap-does","getting-started/installation-validation-guide.html#section-3-installation-validation","getting-started/installation-validation-guide.html#step-31-verify-workspace-directories","getting-started/installation-validation-guide.html#step-32-verify-generated-configuration-files","getting-started/installation-validation-guide.html#step-33-type-check-nickel-configuration","getting-started/installation-validation-guide.html#step-34-verify-orchestrator-service","getting-started/installation-validation-guide.html#step-35-install-provisioning-cli-optional","getting-started/installation-validation-guide.html#installation-validation-checklist","getting-started/installation-validation-guide.html#section-4-troubleshooting","getting-started/installation-validation-guide.html#issue-nushell-not-found","getting-started/installation-validation-guide.html#issue-nickel-configuration-validation-failed","getting-started/installation-validation-guide.html#issue-docker-not-installed","getting-started/installation-validation-guide.html#issue-configuration-export-failed","getting-started/installation-validation-guide.html#issue-orchestrator-didnt-start","getting-started/installation-validation-guide.html#issue-sudo-password-prompt-during-bootstrap","getting-started/installation-validation-guide.html#issue-permission-denied-on-binary","getting-started/installation-validation-guide.html#section-5-next-steps","getting-started/installation-validation-guide.html#option-1-deploy-workspace_librecloud","getting-started/installation-validation-guide.html#option-2-create-a-new-workspace","getting-started/installation-validation-guide.html#option-3-explore-available-modules","getting-started/installation-validation-guide.html#section-6-verification-checklist","getting-started/installation-validation-guide.html#getting-help","getting-started/installation-validation-guide.html#summary","getting-started/getting-started.html#getting-started-guide","getting-started/getting-started.html#what-youll-learn","getting-started/getting-started.html#prerequisites","getting-started/getting-started.html#essential-concepts","getting-started/getting-started.html#infrastructure-as-code-iac","getting-started/getting-started.html#key-components","getting-started/getting-started.html#configuration-languages","getting-started/getting-started.html#first-time-setup","getting-started/getting-started.html#step-1-initialize-your-configuration","getting-started/getting-started.html#step-2-verify-your-environment","getting-started/getting-started.html#step-3-explore-available-resources","getting-started/getting-started.html#your-first-infrastructure","getting-started/getting-started.html#step-1-create-a-workspace","getting-started/getting-started.html#step-2-examine-the-configuration","getting-started/getting-started.html#step-3-validate-the-configuration","getting-started/getting-started.html#step-4-deploy-infrastructure-check-mode","getting-started/getting-started.html#step-5-create-your-infrastructure","getting-started/getting-started.html#working-with-services","getting-started/getting-started.html#installing-your-first-service","getting-started/getting-started.html#installing-kubernetes","getting-started/getting-started.html#checking-service-status","getting-started/getting-started.html#understanding-commands","getting-started/getting-started.html#command-structure","getting-started/getting-started.html#global-options","getting-started/getting-started.html#essential-commands","getting-started/getting-started.html#working-with-multiple-environments","getting-started/getting-started.html#environment-concepts","getting-started/getting-started.html#switching-environments","getting-started/getting-started.html#environment-specific-configuration","getting-started/getting-started.html#common-workflows","getting-started/getting-started.html#workflow-1-development-environment","getting-started/getting-started.html#workflow-2-service-updates","getting-started/getting-started.html#workflow-3-infrastructure-scaling","getting-started/getting-started.html#interactive-mode","getting-started/getting-started.html#starting-interactive-shell","getting-started/getting-started.html#useful-interactive-commands","getting-started/getting-started.html#configuration-management","getting-started/getting-started.html#understanding-configuration-files","getting-started/getting-started.html#configuration-hierarchy","getting-started/getting-started.html#customizing-your-configuration","getting-started/getting-started.html#monitoring-and-observability","getting-started/getting-started.html#checking-system-status","getting-started/getting-started.html#logging-and-debugging","getting-started/getting-started.html#cost-monitoring","getting-started/getting-started.html#best-practices","getting-started/getting-started.html#1-configuration-management","getting-started/getting-started.html#2-security","getting-started/getting-started.html#3-operational-excellence","getting-started/getting-started.html#4-development-workflow","getting-started/getting-started.html#getting-help","getting-started/getting-started.html#built-in-help-system","getting-started/getting-started.html#command-reference","getting-started/getting-started.html#troubleshooting","getting-started/getting-started.html#real-world-example","getting-started/getting-started.html#step-1-plan-your-infrastructure","getting-started/getting-started.html#step-2-customize-configuration","getting-started/getting-started.html#step-3-deploy-base-infrastructure","getting-started/getting-started.html#step-4-install-services","getting-started/getting-started.html#step-5-deploy-application","getting-started/getting-started.html#next-steps","getting-started/quickstart-cheatsheet.html#provisioning-platform-quick-reference","getting-started/quickstart-cheatsheet.html#quick-navigation","getting-started/quickstart-cheatsheet.html#plugin-commands","getting-started/quickstart-cheatsheet.html#authentication-plugin-nu_plugin_auth","getting-started/quickstart-cheatsheet.html#kms-plugin-nu_plugin_kms","getting-started/quickstart-cheatsheet.html#orchestrator-plugin-nu_plugin_orchestrator","getting-started/quickstart-cheatsheet.html#plugin-performance-comparison","getting-started/quickstart-cheatsheet.html#cli-shortcuts","getting-started/quickstart-cheatsheet.html#infrastructure-shortcuts","getting-started/quickstart-cheatsheet.html#orchestration-shortcuts","getting-started/quickstart-cheatsheet.html#development-shortcuts","getting-started/quickstart-cheatsheet.html#workspace-shortcuts","getting-started/quickstart-cheatsheet.html#configuration-shortcuts","getting-started/quickstart-cheatsheet.html#utility-shortcuts","getting-started/quickstart-cheatsheet.html#generation-shortcuts","getting-started/quickstart-cheatsheet.html#action-shortcuts","getting-started/quickstart-cheatsheet.html#infrastructure-commands","getting-started/quickstart-cheatsheet.html#server-management","getting-started/quickstart-cheatsheet.html#taskserv-management","getting-started/quickstart-cheatsheet.html#cluster-management","getting-started/quickstart-cheatsheet.html#orchestration-commands","getting-started/quickstart-cheatsheet.html#workflow-management","getting-started/quickstart-cheatsheet.html#batch-operations","getting-started/quickstart-cheatsheet.html#orchestrator-management","getting-started/quickstart-cheatsheet.html#configuration-commands","getting-started/quickstart-cheatsheet.html#environment-and-validation","getting-started/quickstart-cheatsheet.html#configuration-files","getting-started/quickstart-cheatsheet.html#http-configuration","getting-started/quickstart-cheatsheet.html#workspace-commands","getting-started/quickstart-cheatsheet.html#workspace-management","getting-started/quickstart-cheatsheet.html#user-preferences","getting-started/quickstart-cheatsheet.html#security-commands","getting-started/quickstart-cheatsheet.html#authentication-via-cli","getting-started/quickstart-cheatsheet.html#multi-factor-authentication-mfa","getting-started/quickstart-cheatsheet.html#secrets-management","getting-started/quickstart-cheatsheet.html#ssh-temporal-keys","getting-started/quickstart-cheatsheet.html#kms-operations-via-cli","getting-started/quickstart-cheatsheet.html#break-glass-emergency-access","getting-started/quickstart-cheatsheet.html#compliance-and-audit","getting-started/quickstart-cheatsheet.html#common-workflows","getting-started/quickstart-cheatsheet.html#complete-deployment-from-scratch","getting-started/quickstart-cheatsheet.html#multi-environment-deployment","getting-started/quickstart-cheatsheet.html#update-infrastructure","getting-started/quickstart-cheatsheet.html#encrypted-secrets-deployment","getting-started/quickstart-cheatsheet.html#debug-and-check-mode","getting-started/quickstart-cheatsheet.html#debug-mode","getting-started/quickstart-cheatsheet.html#check-mode-dry-run","getting-started/quickstart-cheatsheet.html#auto-confirm-mode","getting-started/quickstart-cheatsheet.html#wait-mode","getting-started/quickstart-cheatsheet.html#infrastructure-selection","getting-started/quickstart-cheatsheet.html#output-formats","getting-started/quickstart-cheatsheet.html#json-output","getting-started/quickstart-cheatsheet.html#yaml-output","getting-started/quickstart-cheatsheet.html#table-output-default","getting-started/quickstart-cheatsheet.html#text-output","getting-started/quickstart-cheatsheet.html#performance-tips","getting-started/quickstart-cheatsheet.html#use-plugins-for-frequent-operations","getting-started/quickstart-cheatsheet.html#batch-operations-1","getting-started/quickstart-cheatsheet.html#check-mode-for-testing","getting-started/quickstart-cheatsheet.html#help-system","getting-started/quickstart-cheatsheet.html#command-specific-help","getting-started/quickstart-cheatsheet.html#bi-directional-help","getting-started/quickstart-cheatsheet.html#general-help","getting-started/quickstart-cheatsheet.html#quick-reference-common-flags","getting-started/quickstart-cheatsheet.html#plugin-installation-quick-reference","getting-started/quickstart-cheatsheet.html#related-documentation","getting-started/setup-quickstart.html#setup-quick-start---5-minutes-to-deployment","getting-started/setup-quickstart.html#step-1-check-prerequisites-30-seconds","getting-started/setup-quickstart.html#step-2-install-provisioning-1-minute","getting-started/setup-quickstart.html#step-3-initialize-system-2-minutes","getting-started/setup-quickstart.html#step-4-create-your-first-workspace-1-minute","getting-started/setup-quickstart.html#step-5-deploy-your-first-server-1-minute","getting-started/setup-quickstart.html#verify-everything-works","getting-started/setup-quickstart.html#common-commands-cheat-sheet","getting-started/setup-quickstart.html#troubleshooting-quick-fixes","getting-started/setup-quickstart.html#whats-next","getting-started/setup-quickstart.html#need-help","getting-started/setup-quickstart.html#key-files","getting-started/setup-system-guide.html#provisioning-setup-system-guide","getting-started/setup-system-guide.html#quick-start","getting-started/setup-system-guide.html#prerequisites","getting-started/setup-system-guide.html#30-second-setup","getting-started/setup-system-guide.html#configuration-paths","getting-started/setup-system-guide.html#directory-structure","getting-started/setup-system-guide.html#setup-wizard","getting-started/setup-system-guide.html#configuration-management","getting-started/setup-system-guide.html#hierarchy-highest-to-lowest-priority","getting-started/setup-system-guide.html#configuration-files","getting-started/setup-system-guide.html#multiple-workspaces","getting-started/setup-system-guide.html#configuration-updates","getting-started/setup-system-guide.html#backup--restore","getting-started/setup-system-guide.html#troubleshooting","getting-started/setup-system-guide.html#command-not-found-provisioning","getting-started/setup-system-guide.html#nushell-not-found","getting-started/setup-system-guide.html#cannot-write-to-directory","getting-started/setup-system-guide.html#check-required-tools","getting-started/setup-system-guide.html#faq","getting-started/setup-system-guide.html#getting-help","getting-started/setup-system-guide.html#next-steps","getting-started/quickstart.html#quick-start","getting-started/quickstart.html#-navigate-to-quick-start-guide","getting-started/quickstart.html#quick-commands","getting-started/01-prerequisites.html#prerequisites","getting-started/01-prerequisites.html#hardware-requirements","getting-started/01-prerequisites.html#minimum-requirements-solo-mode","getting-started/01-prerequisites.html#recommended-requirements-multi-user-mode","getting-started/01-prerequisites.html#production-requirements-enterprise-mode","getting-started/01-prerequisites.html#operating-system","getting-started/01-prerequisites.html#supported-platforms","getting-started/01-prerequisites.html#platform-specific-notes","getting-started/01-prerequisites.html#required-software","getting-started/01-prerequisites.html#core-dependencies","getting-started/01-prerequisites.html#optional-dependencies","getting-started/01-prerequisites.html#installation-verification","getting-started/01-prerequisites.html#nushell","getting-started/01-prerequisites.html#nickel","getting-started/01-prerequisites.html#docker","getting-started/01-prerequisites.html#sops","getting-started/01-prerequisites.html#age","getting-started/01-prerequisites.html#installing-missing-dependencies","getting-started/01-prerequisites.html#macos-using-homebrew","getting-started/01-prerequisites.html#ubuntudebian","getting-started/01-prerequisites.html#fedorarhel","getting-started/01-prerequisites.html#network-requirements","getting-started/01-prerequisites.html#firewall-ports","getting-started/01-prerequisites.html#external-connectivity","getting-started/01-prerequisites.html#cloud-provider-credentials-optional","getting-started/01-prerequisites.html#aws","getting-started/01-prerequisites.html#upcloud","getting-started/01-prerequisites.html#next-steps","getting-started/02-installation.html#installation","getting-started/02-installation.html#overview","getting-started/02-installation.html#step-1-clone-the-repository","getting-started/02-installation.html#step-2-install-nushell-plugins","getting-started/02-installation.html#install-nu_plugin_tera-template-rendering","getting-started/02-installation.html#verify-plugin-installation","getting-started/02-installation.html#step-3-add-cli-to-path","getting-started/02-installation.html#step-4-generate-age-encryption-keys","getting-started/02-installation.html#step-5-configure-environment","getting-started/02-installation.html#step-6-initialize-workspace","getting-started/02-installation.html#step-7-validate-installation","getting-started/02-installation.html#optional-install-platform-services","getting-started/02-installation.html#optional-install-platform-with-installer","getting-started/02-installation.html#troubleshooting","getting-started/02-installation.html#nushell-plugin-not-found","getting-started/02-installation.html#permission-denied","getting-started/02-installation.html#age-keys-not-found","getting-started/02-installation.html#next-steps","getting-started/02-installation.html#additional-resources","getting-started/03-first-deployment.html#first-deployment","getting-started/03-first-deployment.html#overview","getting-started/03-first-deployment.html#step-1-configure-infrastructure","getting-started/03-first-deployment.html#step-2-edit-configuration","getting-started/03-first-deployment.html#step-3-create-server-check-mode","getting-started/03-first-deployment.html#step-4-create-server-real","getting-started/03-first-deployment.html#step-5-verify-server","getting-started/03-first-deployment.html#step-6-install-kubernetes-check-mode","getting-started/03-first-deployment.html#step-7-install-kubernetes-real","getting-started/03-first-deployment.html#step-8-verify-installation","getting-started/03-first-deployment.html#common-deployment-patterns","getting-started/03-first-deployment.html#pattern-1-multiple-servers","getting-started/03-first-deployment.html#pattern-2-server-with-multiple-task-services","getting-started/03-first-deployment.html#pattern-3-complete-cluster","getting-started/03-first-deployment.html#deployment-workflow","getting-started/03-first-deployment.html#troubleshooting","getting-started/03-first-deployment.html#server-creation-fails","getting-started/03-first-deployment.html#task-service-installation-fails","getting-started/03-first-deployment.html#ssh-connection-issues","getting-started/03-first-deployment.html#next-steps","getting-started/03-first-deployment.html#additional-resources","getting-started/04-verification.html#verification","getting-started/04-verification.html#overview","getting-started/04-verification.html#step-1-verify-configuration","getting-started/04-verification.html#step-2-verify-servers","getting-started/04-verification.html#step-3-verify-task-services","getting-started/04-verification.html#step-4-verify-kubernetes-if-installed","getting-started/04-verification.html#step-5-verify-platform-services-optional","getting-started/04-verification.html#orchestrator","getting-started/04-verification.html#control-center","getting-started/04-verification.html#kms-service","getting-started/04-verification.html#step-6-run-health-checks","getting-started/04-verification.html#step-7-verify-workflows","getting-started/04-verification.html#common-verification-checks","getting-started/04-verification.html#dns-resolution-if-coredns-installed","getting-started/04-verification.html#network-connectivity","getting-started/04-verification.html#storage-and-resources","getting-started/04-verification.html#troubleshooting-failed-verifications","getting-started/04-verification.html#configuration-validation-failed","getting-started/04-verification.html#server-unreachable","getting-started/04-verification.html#task-service-not-running","getting-started/04-verification.html#platform-service-down","getting-started/04-verification.html#performance-verification","getting-started/04-verification.html#response-time-tests","getting-started/04-verification.html#resource-usage","getting-started/04-verification.html#security-verification","getting-started/04-verification.html#encryption","getting-started/04-verification.html#authentication-if-enabled","getting-started/04-verification.html#verification-checklist","getting-started/04-verification.html#next-steps","getting-started/04-verification.html#additional-resources","getting-started/05-platform-configuration.html#platform-service-configuration","getting-started/05-platform-configuration.html#what-youll-learn","getting-started/05-platform-configuration.html#prerequisites","getting-started/05-platform-configuration.html#platform-services-overview","getting-started/05-platform-configuration.html#deployment-modes","getting-started/05-platform-configuration.html#step-1-initialize-configuration-script","getting-started/05-platform-configuration.html#step-2-choose-configuration-method","getting-started/05-platform-configuration.html#method-a-interactive-typedialog-configuration-recommended","getting-started/05-platform-configuration.html#method-b-quick-mode-configuration-fastest","getting-started/05-platform-configuration.html#method-c-manual-nickel-configuration","getting-started/05-platform-configuration.html#step-3-understand-configuration-layers","getting-started/05-platform-configuration.html#step-4-verify-generated-configuration","getting-started/05-platform-configuration.html#step-5-run-platform-services","getting-started/05-platform-configuration.html#running-a-single-service","getting-started/05-platform-configuration.html#running-multiple-services","getting-started/05-platform-configuration.html#docker-based-deployment","getting-started/05-platform-configuration.html#step-6-verify-services-are-running","getting-started/05-platform-configuration.html#customizing-configuration","getting-started/05-platform-configuration.html#scenario-change-deployment-mode","getting-started/05-platform-configuration.html#scenario-manual-configuration-edit","getting-started/05-platform-configuration.html#scenario-workspace-specific-overrides","getting-started/05-platform-configuration.html#available-configuration-commands","getting-started/05-platform-configuration.html#configuration-file-locations","getting-started/05-platform-configuration.html#public-definitions-part-of-repository","getting-started/05-platform-configuration.html#private-runtime-configs-gitignored","getting-started/05-platform-configuration.html#examples-reference","getting-started/05-platform-configuration.html#troubleshooting-configuration","getting-started/05-platform-configuration.html#issue-script-fails-with-nickel-not-found","getting-started/05-platform-configuration.html#issue-configuration-wont-generate-toml","getting-started/05-platform-configuration.html#issue-service-cant-read-configuration","getting-started/05-platform-configuration.html#issue-services-wont-start-after-config-change","getting-started/05-platform-configuration.html#important-notes","getting-started/05-platform-configuration.html#-runtime-configurations-are-private","getting-started/05-platform-configuration.html#-schemas-are-public","getting-started/05-platform-configuration.html#-configuration-is-idempotent","getting-started/05-platform-configuration.html#-installer-status","getting-started/05-platform-configuration.html#next-steps","getting-started/05-platform-configuration.html#additional-resources","ai/index.html#ai-integration---intelligent-infrastructure-provisioning","ai/index.html#overview","ai/index.html#key-features","ai/index.html#natural-language-configuration","ai/index.html#ai-assisted-forms","ai/index.html#intelligent-troubleshooting","ai/index.html#autonomous-agents","ai/index.html#documentation-structure","ai/index.html#quick-start","ai/index.html#enable-ai-features","ai/index.html#generate-configuration-from-natural-language","ai/index.html#use-ai-assisted-forms","ai/index.html#troubleshoot-with-ai","ai/index.html#security-and-privacy","ai/index.html#supported-llm-providers","ai/index.html#cost-considerations","ai/index.html#architecture-decision-record","ai/index.html#next-steps","ai/architecture.html#ai-integration-architecture","ai/architecture.html#overview","ai/architecture.html#core-components---production-ready","ai/architecture.html#1-ai-service-provisioningplatformai-service","ai/architecture.html#2-rag-system-retrieval-augmented-generation","ai/architecture.html#3-mcp-server-model-context-protocol","ai/architecture.html#4-cli-integration","ai/architecture.html#planned-components---q2-2025","ai/architecture.html#autonomous-agents-typdialog-ag","ai/architecture.html#ai-assisted-forms-typdialog-ai","ai/architecture.html#advanced-features","ai/architecture.html#architecture-diagram","ai/architecture.html#performance-characteristics","ai/architecture.html#security-model","ai/architecture.html#cedar-authorization","ai/architecture.html#secret-protection","ai/architecture.html#local-model-support","ai/architecture.html#configuration","ai/architecture.html#related-documentation","ai/rag-system.html#retrieval-augmented-generation-rag-system","ai/rag-system.html#architecture-overview","ai/rag-system.html#core-components","ai/rag-system.html#1-vector-embeddings","ai/rag-system.html#2-surrealdb-integration","ai/rag-system.html#3-document-chunking","ai/rag-system.html#hybrid-search","ai/rag-system.html#vector-similarity-search","ai/rag-system.html#bm25-keyword-search","ai/rag-system.html#hybrid-results","ai/rag-system.html#semantic-caching","ai/rag-system.html#ingestion-workflow","ai/rag-system.html#document-indexing","ai/rag-system.html#programmatic-indexing","ai/rag-system.html#usage-examples","ai/rag-system.html#query-the-rag-system","ai/rag-system.html#ai-service-integration","ai/rag-system.html#form-assistance-integration","ai/rag-system.html#performance-characteristics","ai/rag-system.html#configuration","ai/rag-system.html#limitations-and-considerations","ai/rag-system.html#document-freshness","ai/rag-system.html#token-limits","ai/rag-system.html#embedding-quality","ai/rag-system.html#monitoring-and-debugging","ai/rag-system.html#query-metrics","ai/rag-system.html#debug-mode","ai/rag-system.html#related-documentation","ai/mcp-integration.html#model-context-protocol-mcp-integration","ai/mcp-integration.html#architecture-overview","ai/mcp-integration.html#mcp-server-launch","ai/mcp-integration.html#available-tools","ai/mcp-integration.html#1-config-generation","ai/mcp-integration.html#2-config-validation","ai/mcp-integration.html#3-documentation-search","ai/mcp-integration.html#4-deployment-troubleshooting","ai/mcp-integration.html#5-get-schema","ai/mcp-integration.html#6-compliance-check","ai/mcp-integration.html#integration-examples","ai/mcp-integration.html#claude-desktop-most-common","ai/mcp-integration.html#openai-function-calling","ai/mcp-integration.html#local-llm-integration-ollama","ai/mcp-integration.html#error-handling","ai/mcp-integration.html#performance","ai/mcp-integration.html#configuration","ai/mcp-integration.html#security","ai/mcp-integration.html#authentication","ai/mcp-integration.html#authorization","ai/mcp-integration.html#data-protection","ai/mcp-integration.html#monitoring-and-debugging","ai/mcp-integration.html#related-documentation","ai/configuration.html#ai-system-configuration-guide","ai/configuration.html#quick-start","ai/configuration.html#minimal-configuration","ai/configuration.html#initialize-configuration","ai/configuration.html#provider-configuration","ai/configuration.html#anthropic-claude","ai/configuration.html#openai-gpt-4","ai/configuration.html#local-models","ai/configuration.html#feature-configuration","ai/configuration.html#enable-specific-features","ai/configuration.html#cache-configuration","ai/configuration.html#cache-strategy","ai/configuration.html#cache-metrics","ai/configuration.html#rate-limiting-and-cost-control","ai/configuration.html#rate-limits","ai/configuration.html#cost-budgeting","ai/configuration.html#track-costs","ai/configuration.html#security-configuration","ai/configuration.html#authentication","ai/configuration.html#authorization-cedar","ai/configuration.html#data-protection","ai/configuration.html#rag-configuration","ai/configuration.html#vector-store-setup","ai/configuration.html#index-management","ai/configuration.html#mcp-server-configuration","ai/configuration.html#mcp-server-setup","ai/configuration.html#mcp-client-configuration","ai/configuration.html#logging-and-observability","ai/configuration.html#logging-configuration","ai/configuration.html#metrics-and-monitoring","ai/configuration.html#health-checks","ai/configuration.html#configuration-validation","ai/configuration.html#environment-variables","ai/configuration.html#common-settings","ai/configuration.html#troubleshooting-configuration","ai/configuration.html#common-issues","ai/configuration.html#upgrading-configuration","ai/configuration.html#backward-compatibility","ai/configuration.html#production-deployment","ai/configuration.html#recommended-production-settings","ai/configuration.html#related-documentation","ai/security-policies.html#ai-security-policies-and-cedar-authorization","ai/security-policies.html#security-model-overview","ai/security-policies.html#defense-in-depth","ai/security-policies.html#cedar-policies","ai/security-policies.html#policy-engine-setup","ai/security-policies.html#policy-best-practices","ai/security-policies.html#data-sanitization","ai/security-policies.html#automatic-pii-removal","ai/security-policies.html#configuration","ai/security-policies.html#example-sanitization","ai/security-policies.html#secret-isolation","ai/security-policies.html#never-access-secrets-directly","ai/security-policies.html#secret-protection-rules","ai/security-policies.html#local-models-support","ai/security-policies.html#air-gapped-deployments","ai/security-policies.html#benefits","ai/security-policies.html#performance-trade-offs","ai/security-policies.html#hsm-integration","ai/security-policies.html#hardware-security-module-support","ai/security-policies.html#encryption","ai/security-policies.html#data-at-rest","ai/security-policies.html#data-in-transit","ai/security-policies.html#audit-logging","ai/security-policies.html#what-gets-logged","ai/security-policies.html#audit-trail-access","ai/security-policies.html#compliance-frameworks","ai/security-policies.html#built-in-compliance-checks","ai/security-policies.html#compliance-reports","ai/security-policies.html#security-best-practices","ai/security-policies.html#for-administrators","ai/security-policies.html#for-developers","ai/security-policies.html#for-operators","ai/security-policies.html#incident-response","ai/security-policies.html#compromised-api-key","ai/security-policies.html#unauthorized-access","ai/security-policies.html#security-checklist","ai/security-policies.html#pre-production","ai/security-policies.html#ongoing","ai/security-policies.html#related-documentation","ai/troubleshooting-with-ai.html#ai-assisted-troubleshooting-and-debugging","ai/troubleshooting-with-ai.html#feature-overview","ai/troubleshooting-with-ai.html#what-it-does","ai/troubleshooting-with-ai.html#troubleshooting-workflow","ai/troubleshooting-with-ai.html#automatic-detection-and-analysis","ai/troubleshooting-with-ai.html#usage-examples","ai/troubleshooting-with-ai.html#example-1-database-connection-timeout","ai/troubleshooting-with-ai.html#example-2-kubernetes-deployment-error","ai/troubleshooting-with-ai.html#cli-commands","ai/troubleshooting-with-ai.html#basic-troubleshooting","ai/troubleshooting-with-ai.html#working-with-logs","ai/troubleshooting-with-ai.html#generate-reports","ai/troubleshooting-with-ai.html#analysis-depth","ai/troubleshooting-with-ai.html#shallow-analysis-fast","ai/troubleshooting-with-ai.html#deep-analysis-thorough","ai/troubleshooting-with-ai.html#integration-with-monitoring","ai/troubleshooting-with-ai.html#automatic-troubleshooting","ai/troubleshooting-with-ai.html#webui-integration","ai/troubleshooting-with-ai.html#learning-from-failures","ai/troubleshooting-with-ai.html#pattern-recognition","ai/troubleshooting-with-ai.html#improvement-tracking","ai/troubleshooting-with-ai.html#configuration","ai/troubleshooting-with-ai.html#troubleshooting-settings","ai/troubleshooting-with-ai.html#failure-detection","ai/troubleshooting-with-ai.html#best-practices","ai/troubleshooting-with-ai.html#for-effective-troubleshooting","ai/troubleshooting-with-ai.html#for-prevention","ai/troubleshooting-with-ai.html#limitations","ai/troubleshooting-with-ai.html#what-ai-can-troubleshoot","ai/troubleshooting-with-ai.html#what-requires-human-review","ai/troubleshooting-with-ai.html#examples-and-guides","ai/troubleshooting-with-ai.html#common-issues---quick-links","ai/troubleshooting-with-ai.html#related-documentation","ai/cost-management.html#ai-cost-management-and-optimization","ai/cost-management.html#cost-overview","ai/cost-management.html#api-provider-pricing","ai/cost-management.html#cost-examples","ai/cost-management.html#cost-control-mechanisms","ai/cost-management.html#request-caching","ai/cost-management.html#cache-configuration","ai/cost-management.html#rate-limiting","ai/cost-management.html#workspace-level-budgets","ai/cost-management.html#cost-tracking","ai/cost-management.html#track-spending","ai/cost-management.html#cost-breakdown","ai/cost-management.html#optimization-strategies","ai/cost-management.html#strategy-1-increase-cache-hit-rate","ai/cost-management.html#strategy-2-use-local-models","ai/cost-management.html#strategy-3-use-haiku-for-simple-tasks","ai/cost-management.html#strategy-4-batch-operations","ai/cost-management.html#strategy-5-smart-feature-enablement","ai/cost-management.html#budget-management-workflow","ai/cost-management.html#1-set-budget","ai/cost-management.html#2-monitor-spending","ai/cost-management.html#3-adjust-if-needed","ai/cost-management.html#4-forecast-and-plan","ai/cost-management.html#cost-allocation","ai/cost-management.html#chargeback-models","ai/cost-management.html#cost-reporting","ai/cost-management.html#generate-reports","ai/cost-management.html#cost-benefit-analysis","ai/cost-management.html#roi-examples","ai/cost-management.html#advanced-cost-optimization","ai/cost-management.html#hybrid-strategy-recommended","ai/cost-management.html#monitoring-and-alerts","ai/cost-management.html#cost-anomaly-detection","ai/cost-management.html#alert-configuration","ai/cost-management.html#related-documentation","ai/natural-language-config.html#natural-language-configuration-generation","ai/natural-language-config.html#feature-overview","ai/natural-language-config.html#what-it-does","ai/natural-language-config.html#primary-use-cases","ai/natural-language-config.html#architecture","ai/natural-language-config.html#generation-pipeline","ai/natural-language-config.html#planned-implementation-details","ai/natural-language-config.html#1-intent-extraction","ai/natural-language-config.html#2-entity-mapping","ai/natural-language-config.html#3-prompt-engineering","ai/natural-language-config.html#4-iterative-refinement","ai/natural-language-config.html#command-interface","ai/natural-language-config.html#cli-usage","ai/natural-language-config.html#interactive-refinement","ai/natural-language-config.html#example-generations","ai/natural-language-config.html#example-1-simple-database","ai/natural-language-config.html#example-2-complex-kubernetes-setup","ai/natural-language-config.html#configuration-and-constraints","ai/natural-language-config.html#configurable-generation-parameters","ai/natural-language-config.html#safety-guardrails","ai/natural-language-config.html#user-workflow","ai/natural-language-config.html#typical-usage-session","ai/natural-language-config.html#integration-with-other-systems","ai/natural-language-config.html#rag-integration","ai/natural-language-config.html#form-assistance","ai/natural-language-config.html#cli-integration","ai/natural-language-config.html#testing-and-validation","ai/natural-language-config.html#test-cases-planned","ai/natural-language-config.html#success-criteria-q2-2025","ai/natural-language-config.html#related-documentation","ai/config-generation.html#configuration-generation-typdialog-prov-gen","ai/config-generation.html#overview","ai/config-generation.html#planned-features","ai/config-generation.html#template-selection","ai/config-generation.html#customization-via-natural-language","ai/config-generation.html#multi-provider-support","ai/config-generation.html#validation-and-testing","ai/config-generation.html#architecture","ai/config-generation.html#integration-points","ai/config-generation.html#related-documentation","ai/ai-assisted-forms.html#ai-assisted-forms-typdialog-ai","ai/ai-assisted-forms.html#feature-overview","ai/ai-assisted-forms.html#what-it-does","ai/ai-assisted-forms.html#primary-use-cases","ai/ai-assisted-forms.html#architecture","ai/ai-assisted-forms.html#user-interface-integration","ai/ai-assisted-forms.html#suggestion-pipeline","ai/ai-assisted-forms.html#planned-features","ai/ai-assisted-forms.html#1-smart-field-suggestions","ai/ai-assisted-forms.html#2-validation-error-explanation","ai/ai-assisted-forms.html#3-field-to-field-context-awareness","ai/ai-assisted-forms.html#4-inline-documentation","ai/ai-assisted-forms.html#5-multi-field-suggestions","ai/ai-assisted-forms.html#implementation-components","ai/ai-assisted-forms.html#frontend-typdialog-ai-javascripttypescript","ai/ai-assisted-forms.html#backend-service-integration","ai/ai-assisted-forms.html#configuration","ai/ai-assisted-forms.html#form-assistant-settings","ai/ai-assisted-forms.html#user-experience-flow","ai/ai-assisted-forms.html#scenario-new-user-configuring-postgresql","ai/ai-assisted-forms.html#integration-with-natural-language-generation","ai/ai-assisted-forms.html#success-criteria-q2-2025","ai/ai-assisted-forms.html#related-documentation","ai/ai-agents.html#autonomous-ai-agents-typdialog-ag","ai/ai-agents.html#feature-overview","ai/ai-agents.html#what-it-does","ai/ai-agents.html#agent-capabilities","ai/ai-agents.html#multi-step-workflow-execution","ai/ai-agents.html#adaptive-decision-making","ai/ai-agents.html#dependency-management","ai/ai-agents.html#architecture","ai/ai-agents.html#agent-design-pattern","ai/ai-agents.html#agent-workflow","ai/ai-agents.html#planned-agent-types","ai/ai-agents.html#1-database-specialist-agent","ai/ai-agents.html#2-kubernetes-specialist-agent","ai/ai-agents.html#3-infrastructure-agent","ai/ai-agents.html#4-monitoring-agent","ai/ai-agents.html#5-compliance-agent","ai/ai-agents.html#usage-examples","ai/ai-agents.html#example-1-development-environment-setup","ai/ai-agents.html#example-2-production-kubernetes-deployment","ai/ai-agents.html#safety-and-control","ai/ai-agents.html#human-in-the-loop-checkpoints","ai/ai-agents.html#decision-logging","ai/ai-agents.html#rollback-capability","ai/ai-agents.html#configuration","ai/ai-agents.html#agent-settings","ai/ai-agents.html#success-criteria-q2-2025","ai/ai-agents.html#related-documentation","architecture/system-overview.html#system-overview","architecture/system-overview.html#executive-summary","architecture/system-overview.html#high-level-architecture","architecture/system-overview.html#system-diagram","architecture/system-overview.html#core-components","architecture/system-overview.html#1-hybrid-architecture-foundation","architecture/system-overview.html#2-configuration-system-v200","architecture/system-overview.html#3-workflow-system-v310","architecture/system-overview.html#4-provider-ecosystem","architecture/system-overview.html#key-architectural-decisions","architecture/system-overview.html#1-hybrid-language-architecture-adr-004","architecture/system-overview.html#2-configuration-driven-architecture-adr-002","architecture/system-overview.html#3-domain-driven-structure-adr-001","architecture/system-overview.html#4-workspace-isolation-adr-003","architecture/system-overview.html#5-registry-based-extensions-adr-005","architecture/system-overview.html#data-flow-architecture","architecture/system-overview.html#configuration-resolution-flow","architecture/system-overview.html#workflow-execution-flow","architecture/system-overview.html#provider-integration-flow","architecture/system-overview.html#technology-stack","architecture/system-overview.html#core-technologies","architecture/system-overview.html#infrastructure-technologies","architecture/system-overview.html#development-technologies","architecture/system-overview.html#scalability-and-performance","architecture/system-overview.html#performance-characteristics","architecture/system-overview.html#scalability-features","architecture/system-overview.html#security-architecture","architecture/system-overview.html#security-layers","architecture/system-overview.html#security-features","architecture/system-overview.html#quality-attributes","architecture/system-overview.html#reliability","architecture/system-overview.html#maintainability","architecture/system-overview.html#extensibility","architecture/architecture-overview.html#provisioning-platform---architecture-overview","architecture/architecture-overview.html#table-of-contents","architecture/architecture-overview.html#executive-summary","architecture/architecture-overview.html#what-is-the-provisioning-platform","architecture/architecture-overview.html#key-characteristics","architecture/architecture-overview.html#architecture-at-a-glance","architecture/architecture-overview.html#key-metrics","architecture/architecture-overview.html#system-architecture","architecture/architecture-overview.html#high-level-architecture","architecture/architecture-overview.html#multi-repository-architecture","architecture/architecture-overview.html#component-architecture","architecture/architecture-overview.html#core-components","architecture/architecture-overview.html#mode-architecture","architecture/architecture-overview.html#mode-based-system-overview","architecture/architecture-overview.html#mode-comparison","architecture/architecture-overview.html#mode-configuration","architecture/architecture-overview.html#mode-specific-workflows","architecture/architecture-overview.html#network-architecture","architecture/architecture-overview.html#service-communication","architecture/architecture-overview.html#port-allocation","architecture/architecture-overview.html#network-security","architecture/architecture-overview.html#data-architecture","architecture/architecture-overview.html#data-storage","architecture/architecture-overview.html#data-flow","architecture/architecture-overview.html#security-architecture","architecture/architecture-overview.html#security-layers","architecture/architecture-overview.html#secret-management","architecture/architecture-overview.html#image-signing-and-verification","architecture/architecture-overview.html#deployment-architecture","architecture/architecture-overview.html#deployment-modes","architecture/architecture-overview.html#integration-architecture","architecture/architecture-overview.html#integration-patterns","architecture/architecture-overview.html#performance-and-scalability","architecture/architecture-overview.html#performance-characteristics","architecture/architecture-overview.html#scalability-limits","architecture/architecture-overview.html#optimization-strategies","architecture/architecture-overview.html#evolution-and-roadmap","architecture/architecture-overview.html#version-history","architecture/architecture-overview.html#roadmap-future-versions","architecture/architecture-overview.html#related-documentation","architecture/architecture-overview.html#architecture","architecture/architecture-overview.html#adrs","architecture/architecture-overview.html#user-guides","architecture/design-principles.html#design-principles","architecture/design-principles.html#overview","architecture/design-principles.html#core-architectural-principles","architecture/design-principles.html#1-project-architecture-principles-pap-compliance","architecture/design-principles.html#2-hybrid-architecture-optimization","architecture/design-principles.html#3-configuration-first-architecture","architecture/design-principles.html#4-domain-driven-structure","architecture/design-principles.html#5-isolation-and-modularity","architecture/design-principles.html#quality-attribute-principles","architecture/design-principles.html#6-reliability-through-recovery","architecture/design-principles.html#7-performance-through-parallelism","architecture/design-principles.html#8-security-through-isolation","architecture/design-principles.html#development-methodology-principles","architecture/design-principles.html#9-configuration-driven-testing","architecture/design-principles.html#error-handling-principles","architecture/design-principles.html#11-fail-fast-recover-gracefully","architecture/design-principles.html#12-observable-operations","architecture/design-principles.html#evolution-and-maintenance-principles","architecture/design-principles.html#13-backward-compatibility","architecture/design-principles.html#14-documentation-driven-development","architecture/design-principles.html#15-technical-debt-management","architecture/design-principles.html#trade-off-management","architecture/design-principles.html#16-explicit-trade-off-documentation","architecture/design-principles.html#conclusion","architecture/integration-patterns.html#integration-patterns","architecture/integration-patterns.html#overview","architecture/integration-patterns.html#core-integration-patterns","architecture/integration-patterns.html#1-hybrid-language-integration","architecture/integration-patterns.html#2-provider-abstraction-pattern","architecture/integration-patterns.html#3-configuration-resolution-pattern","architecture/integration-patterns.html#4-workflow-orchestration-patterns","architecture/integration-patterns.html#5-state-management-patterns","architecture/integration-patterns.html#6-event-and-messaging-patterns","architecture/integration-patterns.html#7-extension-integration-patterns","architecture/integration-patterns.html#8-api-design-patterns","architecture/integration-patterns.html#error-handling-patterns","architecture/integration-patterns.html#structured-error-pattern","architecture/integration-patterns.html#error-recovery-pattern","architecture/integration-patterns.html#performance-optimization-patterns","architecture/integration-patterns.html#caching-strategy-pattern","architecture/integration-patterns.html#streaming-pattern-for-large-data","architecture/integration-patterns.html#testing-integration-patterns","architecture/integration-patterns.html#integration-test-pattern","architecture/orchestrator-integration-model.html#orchestrator-integration-model---deep-dive","architecture/orchestrator-integration-model.html#executive-summary","architecture/orchestrator-integration-model.html#current-architecture-hybrid-orchestrator-v30","architecture/orchestrator-integration-model.html#the-problem-being-solved","architecture/orchestrator-integration-model.html#how-it-works-today-monorepo","architecture/orchestrator-integration-model.html#three-execution-modes","architecture/orchestrator-integration-model.html#integration-patterns","architecture/orchestrator-integration-model.html#pattern-1-cli-submits-tasks-to-orchestrator","architecture/orchestrator-integration-model.html#pattern-2-orchestrator-executes-nushell-scripts","architecture/orchestrator-integration-model.html#pattern-3-bidirectional-communication","architecture/orchestrator-integration-model.html#multi-repo-architecture-impact","architecture/orchestrator-integration-model.html#repository-split-doesnt-change-integration-model","architecture/orchestrator-integration-model.html#configuration-based-integration","architecture/orchestrator-integration-model.html#version-compatibility","architecture/orchestrator-integration-model.html#execution-flow-examples","architecture/orchestrator-integration-model.html#example-1-simple-server-creation-direct-mode","architecture/orchestrator-integration-model.html#example-2-server-creation-with-orchestrator","architecture/orchestrator-integration-model.html#example-3-batch-workflow-with-dependencies","architecture/orchestrator-integration-model.html#why-this-architecture","architecture/orchestrator-integration-model.html#orchestrator-benefits","architecture/orchestrator-integration-model.html#why-not-pure-rust","architecture/orchestrator-integration-model.html#multi-repo-integration-example","architecture/orchestrator-integration-model.html#installation","architecture/orchestrator-integration-model.html#runtime-coordination","architecture/orchestrator-integration-model.html#configuration-examples","architecture/orchestrator-integration-model.html#core-package-config","architecture/orchestrator-integration-model.html#platform-package-config","architecture/orchestrator-integration-model.html#key-takeaways","architecture/orchestrator-integration-model.html#1--orchestrator-is-essential","architecture/orchestrator-integration-model.html#2--integration-is-loose-but-coordinated","architecture/orchestrator-integration-model.html#3--best-of-both-worlds","architecture/orchestrator-integration-model.html#4--multi-repo-doesnt-change-integration","architecture/orchestrator-integration-model.html#conclusion","architecture/multi-repo-architecture.html#multi-repository-architecture-with-oci-registry-support","architecture/multi-repo-architecture.html#overview","architecture/multi-repo-architecture.html#architecture-goals","architecture/multi-repo-architecture.html#repository-structure","architecture/multi-repo-architecture.html#repository-1-provisioning-core","architecture/multi-repo-architecture.html#repository-2-provisioning-extensions","architecture/multi-repo-architecture.html#repository-3-provisioning-platform","architecture/multi-repo-architecture.html#oci-registry-integration","architecture/multi-repo-architecture.html#registry-structure","architecture/multi-repo-architecture.html#oci-artifact-structure","architecture/multi-repo-architecture.html#dependency-management","architecture/multi-repo-architecture.html#workspace-configuration","architecture/multi-repo-architecture.html#dependency-resolution","architecture/multi-repo-architecture.html#dependency-resolution-commands","architecture/multi-repo-architecture.html#oci-client-operations","architecture/multi-repo-architecture.html#cli-commands","architecture/multi-repo-architecture.html#oci-configuration","architecture/multi-repo-architecture.html#extension-development-workflow","architecture/multi-repo-architecture.html#1-develop-extension","architecture/multi-repo-architecture.html#2-test-extension-locally","architecture/multi-repo-architecture.html#3-package-extension","architecture/multi-repo-architecture.html#4-publish-extension","architecture/multi-repo-architecture.html#5-use-published-extension","architecture/multi-repo-architecture.html#registry-deployment-options","architecture/multi-repo-architecture.html#local-registry-solo-development","architecture/multi-repo-architecture.html#remote-registry-multi-userenterprise","architecture/multi-repo-architecture.html#migration-from-monorepo","architecture/multi-repo-architecture.html#phase-1-parallel-structure-current","architecture/multi-repo-architecture.html#phase-2-gradual-migration","architecture/multi-repo-architecture.html#phase-3-repository-split","architecture/multi-repo-architecture.html#phase-4-deprecate-monorepo","architecture/multi-repo-architecture.html#benefits-summary","architecture/multi-repo-architecture.html#modularity","architecture/multi-repo-architecture.html#distribution","architecture/multi-repo-architecture.html#security","architecture/multi-repo-architecture.html#developer-experience","architecture/multi-repo-architecture.html#operations","architecture/multi-repo-architecture.html#ecosystem","architecture/multi-repo-architecture.html#implementation-status","architecture/multi-repo-architecture.html#related-documentation","architecture/multi-repo-strategy.html#multi-repository-strategy-analysis","architecture/multi-repo-strategy.html#executive-summary","architecture/multi-repo-strategy.html#repository-architecture-options","architecture/multi-repo-strategy.html#option-a-pure-monorepo-original-recommendation","architecture/multi-repo-strategy.html#option-b-multi-repo-with-submodules--not-recommended","architecture/multi-repo-strategy.html#option-c-multi-repo-with-package-dependencies--recommended","architecture/multi-repo-strategy.html#recommended-multi-repo-architecture","architecture/multi-repo-strategy.html#repository-1-provisioning-core","architecture/multi-repo-strategy.html#repository-2-provisioning-platform","architecture/multi-repo-strategy.html#repository-3-provisioning-extensions","architecture/multi-repo-strategy.html#repository-4-provisioning-workspace","architecture/multi-repo-strategy.html#repository-5-provisioning-distribution","architecture/multi-repo-strategy.html#dependency-and-integration-model","architecture/multi-repo-strategy.html#package-based-dependencies-not-submodules","architecture/multi-repo-strategy.html#integration-mechanisms","architecture/multi-repo-strategy.html#version-management-strategy","architecture/multi-repo-strategy.html#semantic-versioning-per-repository","architecture/multi-repo-strategy.html#compatibility-matrix","architecture/multi-repo-strategy.html#release-coordination","architecture/multi-repo-strategy.html#development-workflow","architecture/multi-repo-strategy.html#working-on-single-repository","architecture/multi-repo-strategy.html#working-across-repositories","architecture/multi-repo-strategy.html#testing-cross-repo-integration","architecture/multi-repo-strategy.html#distribution-strategy","architecture/multi-repo-strategy.html#individual-repository-releases","architecture/multi-repo-strategy.html#bundle-releases-coordinated","architecture/multi-repo-strategy.html#user-installation-options","architecture/multi-repo-strategy.html#repository-ownership-and-contribution-model","architecture/multi-repo-strategy.html#core-team-ownership","architecture/multi-repo-strategy.html#contribution-workflow","architecture/multi-repo-strategy.html#cicd-strategy","architecture/multi-repo-strategy.html#per-repository-cicd","architecture/multi-repo-strategy.html#integration-testing-distribution-repo","architecture/multi-repo-strategy.html#file-and-directory-structure-comparison","architecture/multi-repo-strategy.html#monorepo-structure","architecture/multi-repo-strategy.html#multi-repo-structure","architecture/multi-repo-strategy.html#decision-matrix","architecture/multi-repo-strategy.html#recommended-approach-multi-repo","architecture/multi-repo-strategy.html#why-multi-repo-wins-for-this-project","architecture/multi-repo-strategy.html#implementation-strategy","architecture/multi-repo-strategy.html#conclusion","architecture/multi-repo-strategy.html#next-steps","architecture/database-and-config-architecture.html#database-and-configuration-architecture","architecture/database-and-config-architecture.html#control-center-database-dbs","architecture/database-and-config-architecture.html#database-type--surrealdb--in-memory-backend","architecture/database-and-config-architecture.html#database-configuration","architecture/database-and-config-architecture.html#why-surrealdb-kv-mem","architecture/database-and-config-architecture.html#additional-database-support","architecture/database-and-config-architecture.html#orchestrator-database","architecture/database-and-config-architecture.html#storage-type--filesystem--file-based-queue","architecture/database-and-config-architecture.html#optional-surrealdb-backend","architecture/database-and-config-architecture.html#configuration-loading-architecture","architecture/database-and-config-architecture.html#hierarchical-configuration-system","architecture/database-and-config-architecture.html#variable-interpolation","architecture/database-and-config-architecture.html#service-specific-config-files","architecture/database-and-config-architecture.html#central-configuration","architecture/database-and-config-architecture.html#workspace-aware-paths","architecture/database-and-config-architecture.html#environment-variable-overrides","architecture/database-and-config-architecture.html#control-center","architecture/database-and-config-architecture.html#orchestrator","architecture/database-and-config-architecture.html#naming-convention","architecture/database-and-config-architecture.html#docker-vs-native-configuration","architecture/database-and-config-architecture.html#docker-deployment","architecture/database-and-config-architecture.html#native-deployment","architecture/database-and-config-architecture.html#configuration-validation","architecture/database-and-config-architecture.html#kms-database","architecture/database-and-config-architecture.html#summary","architecture/database-and-config-architecture.html#control-center-database","architecture/database-and-config-architecture.html#orchestrator-database-1","architecture/database-and-config-architecture.html#configuration-loading","architecture/database-and-config-architecture.html#best-practices","architecture/ecosystem-integration.html#prov-ecosystem--provctl-integration","architecture/ecosystem-integration.html#overview","architecture/ecosystem-integration.html#architecture","architecture/ecosystem-integration.html#three-layer-integration","architecture/ecosystem-integration.html#components","architecture/ecosystem-integration.html#1-runtime-abstraction","architecture/ecosystem-integration.html#2-ssh-advanced","architecture/ecosystem-integration.html#3-backup-system","architecture/ecosystem-integration.html#4-gitops-events","architecture/ecosystem-integration.html#5-service-management","architecture/ecosystem-integration.html#code-quality-standards","architecture/ecosystem-integration.html#rust-provisioning-bridge","architecture/ecosystem-integration.html#nushell","architecture/ecosystem-integration.html#nickel","architecture/ecosystem-integration.html#file-structure","architecture/ecosystem-integration.html#usage","architecture/ecosystem-integration.html#runtime-abstraction","architecture/ecosystem-integration.html#ssh-advanced","architecture/ecosystem-integration.html#backup-system","architecture/ecosystem-integration.html#gitops-events","architecture/ecosystem-integration.html#service-management","architecture/ecosystem-integration.html#integration-points","architecture/ecosystem-integration.html#cli-commands","architecture/ecosystem-integration.html#configuration","architecture/ecosystem-integration.html#plugins","architecture/ecosystem-integration.html#testing","architecture/ecosystem-integration.html#rust-tests","architecture/ecosystem-integration.html#nushell-tests","architecture/ecosystem-integration.html#performance","architecture/ecosystem-integration.html#migration-path","architecture/ecosystem-integration.html#next-steps","architecture/ecosystem-integration.html#references","architecture/package-and-loader-system.html#nickel-package-and-module-loader-system","architecture/package-and-loader-system.html#architecture-overview","architecture/package-and-loader-system.html#benefits","architecture/package-and-loader-system.html#components","architecture/package-and-loader-system.html#1-core-nickel-package-provisioningschemas","architecture/package-and-loader-system.html#2-module-discovery-system","architecture/package-and-loader-system.html#3-module-loading-system","architecture/package-and-loader-system.html#workspace-structure","architecture/package-and-loader-system.html#new-workspace-layout","architecture/package-and-loader-system.html#import-patterns","architecture/package-and-loader-system.html#package-distribution","architecture/package-and-loader-system.html#building-core-package","architecture/package-and-loader-system.html#package-installation-methods","architecture/package-and-loader-system.html#developer-workflows","architecture/package-and-loader-system.html#1-new-project-setup","architecture/package-and-loader-system.html#2-extension-development","architecture/package-and-loader-system.html#3-workspace-migration","architecture/package-and-loader-system.html#4-multi-environment-management","architecture/package-and-loader-system.html#module-management","architecture/package-and-loader-system.html#listing-and-validation","architecture/package-and-loader-system.html#unloading-modules","architecture/package-and-loader-system.html#module-information","architecture/package-and-loader-system.html#cicd-integration","architecture/package-and-loader-system.html#pipeline-example","architecture/package-and-loader-system.html#troubleshooting","architecture/package-and-loader-system.html#common-issues","architecture/package-and-loader-system.html#debug-commands","architecture/package-and-loader-system.html#best-practices","architecture/package-and-loader-system.html#1-version-management","architecture/package-and-loader-system.html#2-module-organization","architecture/package-and-loader-system.html#3-security","architecture/package-and-loader-system.html#4-performance","architecture/package-and-loader-system.html#migration-guide","architecture/package-and-loader-system.html#1-backup-current-workspace","architecture/package-and-loader-system.html#2-analyze-migration-requirements","architecture/package-and-loader-system.html#3-perform-migration","architecture/package-and-loader-system.html#4-load-required-modules","architecture/package-and-loader-system.html#5-test-and-validate","architecture/package-and-loader-system.html#6-deploy","architecture/package-and-loader-system.html#future-enhancements","architecture/config-loading-architecture.html#modular-configuration-loading-architecture","architecture/config-loading-architecture.html#overview","architecture/config-loading-architecture.html#architecture-layers","architecture/config-loading-architecture.html#layer-1-minimal-loader-0023s","architecture/config-loading-architecture.html#layer-2-lazy-loader-decision-layer","architecture/config-loading-architecture.html#layer-3-full-loader-0091s","architecture/config-loading-architecture.html#performance-characteristics","architecture/config-loading-architecture.html#benchmarks","architecture/config-loading-architecture.html#performance-gains","architecture/config-loading-architecture.html#module-dependency-graph","architecture/config-loading-architecture.html#usage-examples","architecture/config-loading-architecture.html#fast-path-help-commands","architecture/config-loading-architecture.html#medium-path-status-operations","architecture/config-loading-architecture.html#full-path-infrastructure-operations","architecture/config-loading-architecture.html#implementation-details","architecture/config-loading-architecture.html#lazy-loading-decision-logic","architecture/config-loading-architecture.html#minimal-config-structure","architecture/config-loading-architecture.html#full-config-structure","architecture/config-loading-architecture.html#migration-path","architecture/config-loading-architecture.html#for-cli-commands","architecture/config-loading-architecture.html#for-new-modules","architecture/config-loading-architecture.html#future-optimizations","architecture/config-loading-architecture.html#phase-2-per-command-config-caching","architecture/config-loading-architecture.html#phase-3-configuration-profiles","architecture/config-loading-architecture.html#phase-4-parallel-config-loading","architecture/config-loading-architecture.html#maintenance-notes","architecture/config-loading-architecture.html#adding-new-functions-to-minimal-loader","architecture/config-loading-architecture.html#modifying-full-loader","architecture/config-loading-architecture.html#performance-testing","architecture/config-loading-architecture.html#see-also","architecture/nickel-executable-examples.html#nickel-executable-examples--test-cases","architecture/nickel-executable-examples.html#setup-run-examples-locally","architecture/nickel-executable-examples.html#prerequisites","architecture/nickel-executable-examples.html#directory-structure-for-examples","architecture/nickel-executable-examples.html#example-1-simple-server-configuration-executable","architecture/nickel-executable-examples.html#step-1-create-contract-file","architecture/nickel-executable-examples.html#step-2-create-defaults-file","architecture/nickel-executable-examples.html#step-3-create-main-module-with-hybrid-interface","architecture/nickel-executable-examples.html#test-export-and-validate-json","architecture/nickel-executable-examples.html#usage-in-consumer-module","architecture/nickel-executable-examples.html#example-2-complex-provider-extension-production-pattern","architecture/nickel-executable-examples.html#create-provider-structure","architecture/nickel-executable-examples.html#provider-contracts","architecture/nickel-executable-examples.html#provider-defaults","architecture/nickel-executable-examples.html#provider-main-module","architecture/nickel-executable-examples.html#test-provider-configuration","architecture/nickel-executable-examples.html#consumer-using-provider","architecture/nickel-executable-examples.html#example-3-real-world-pattern---taskserv-configuration","architecture/nickel-executable-examples.html#taskserv-contracts-from-wuji","architecture/nickel-executable-examples.html#taskserv-defaults","architecture/nickel-executable-examples.html#taskserv-main","architecture/nickel-executable-examples.html#test-taskserv-setup","architecture/nickel-executable-examples.html#example-4-composition--extension-pattern","architecture/nickel-executable-examples.html#base-infrastructure","architecture/nickel-executable-examples.html#extending-infrastructure-nickel-advantage","architecture/nickel-executable-examples.html#example-5-validation--error-handling","architecture/nickel-executable-examples.html#validation-functions","architecture/nickel-executable-examples.html#using-validations","architecture/nickel-executable-examples.html#test-suite-bash-script","architecture/nickel-executable-examples.html#run-all-examples","architecture/nickel-executable-examples.html#quick-commands-reference","architecture/nickel-executable-examples.html#common-nickel-operations","architecture/nickel-executable-examples.html#troubleshooting-examples","architecture/nickel-executable-examples.html#problem-unexpected-token-with-multiple-let","architecture/nickel-executable-examples.html#problem-function-serialization-fails","architecture/nickel-executable-examples.html#problem-null-values-cause-export-issues","architecture/nickel-executable-examples.html#summary","architecture/orchestrator-info.html#cli-code","architecture/orchestrator-info.html#returns-workflow_id--abc-123","architecture/orchestrator-auth-integration.html#orchestrator-authentication--authorization-integration","architecture/orchestrator-auth-integration.html#overview","architecture/orchestrator-auth-integration.html#architecture","architecture/orchestrator-auth-integration.html#security-middleware-chain","architecture/orchestrator-auth-integration.html#implementation-details","architecture/orchestrator-auth-integration.html#1-security-context-builder-middlewaresecurity_contextrs","architecture/orchestrator-auth-integration.html#2-enhanced-authentication-middleware-middlewareauthrs","architecture/orchestrator-auth-integration.html#3-mfa-verification-middleware-middlewaremfars","architecture/orchestrator-auth-integration.html#4-enhanced-authorization-middleware-middlewareauthzrs","architecture/orchestrator-auth-integration.html#5-rate-limiting-middleware-middlewarerate_limitrs","architecture/orchestrator-auth-integration.html#6-security-integration-module-security_integrationrs","architecture/orchestrator-auth-integration.html#integration-with-appstate","architecture/orchestrator-auth-integration.html#updated-appstate-structure","architecture/orchestrator-auth-integration.html#initialization-in-mainrs","architecture/orchestrator-auth-integration.html#protected-endpoints","architecture/orchestrator-auth-integration.html#endpoint-categories","architecture/orchestrator-auth-integration.html#complete-authentication-flow","architecture/orchestrator-auth-integration.html#step-by-step-flow","architecture/orchestrator-auth-integration.html#configuration","architecture/orchestrator-auth-integration.html#environment-variables","architecture/orchestrator-auth-integration.html#development-mode","architecture/orchestrator-auth-integration.html#testing","architecture/orchestrator-auth-integration.html#integration-tests","architecture/orchestrator-auth-integration.html#file-summary","architecture/orchestrator-auth-integration.html#benefits","architecture/orchestrator-auth-integration.html#security","architecture/orchestrator-auth-integration.html#architecture-1","architecture/orchestrator-auth-integration.html#operations","architecture/orchestrator-auth-integration.html#future-enhancements","architecture/orchestrator-auth-integration.html#related-documentation","architecture/orchestrator-auth-integration.html#version-history","architecture/repo-dist-analysis.html#repository-and-distribution-architecture-analysis","architecture/repo-dist-analysis.html#executive-summary","architecture/repo-dist-analysis.html#current-state-analysis","architecture/repo-dist-analysis.html#strengths","architecture/repo-dist-analysis.html#critical-issues","architecture/repo-dist-analysis.html#recommended-architecture","architecture/repo-dist-analysis.html#1-monorepo-structure","architecture/repo-dist-analysis.html#key-principles","architecture/repo-dist-analysis.html#distribution-strategy","architecture/repo-dist-analysis.html#package-types","architecture/repo-dist-analysis.html#installation-paths","architecture/repo-dist-analysis.html#configuration-hierarchy","architecture/repo-dist-analysis.html#build-system","architecture/repo-dist-analysis.html#build-tools-structure","architecture/repo-dist-analysis.html#build-system-implementation","architecture/repo-dist-analysis.html#justfile-integration","architecture/repo-dist-analysis.html#installation-system","architecture/repo-dist-analysis.html#installer-script","architecture/repo-dist-analysis.html#bash-installer-for-systems-without-nushell","architecture/repo-dist-analysis.html#implementation-plan","architecture/repo-dist-analysis.html#phase-1-repository-restructuring-3-4-days","architecture/repo-dist-analysis.html#phase-2-build-system-implementation-3-4-days","architecture/repo-dist-analysis.html#phase-3-installation-system-2-3-days","architecture/repo-dist-analysis.html#phase-4-package-registry-optional-2-3-days","architecture/repo-dist-analysis.html#phase-5-documentation-and-release-2-days","architecture/repo-dist-analysis.html#migration-strategy","architecture/repo-dist-analysis.html#for-existing-users","architecture/repo-dist-analysis.html#for-developers","architecture/repo-dist-analysis.html#success-criteria","architecture/repo-dist-analysis.html#repository-structure","architecture/repo-dist-analysis.html#build-system-1","architecture/repo-dist-analysis.html#installation","architecture/repo-dist-analysis.html#distribution","architecture/repo-dist-analysis.html#documentation","architecture/repo-dist-analysis.html#risks-and-mitigations","architecture/repo-dist-analysis.html#risk-1-breaking-changes-for-existing-users","architecture/repo-dist-analysis.html#risk-2-build-system-complexity","architecture/repo-dist-analysis.html#risk-3-installation-path-conflicts","architecture/repo-dist-analysis.html#risk-4-cross-platform-issues","architecture/repo-dist-analysis.html#risk-5-dependency-management","architecture/repo-dist-analysis.html#timeline-summary","architecture/repo-dist-analysis.html#next-steps","architecture/repo-dist-analysis.html#conclusion","architecture/repo-dist-analysis.html#references","architecture/typedialog-nickel-integration.html#typedialog--nickel-integration-guide","architecture/typedialog-nickel-integration.html#what-is-typedialog","architecture/typedialog-nickel-integration.html#architecture","architecture/typedialog-nickel-integration.html#three-layers","architecture/typedialog-nickel-integration.html#data-flow","architecture/typedialog-nickel-integration.html#setup","architecture/typedialog-nickel-integration.html#installation","architecture/typedialog-nickel-integration.html#verify-installation","architecture/typedialog-nickel-integration.html#basic-workflow","architecture/typedialog-nickel-integration.html#step-1-define-nickel-schema","architecture/typedialog-nickel-integration.html#step-2-define-typedialog-form-toml","architecture/typedialog-nickel-integration.html#step-3-render-form-cli","architecture/typedialog-nickel-integration.html#step-4-validate-against-nickel-schema","architecture/typedialog-nickel-integration.html#step-5-output-to-nickel","architecture/typedialog-nickel-integration.html#real-world-example-1-infrastructure-wizard","architecture/typedialog-nickel-integration.html#scenario","architecture/typedialog-nickel-integration.html#step-1-define-nickel-schema-for-infrastructure","architecture/typedialog-nickel-integration.html#step-2-create-comprehensive-form","architecture/typedialog-nickel-integration.html#step-3-run-interactive-wizard","architecture/typedialog-nickel-integration.html#step-4-use-output-in-infrastructure","architecture/typedialog-nickel-integration.html#real-world-example-2-server-configuration-form","architecture/typedialog-nickel-integration.html#form-definition-advanced","architecture/typedialog-nickel-integration.html#output-structure","architecture/typedialog-nickel-integration.html#api-integration","architecture/typedialog-nickel-integration.html#typedialog-rest-endpoints","architecture/typedialog-nickel-integration.html#response-format","architecture/typedialog-nickel-integration.html#submit-form","architecture/typedialog-nickel-integration.html#response","architecture/typedialog-nickel-integration.html#validation","architecture/typedialog-nickel-integration.html#contract-based-validation","architecture/typedialog-nickel-integration.html#validation-rules-in-form","architecture/typedialog-nickel-integration.html#integration-with-provisioning-platform","architecture/typedialog-nickel-integration.html#use-case-infrastructure-initialization","architecture/typedialog-nickel-integration.html#implementation-in-nushell","architecture/typedialog-nickel-integration.html#advanced-features","architecture/typedialog-nickel-integration.html#conditional-visibility","architecture/typedialog-nickel-integration.html#dynamic-defaults","architecture/typedialog-nickel-integration.html#custom-validation","architecture/typedialog-nickel-integration.html#output-formats","architecture/typedialog-nickel-integration.html#backends","architecture/typedialog-nickel-integration.html#1-cli-command-line-prompts","architecture/typedialog-nickel-integration.html#2-tui-terminal-user-interface---ratatui","architecture/typedialog-nickel-integration.html#3-web-http-server---axum","architecture/typedialog-nickel-integration.html#troubleshooting","architecture/typedialog-nickel-integration.html#problem-form-doesnt-match-nickel-contract","architecture/typedialog-nickel-integration.html#problem-validation-fails","architecture/typedialog-nickel-integration.html#problem-output-not-valid-nickel","architecture/typedialog-nickel-integration.html#complete-example-end-to-end-workflow","architecture/typedialog-nickel-integration.html#step-1-define-nickel-schema-1","architecture/typedialog-nickel-integration.html#step-2-define-form","architecture/typedialog-nickel-integration.html#step-3-user-interaction","architecture/typedialog-nickel-integration.html#step-4-output","architecture/typedialog-nickel-integration.html#step-5-use-in-provisioning","architecture/typedialog-nickel-integration.html#summary","architecture/adr/adr-001-project-structure.html#adr-001-project-structure-decision","architecture/adr/adr-001-project-structure.html#status","architecture/adr/adr-001-project-structure.html#context","architecture/adr/adr-001-project-structure.html#decision","architecture/adr/adr-001-project-structure.html#key-structural-principles","architecture/adr/adr-001-project-structure.html#domain-organization","architecture/adr/adr-001-project-structure.html#consequences","architecture/adr/adr-001-project-structure.html#positive","architecture/adr/adr-001-project-structure.html#negative","architecture/adr/adr-001-project-structure.html#neutral","architecture/adr/adr-001-project-structure.html#alternatives-considered","architecture/adr/adr-001-project-structure.html#alternative-1-monolithic-structure","architecture/adr/adr-001-project-structure.html#alternative-2-microservice-architecture","architecture/adr/adr-001-project-structure.html#alternative-3-language-based-organization","architecture/adr/adr-001-project-structure.html#alternative-4-feature-based-organization","architecture/adr/adr-001-project-structure.html#alternative-5-layer-based-architecture","architecture/adr/adr-001-project-structure.html#references","architecture/adr/adr-002-distribution-strategy.html#adr-002-distribution-strategy","architecture/adr/adr-002-distribution-strategy.html#status","architecture/adr/adr-002-distribution-strategy.html#context","architecture/adr/adr-002-distribution-strategy.html#decision","architecture/adr/adr-002-distribution-strategy.html#distribution-layers","architecture/adr/adr-002-distribution-strategy.html#distribution-structure","architecture/adr/adr-002-distribution-strategy.html#key-distribution-principles","architecture/adr/adr-002-distribution-strategy.html#consequences","architecture/adr/adr-002-distribution-strategy.html#positive","architecture/adr/adr-002-distribution-strategy.html#negative","architecture/adr/adr-002-distribution-strategy.html#neutral","architecture/adr/adr-002-distribution-strategy.html#alternatives-considered","architecture/adr/adr-002-distribution-strategy.html#alternative-1-monolithic-distribution","architecture/adr/adr-002-distribution-strategy.html#alternative-2-container-only-distribution","architecture/adr/adr-002-distribution-strategy.html#alternative-3-source-only-distribution","architecture/adr/adr-002-distribution-strategy.html#alternative-4-plugin-based-distribution","architecture/adr/adr-002-distribution-strategy.html#alternative-5-environment-based-distribution","architecture/adr/adr-002-distribution-strategy.html#implementation-details","architecture/adr/adr-002-distribution-strategy.html#distribution-build-process","architecture/adr/adr-002-distribution-strategy.html#configuration-hierarchy","architecture/adr/adr-002-distribution-strategy.html#workspace-management","architecture/adr/adr-002-distribution-strategy.html#references","architecture/adr/adr-003-workspace-isolation.html#adr-003-workspace-isolation","architecture/adr/adr-003-workspace-isolation.html#status","architecture/adr/adr-003-workspace-isolation.html#context","architecture/adr/adr-003-workspace-isolation.html#decision","architecture/adr/adr-003-workspace-isolation.html#workspace-structure","architecture/adr/adr-003-workspace-isolation.html#configuration-hierarchy-precedence-order","architecture/adr/adr-003-workspace-isolation.html#key-isolation-principles","architecture/adr/adr-003-workspace-isolation.html#consequences","architecture/adr/adr-003-workspace-isolation.html#positive","architecture/adr/adr-003-workspace-isolation.html#negative","architecture/adr/adr-003-workspace-isolation.html#neutral","architecture/adr/adr-003-workspace-isolation.html#alternatives-considered","architecture/adr/adr-003-workspace-isolation.html#alternative-1-system-wide-configuration-only","architecture/adr/adr-003-workspace-isolation.html#alternative-2-home-directory-dotfiles","architecture/adr/adr-003-workspace-isolation.html#alternative-3-xdg-base-directory-specification","architecture/adr/adr-003-workspace-isolation.html#alternative-4-container-based-isolation","architecture/adr/adr-003-workspace-isolation.html#alternative-5-database-based-configuration","architecture/adr/adr-003-workspace-isolation.html#implementation-details","architecture/adr/adr-003-workspace-isolation.html#workspace-initialization","architecture/adr/adr-003-workspace-isolation.html#configuration-resolution-process","architecture/adr/adr-003-workspace-isolation.html#backup-and-migration","architecture/adr/adr-003-workspace-isolation.html#security-considerations","architecture/adr/adr-003-workspace-isolation.html#references","architecture/adr/adr-004-hybrid-architecture.html#adr-004-hybrid-architecture","architecture/adr/adr-004-hybrid-architecture.html#status","architecture/adr/adr-004-hybrid-architecture.html#context","architecture/adr/adr-004-hybrid-architecture.html#decision","architecture/adr/adr-004-hybrid-architecture.html#architecture-layers","architecture/adr/adr-004-hybrid-architecture.html#integration-patterns","architecture/adr/adr-004-hybrid-architecture.html#key-architectural-principles","architecture/adr/adr-004-hybrid-architecture.html#consequences","architecture/adr/adr-004-hybrid-architecture.html#positive","architecture/adr/adr-004-hybrid-architecture.html#negative","architecture/adr/adr-004-hybrid-architecture.html#neutral","architecture/adr/adr-004-hybrid-architecture.html#alternatives-considered","architecture/adr/adr-004-hybrid-architecture.html#alternative-1-pure-nushell-implementation","architecture/adr/adr-004-hybrid-architecture.html#alternative-2-complete-rust-rewrite","architecture/adr/adr-004-hybrid-architecture.html#alternative-3-pure-go-implementation","architecture/adr/adr-004-hybrid-architecture.html#alternative-4-pythonshell-hybrid","architecture/adr/adr-004-hybrid-architecture.html#alternative-5-container-based-separation","architecture/adr/adr-004-hybrid-architecture.html#implementation-details","architecture/adr/adr-004-hybrid-architecture.html#orchestrator-components","architecture/adr/adr-004-hybrid-architecture.html#integration-protocols","architecture/adr/adr-004-hybrid-architecture.html#development-workflow","architecture/adr/adr-004-hybrid-architecture.html#monitoring-and-observability","architecture/adr/adr-004-hybrid-architecture.html#migration-strategy","architecture/adr/adr-004-hybrid-architecture.html#phase-1-core-infrastructure-completed","architecture/adr/adr-004-hybrid-architecture.html#phase-2-workflow-integration-completed","architecture/adr/adr-004-hybrid-architecture.html#phase-3-advanced-features-completed","architecture/adr/adr-004-hybrid-architecture.html#references","architecture/adr/adr-005-extension-framework.html#adr-005-extension-framework","architecture/adr/adr-005-extension-framework.html#status","architecture/adr/adr-005-extension-framework.html#context","architecture/adr/adr-005-extension-framework.html#decision","architecture/adr/adr-005-extension-framework.html#extension-architecture","architecture/adr/adr-005-extension-framework.html#extension-structure","architecture/adr/adr-005-extension-framework.html#extension-manifest-extensiontoml","architecture/adr/adr-005-extension-framework.html#key-framework-principles","architecture/adr/adr-005-extension-framework.html#consequences","architecture/adr/adr-005-extension-framework.html#positive","architecture/adr/adr-005-extension-framework.html#negative","architecture/adr/adr-005-extension-framework.html#neutral","architecture/adr/adr-005-extension-framework.html#alternatives-considered","architecture/adr/adr-005-extension-framework.html#alternative-1-filesystem-based-extensions","architecture/adr/adr-005-extension-framework.html#alternative-2-database-backed-registry","architecture/adr/adr-005-extension-framework.html#alternative-3-package-manager-integration","architecture/adr/adr-005-extension-framework.html#alternative-4-container-based-extensions","architecture/adr/adr-005-extension-framework.html#alternative-5-plugin-architecture","architecture/adr/adr-005-extension-framework.html#implementation-details","architecture/adr/adr-005-extension-framework.html#extension-discovery-process","architecture/adr/adr-005-extension-framework.html#extension-loading-lifecycle","architecture/adr/adr-005-extension-framework.html#configuration-integration","architecture/adr/adr-005-extension-framework.html#security-and-isolation","architecture/adr/adr-005-extension-framework.html#development-support","architecture/adr/adr-005-extension-framework.html#extension-development-patterns","architecture/adr/adr-005-extension-framework.html#provider-extension-pattern","architecture/adr/adr-005-extension-framework.html#task-service-extension-pattern","architecture/adr/adr-005-extension-framework.html#references","architecture/adr/adr-006-provisioning-cli-refactoring.html#adr-006-provisioning-cli-refactoring-to-modular-architecture","architecture/adr/adr-006-provisioning-cli-refactoring.html#context","architecture/adr/adr-006-provisioning-cli-refactoring.html#problems-identified","architecture/adr/adr-006-provisioning-cli-refactoring.html#decision","architecture/adr/adr-006-provisioning-cli-refactoring.html#key-components","architecture/adr/adr-006-provisioning-cli-refactoring.html#architecture-principles","architecture/adr/adr-006-provisioning-cli-refactoring.html#1-separation-of-concerns","architecture/adr/adr-006-provisioning-cli-refactoring.html#2-single-responsibility","architecture/adr/adr-006-provisioning-cli-refactoring.html#3-dry-dont-repeat-yourself","architecture/adr/adr-006-provisioning-cli-refactoring.html#4-openclosed-principle","architecture/adr/adr-006-provisioning-cli-refactoring.html#5-dependency-inversion","architecture/adr/adr-006-provisioning-cli-refactoring.html#implementation-details","architecture/adr/adr-006-provisioning-cli-refactoring.html#migration-path-completed-in-2-phases","architecture/adr/adr-006-provisioning-cli-refactoring.html#bi-directional-help-system","architecture/adr/adr-006-provisioning-cli-refactoring.html#command-shortcuts","architecture/adr/adr-006-provisioning-cli-refactoring.html#testing","architecture/adr/adr-006-provisioning-cli-refactoring.html#test-coverage","architecture/adr/adr-006-provisioning-cli-refactoring.html#test-results","architecture/adr/adr-006-provisioning-cli-refactoring.html#results","architecture/adr/adr-006-provisioning-cli-refactoring.html#quantitative-improvements","architecture/adr/adr-006-provisioning-cli-refactoring.html#qualitative-improvements","architecture/adr/adr-006-provisioning-cli-refactoring.html#trade-offs","architecture/adr/adr-006-provisioning-cli-refactoring.html#advantages","architecture/adr/adr-006-provisioning-cli-refactoring.html#disadvantages","architecture/adr/adr-006-provisioning-cli-refactoring.html#examples","architecture/adr/adr-006-provisioning-cli-refactoring.html#before-repetitive-flag-handling","architecture/adr/adr-006-provisioning-cli-refactoring.html#after-clean-reusable","architecture/adr/adr-006-provisioning-cli-refactoring.html#future-considerations","architecture/adr/adr-006-provisioning-cli-refactoring.html#potential-enhancements","architecture/adr/adr-006-provisioning-cli-refactoring.html#migration-guide-for-contributors","architecture/adr/adr-006-provisioning-cli-refactoring.html#related-documentation","architecture/adr/adr-006-provisioning-cli-refactoring.html#conclusion","architecture/adr/adr-007-kms-simplification.html#adr-007-kms-service-simplification-to-age-and-cosmian-backends","architecture/adr/adr-007-kms-simplification.html#context","architecture/adr/adr-007-kms-simplification.html#problems-with-4-backend-approach","architecture/adr/adr-007-kms-simplification.html#key-insights","architecture/adr/adr-007-kms-simplification.html#decision","architecture/adr/adr-007-kms-simplification.html#consequences","architecture/adr/adr-007-kms-simplification.html#positive","architecture/adr/adr-007-kms-simplification.html#negative","architecture/adr/adr-007-kms-simplification.html#neutral","architecture/adr/adr-007-kms-simplification.html#implementation","architecture/adr/adr-007-kms-simplification.html#files-created","architecture/adr/adr-007-kms-simplification.html#files-modified","architecture/adr/adr-007-kms-simplification.html#files-deleted","architecture/adr/adr-007-kms-simplification.html#dependencies-changed","architecture/adr/adr-007-kms-simplification.html#migration-path","architecture/adr/adr-007-kms-simplification.html#for-development","architecture/adr/adr-007-kms-simplification.html#for-production","architecture/adr/adr-007-kms-simplification.html#alternatives-considered","architecture/adr/adr-007-kms-simplification.html#alternative-1-keep-all-4-backends","architecture/adr/adr-007-kms-simplification.html#alternative-2-only-cosmian-no-age","architecture/adr/adr-007-kms-simplification.html#alternative-3-only-age-no-production-backend","architecture/adr/adr-007-kms-simplification.html#alternative-4-age--hashicorp-vault","architecture/adr/adr-007-kms-simplification.html#metrics","architecture/adr/adr-007-kms-simplification.html#code-reduction","architecture/adr/adr-007-kms-simplification.html#dependency-reduction","architecture/adr/adr-007-kms-simplification.html#compilation-time","architecture/adr/adr-007-kms-simplification.html#compliance","architecture/adr/adr-007-kms-simplification.html#security-considerations","architecture/adr/adr-007-kms-simplification.html#testing-requirements","architecture/adr/adr-007-kms-simplification.html#references","architecture/adr/adr-007-kms-simplification.html#notes","architecture/adr/adr-008-cedar-authorization.html#adr-008-cedar-authorization-policy-engine-integration","architecture/adr/adr-008-cedar-authorization.html#context-and-problem-statement","architecture/adr/adr-008-cedar-authorization.html#decision-drivers","architecture/adr/adr-008-cedar-authorization.html#considered-options","architecture/adr/adr-008-cedar-authorization.html#option-1-code-based-authorization-current-state","architecture/adr/adr-008-cedar-authorization.html#option-2-opa-open-policy-agent","architecture/adr/adr-008-cedar-authorization.html#option-3-cedar-policy-engine-chosen","architecture/adr/adr-008-cedar-authorization.html#option-4-casbin","architecture/adr/adr-008-cedar-authorization.html#decision-outcome","architecture/adr/adr-008-cedar-authorization.html#rationale","architecture/adr/adr-008-cedar-authorization.html#implementation-details","architecture/adr/adr-008-cedar-authorization.html#integration-points","architecture/adr/adr-008-cedar-authorization.html#security-best-practices","architecture/adr/adr-008-cedar-authorization.html#consequences","architecture/adr/adr-008-cedar-authorization.html#positive","architecture/adr/adr-008-cedar-authorization.html#negative","architecture/adr/adr-008-cedar-authorization.html#neutral","architecture/adr/adr-008-cedar-authorization.html#compliance","architecture/adr/adr-008-cedar-authorization.html#security-standards","architecture/adr/adr-008-cedar-authorization.html#audit-requirements","architecture/adr/adr-008-cedar-authorization.html#migration-path","architecture/adr/adr-008-cedar-authorization.html#phase-1-implementation-completed","architecture/adr/adr-008-cedar-authorization.html#phase-2-rollout-next","architecture/adr/adr-008-cedar-authorization.html#phase-3-enhancement-future","architecture/adr/adr-008-cedar-authorization.html#alternatives-considered","architecture/adr/adr-008-cedar-authorization.html#alternative-1-continue-with-code-based-authorization","architecture/adr/adr-008-cedar-authorization.html#alternative-2-hybrid-approach","architecture/adr/adr-008-cedar-authorization.html#references","architecture/adr/adr-008-cedar-authorization.html#related-adrs","architecture/adr/adr-008-cedar-authorization.html#notes","architecture/adr/adr-009-security-system-complete.html#adr-009-complete-security-system-implementation","architecture/adr/adr-009-security-system-complete.html#context","architecture/adr/adr-009-security-system-complete.html#decision","architecture/adr/adr-009-security-system-complete.html#implementation-summary","architecture/adr/adr-009-security-system-complete.html#total-implementation","architecture/adr/adr-009-security-system-complete.html#architecture-components","architecture/adr/adr-009-security-system-complete.html#group-1-foundation-13485-lines","architecture/adr/adr-009-security-system-complete.html#group-2-kms-integration-9331-lines","architecture/adr/adr-009-security-system-complete.html#group-3-security-features-8948-lines","architecture/adr/adr-009-security-system-complete.html#group-4-advanced-features-7935-lines","architecture/adr/adr-009-security-system-complete.html#security-architecture-flow","architecture/adr/adr-009-security-system-complete.html#end-to-end-request-flow","architecture/adr/adr-009-security-system-complete.html#emergency-access-flow","architecture/adr/adr-009-security-system-complete.html#technology-stack","architecture/adr/adr-009-security-system-complete.html#backend-rust","architecture/adr/adr-009-security-system-complete.html#frontend-typescriptreact","architecture/adr/adr-009-security-system-complete.html#cli-nushell","architecture/adr/adr-009-security-system-complete.html#infrastructure","architecture/adr/adr-009-security-system-complete.html#security-guarantees","architecture/adr/adr-009-security-system-complete.html#authentication","architecture/adr/adr-009-security-system-complete.html#authorization","architecture/adr/adr-009-security-system-complete.html#secrets-management","architecture/adr/adr-009-security-system-complete.html#audit--compliance","architecture/adr/adr-009-security-system-complete.html#emergency-access","architecture/adr/adr-009-security-system-complete.html#performance-characteristics","architecture/adr/adr-009-security-system-complete.html#deployment-options","architecture/adr/adr-009-security-system-complete.html#development","architecture/adr/adr-009-security-system-complete.html#production","architecture/adr/adr-009-security-system-complete.html#configuration","architecture/adr/adr-009-security-system-complete.html#environment-variables","architecture/adr/adr-009-security-system-complete.html#config-files","architecture/adr/adr-009-security-system-complete.html#testing","architecture/adr/adr-009-security-system-complete.html#run-all-tests","architecture/adr/adr-009-security-system-complete.html#integration-tests","architecture/adr/adr-009-security-system-complete.html#monitoring--alerts","architecture/adr/adr-009-security-system-complete.html#metrics-to-monitor","architecture/adr/adr-009-security-system-complete.html#alerts-to-configure","architecture/adr/adr-009-security-system-complete.html#maintenance","architecture/adr/adr-009-security-system-complete.html#daily","architecture/adr/adr-009-security-system-complete.html#weekly","architecture/adr/adr-009-security-system-complete.html#monthly","architecture/adr/adr-009-security-system-complete.html#quarterly","architecture/adr/adr-009-security-system-complete.html#migration-path","architecture/adr/adr-009-security-system-complete.html#from-existing-system","architecture/adr/adr-009-security-system-complete.html#future-enhancements","architecture/adr/adr-009-security-system-complete.html#planned-not-implemented","architecture/adr/adr-009-security-system-complete.html#under-consideration","architecture/adr/adr-009-security-system-complete.html#consequences","architecture/adr/adr-009-security-system-complete.html#positive","architecture/adr/adr-009-security-system-complete.html#negative","architecture/adr/adr-009-security-system-complete.html#mitigations","architecture/adr/adr-009-security-system-complete.html#related-documentation","architecture/adr/adr-009-security-system-complete.html#approval","architecture/adr/adr-010-configuration-format-strategy.html#adr-010-configuration-file-format-strategy","architecture/adr/adr-010-configuration-format-strategy.html#context","architecture/adr/adr-010-configuration-format-strategy.html#decision","architecture/adr/adr-010-configuration-format-strategy.html#implementation-strategy","architecture/adr/adr-010-configuration-format-strategy.html#phase-1-documentation-complete","architecture/adr/adr-010-configuration-format-strategy.html#phase-2-workspace-config-migration-in-progress","architecture/adr/adr-010-configuration-format-strategy.html#phase-3-template-file-reorganization-in-progress","architecture/adr/adr-010-configuration-format-strategy.html#rationale-for-each-format","architecture/adr/adr-010-configuration-format-strategy.html#kcl-for-workspace-configuration","architecture/adr/adr-010-configuration-format-strategy.html#toml-for-application-configuration","architecture/adr/adr-010-configuration-format-strategy.html#yaml-for-metadata-and-kubernetes-resources","architecture/adr/adr-010-configuration-format-strategy.html#configuration-hierarchy-priority","architecture/adr/adr-010-configuration-format-strategy.html#migration-path","architecture/adr/adr-010-configuration-format-strategy.html#for-existing-workspaces","architecture/adr/adr-010-configuration-format-strategy.html#for-new-workspaces","architecture/adr/adr-010-configuration-format-strategy.html#file-format-guidelines-for-developers","architecture/adr/adr-010-configuration-format-strategy.html#when-to-use-each-format","architecture/adr/adr-010-configuration-format-strategy.html#consequences","architecture/adr/adr-010-configuration-format-strategy.html#benefits","architecture/adr/adr-010-configuration-format-strategy.html#trade-offs","architecture/adr/adr-010-configuration-format-strategy.html#risk-mitigation","architecture/adr/adr-010-configuration-format-strategy.html#template-file-reorganization","architecture/adr/adr-010-configuration-format-strategy.html#problem","architecture/adr/adr-010-configuration-format-strategy.html#solution","architecture/adr/adr-010-configuration-format-strategy.html#outcome","architecture/adr/adr-010-configuration-format-strategy.html#references","architecture/adr/adr-010-configuration-format-strategy.html#existing-kcl-schemas","architecture/adr/adr-010-configuration-format-strategy.html#related-adrs","architecture/adr/adr-010-configuration-format-strategy.html#decision-status","architecture/adr/adr-011-nickel-migration.html#adr-011-migration-from-kcl-to-nickel","architecture/adr/adr-011-nickel-migration.html#context","architecture/adr/adr-011-nickel-migration.html#problems-with-kcl","architecture/adr/adr-011-nickel-migration.html#project-needs","architecture/adr/adr-011-nickel-migration.html#decision","architecture/adr/adr-011-nickel-migration.html#key-changes","architecture/adr/adr-011-nickel-migration.html#implementation-summary","architecture/adr/adr-011-nickel-migration.html#migration-complete","architecture/adr/adr-011-nickel-migration.html#platform-schemas-provisioningschemas","architecture/adr/adr-011-nickel-migration.html#extensions-provisioningextensions","architecture/adr/adr-011-nickel-migration.html#active-workspaces-workspace_librecloudnickel","architecture/adr/adr-011-nickel-migration.html#backward-compatibility","architecture/adr/adr-011-nickel-migration.html#comparison-kcl-vs-nickel","architecture/adr/adr-011-nickel-migration.html#architecture-patterns","architecture/adr/adr-011-nickel-migration.html#three-file-pattern","architecture/adr/adr-011-nickel-migration.html#hybrid-pattern-benefits","architecture/adr/adr-011-nickel-migration.html#domain-organized-architecture","architecture/adr/adr-011-nickel-migration.html#production-deployment-patterns","architecture/adr/adr-011-nickel-migration.html#two-mode-strategy","architecture/adr/adr-011-nickel-migration.html#ecosystem-integration","architecture/adr/adr-011-nickel-migration.html#typedialog-bidirectional-nickel-integration","architecture/adr/adr-011-nickel-migration.html#technical-patterns","architecture/adr/adr-011-nickel-migration.html#expression-based-structure","architecture/adr/adr-011-nickel-migration.html#schema-inheritance--record-merging","architecture/adr/adr-011-nickel-migration.html#optional-fields","architecture/adr/adr-011-nickel-migration.html#union-types","architecture/adr/adr-011-nickel-migration.html#booleannull-conversion","architecture/adr/adr-011-nickel-migration.html#quality-metrics","architecture/adr/adr-011-nickel-migration.html#consequences","architecture/adr/adr-011-nickel-migration.html#positive-","architecture/adr/adr-011-nickel-migration.html#challenges-","architecture/adr/adr-011-nickel-migration.html#mitigations","architecture/adr/adr-011-nickel-migration.html#migration-status","architecture/adr/adr-011-nickel-migration.html#completed-phase-1-4","architecture/adr/adr-011-nickel-migration.html#in-progress-workspace-level","architecture/adr/adr-011-nickel-migration.html#future-optional","architecture/adr/adr-011-nickel-migration.html#related-documentation","architecture/adr/adr-011-nickel-migration.html#development-guides","architecture/adr/adr-011-nickel-migration.html#related-adrs","architecture/adr/adr-011-nickel-migration.html#referenced-files","architecture/adr/adr-011-nickel-migration.html#approval","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#adr-014-nushell-nickel-plugin---cli-wrapper-architecture","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#status","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#context","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#system-requirements","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#decision","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#architecture-diagram","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#implementation-characteristics","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#rationale","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#why-cli-wrapper-is-the-correct-choice","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#the-module-system-problem","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#documentation-gap","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#why-nickel-is-different-from-simple-use-cases","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#consequences","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#positive","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#negative","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#mitigation-strategies","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#alternatives-considered","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#alternative-1-pure-rust-with-nickel-lang-core","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#alternative-2-hybrid-pure-rust--cli-fallback","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#alternative-3-webassembly-version","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#alternative-4-use-nickel-lsp","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#implementation-details","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#command-set","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#critical-implementation-detail-command-syntax","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#caching-strategy","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#json-output-processing","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#testing-strategy","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#configuration-integration","architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.html#references","architecture/adr/adr-013-typdialog-integration.html#adr-013-typdialog-web-ui-backend-integration-for-interactive-configuration","architecture/adr/adr-013-typdialog-integration.html#status","architecture/adr/adr-013-typdialog-integration.html#context","architecture/adr/adr-013-typdialog-integration.html#the-interactive-configuration-problem","architecture/adr/adr-013-typdialog-integration.html#use-cases-requiring-interactive-input","architecture/adr/adr-013-typdialog-integration.html#requirements-for-interactive-input-system","architecture/adr/adr-013-typdialog-integration.html#decision","architecture/adr/adr-013-typdialog-integration.html#architecture-diagram","architecture/adr/adr-013-typdialog-integration.html#implementation-characteristics","architecture/adr/adr-013-typdialog-integration.html#rationale","architecture/adr/adr-013-typdialog-integration.html#why-tui-dialog-integration-is-required","architecture/adr/adr-013-typdialog-integration.html#the-nushell-limitation","architecture/adr/adr-013-typdialog-integration.html#the-nickel-constraint","architecture/adr/adr-013-typdialog-integration.html#why-rust--tui-dialog-is-the-solution","architecture/adr/adr-013-typdialog-integration.html#consequences","architecture/adr/adr-013-typdialog-integration.html#positive","architecture/adr/adr-013-typdialog-integration.html#negative","architecture/adr/adr-013-typdialog-integration.html#mitigation-strategies","architecture/adr/adr-013-typdialog-integration.html#alternatives-considered","architecture/adr/adr-013-typdialog-integration.html#alternative-1-shell-based-prompts-current-state","architecture/adr/adr-013-typdialog-integration.html#alternative-2-web-based-forms","architecture/adr/adr-013-typdialog-integration.html#alternative-3-custom-tui-per-use-case","architecture/adr/adr-013-typdialog-integration.html#alternative-4-external-form-tool-dialog-whiptail","architecture/adr/adr-013-typdialog-integration.html#alternative-5-text-based-config-files-only","architecture/adr/adr-013-typdialog-integration.html#implementation-details","architecture/adr/adr-013-typdialog-integration.html#form-definition-pattern","architecture/adr/adr-013-typdialog-integration.html#integration-with-nickel","architecture/adr/adr-013-typdialog-integration.html#cli-command-structure","architecture/adr/adr-013-typdialog-integration.html#validation-rules","architecture/adr/adr-013-typdialog-integration.html#security-password-handling","architecture/adr/adr-013-typdialog-integration.html#testing-strategy","architecture/adr/adr-013-typdialog-integration.html#configuration-integration","architecture/adr/adr-013-typdialog-integration.html#documentation-requirements","architecture/adr/adr-013-typdialog-integration.html#migration-path","architecture/adr/adr-013-typdialog-integration.html#references","architecture/adr/adr-014-secretumvault-integration.html#adr-014-secretumvault-integration-for-secrets-management","architecture/adr/adr-014-secretumvault-integration.html#status","architecture/adr/adr-014-secretumvault-integration.html#context","architecture/adr/adr-014-secretumvault-integration.html#current-secrets-management-challenges","architecture/adr/adr-014-secretumvault-integration.html#problems-without-centralized-secrets-management","architecture/adr/adr-014-secretumvault-integration.html#use-cases-requiring-centralized-secrets-management","architecture/adr/adr-014-secretumvault-integration.html#requirements-for-secrets-management-system","architecture/adr/adr-014-secretumvault-integration.html#decision","architecture/adr/adr-014-secretumvault-integration.html#architecture-diagram","architecture/adr/adr-014-secretumvault-integration.html#implementation-characteristics","architecture/adr/adr-014-secretumvault-integration.html#rationale","architecture/adr/adr-014-secretumvault-integration.html#why-secretumvault-is-required","architecture/adr/adr-014-secretumvault-integration.html#why-not-continue-with-sops-alone","architecture/adr/adr-014-secretumvault-integration.html#why-secretumvault-over-hashicorp-vault","architecture/adr/adr-014-secretumvault-integration.html#integration-with-existing-security-architecture","architecture/adr/adr-014-secretumvault-integration.html#consequences","architecture/adr/adr-014-secretumvault-integration.html#positive","architecture/adr/adr-014-secretumvault-integration.html#negative","architecture/adr/adr-014-secretumvault-integration.html#mitigation-strategies","architecture/adr/adr-014-secretumvault-integration.html#alternatives-considered","architecture/adr/adr-014-secretumvault-integration.html#alternative-1-continue-with-sops-only","architecture/adr/adr-014-secretumvault-integration.html#alternative-2-hashicorp-vault","architecture/adr/adr-014-secretumvault-integration.html#alternative-3-cloud-provider-native-aws-secrets-manager-azure-key-vault","architecture/adr/adr-014-secretumvault-integration.html#alternative-4-cyberark-1password-and-others","architecture/adr/adr-014-secretumvault-integration.html#alternative-5-build-custom-secrets-manager","architecture/adr/adr-014-secretumvault-integration.html#implementation-details","architecture/adr/adr-014-secretumvault-integration.html#secretumvault-deployment","architecture/adr/adr-014-secretumvault-integration.html#rust-client-library","architecture/adr/adr-014-secretumvault-integration.html#nushell-integration","architecture/adr/adr-014-secretumvault-integration.html#nickel-configuration-integration","architecture/adr/adr-014-secretumvault-integration.html#cedar-policy-for-secret-access","architecture/adr/adr-014-secretumvault-integration.html#dynamic-database-credentials","architecture/adr/adr-014-secretumvault-integration.html#secret-rotation-automation","architecture/adr/adr-014-secretumvault-integration.html#audit-log-format","architecture/adr/adr-014-secretumvault-integration.html#testing-strategy","architecture/adr/adr-014-secretumvault-integration.html#configuration-integration","architecture/adr/adr-014-secretumvault-integration.html#migration-path","architecture/adr/adr-014-secretumvault-integration.html#documentation-requirements","architecture/adr/adr-014-secretumvault-integration.html#references","architecture/adr/adr-015-ai-integration-architecture.html#adr-015-ai-integration-architecture-for-intelligent-infrastructure-provisioning","architecture/adr/adr-015-ai-integration-architecture.html#status","architecture/adr/adr-015-ai-integration-architecture.html#context","architecture/adr/adr-015-ai-integration-architecture.html#the-infrastructure-complexity-problem","architecture/adr/adr-015-ai-integration-architecture.html#ai-integration-opportunities","architecture/adr/adr-015-ai-integration-architecture.html#ai-components-overview","architecture/adr/adr-015-ai-integration-architecture.html#requirements-for-ai-integration","architecture/adr/adr-015-ai-integration-architecture.html#decision","architecture/adr/adr-015-ai-integration-architecture.html#architecture-diagram","architecture/adr/adr-015-ai-integration-architecture.html#component-responsibilities","architecture/adr/adr-015-ai-integration-architecture.html#rationale","architecture/adr/adr-015-ai-integration-architecture.html#why-ai-integration-is-essential","architecture/adr/adr-015-ai-integration-architecture.html#why-schema-aware-ai-is-critical","architecture/adr/adr-015-ai-integration-architecture.html#why-rag-retrieval-augmented-generation-is-essential","architecture/adr/adr-015-ai-integration-architecture.html#why-human-in-the-loop-is-non-negotiable","architecture/adr/adr-015-ai-integration-architecture.html#why-multi-provider-support-matters","architecture/adr/adr-015-ai-integration-architecture.html#consequences","architecture/adr/adr-015-ai-integration-architecture.html#positive","architecture/adr/adr-015-ai-integration-architecture.html#negative","architecture/adr/adr-015-ai-integration-architecture.html#mitigation-strategies","architecture/adr/adr-015-ai-integration-architecture.html#alternatives-considered","architecture/adr/adr-015-ai-integration-architecture.html#alternative-1-no-ai-integration","architecture/adr/adr-015-ai-integration-architecture.html#alternative-2-generic-ai-code-generation-github-copilot-approach","architecture/adr/adr-015-ai-integration-architecture.html#alternative-3-ai-only-for-documentationsearch","architecture/adr/adr-015-ai-integration-architecture.html#alternative-4-fully-autonomous-ai-no-human-approval","architecture/adr/adr-015-ai-integration-architecture.html#alternative-5-single-llm-provider-lock-in","architecture/adr/adr-015-ai-integration-architecture.html#implementation-details","architecture/adr/adr-015-ai-integration-architecture.html#ai-service-api","architecture/adr/adr-015-ai-integration-architecture.html#mcp-server-integration","architecture/adr/adr-015-ai-integration-architecture.html#rag-system-implementation","architecture/adr/adr-015-ai-integration-architecture.html#typdialog-ai-integration","architecture/adr/adr-015-ai-integration-architecture.html#typdialog-ag-agents","architecture/adr/adr-015-ai-integration-architecture.html#cedar-policies-for-ai","architecture/adr/adr-015-ai-integration-architecture.html#testing-strategy","architecture/adr/adr-015-ai-integration-architecture.html#security-considerations","architecture/adr/adr-015-ai-integration-architecture.html#cost-analysis","architecture/adr/adr-015-ai-integration-architecture.html#references","roadmap/index.html#advanced-features--roadmap","roadmap/index.html#status-legend","roadmap/index.html#fully-implemented-features","roadmap/index.html#ai-integration-system--","roadmap/index.html#native-nushell-plugins--","roadmap/index.html#nickel-workflow-system--","roadmap/index.html#using-these-features","roadmap/index.html#roadmap---future-enhancements","roadmap/index.html#q1-2025","roadmap/index.html#q2-2025-planned","roadmap/index.html#q3-2025-planned","roadmap/index.html#q4-2025-planned","roadmap/ai-integration.html#ai-integration---production-features","roadmap/ai-integration.html#overview","roadmap/ai-integration.html#planned-features","roadmap/ai-integration.html#1-natural-language-configuration","roadmap/ai-integration.html#2-ai-assisted-forms","roadmap/ai-integration.html#3-rag-system-retrieval-augmented-generation","roadmap/ai-integration.html#4-ai-agents","roadmap/ai-integration.html#5-configuration-generation-from-templates","roadmap/ai-integration.html#6-security-policies-with-ai","roadmap/ai-integration.html#7-cost-management","roadmap/ai-integration.html#8-mcp-integration","roadmap/ai-integration.html#dependencies","roadmap/ai-integration.html#implementation-approach","roadmap/ai-integration.html#phase-1-foundation-q1-2025","roadmap/ai-integration.html#phase-2-enhancement-q2-2025","roadmap/ai-integration.html#phase-3-automation-q3-2025","roadmap/ai-integration.html#phase-4-integration-q4-2025","roadmap/ai-integration.html#current-workarounds","roadmap/ai-integration.html#contributing","roadmap/ai-integration.html#related-resources","roadmap/native-plugins.html#native-nushell-plugins---complete-implementation","roadmap/native-plugins.html#current-status","roadmap/native-plugins.html#-implemented","roadmap/native-plugins.html#-fully-implemented","roadmap/native-plugins.html#plugin-architecture","roadmap/native-plugins.html#three-tier-approach","roadmap/native-plugins.html#integration-points","roadmap/native-plugins.html#development-roadmap","roadmap/native-plugins.html#phase-1-http-fallback--complete","roadmap/native-plugins.html#phase-2-plugin-framework--in-progress","roadmap/native-plugins.html#phase-3-native-plugins-planned","roadmap/native-plugins.html#phase-4-integration-planned","roadmap/native-plugins.html#using-plugins-today","roadmap/native-plugins.html#available","roadmap/native-plugins.html#fallback-http-based","roadmap/native-plugins.html#manual-nushell-workflows","roadmap/native-plugins.html#plugin-development-guide","roadmap/native-plugins.html#troubleshooting","roadmap/native-plugins.html#plugin-not-found","roadmap/native-plugins.html#plugin-timeout","roadmap/native-plugins.html#plugin-not-in-help","roadmap/native-plugins.html#related-documents","roadmap/native-plugins.html#feedback--contributions","roadmap/nickel-workflows.html#nickel-workflow-system---complete-implementation","roadmap/nickel-workflows.html#current-implementation","roadmap/nickel-workflows.html#-nushell-workflows-production-ready","roadmap/nickel-workflows.html#-nickel-workflows-implemented","roadmap/nickel-workflows.html#architecture","roadmap/nickel-workflows.html#available-capabilities","roadmap/nickel-workflows.html#comparison-nushell-vs-nickel-workflows","roadmap/nickel-workflows.html#when-to-use-which","roadmap/nickel-workflows.html#implementation-status","roadmap/nickel-workflows.html#completed-implementation","roadmap/nickel-workflows.html#ongoing-enhancements","roadmap/nickel-workflows.html#current-workarounds","roadmap/nickel-workflows.html#migration-path","roadmap/nickel-workflows.html#example-future-nickel-workflow","roadmap/nickel-workflows.html#related-documents","roadmap/nickel-workflows.html#contributing","api-reference/rest-api.html#rest-api-reference","api-reference/rest-api.html#overview","api-reference/rest-api.html#base-urls","api-reference/rest-api.html#authentication","api-reference/rest-api.html#jwt-authentication","api-reference/rest-api.html#getting-access-token","api-reference/rest-api.html#orchestrator-api-endpoints","api-reference/rest-api.html#health-check","api-reference/rest-api.html#task-management","api-reference/rest-api.html#workflow-submission","api-reference/rest-api.html#batch-operations","api-reference/rest-api.html#state-management","api-reference/rest-api.html#rollback-and-recovery","api-reference/rest-api.html#control-center-api-endpoints","api-reference/rest-api.html#authentication-1","api-reference/rest-api.html#user-management","api-reference/rest-api.html#policy-management","api-reference/rest-api.html#audit-logging","api-reference/rest-api.html#error-responses","api-reference/rest-api.html#http-status-codes","api-reference/rest-api.html#rate-limiting","api-reference/rest-api.html#monitoring-endpoints","api-reference/rest-api.html#get-metrics","api-reference/rest-api.html#websocket-ws","api-reference/rest-api.html#sdk-examples","api-reference/rest-api.html#python-sdk-example","api-reference/rest-api.html#javascriptnodejs-sdk-example","api-reference/rest-api.html#webhook-integration","api-reference/rest-api.html#webhook-configuration","api-reference/rest-api.html#webhook-payload","api-reference/rest-api.html#pagination","api-reference/rest-api.html#api-versioning","api-reference/rest-api.html#testing","api-reference/websocket.html#websocket-api-reference","api-reference/websocket.html#overview","api-reference/websocket.html#websocket-endpoints","api-reference/websocket.html#primary-websocket-endpoint","api-reference/websocket.html#specialized-websocket-endpoints","api-reference/websocket.html#authentication","api-reference/websocket.html#jwt-token-authentication","api-reference/websocket.html#connection-authentication-flow","api-reference/websocket.html#event-types-and-schemas","api-reference/websocket.html#core-event-types","api-reference/websocket.html#custom-event-types","api-reference/websocket.html#client-side-javascript-api","api-reference/websocket.html#connection-management","api-reference/websocket.html#real-time-dashboard-example","api-reference/websocket.html#server-side-implementation","api-reference/websocket.html#rust-websocket-handler","api-reference/websocket.html#event-filtering-and-subscriptions","api-reference/websocket.html#client-side-filtering","api-reference/websocket.html#server-side-event-filtering","api-reference/websocket.html#error-handling-and-reconnection","api-reference/websocket.html#connection-errors","api-reference/websocket.html#heartbeat-and-keep-alive","api-reference/websocket.html#performance-considerations","api-reference/websocket.html#message-batching","api-reference/websocket.html#compression","api-reference/websocket.html#rate-limiting","api-reference/websocket.html#security-considerations","api-reference/websocket.html#authentication-and-authorization","api-reference/websocket.html#message-validation","api-reference/websocket.html#data-sanitization","api-reference/extensions.html#extension-development-api","api-reference/extensions.html#overview","api-reference/extensions.html#extension-structure","api-reference/extensions.html#standard-directory-layout","api-reference/extensions.html#provider-extension-api","api-reference/extensions.html#provider-interface","api-reference/extensions.html#provider-development-template","api-reference/extensions.html#provider-registration","api-reference/extensions.html#task-service-extension-api","api-reference/extensions.html#task-service-interface","api-reference/extensions.html#task-service-development-template","api-reference/extensions.html#cluster-extension-api","api-reference/extensions.html#cluster-interface","api-reference/extensions.html#cluster-development-template","api-reference/extensions.html#extension-registration-and-discovery","api-reference/extensions.html#extension-registry","api-reference/extensions.html#registration-api","api-reference/extensions.html#extension-validation","api-reference/extensions.html#testing-extensions","api-reference/extensions.html#test-framework","api-reference/extensions.html#running-tests","api-reference/extensions.html#documentation-requirements","api-reference/extensions.html#extension-documentation","api-reference/extensions.html#api-documentation-template","api-reference/extensions.html#best-practices","api-reference/extensions.html#development-guidelines","api-reference/extensions.html#performance-considerations","api-reference/extensions.html#security-best-practices","api-reference/sdks.html#sdk-documentation","api-reference/sdks.html#available-sdks","api-reference/sdks.html#official-sdks","api-reference/sdks.html#community-sdks","api-reference/sdks.html#python-sdk","api-reference/sdks.html#installation","api-reference/sdks.html#quick-start","api-reference/sdks.html#advanced-usage","api-reference/sdks.html#api-reference","api-reference/sdks.html#javascripttypescript-sdk","api-reference/sdks.html#installation-1","api-reference/sdks.html#quick-start-1","api-reference/sdks.html#react-integration","api-reference/sdks.html#nodejs-cli-tool","api-reference/sdks.html#api-reference-1","api-reference/sdks.html#go-sdk","api-reference/sdks.html#installation-2","api-reference/sdks.html#quick-start-2","api-reference/sdks.html#websocket-integration","api-reference/sdks.html#http-client-with-retry-logic","api-reference/sdks.html#rust-sdk","api-reference/sdks.html#installation-3","api-reference/sdks.html#quick-start-3","api-reference/sdks.html#websocket-integration-1","api-reference/sdks.html#batch-operations","api-reference/sdks.html#best-practices","api-reference/sdks.html#authentication-and-security","api-reference/sdks.html#error-handling","api-reference/sdks.html#performance-optimization","api-reference/sdks.html#websocket-connections","api-reference/sdks.html#testing","api-reference/integration-examples.html#integration-examples","api-reference/integration-examples.html#overview","api-reference/integration-examples.html#complete-integration-examples","api-reference/integration-examples.html#python-integration","api-reference/integration-examples.html#nodejsjavascript-integration","api-reference/integration-examples.html#error-handling-strategies","api-reference/integration-examples.html#comprehensive-error-handling","api-reference/integration-examples.html#circuit-breaker-pattern","api-reference/integration-examples.html#performance-optimization","api-reference/integration-examples.html#connection-pooling-and-caching","api-reference/integration-examples.html#websocket-connection-pooling","api-reference/integration-examples.html#sdk-documentation","api-reference/integration-examples.html#python-sdk","api-reference/integration-examples.html#javascripttypescript-sdk","api-reference/integration-examples.html#common-integration-patterns","api-reference/integration-examples.html#workflow-orchestration-pipeline","api-reference/integration-examples.html#event-driven-architecture","api-reference/provider-api.html#provider-api-reference","api-reference/provider-api.html#overview","api-reference/provider-api.html#supported-providers","api-reference/provider-api.html#provider-interface","api-reference/provider-api.html#required-functions","api-reference/provider-api.html#provider-configuration","api-reference/provider-api.html#creating-a-custom-provider","api-reference/provider-api.html#1-directory-structure","api-reference/provider-api.html#2-implementation-template","api-reference/provider-api.html#3-nickel-schema","api-reference/provider-api.html#provider-discovery","api-reference/provider-api.html#provider-api-examples","api-reference/provider-api.html#create-servers","api-reference/provider-api.html#list-servers","api-reference/provider-api.html#get-pricing","api-reference/provider-api.html#testing-providers","api-reference/provider-api.html#provider-development-guide","api-reference/provider-api.html#api-stability","api-reference/nushell-api.html#nushell-api-reference","api-reference/nushell-api.html#overview","api-reference/nushell-api.html#core-modules","api-reference/nushell-api.html#configuration-module","api-reference/nushell-api.html#server-module","api-reference/nushell-api.html#task-service-module","api-reference/nushell-api.html#workspace-module","api-reference/nushell-api.html#provider-module","api-reference/nushell-api.html#diagnostics--utilities","api-reference/nushell-api.html#diagnostics-module","api-reference/nushell-api.html#hints-module","api-reference/nushell-api.html#usage-example","api-reference/nushell-api.html#api-conventions","api-reference/nushell-api.html#best-practices","api-reference/nushell-api.html#source-code","api-reference/path-resolution.html#path-resolution-api","api-reference/path-resolution.html#overview","api-reference/path-resolution.html#configuration-resolution-hierarchy","api-reference/path-resolution.html#configuration-search-paths","api-reference/path-resolution.html#path-resolution-api-1","api-reference/path-resolution.html#core-functions","api-reference/path-resolution.html#path-interpolation","api-reference/path-resolution.html#extension-discovery-api","api-reference/path-resolution.html#provider-discovery","api-reference/path-resolution.html#task-service-discovery","api-reference/path-resolution.html#cluster-discovery","api-reference/path-resolution.html#environment-management-api","api-reference/path-resolution.html#environment-detection","api-reference/path-resolution.html#environment-switching","api-reference/path-resolution.html#workspace-management-api","api-reference/path-resolution.html#workspace-discovery","api-reference/path-resolution.html#project-structure-analysis","api-reference/path-resolution.html#caching-and-performance","api-reference/path-resolution.html#path-caching","api-reference/path-resolution.html#cross-platform-compatibility","api-reference/path-resolution.html#path-normalization","api-reference/path-resolution.html#configuration-validation-api","api-reference/path-resolution.html#path-validation","api-reference/path-resolution.html#command-line-interface","api-reference/path-resolution.html#path-resolution-commands","api-reference/path-resolution.html#integration-examples","api-reference/path-resolution.html#python-integration","api-reference/path-resolution.html#javascriptnodejs-integration","api-reference/path-resolution.html#error-handling","api-reference/path-resolution.html#common-error-scenarios","api-reference/path-resolution.html#error-recovery","api-reference/path-resolution.html#performance-considerations","api-reference/path-resolution.html#best-practices","api-reference/path-resolution.html#monitoring","api-reference/path-resolution.html#security-considerations","api-reference/path-resolution.html#path-traversal-protection","api-reference/path-resolution.html#access-control","development/infrastructure-specific-extensions.html#infrastructure-specific-extension-development","development/infrastructure-specific-extensions.html#table-of-contents","development/infrastructure-specific-extensions.html#overview","development/infrastructure-specific-extensions.html#infrastructure-assessment","development/infrastructure-specific-extensions.html#identifying-extension-needs","development/infrastructure-specific-extensions.html#requirements-gathering","development/infrastructure-specific-extensions.html#custom-taskserv-development","development/infrastructure-specific-extensions.html#company-specific-application-taskserv","development/infrastructure-specific-extensions.html#compliance-focused-taskserv","development/infrastructure-specific-extensions.html#provider-specific-extensions","development/infrastructure-specific-extensions.html#custom-cloud-provider-integration","development/infrastructure-specific-extensions.html#multi-environment-management","development/infrastructure-specific-extensions.html#environment-specific-configuration-management","development/infrastructure-specific-extensions.html#integration-patterns","development/infrastructure-specific-extensions.html#legacy-system-integration","development/infrastructure-specific-extensions.html#real-world-examples","development/infrastructure-specific-extensions.html#example-1-financial-services-company","development/infrastructure-specific-extensions.html#example-2-healthcare-organization","development/infrastructure-specific-extensions.html#example-3-manufacturing-company","development/infrastructure-specific-extensions.html#usage-examples","development/command-handler-guide.html#command-handler-developer-guide","development/command-handler-guide.html#overview","development/command-handler-guide.html#key-architecture-principles","development/command-handler-guide.html#architecture-components","development/command-handler-guide.html#adding-new-commands","development/command-handler-guide.html#step-1-choose-the-right-domain-handler","development/command-handler-guide.html#step-2-add-command-to-handler","development/command-handler-guide.html#step-3-add-shortcuts-optional","development/command-handler-guide.html#modifying-existing-handlers","development/command-handler-guide.html#example-enhancing-the-taskserv-command","development/command-handler-guide.html#working-with-flags","development/command-handler-guide.html#using-centralized-flag-handling","development/command-handler-guide.html#available-flag-parsing","development/command-handler-guide.html#adding-new-flags","development/command-handler-guide.html#adding-new-shortcuts","development/command-handler-guide.html#shortcut-naming-conventions","development/command-handler-guide.html#example-adding-a-new-shortcut","development/command-handler-guide.html#testing-your-changes","development/command-handler-guide.html#running-the-test-suite","development/command-handler-guide.html#test-coverage","development/command-handler-guide.html#adding-tests-for-your-changes","development/command-handler-guide.html#manual-testing","development/command-handler-guide.html#common-patterns","development/command-handler-guide.html#pattern-1-simple-command-handler","development/command-handler-guide.html#pattern-2-command-with-validation","development/command-handler-guide.html#pattern-3-command-with-subcommands","development/command-handler-guide.html#pattern-4-command-with-flag-based-routing","development/command-handler-guide.html#best-practices","development/command-handler-guide.html#1-keep-handlers-focused","development/command-handler-guide.html#2-use-descriptive-error-messages","development/command-handler-guide.html#3-leverage-centralized-functions","development/command-handler-guide.html#4-document-your-changes","development/command-handler-guide.html#5-test-thoroughly","development/command-handler-guide.html#troubleshooting","development/command-handler-guide.html#issue-module-not-found","development/command-handler-guide.html#issue-parse-mismatch-expected-colon","development/command-handler-guide.html#issue-command-not-routing-correctly","development/command-handler-guide.html#issue-flags-not-being-passed","development/command-handler-guide.html#quick-reference","development/command-handler-guide.html#file-locations","development/command-handler-guide.html#key-functions","development/command-handler-guide.html#testing-commands","development/command-handler-guide.html#further-reading","development/command-handler-guide.html#contributing","development/workflow.html#development-workflow-guide","development/workflow.html#table-of-contents","development/workflow.html#overview","development/workflow.html#development-setup","development/workflow.html#initial-environment-setup","development/workflow.html#tool-installation","development/workflow.html#ide-configuration","development/workflow.html#daily-development-workflow","development/workflow.html#morning-routine","development/workflow.html#development-cycle","development/workflow.html#testing-during-development","development/workflow.html#end-of-day-routine","development/workflow.html#code-organization","development/workflow.html#nushell-code-structure","development/workflow.html#rust-code-structure","development/workflow.html#nickel-schema-organization","development/workflow.html#testing-strategies","development/workflow.html#test-driven-development","development/workflow.html#nushell-testing","development/workflow.html#rust-testing","development/workflow.html#nickel-testing","development/workflow.html#test-automation","development/workflow.html#debugging-techniques","development/workflow.html#debug-configuration","development/workflow.html#nushell-debugging","development/workflow.html#rust-debugging","development/workflow.html#log-analysis","development/workflow.html#integration-workflows","development/workflow.html#existing-system-integration","development/workflow.html#api-integration-testing","development/workflow.html#database-integration","development/workflow.html#external-tool-integration","development/workflow.html#collaboration-guidelines","development/workflow.html#branch-strategy","development/workflow.html#code-review-process","development/workflow.html#documentation-requirements","development/workflow.html#communication","development/workflow.html#quality-assurance","development/workflow.html#code-quality-checks","development/workflow.html#performance-monitoring","development/workflow.html#best-practices","development/workflow.html#configuration-management","development/workflow.html#error-handling","development/workflow.html#resource-management","development/workflow.html#testing-best-practices","development/integration.html#integration-guide","development/integration.html#table-of-contents","development/integration.html#overview","development/integration.html#existing-system-integration","development/integration.html#command-line-interface-integration","development/integration.html#configuration-system-bridge","development/integration.html#data-integration","development/integration.html#process-integration","development/integration.html#api-compatibility-and-versioning","development/integration.html#rest-api-versioning","development/integration.html#api-compatibility-layer","development/integration.html#schema-evolution","development/integration.html#client-sdk-compatibility","development/integration.html#database-migration-strategies","development/integration.html#database-architecture-evolution","development/integration.html#migration-scripts","development/integration.html#data-integrity-verification","development/integration.html#deployment-considerations","development/integration.html#deployment-architecture","development/integration.html#deployment-strategies","development/integration.html#configuration-deployment","development/integration.html#container-integration","development/integration.html#monitoring-and-observability","development/integration.html#integrated-monitoring-architecture","development/integration.html#metrics-integration","development/integration.html#logging-integration","development/integration.html#health-check-integration","development/integration.html#legacy-system-bridge","development/integration.html#bridge-architecture","development/integration.html#bridge-operation-modes","development/integration.html#migration-pathways","development/integration.html#migration-phases","development/integration.html#migration-automation","development/integration.html#troubleshooting-integration-issues","development/integration.html#common-integration-problems","development/integration.html#debug-tools","development/build-system.html#build-system-documentation","development/build-system.html#table-of-contents","development/build-system.html#overview","development/build-system.html#quick-start","development/build-system.html#makefile-reference","development/build-system.html#build-configuration","development/build-system.html#build-targets","development/build-system.html#build-tools","development/build-system.html#core-build-scripts","development/build-system.html#distribution-tools","development/build-system.html#package-tools","development/build-system.html#release-tools","development/build-system.html#cross-platform-compilation","development/build-system.html#supported-platforms","development/build-system.html#cross-compilation-setup","development/build-system.html#cross-compilation-usage","development/build-system.html#dependency-management","development/build-system.html#build-dependencies","development/build-system.html#dependency-validation","development/build-system.html#dependency-caching","development/build-system.html#troubleshooting","development/build-system.html#common-build-issues","development/build-system.html#build-performance-issues","development/build-system.html#distribution-issues","development/build-system.html#debug-mode","development/build-system.html#cicd-integration","development/build-system.html#github-actions","development/build-system.html#release-automation","development/build-system.html#local-ci-testing","development/distribution-process.html#distribution-process-documentation","development/distribution-process.html#table-of-contents","development/distribution-process.html#overview","development/distribution-process.html#distribution-architecture","development/distribution-process.html#distribution-components","development/distribution-process.html#build-pipeline","development/distribution-process.html#distribution-variants","development/distribution-process.html#release-process","development/distribution-process.html#release-types","development/distribution-process.html#step-by-step-release-process","development/distribution-process.html#release-automation","development/distribution-process.html#package-generation","development/distribution-process.html#binary-packages","development/distribution-process.html#container-images","development/distribution-process.html#installers","development/distribution-process.html#multi-platform-distribution","development/distribution-process.html#supported-platforms","development/distribution-process.html#cross-platform-build","development/distribution-process.html#distribution-matrix","development/distribution-process.html#validation-and-testing","development/distribution-process.html#distribution-validation","development/distribution-process.html#testing-framework","development/distribution-process.html#package-validation","development/distribution-process.html#release-management","development/distribution-process.html#release-workflow","development/distribution-process.html#versioning-strategy","development/distribution-process.html#artifact-management","development/distribution-process.html#rollback-procedures","development/distribution-process.html#rollback-scenarios","development/distribution-process.html#rollback-process","development/distribution-process.html#rollback-safety","development/distribution-process.html#emergency-procedures","development/distribution-process.html#cicd-integration","development/distribution-process.html#github-actions-integration","development/distribution-process.html#gitlab-ci-integration","development/distribution-process.html#jenkins-integration","development/distribution-process.html#troubleshooting","development/distribution-process.html#common-issues","development/distribution-process.html#release-issues","development/distribution-process.html#debug-and-monitoring","development/implementation-guide.html#repository-restructuring---implementation-guide","development/implementation-guide.html#overview","development/implementation-guide.html#prerequisites","development/implementation-guide.html#required-tools","development/implementation-guide.html#recommended-tools","development/implementation-guide.html#before-starting","development/implementation-guide.html#phase-1-repository-restructuring-days-1-4","development/implementation-guide.html#day-1-backup-and-analysis","development/implementation-guide.html#day-2-directory-restructuring","development/implementation-guide.html#day-3-update-path-references","development/implementation-guide.html#day-4-validation-and-testing","development/implementation-guide.html#phase-2-build-system-implementation-days-5-8","development/implementation-guide.html#day-5-build-system-core","development/implementation-guide.html#day-6-8-continue-with-platform-extensions-and-validation","development/implementation-guide.html#phase-3-installation-system-days-9-11","development/implementation-guide.html#day-9-nushell-installer","development/implementation-guide.html#rollback-procedures","development/implementation-guide.html#if-phase-1-fails","development/implementation-guide.html#if-build-system-fails","development/implementation-guide.html#if-installation-fails","development/implementation-guide.html#checklist","development/implementation-guide.html#phase-1-repository-restructuring","development/implementation-guide.html#phase-2-build-system","development/implementation-guide.html#phase-3-installation","development/implementation-guide.html#phase-4-registry-optional","development/implementation-guide.html#phase-5-documentation","development/implementation-guide.html#notes","development/implementation-guide.html#support","development/project-structure.html#project-structure-guide","development/project-structure.html#table-of-contents","development/project-structure.html#overview","development/project-structure.html#new-structure-vs-legacy","development/project-structure.html#new-development-structure-src","development/project-structure.html#legacy-structure-preserved","development/project-structure.html#development-workspace-workspace","development/project-structure.html#core-directories","development/project-structure.html#srccore---core-development-libraries","development/project-structure.html#srctools---build-and-development-tools","development/project-structure.html#srcorchestrator---hybrid-orchestrator","development/project-structure.html#srcprovisioning---enhanced-provisioning","development/project-structure.html#workspace---development-workspace","development/project-structure.html#development-workspace","development/project-structure.html#workspace-management","development/project-structure.html#extension-development","development/project-structure.html#configuration-hierarchy","development/project-structure.html#file-naming-conventions","development/project-structure.html#nushell-files-nu","development/project-structure.html#configuration-files","development/project-structure.html#nickel-files-ncl","development/project-structure.html#build-and-distribution","development/project-structure.html#navigation-guide","development/project-structure.html#finding-components","development/project-structure.html#common-workflows","development/project-structure.html#legacy-compatibility","development/project-structure.html#migration-path","development/project-structure.html#for-users","development/project-structure.html#for-developers","development/project-structure.html#migration-tools","development/project-structure.html#architecture-benefits","development/project-structure.html#development-efficiency","development/project-structure.html#production-reliability","development/project-structure.html#maintenance-benefits","development/ctrl-c-implementation-notes.html#ctrl-c-handling-implementation-notes","development/ctrl-c-implementation-notes.html#overview","development/ctrl-c-implementation-notes.html#problem-statement","development/ctrl-c-implementation-notes.html#solution-architecture","development/ctrl-c-implementation-notes.html#key-principle-return-values-not-exit-codes","development/ctrl-c-implementation-notes.html#three-layer-approach","development/ctrl-c-implementation-notes.html#implementation-details","development/ctrl-c-implementation-notes.html#1-helper-functions-sshnu11-32","development/ctrl-c-implementation-notes.html#2-pre-emptive-warning-sshnu155-160","development/ctrl-c-implementation-notes.html#3-ctrl-c-detection-sshnu171-199","development/ctrl-c-implementation-notes.html#4-state-accumulation-pattern-sshnu122-129","development/ctrl-c-implementation-notes.html#5-caller-handling-createnu262-266-generatenu269-273","development/ctrl-c-implementation-notes.html#error-flow-diagram","development/ctrl-c-implementation-notes.html#nushell-idioms-used","development/ctrl-c-implementation-notes.html#1-do---ignore-errors--complete","development/ctrl-c-implementation-notes.html#2-reduce-for-accumulation","development/ctrl-c-implementation-notes.html#3-early-returns-for-error-handling","development/ctrl-c-implementation-notes.html#testing-scenarios","development/ctrl-c-implementation-notes.html#scenario-1-ctrl-c-during-first-sudo-command","development/ctrl-c-implementation-notes.html#scenario-2-pre-cached-credentials","development/ctrl-c-implementation-notes.html#scenario-3-wrong-password-3-times","development/ctrl-c-implementation-notes.html#scenario-4-multiple-servers-cancel-on-second","development/ctrl-c-implementation-notes.html#maintenance-notes","development/ctrl-c-implementation-notes.html#adding-new-sudo-commands","development/ctrl-c-implementation-notes.html#common-pitfalls","development/ctrl-c-implementation-notes.html#future-improvements","development/ctrl-c-implementation-notes.html#references","development/ctrl-c-implementation-notes.html#related-files","development/ctrl-c-implementation-notes.html#changelog","development/auth-metadata-guide.html#metadata-driven-authentication-system---implementation-guide","development/auth-metadata-guide.html#table-of-contents","development/auth-metadata-guide.html#overview","development/auth-metadata-guide.html#architecture","development/auth-metadata-guide.html#system-components","development/auth-metadata-guide.html#data-flow","development/auth-metadata-guide.html#metadata-caching","development/auth-metadata-guide.html#installation","development/auth-metadata-guide.html#prerequisites","development/auth-metadata-guide.html#installation-steps","development/auth-metadata-guide.html#usage-guide","development/auth-metadata-guide.html#basic-commands","development/auth-metadata-guide.html#authentication-flow","development/auth-metadata-guide.html#check-mode-bypass-auth-for-testing","development/auth-metadata-guide.html#non-interactive-cicd-mode","development/auth-metadata-guide.html#migration-path","development/auth-metadata-guide.html#phase-1-from-old-input-to-metadata","development/auth-metadata-guide.html#phase-2-adding-metadata-headers","development/auth-metadata-guide.html#phase-3-validating-migration","development/auth-metadata-guide.html#developer-guide","development/auth-metadata-guide.html#adding-new-commands-with-metadata","development/auth-metadata-guide.html#metadata-field-reference","development/auth-metadata-guide.html#standard-tags","development/auth-metadata-guide.html#performance-optimization-patterns","development/auth-metadata-guide.html#testing","development/auth-metadata-guide.html#running-tests","development/auth-metadata-guide.html#test-coverage","development/auth-metadata-guide.html#expected-results","development/auth-metadata-guide.html#troubleshooting","development/auth-metadata-guide.html#issue-command-not-found","development/auth-metadata-guide.html#issue-auth-check-failing","development/auth-metadata-guide.html#issue-slow-command-execution","development/auth-metadata-guide.html#issue-nushell-syntax-error","development/auth-metadata-guide.html#performance-characteristics","development/auth-metadata-guide.html#baseline-metrics","development/auth-metadata-guide.html#real-world-impact","development/auth-metadata-guide.html#next-steps","development/kms-simplification.html#kms-simplification-migration-guide","development/kms-simplification.html#overview","development/kms-simplification.html#what-changed","development/kms-simplification.html#removed","development/kms-simplification.html#added","development/kms-simplification.html#modified","development/kms-simplification.html#why-this-change","development/kms-simplification.html#problems-with-previous-approach","development/kms-simplification.html#benefits-of-simplified-approach","development/kms-simplification.html#migration-steps","development/kms-simplification.html#for-development-environments","development/kms-simplification.html#for-production-environments","development/kms-simplification.html#configuration-comparison","development/kms-simplification.html#before-4-backends","development/kms-simplification.html#after-2-backends","development/kms-simplification.html#breaking-changes","development/kms-simplification.html#api-changes","development/kms-simplification.html#code-migration","development/kms-simplification.html#rust-code","development/kms-simplification.html#nushell-code","development/kms-simplification.html#rollback-plan","development/kms-simplification.html#testing-the-migration","development/kms-simplification.html#development-testing","development/kms-simplification.html#production-testing","development/kms-simplification.html#troubleshooting","development/kms-simplification.html#age-keys-not-found","development/kms-simplification.html#cosmian-connection-failed","development/kms-simplification.html#compilation-errors","development/kms-simplification.html#support","development/kms-simplification.html#timeline","development/kms-simplification.html#faqs","development/kms-simplification.html#checklist","development/kms-simplification.html#development-migration","development/kms-simplification.html#production-migration","development/kms-simplification.html#conclusion","development/glossary.html#provisioning-platform-glossary","development/glossary.html#a","development/glossary.html#adr-architecture-decision-record","development/glossary.html#agent","development/glossary.html#anchor-link","development/glossary.html#api-gateway","development/glossary.html#auth-authentication","development/glossary.html#authorization","development/glossary.html#b","development/glossary.html#batch-operation","development/glossary.html#break-glass","development/glossary.html#c","development/glossary.html#cedar","development/glossary.html#checkpoint","development/glossary.html#cli-command-line-interface","development/glossary.html#cluster","development/glossary.html#compliance","development/glossary.html#config-configuration","development/glossary.html#control-center","development/glossary.html#coredns","development/glossary.html#cross-reference","development/glossary.html#d","development/glossary.html#dependency","development/glossary.html#diagnostics","development/glossary.html#dynamic-secrets","development/glossary.html#e","development/glossary.html#environment","development/glossary.html#extension","development/glossary.html#f","development/glossary.html#feature","development/glossary.html#g","development/glossary.html#gdpr-general-data-protection-regulation","development/glossary.html#glossary","development/glossary.html#guide","development/glossary.html#h","development/glossary.html#health-check","development/glossary.html#hybrid-architecture","development/glossary.html#i","development/glossary.html#infrastructure","development/glossary.html#integration","development/glossary.html#internal-link","development/glossary.html#j","development/glossary.html#jwt-json-web-token","development/glossary.html#k","development/glossary.html#nickel-nickel-configuration-language","development/glossary.html#kms-key-management-service","development/glossary.html#kubernetes","development/glossary.html#l","development/glossary.html#layer","development/glossary.html#m","development/glossary.html#mcp-model-context-protocol","development/glossary.html#mfa-multi-factor-authentication","development/glossary.html#migration","development/glossary.html#module","development/glossary.html#n","development/glossary.html#nushell","development/glossary.html#o","development/glossary.html#oci-open-container-initiative","development/glossary.html#operation","development/glossary.html#orchestrator","development/glossary.html#p","development/glossary.html#pap-project-architecture-principles","development/glossary.html#platform-service","development/glossary.html#plugin","development/glossary.html#provider","development/glossary.html#q","development/glossary.html#quick-reference","development/glossary.html#r","development/glossary.html#rbac-role-based-access-control","development/glossary.html#registry","development/glossary.html#rest-api","development/glossary.html#rollback","development/glossary.html#rustyvault","development/glossary.html#s","development/glossary.html#schema","development/glossary.html#secrets-management","development/glossary.html#security-system","development/glossary.html#server","development/glossary.html#service","development/glossary.html#shortcut","development/glossary.html#sops-secrets-operations","development/glossary.html#ssh-secure-shell","development/glossary.html#state-management","development/glossary.html#t","development/glossary.html#task","development/glossary.html#taskserv","development/glossary.html#template","development/glossary.html#test-environment","development/glossary.html#topology","development/glossary.html#totp-time-based-one-time-password","development/glossary.html#troubleshooting","development/glossary.html#u","development/glossary.html#ui-user-interface","development/glossary.html#update","development/glossary.html#v","development/glossary.html#validation","development/glossary.html#version","development/glossary.html#w","development/glossary.html#webauthn","development/glossary.html#workflow","development/glossary.html#workspace","development/glossary.html#x-z","development/glossary.html#yaml","development/glossary.html#symbol-and-acronym-index","development/glossary.html#cross-reference-map","development/glossary.html#by-topic-area","development/glossary.html#by-user-journey","development/glossary.html#terminology-guidelines","development/glossary.html#writing-style","development/glossary.html#avoiding-confusion","development/glossary.html#contributing-to-the-glossary","development/glossary.html#adding-new-terms","development/glossary.html#updating-existing-terms","development/glossary.html#version-history","development/mcp-server.html#mcp-server---model-context-protocol","development/mcp-server.html#overview","development/mcp-server.html#performance-results","development/mcp-server.html#architecture","development/mcp-server.html#key-features","development/mcp-server.html#rust-vs-python-comparison","development/mcp-server.html#usage","development/mcp-server.html#configuration","development/mcp-server.html#integration-benefits","development/mcp-server.html#next-steps","development/mcp-server.html#related-documentation","development/typedialog-platform-config-guide.html#typedialog-platform-configuration-guide","development/typedialog-platform-config-guide.html#overview","development/typedialog-platform-config-guide.html#quick-start","development/typedialog-platform-config-guide.html#1-configure-a-platform-service-5-minutes","development/typedialog-platform-config-guide.html#2-review-generated-configuration","development/typedialog-platform-config-guide.html#3-validate-configuration","development/typedialog-platform-config-guide.html#4-services-use-generated-config","development/typedialog-platform-config-guide.html#interactive-configuration-workflow","development/typedialog-platform-config-guide.html#recommended-approach-use-typedialog-forms","development/typedialog-platform-config-guide.html#advanced-approach-manual-nickel-editing","development/typedialog-platform-config-guide.html#configuration-structure","development/typedialog-platform-config-guide.html#single-file-three-sections","development/typedialog-platform-config-guide.html#available-configuration-sections","development/typedialog-platform-config-guide.html#service-specific-configuration","development/typedialog-platform-config-guide.html#orchestrator-service","development/typedialog-platform-config-guide.html#kms-service","development/typedialog-platform-config-guide.html#control-center-service","development/typedialog-platform-config-guide.html#deployment-modes","development/typedialog-platform-config-guide.html#new-platform-services-phase-13-19","development/typedialog-platform-config-guide.html#vault-service","development/typedialog-platform-config-guide.html#extension-registry-service","development/typedialog-platform-config-guide.html#rag-retrieval-augmented-generation-service","development/typedialog-platform-config-guide.html#ai-service","development/typedialog-platform-config-guide.html#provisioning-daemon","development/typedialog-platform-config-guide.html#using-typedialog-forms","development/typedialog-platform-config-guide.html#form-navigation","development/typedialog-platform-config-guide.html#field-types","development/typedialog-platform-config-guide.html#special-values","development/typedialog-platform-config-guide.html#validation--export","development/typedialog-platform-config-guide.html#validating-configuration","development/typedialog-platform-config-guide.html#exporting-to-service-formats","development/typedialog-platform-config-guide.html#updating-configuration","development/typedialog-platform-config-guide.html#change-a-setting","development/typedialog-platform-config-guide.html#using-typedialog-to-update","development/typedialog-platform-config-guide.html#troubleshooting","development/typedialog-platform-config-guide.html#form-wont-load","development/typedialog-platform-config-guide.html#validation-fails","development/typedialog-platform-config-guide.html#export-creates-empty-files","development/typedialog-platform-config-guide.html#services-dont-use-new-config","development/typedialog-platform-config-guide.html#configuration-examples","development/typedialog-platform-config-guide.html#development-setup","development/typedialog-platform-config-guide.html#production-setup","development/typedialog-platform-config-guide.html#multi-provider-setup","development/typedialog-platform-config-guide.html#best-practices","development/typedialog-platform-config-guide.html#1-use-typedialog-for-initial-setup","development/typedialog-platform-config-guide.html#2-never-edit-generated-files","development/typedialog-platform-config-guide.html#3-validate-before-deploy","development/typedialog-platform-config-guide.html#4-use-environment-variables-for-secrets","development/typedialog-platform-config-guide.html#5-document-changes","development/typedialog-platform-config-guide.html#related-documentation","development/typedialog-platform-config-guide.html#core-resources","development/typedialog-platform-config-guide.html#platform-services","development/typedialog-platform-config-guide.html#public-definition-locations","development/typedialog-platform-config-guide.html#getting-help","development/typedialog-platform-config-guide.html#validation-errors","development/typedialog-platform-config-guide.html#configuration-questions","development/typedialog-platform-config-guide.html#test-configuration","development/extensions/index.html#extension-development-guide","development/extensions/index.html#table-of-contents","development/extensions/index.html#overview","development/extensions/index.html#extension-types","development/extensions/index.html#extension-architecture","development/extensions/index.html#extension-discovery","development/extensions/index.html#provider-development","development/extensions/index.html#provider-architecture","development/extensions/index.html#creating-a-new-provider","development/extensions/index.html#provider-structure","development/extensions/index.html#provider-implementation","development/extensions/index.html#provider-testing","development/extensions/index.html#task-service-development","development/extensions/index.html#task-service-architecture","development/extensions/index.html#creating-a-new-task-service","development/extensions/index.html#task-service-structure","development/extensions/index.html#task-service-implementation","development/extensions/index.html#cluster-development","development/extensions/index.html#cluster-architecture","development/extensions/index.html#creating-a-new-cluster","development/extensions/index.html#cluster-implementation","development/extensions/index.html#testing-and-validation","development/extensions/index.html#testing-framework","development/extensions/index.html#extension-testing-commands","development/extensions/index.html#automated-testing","development/extensions/index.html#publishing-and-distribution","development/extensions/index.html#extension-publishing","development/extensions/index.html#publishing-commands","development/extensions/index.html#extension-registry","development/extensions/index.html#best-practices","development/extensions/index.html#code-quality","development/extensions/index.html#error-handling","development/extensions/index.html#testing-practices","development/extensions/index.html#documentation-standards","development/extensions/index.html#troubleshooting","development/extensions/index.html#common-development-issues","development/extensions/index.html#debug-mode","development/extensions/index.html#performance-optimization","development/extensions/extension-development.html#extension-development-guide","development/extensions/extension-development.html#what-youll-learn","development/extensions/extension-development.html#extension-architecture","development/extensions/extension-development.html#extension-types","development/extensions/extension-development.html#extension-structure","development/extensions/extension-development.html#extension-metadata","development/extensions/extension-development.html#creating-custom-providers","development/extensions/extension-development.html#provider-architecture","development/extensions/extension-development.html#step-1-define-provider-schema","development/extensions/extension-development.html#step-2-implement-provider-logic","development/extensions/extension-development.html#step-3-provider-registration","development/extensions/extension-development.html#creating-custom-task-services","development/extensions/extension-development.html#task-service-architecture","development/extensions/extension-development.html#step-1-define-service-schema","development/extensions/extension-development.html#step-2-implement-service-logic","development/extensions/extension-development.html#creating-custom-clusters","development/extensions/extension-development.html#cluster-architecture","development/extensions/extension-development.html#step-1-define-cluster-schema","development/extensions/extension-development.html#step-2-implement-cluster-logic","development/extensions/extension-development.html#extension-testing","development/extensions/extension-development.html#test-structure","development/extensions/extension-development.html#example-unit-test","development/extensions/extension-development.html#integration-test","development/extensions/extension-development.html#publishing-extensions","development/extensions/extension-development.html#extension-package-structure","development/extensions/extension-development.html#publishing-configuration","development/extensions/extension-development.html#publishing-process","development/extensions/extension-development.html#best-practices","development/extensions/extension-development.html#1-code-organization","development/extensions/extension-development.html#2-error-handling","development/extensions/extension-development.html#3-configuration-validation","development/extensions/extension-development.html#4-testing","development/extensions/extension-development.html#5-documentation","development/extensions/extension-development.html#next-steps","development/extensions/extension-registry.html#extension-registry-service","development/extensions/extension-registry.html#features","development/extensions/extension-registry.html#architecture","development/extensions/extension-registry.html#dual-trait-system","development/extensions/extension-registry.html#request-strategies","development/extensions/extension-registry.html#installation","development/extensions/extension-registry.html#configuration","development/extensions/extension-registry.html#single-instance-configuration-legacy---auto-migrated","development/extensions/extension-registry.html#multi-instance-configuration-recommended","development/extensions/extension-registry.html#configuration-notes","development/extensions/extension-registry.html#environment-variable-overrides","development/extensions/extension-registry.html#api-endpoints","development/extensions/extension-registry.html#extension-operations","development/extensions/extension-registry.html#system-endpoints","development/extensions/extension-registry.html#extension-naming-conventions","development/extensions/extension-registry.html#gitea-repositories","development/extensions/extension-registry.html#oci-artifacts","development/extensions/extension-registry.html#deployment","development/extensions/extension-registry.html#docker","development/extensions/extension-registry.html#kubernetes","development/extensions/extension-registry.html#migration-guide-single-to-multi-instance","development/extensions/extension-registry.html#automatic-migration","development/extensions/extension-registry.html#before-migration","development/extensions/extension-registry.html#after-migration-automatic","development/extensions/extension-registry.html#gradual-upgrade-path","development/extensions/extension-registry.html#benefits-of-upgrading","development/extensions/extension-registry.html#related-documentation","development/providers/quick-provider-guide.html#quick-developer-guide-adding-new-providers","development/providers/quick-provider-guide.html#prerequisites","development/providers/quick-provider-guide.html#5-minute-provider-addition","development/providers/quick-provider-guide.html#step-1-create-provider-directory","development/providers/quick-provider-guide.html#step-2-copy-template-and-customize","development/providers/quick-provider-guide.html#step-3-update-provider-metadata","development/providers/quick-provider-guide.html#step-4-implement-core-functions","development/providers/quick-provider-guide.html#step-5-create-provider-specific-functions","development/providers/quick-provider-guide.html#step-6-test-your-provider","development/providers/quick-provider-guide.html#step-7-add-provider-to-infrastructure","development/providers/quick-provider-guide.html#provider-templates","development/providers/quick-provider-guide.html#cloud-provider-template","development/providers/quick-provider-guide.html#container-platform-template","development/providers/quick-provider-guide.html#bare-metal-provider-template","development/providers/quick-provider-guide.html#best-practices","development/providers/quick-provider-guide.html#1-error-handling","development/providers/quick-provider-guide.html#2-authentication","development/providers/quick-provider-guide.html#3-rate-limiting","development/providers/quick-provider-guide.html#4-provider-capabilities","development/providers/quick-provider-guide.html#testing-checklist","development/providers/quick-provider-guide.html#common-issues","development/providers/quick-provider-guide.html#provider-not-found","development/providers/quick-provider-guide.html#interface-validation-failed","development/providers/quick-provider-guide.html#authentication-errors","development/providers/quick-provider-guide.html#next-steps","development/providers/quick-provider-guide.html#getting-help","development/providers/provider-agnostic-architecture.html#provider-agnostic-architecture-documentation","development/providers/provider-agnostic-architecture.html#overview","development/providers/provider-agnostic-architecture.html#architecture-components","development/providers/provider-agnostic-architecture.html#1-provider-interface-interfacenu","development/providers/provider-agnostic-architecture.html#2-provider-registry-registrynu","development/providers/provider-agnostic-architecture.html#3-provider-loader-loadernu","development/providers/provider-agnostic-architecture.html#4-provider-adapters","development/providers/provider-agnostic-architecture.html#5-provider-agnostic-middleware-middleware_provider_agnosticnu","development/providers/provider-agnostic-architecture.html#multi-provider-support","development/providers/provider-agnostic-architecture.html#example-mixed-provider-infrastructure","development/providers/provider-agnostic-architecture.html#multi-provider-deployment","development/providers/provider-agnostic-architecture.html#provider-capabilities","development/providers/provider-agnostic-architecture.html#migration-guide","development/providers/provider-agnostic-architecture.html#from-old-middleware","development/providers/provider-agnostic-architecture.html#migration-steps","development/providers/provider-agnostic-architecture.html#adding-new-providers","development/providers/provider-agnostic-architecture.html#1-create-provider-adapter","development/providers/provider-agnostic-architecture.html#2-provider-discovery","development/providers/provider-agnostic-architecture.html#3-test-new-provider","development/providers/provider-agnostic-architecture.html#best-practices","development/providers/provider-agnostic-architecture.html#provider-development","development/providers/provider-agnostic-architecture.html#multi-provider-deployments","development/providers/provider-agnostic-architecture.html#profile-based-security","development/providers/provider-agnostic-architecture.html#troubleshooting","development/providers/provider-agnostic-architecture.html#common-issues","development/providers/provider-agnostic-architecture.html#debug-commands","development/providers/provider-agnostic-architecture.html#performance-benefits","development/providers/provider-agnostic-architecture.html#future-enhancements","development/providers/provider-agnostic-architecture.html#api-reference","development/providers/provider-development-guide.html#cloud-provider-development-guide","development/providers/provider-development-guide.html#overview-4-task-completion-framework","development/providers/provider-development-guide.html#execution-sequence","development/providers/provider-development-guide.html#nushell-01090-core-rules","development/providers/provider-development-guide.html#rule-1-module-system--imports","development/providers/provider-development-guide.html#rule-2-function-signatures","development/providers/provider-development-guide.html#rule-3-return-early-fail-fast","development/providers/provider-development-guide.html#rule-4-modern-error-handling-critical","development/providers/provider-development-guide.html#rule-5-atomic-operations","development/providers/provider-development-guide.html#rule-12-structured-error-returns","development/providers/provider-development-guide.html#critical-violations-instant-fail","development/providers/provider-development-guide.html#nickel-iac-three-file-pattern","development/providers/provider-development-guide.html#contractsncl-type-definitions","development/providers/provider-development-guide.html#defaultsncl-default-values","development/providers/provider-development-guide.html#mainncl-public-api","development/providers/provider-development-guide.html#versionncl-version-tracking","development/providers/provider-development-guide.html#tarea-1-nushell-compliance","development/providers/provider-development-guide.html#identify-violations","development/providers/provider-development-guide.html#fix-mutable-loops-accumulation-pattern","development/providers/provider-development-guide.html#fix-mutable-loops-recursive-pattern","development/providers/provider-development-guide.html#fix-error-handling","development/providers/provider-development-guide.html#validation","development/providers/provider-development-guide.html#tarea-2-test-infrastructure","development/providers/provider-development-guide.html#directory-structure","development/providers/provider-development-guide.html#mock-api-responses","development/providers/provider-development-guide.html#unit-tests-14-tests","development/providers/provider-development-guide.html#integration-tests-37-tests-across-3-modules","development/providers/provider-development-guide.html#test-orchestrator","development/providers/provider-development-guide.html#validation-1","development/providers/provider-development-guide.html#tarea-3-runtime-templates","development/providers/provider-development-guide.html#directory-structure-1","development/providers/provider-development-guide.html#template-example","development/providers/provider-development-guide.html#validation-2","development/providers/provider-development-guide.html#tarea-4-nickel-schema-validation","development/providers/provider-development-guide.html#complete-validation-script","development/providers/provider-development-guide.html#reference-implementations","development/providers/provider-development-guide.html#quick-start","development/providers/provider-distribution-guide.html#provider-distribution-guide","development/providers/provider-distribution-guide.html#table-of-contents","development/providers/provider-distribution-guide.html#overview","development/providers/provider-distribution-guide.html#module-loader-approach","development/providers/provider-distribution-guide.html#purpose","development/providers/provider-distribution-guide.html#how-it-works","development/providers/provider-distribution-guide.html#key-features","development/providers/provider-distribution-guide.html#best-use-cases","development/providers/provider-distribution-guide.html#example-workflow","development/providers/provider-distribution-guide.html#file-structure","development/providers/provider-distribution-guide.html#provider-packs-approach","development/providers/provider-distribution-guide.html#purpose-1","development/providers/provider-distribution-guide.html#how-it-works-1","development/providers/provider-distribution-guide.html#key-features-1","development/providers/provider-distribution-guide.html#best-use-cases-1","development/providers/provider-distribution-guide.html#example-workflow-1","development/providers/provider-distribution-guide.html#file-structure-1","development/providers/provider-distribution-guide.html#package-metadata-example","development/providers/provider-distribution-guide.html#comparison-matrix","development/providers/provider-distribution-guide.html#recommended-hybrid-workflow","development/providers/provider-distribution-guide.html#development-phase","development/providers/provider-distribution-guide.html#release-phase","development/providers/provider-distribution-guide.html#production-deployment","development/providers/provider-distribution-guide.html#command-reference","development/providers/provider-distribution-guide.html#module-loader-commands","development/providers/provider-distribution-guide.html#provider-pack-commands","development/providers/provider-distribution-guide.html#real-world-scenarios","development/providers/provider-distribution-guide.html#scenario-1-solo-developer---local-infrastructure","development/providers/provider-distribution-guide.html#scenario-2-small-team---shared-development","development/providers/provider-distribution-guide.html#scenario-3-medium-team---multiple-projects","development/providers/provider-distribution-guide.html#scenario-4-enterprise---production-infrastructure","development/providers/provider-distribution-guide.html#scenario-5-open-source---public-distribution","development/providers/provider-distribution-guide.html#best-practices","development/providers/provider-distribution-guide.html#for-development","development/providers/provider-distribution-guide.html#for-releases","development/providers/provider-distribution-guide.html#for-production","development/providers/provider-distribution-guide.html#for-cicd","development/providers/provider-distribution-guide.html#migration-path","development/providers/provider-distribution-guide.html#from-module-loader-to-packs","development/providers/provider-distribution-guide.html#from-packs-back-to-module-loader","development/providers/provider-distribution-guide.html#configuration","development/providers/provider-distribution-guide.html#environment-variables","development/providers/provider-distribution-guide.html#config-files","development/providers/provider-distribution-guide.html#troubleshooting","development/providers/provider-distribution-guide.html#module-loader-issues","development/providers/provider-distribution-guide.html#provider-pack-issues","development/providers/provider-distribution-guide.html#conclusion","development/providers/provider-distribution-guide.html#additional-resources","development/providers/provider-comparison.html#provider-comparison-matrix","development/providers/provider-comparison.html#feature-comparison","development/providers/provider-comparison.html#compute","development/providers/provider-comparison.html#block-storage","development/providers/provider-comparison.html#object-storage","development/providers/provider-comparison.html#load-balancing","development/providers/provider-comparison.html#managed-databases","development/providers/provider-comparison.html#kubernetes","development/providers/provider-comparison.html#cdnedge","development/providers/provider-comparison.html#dns","development/providers/provider-comparison.html#pricing-comparison","development/providers/provider-comparison.html#compute-pricing-monthly","development/providers/provider-comparison.html#storage-pricing-monthly","development/providers/provider-comparison.html#data-transfer-pricing","development/providers/provider-comparison.html#total-cost-of-ownership-tco-examples","development/providers/provider-comparison.html#regional-availability","development/providers/provider-comparison.html#hetzner-regions","development/providers/provider-comparison.html#upcloud-regions","development/providers/provider-comparison.html#aws-regions-selection","development/providers/provider-comparison.html#digitalocean-regions","development/providers/provider-comparison.html#regional-coverage-summary","development/providers/provider-comparison.html#compliance-and-certifications","development/providers/provider-comparison.html#security-standards","development/providers/provider-comparison.html#industry-specific-compliance","development/providers/provider-comparison.html#data-residency-support","development/providers/provider-comparison.html#use-case-recommendations","development/providers/provider-comparison.html#1-cost-sensitive-startups","development/providers/provider-comparison.html#2-enterprise-production","development/providers/provider-comparison.html#3-high-performance-computing","development/providers/provider-comparison.html#4-multi-region-global-application","development/providers/provider-comparison.html#5-database-heavy-applications","development/providers/provider-comparison.html#6-web-applications","development/providers/provider-comparison.html#provider-strength-matrix","development/providers/provider-comparison.html#performance-","development/providers/provider-comparison.html#cost-","development/providers/provider-comparison.html#ease-of-use-","development/providers/provider-comparison.html#enterprise-features-","development/providers/provider-comparison.html#decision-matrix","development/providers/provider-comparison.html#conclusion","development/taskservs/taskserv-quick-guide.html#taskserv-quick-guide","development/taskservs/taskserv-quick-guide.html#-quick-start","development/taskservs/taskserv-quick-guide.html#create-a-new-taskserv-interactive","development/taskservs/taskserv-quick-guide.html#create-a-new-taskserv-direct","development/taskservs/taskserv-quick-guide.html#-5-minute-setup","development/taskservs/taskserv-quick-guide.html#1-choose-your-method","development/taskservs/taskserv-quick-guide.html#2-basic-structure","development/taskservs/taskserv-quick-guide.html#3-essential-files","development/taskservs/taskserv-quick-guide.html#4-test-your-taskserv","development/taskservs/taskserv-quick-guide.html#-common-patterns","development/taskservs/taskserv-quick-guide.html#web-service","development/taskservs/taskserv-quick-guide.html#database-service","development/taskservs/taskserv-quick-guide.html#background-worker","development/taskservs/taskserv-quick-guide.html#-cli-shortcuts","development/taskservs/taskserv-quick-guide.html#discovery","development/taskservs/taskserv-quick-guide.html#development","development/taskservs/taskserv-quick-guide.html#testing","development/taskservs/taskserv-quick-guide.html#-categories-reference","development/taskservs/taskserv-quick-guide.html#-troubleshooting","development/taskservs/taskserv-quick-guide.html#taskserv-not-found","development/taskservs/taskserv-quick-guide.html#layer-resolution-issues","development/taskservs/taskserv-quick-guide.html#nickel-syntax-errors","development/taskservs/taskserv-quick-guide.html#-pro-tips","development/taskservs/taskserv-quick-guide.html#-next-steps","development/taskservs/taskserv-categorization.html#taskserv-categorization-plan","development/taskservs/taskserv-categorization.html#categories-and-taskservs-38-total","development/taskservs/taskserv-categorization.html#kubernetes--1","development/taskservs/taskserv-categorization.html#networking--6","development/taskservs/taskserv-categorization.html#container-runtime--6","development/taskservs/taskserv-categorization.html#storage--4","development/taskservs/taskserv-categorization.html#databases--2","development/taskservs/taskserv-categorization.html#development--6","development/taskservs/taskserv-categorization.html#infrastructure--6","development/taskservs/taskserv-categorization.html#misc--1","development/taskservs/taskserv-categorization.html#keep-in-root--6","operations/deployment-guide.html#platform-deployment-guide","operations/deployment-guide.html#table-of-contents","operations/deployment-guide.html#prerequisites","operations/deployment-guide.html#required-software","operations/deployment-guide.html#required-tools-mode-dependent","operations/deployment-guide.html#system-requirements","operations/deployment-guide.html#directory-structure","operations/deployment-guide.html#deployment-modes","operations/deployment-guide.html#mode-selection-matrix","operations/deployment-guide.html#mode-characteristics","operations/deployment-guide.html#quick-start","operations/deployment-guide.html#1-clone-repository","operations/deployment-guide.html#2-select-deployment-mode","operations/deployment-guide.html#3-set-environment-variables","operations/deployment-guide.html#4-build-all-services","operations/deployment-guide.html#5-start-services-order-matters","operations/deployment-guide.html#6-verify-services","operations/deployment-guide.html#solo-mode-deployment","operations/deployment-guide.html#step-1-verify-solo-configuration-files","operations/deployment-guide.html#step-2-set-solo-environment-variables","operations/deployment-guide.html#step-3-build-services","operations/deployment-guide.html#step-4-create-local-data-directories","operations/deployment-guide.html#step-5-start-services","operations/deployment-guide.html#step-6-test-services","operations/deployment-guide.html#step-7-verify-persistence-optional","operations/deployment-guide.html#cleanup","operations/deployment-guide.html#multiuser-mode-deployment","operations/deployment-guide.html#prerequisites-1","operations/deployment-guide.html#step-1-deploy-surrealdb","operations/deployment-guide.html#step-2-verify-surrealdb-connectivity","operations/deployment-guide.html#step-3-set-multiuser-environment-variables","operations/deployment-guide.html#step-4-build-services","operations/deployment-guide.html#step-5-create-shared-data-directories","operations/deployment-guide.html#step-6-start-services-on-multiple-machines","operations/deployment-guide.html#step-7-test-multi-machine-setup","operations/deployment-guide.html#step-8-enable-user-access","operations/deployment-guide.html#monitoring-multiuser-deployment","operations/deployment-guide.html#cicd-mode-deployment","operations/deployment-guide.html#step-1-understand-ephemeral-nature","operations/deployment-guide.html#step-2-set-cicd-environment-variables","operations/deployment-guide.html#step-3-containerize-services-optional","operations/deployment-guide.html#step-4-github-actions-example","operations/deployment-guide.html#step-5-run-cicd-tests","operations/deployment-guide.html#enterprise-mode-deployment","operations/deployment-guide.html#prerequisites-2","operations/deployment-guide.html#step-1-deploy-infrastructure","operations/deployment-guide.html#step-2-set-enterprise-environment-variables","operations/deployment-guide.html#step-3-deploy-services-across-cluster","operations/deployment-guide.html#step-4-monitor-cluster-health","operations/deployment-guide.html#step-5-enable-monitoring--alerting","operations/deployment-guide.html#step-6-backup--recovery","operations/deployment-guide.html#service-management","operations/deployment-guide.html#starting-services","operations/deployment-guide.html#stopping-services","operations/deployment-guide.html#restarting-services","operations/deployment-guide.html#checking-service-status","operations/deployment-guide.html#health-checks--monitoring","operations/deployment-guide.html#manual-health-verification","operations/deployment-guide.html#service-integration-tests","operations/deployment-guide.html#monitoring-dashboards","operations/deployment-guide.html#alerting","operations/deployment-guide.html#troubleshooting","operations/deployment-guide.html#service-wont-start","operations/deployment-guide.html#configuration-loading-fails","operations/deployment-guide.html#database-connection-issues","operations/deployment-guide.html#service-crashes-on-startup","operations/deployment-guide.html#high-memory-usage","operations/deployment-guide.html#networkdns-issues","operations/deployment-guide.html#data-persistence-issues","operations/deployment-guide.html#debugging-checklist","operations/deployment-guide.html#configuration-updates","operations/deployment-guide.html#updating-service-configuration","operations/deployment-guide.html#mode-migration","operations/deployment-guide.html#production-checklist","operations/deployment-guide.html#getting-help","operations/deployment-guide.html#community-resources","operations/deployment-guide.html#internal-support","operations/deployment-guide.html#useful-commands-reference","operations/service-management-guide.html#service-management-guide","operations/service-management-guide.html#table-of-contents","operations/service-management-guide.html#overview","operations/service-management-guide.html#key-features","operations/service-management-guide.html#supported-services","operations/service-management-guide.html#service-architecture","operations/service-management-guide.html#system-architecture","operations/service-management-guide.html#component-responsibilities","operations/service-management-guide.html#service-registry","operations/service-management-guide.html#configuration-file","operations/service-management-guide.html#service-definition-structure","operations/service-management-guide.html#example-orchestrator-service","operations/service-management-guide.html#platform-commands","operations/service-management-guide.html#start-platform","operations/service-management-guide.html#stop-platform","operations/service-management-guide.html#restart-platform","operations/service-management-guide.html#platform-status","operations/service-management-guide.html#platform-health","operations/service-management-guide.html#platform-logs","operations/service-management-guide.html#service-commands","operations/service-management-guide.html#list-services","operations/service-management-guide.html#service-status","operations/service-management-guide.html#start-service","operations/service-management-guide.html#stop-service","operations/service-management-guide.html#restart-service","operations/service-management-guide.html#service-health","operations/service-management-guide.html#service-logs","operations/service-management-guide.html#check-required-services","operations/service-management-guide.html#service-dependencies","operations/service-management-guide.html#validate-services","operations/service-management-guide.html#readiness-report","operations/service-management-guide.html#monitor-service","operations/service-management-guide.html#deployment-modes","operations/service-management-guide.html#binary-deployment","operations/service-management-guide.html#docker-deployment","operations/service-management-guide.html#docker-compose-deployment","operations/service-management-guide.html#kubernetes-deployment","operations/service-management-guide.html#remote-deployment","operations/service-management-guide.html#health-monitoring","operations/service-management-guide.html#health-check-types","operations/service-management-guide.html#health-check-configuration","operations/service-management-guide.html#continuous-monitoring","operations/service-management-guide.html#dependency-management","operations/service-management-guide.html#dependency-graph","operations/service-management-guide.html#startup-order","operations/service-management-guide.html#dependency-resolution","operations/service-management-guide.html#conflicts","operations/service-management-guide.html#reverse-dependencies","operations/service-management-guide.html#safe-stop","operations/service-management-guide.html#pre-flight-checks","operations/service-management-guide.html#purpose","operations/service-management-guide.html#check-types","operations/service-management-guide.html#automatic-checks","operations/service-management-guide.html#manual-validation","operations/service-management-guide.html#auto-start","operations/service-management-guide.html#troubleshooting","operations/service-management-guide.html#service-wont-start","operations/service-management-guide.html#service-health-check-failing","operations/service-management-guide.html#dependency-issues","operations/service-management-guide.html#circular-dependencies","operations/service-management-guide.html#pid-file-stale","operations/service-management-guide.html#port-conflicts","operations/service-management-guide.html#docker-issues","operations/service-management-guide.html#service-logs-1","operations/service-management-guide.html#advanced-usage","operations/service-management-guide.html#custom-service-registration","operations/service-management-guide.html#integration-with-workflows","operations/service-management-guide.html#cicd-integration","operations/service-management-guide.html#monitoring-integration","operations/service-management-guide.html#related-documentation","operations/service-management-guide.html#quick-reference","operations/service-management-guide.html#platform-commands-manage-all-services","operations/service-management-guide.html#service-commands-individual-services","operations/service-management-guide.html#dependency--validation","operations/service-management-guide.html#registered-services","operations/service-management-guide.html#docker-compose","operations/service-management-guide.html#service-state-directories","operations/service-management-guide.html#health-check-endpoints","operations/service-management-guide.html#common-workflows","operations/service-management-guide.html#troubleshooting-1","operations/service-management-guide.html#integration-with-operations","operations/service-management-guide.html#advanced-usage-1","operations/service-management-guide.html#key-files","operations/service-management-guide.html#getting-help","operations/monitoring-alerting-setup.html#service-monitoring--alerting-setup","operations/monitoring-alerting-setup.html#overview","operations/monitoring-alerting-setup.html#architecture","operations/monitoring-alerting-setup.html#prerequisites","operations/monitoring-alerting-setup.html#software-requirements","operations/monitoring-alerting-setup.html#system-requirements","operations/monitoring-alerting-setup.html#ports","operations/monitoring-alerting-setup.html#service-metrics-endpoints","operations/monitoring-alerting-setup.html#prometheus-configuration","operations/monitoring-alerting-setup.html#1-create-prometheus-config","operations/monitoring-alerting-setup.html#2-start-prometheus","operations/monitoring-alerting-setup.html#3-verify-prometheus","operations/monitoring-alerting-setup.html#alert-rules-configuration","operations/monitoring-alerting-setup.html#1-create-alert-rules","operations/monitoring-alerting-setup.html#2-validate-alert-rules","operations/monitoring-alerting-setup.html#alertmanager-configuration","operations/monitoring-alerting-setup.html#1-create-alertmanager-config","operations/monitoring-alerting-setup.html#2-start-alertmanager","operations/monitoring-alerting-setup.html#3-verify-alertmanager","operations/monitoring-alerting-setup.html#grafana-dashboards","operations/monitoring-alerting-setup.html#1-install-grafana","operations/monitoring-alerting-setup.html#2-add-prometheus-data-source","operations/monitoring-alerting-setup.html#3-create-platform-overview-dashboard","operations/monitoring-alerting-setup.html#4-import-dashboard-via-api","operations/monitoring-alerting-setup.html#health-check-monitoring","operations/monitoring-alerting-setup.html#1-service-health-check-script","operations/monitoring-alerting-setup.html#2-liveness-probe-configuration","operations/monitoring-alerting-setup.html#log-aggregation-elk-stack","operations/monitoring-alerting-setup.html#1-elasticsearch-setup","operations/monitoring-alerting-setup.html#2-filebeat-configuration","operations/monitoring-alerting-setup.html#3-kibana-dashboard","operations/monitoring-alerting-setup.html#monitoring-dashboard-queries","operations/monitoring-alerting-setup.html#common-prometheus-queries","operations/monitoring-alerting-setup.html#alert-testing","operations/monitoring-alerting-setup.html#1-test-alert-firing","operations/monitoring-alerting-setup.html#2-stop-service-to-trigger-alert","operations/monitoring-alerting-setup.html#3-generate-load-to-test-error-alerts","operations/monitoring-alerting-setup.html#backup--retention-policies","operations/monitoring-alerting-setup.html#1-prometheus-data-backup","operations/monitoring-alerting-setup.html#2-prometheus-retention-configuration","operations/monitoring-alerting-setup.html#maintenance--troubleshooting","operations/monitoring-alerting-setup.html#common-issues","operations/monitoring-alerting-setup.html#production-deployment-checklist","operations/monitoring-alerting-setup.html#quick-commands-reference","operations/monitoring-alerting-setup.html#documentation--runbooks","operations/monitoring-alerting-setup.html#sample-runbook-service-down","operations/monitoring-alerting-setup.html#resources","operations/coredns-guide.html#coredns-integration-guide","operations/coredns-guide.html#table-of-contents","operations/coredns-guide.html#overview","operations/coredns-guide.html#key-features","operations/coredns-guide.html#installation","operations/coredns-guide.html#prerequisites","operations/coredns-guide.html#install-coredns-binary","operations/coredns-guide.html#verify-installation","operations/coredns-guide.html#configuration","operations/coredns-guide.html#nickel-configuration-schema","operations/coredns-guide.html#configuration-modes","operations/coredns-guide.html#cli-commands","operations/coredns-guide.html#service-management","operations/coredns-guide.html#health--monitoring","operations/coredns-guide.html#zone-management","operations/coredns-guide.html#list-zones","operations/coredns-guide.html#create-zone","operations/coredns-guide.html#show-zone-details","operations/coredns-guide.html#delete-zone","operations/coredns-guide.html#record-management","operations/coredns-guide.html#add-records","operations/coredns-guide.html#remove-records","operations/coredns-guide.html#update-records","operations/coredns-guide.html#list-records","operations/coredns-guide.html#docker-deployment","operations/coredns-guide.html#prerequisites-1","operations/coredns-guide.html#start-coredns-in-docker","operations/coredns-guide.html#manage-docker-container","operations/coredns-guide.html#update-docker-image","operations/coredns-guide.html#remove-container","operations/coredns-guide.html#view-configuration","operations/coredns-guide.html#integration","operations/coredns-guide.html#automatic-server-registration","operations/coredns-guide.html#manual-registration","operations/coredns-guide.html#sync-infrastructure-with-dns","operations/coredns-guide.html#service-registration","operations/coredns-guide.html#query-dns","operations/coredns-guide.html#using-cli","operations/coredns-guide.html#using-dig","operations/coredns-guide.html#troubleshooting","operations/coredns-guide.html#coredns-not-starting","operations/coredns-guide.html#dns-queries-not-working","operations/coredns-guide.html#zone-file-validation-errors","operations/coredns-guide.html#docker-container-issues","operations/coredns-guide.html#dynamic-updates-not-working","operations/coredns-guide.html#advanced-topics","operations/coredns-guide.html#custom-corefile-plugins","operations/coredns-guide.html#backup-and-restore","operations/coredns-guide.html#zone-file-backup","operations/coredns-guide.html#metrics-and-monitoring","operations/coredns-guide.html#multi-zone-setup","operations/coredns-guide.html#split-horizon-dns","operations/coredns-guide.html#configuration-reference","operations/coredns-guide.html#corednsconfig-fields","operations/coredns-guide.html#localcoredns-fields","operations/coredns-guide.html#dynamicdns-fields","operations/coredns-guide.html#examples","operations/coredns-guide.html#complete-setup-example","operations/coredns-guide.html#docker-deployment-example","operations/coredns-guide.html#best-practices","operations/coredns-guide.html#see-also","operations/coredns-guide.html#quick-reference","operations/coredns-guide.html#installation-1","operations/coredns-guide.html#service-management-1","operations/coredns-guide.html#zone-management-1","operations/coredns-guide.html#record-management-1","operations/coredns-guide.html#dns-queries","operations/coredns-guide.html#configuration-1","operations/coredns-guide.html#docker-deployment-1","operations/coredns-guide.html#common-workflows","operations/coredns-guide.html#troubleshooting-1","operations/coredns-guide.html#file-locations","operations/coredns-guide.html#configuration-example","operations/coredns-guide.html#environment-variables","operations/coredns-guide.html#default-values","operations/coredns-guide.html#see-also-1","operations/production-readiness-checklist.html#production-readiness-checklist","operations/production-readiness-checklist.html#executive-summary","operations/production-readiness-checklist.html#quality-metrics","operations/production-readiness-checklist.html#pre-deployment-verification","operations/production-readiness-checklist.html#1-system-requirements-","operations/production-readiness-checklist.html#2-code-quality-","operations/production-readiness-checklist.html#3-testing-","operations/production-readiness-checklist.html#4-security-","operations/production-readiness-checklist.html#5-documentation-","operations/production-readiness-checklist.html#6-deployment-readiness-","operations/production-readiness-checklist.html#pre-production-checklist","operations/production-readiness-checklist.html#team-preparation","operations/production-readiness-checklist.html#infrastructure-preparation","operations/production-readiness-checklist.html#configuration-preparation","operations/production-readiness-checklist.html#testing-in-production-like-environment","operations/production-readiness-checklist.html#deployment-steps","operations/production-readiness-checklist.html#phase-1-installation-30-minutes","operations/production-readiness-checklist.html#phase-2-initial-configuration-15-minutes","operations/production-readiness-checklist.html#phase-3-workspace-setup-10-minutes","operations/production-readiness-checklist.html#phase-4-verification-10-minutes","operations/production-readiness-checklist.html#post-deployment-verification","operations/production-readiness-checklist.html#immediate-within-1-hour","operations/production-readiness-checklist.html#daily-first-week","operations/production-readiness-checklist.html#weekly-first-month","operations/production-readiness-checklist.html#ongoing-production","operations/production-readiness-checklist.html#troubleshooting-reference","operations/production-readiness-checklist.html#issue-setup-wizard-wont-start","operations/production-readiness-checklist.html#issue-configuration-validation-fails","operations/production-readiness-checklist.html#issue-health-check-shows-warnings","operations/production-readiness-checklist.html#issue-deployment-fails","operations/production-readiness-checklist.html#performance-baselines","operations/production-readiness-checklist.html#support-and-escalation","operations/production-readiness-checklist.html#level-1-support-team","operations/production-readiness-checklist.html#level-2-support-engineering","operations/production-readiness-checklist.html#level-3-support-development","operations/production-readiness-checklist.html#rollback-procedure","operations/production-readiness-checklist.html#success-criteria","operations/production-readiness-checklist.html#sign-off","operations/break-glass-training-guide.html#break-glass-emergency-access---training-guide","operations/break-glass-training-guide.html#-what-is-break-glass","operations/break-glass-training-guide.html#key-principles","operations/break-glass-training-guide.html#-table-of-contents","operations/break-glass-training-guide.html#when-to-use-break-glass","operations/break-glass-training-guide.html#-valid-emergency-scenarios","operations/break-glass-training-guide.html#criteria-checklist","operations/break-glass-training-guide.html#when-not-to-use","operations/break-glass-training-guide.html#-invalid-scenarios-do-not-use-break-glass","operations/break-glass-training-guide.html#consequences-of-misuse","operations/break-glass-training-guide.html#roles--responsibilities","operations/break-glass-training-guide.html#requester","operations/break-glass-training-guide.html#approvers","operations/break-glass-training-guide.html#security-team","operations/break-glass-training-guide.html#break-glass-workflow","operations/break-glass-training-guide.html#phase-1-request-5-minutes","operations/break-glass-training-guide.html#phase-2-approval-10-15-minutes","operations/break-glass-training-guide.html#phase-3-activation-1-2-minutes","operations/break-glass-training-guide.html#phase-4-usage-variable","operations/break-glass-training-guide.html#phase-5-revocation-immediate","operations/break-glass-training-guide.html#using-the-system","operations/break-glass-training-guide.html#cli-commands","operations/break-glass-training-guide.html#web-ui-control-center","operations/break-glass-training-guide.html#examples","operations/break-glass-training-guide.html#example-1-production-database-outage","operations/break-glass-training-guide.html#example-2-security-incident","operations/break-glass-training-guide.html#example-3-accidental-data-deletion","operations/break-glass-training-guide.html#auditing--compliance","operations/break-glass-training-guide.html#what-is-logged","operations/break-glass-training-guide.html#retention","operations/break-glass-training-guide.html#compliance-reports","operations/break-glass-training-guide.html#post-incident-review","operations/break-glass-training-guide.html#within-24-hours","operations/break-glass-training-guide.html#review-checklist","operations/break-glass-training-guide.html#output","operations/break-glass-training-guide.html#faq","operations/break-glass-training-guide.html#q-how-quickly-can-break-glass-be-activated","operations/break-glass-training-guide.html#q-can-i-use-break-glass-for-scheduled-maintenance","operations/break-glass-training-guide.html#q-what-if-i-cant-get-2-approvers","operations/break-glass-training-guide.html#q-can-approvers-be-from-the-same-team","operations/break-glass-training-guide.html#q-what-if-security-team-revokes-my-session","operations/break-glass-training-guide.html#q-can-i-extend-an-active-session","operations/break-glass-training-guide.html#q-what-happens-if-i-forget-to-revoke","operations/break-glass-training-guide.html#q-is-break-glass-monitored","operations/break-glass-training-guide.html#q-can-i-practice-break-glass","operations/break-glass-training-guide.html#emergency-contacts","operations/break-glass-training-guide.html#during-incident","operations/break-glass-training-guide.html#escalation-path","operations/break-glass-training-guide.html#communication-channels","operations/break-glass-training-guide.html#training-certification","operations/cedar-policies-production-guide.html#cedar-policies-production-guide","operations/cedar-policies-production-guide.html#table-of-contents","operations/cedar-policies-production-guide.html#introduction","operations/cedar-policies-production-guide.html#why-cedar","operations/cedar-policies-production-guide.html#cedar-policy-basics","operations/cedar-policies-production-guide.html#core-concepts","operations/cedar-policies-production-guide.html#entities","operations/cedar-policies-production-guide.html#actions","operations/cedar-policies-production-guide.html#production-policy-strategy","operations/cedar-policies-production-guide.html#security-levels","operations/cedar-policies-production-guide.html#policy-templates","operations/cedar-policies-production-guide.html#1-role-based-access-control-rbac","operations/cedar-policies-production-guide.html#2-team-based-policies","operations/cedar-policies-production-guide.html#3-time-based-restrictions","operations/cedar-policies-production-guide.html#4-ip-based-restrictions","operations/cedar-policies-production-guide.html#5-resource-specific-policies","operations/cedar-policies-production-guide.html#6-self-service-policies","operations/cedar-policies-production-guide.html#policy-development-workflow","operations/cedar-policies-production-guide.html#step-1-define-requirements","operations/cedar-policies-production-guide.html#step-2-write-policy","operations/cedar-policies-production-guide.html#step-3-validate-syntax","operations/cedar-policies-production-guide.html#step-4-test-in-development","operations/cedar-policies-production-guide.html#step-5-review--approve","operations/cedar-policies-production-guide.html#step-6-deploy-to-production","operations/cedar-policies-production-guide.html#testing-policies","operations/cedar-policies-production-guide.html#unit-testing","operations/cedar-policies-production-guide.html#integration-testing","operations/cedar-policies-production-guide.html#load-testing","operations/cedar-policies-production-guide.html#deployment","operations/cedar-policies-production-guide.html#development--staging--production","operations/cedar-policies-production-guide.html#rollback-procedure","operations/cedar-policies-production-guide.html#monitoring--auditing","operations/cedar-policies-production-guide.html#monitor-authorization-decisions","operations/cedar-policies-production-guide.html#alert-on-suspicious-activity","operations/cedar-policies-production-guide.html#policy-usage-statistics","operations/cedar-policies-production-guide.html#troubleshooting","operations/cedar-policies-production-guide.html#policy-not-applying","operations/cedar-policies-production-guide.html#unexpected-denials","operations/cedar-policies-production-guide.html#policy-conflicts","operations/cedar-policies-production-guide.html#best-practices","operations/cedar-policies-production-guide.html#1-start-restrictive-loosen-gradually","operations/cedar-policies-production-guide.html#2-use-annotations","operations/cedar-policies-production-guide.html#3-principle-of-least-privilege","operations/cedar-policies-production-guide.html#4-document-context-requirements","operations/cedar-policies-production-guide.html#5-separate-policies-by-concern","operations/cedar-policies-production-guide.html#6-version-control","operations/cedar-policies-production-guide.html#7-regular-policy-audits","operations/cedar-policies-production-guide.html#quick-reference","operations/cedar-policies-production-guide.html#common-policy-patterns","operations/cedar-policies-production-guide.html#useful-commands","operations/cedar-policies-production-guide.html#support","operations/mfa-admin-setup-guide.html#mfa-admin-setup-guide---production-operations-manual","operations/mfa-admin-setup-guide.html#-table-of-contents","operations/mfa-admin-setup-guide.html#overview","operations/mfa-admin-setup-guide.html#what-is-mfa","operations/mfa-admin-setup-guide.html#why-mfa-for-admins","operations/mfa-admin-setup-guide.html#mfa-methods-supported","operations/mfa-admin-setup-guide.html#mfa-requirements","operations/mfa-admin-setup-guide.html#mandatory-mfa-enforcement","operations/mfa-admin-setup-guide.html#grace-period","operations/mfa-admin-setup-guide.html#timeline-for-rollout","operations/mfa-admin-setup-guide.html#admin-enrollment-process","operations/mfa-admin-setup-guide.html#step-1-initial-login-password-only","operations/mfa-admin-setup-guide.html#step-2-choose-mfa-method","operations/mfa-admin-setup-guide.html#step-3-enroll-mfa-device","operations/mfa-admin-setup-guide.html#step-4-verify-and-activate","operations/mfa-admin-setup-guide.html#totp-setup-authenticator-apps","operations/mfa-admin-setup-guide.html#supported-authenticator-apps","operations/mfa-admin-setup-guide.html#step-by-step-totp-enrollment","operations/mfa-admin-setup-guide.html#webauthn-setup-hardware-keys","operations/mfa-admin-setup-guide.html#supported-webauthn-devices","operations/mfa-admin-setup-guide.html#step-by-step-webauthn-enrollment","operations/mfa-admin-setup-guide.html#enforcing-mfa-via-cedar-policies","operations/mfa-admin-setup-guide.html#production-mfa-enforcement-policy","operations/mfa-admin-setup-guide.html#developmentstaging-policies-mfa-recommended-not-required","operations/mfa-admin-setup-guide.html#policy-deployment","operations/mfa-admin-setup-guide.html#testing-mfa-enforcement","operations/mfa-admin-setup-guide.html#backup-codes-management","operations/mfa-admin-setup-guide.html#generating-backup-codes","operations/mfa-admin-setup-guide.html#using-backup-codes","operations/mfa-admin-setup-guide.html#backup-code-storage-best-practices","operations/mfa-admin-setup-guide.html#recovery-procedures","operations/mfa-admin-setup-guide.html#scenario-1-lost-authenticator-device-totp","operations/mfa-admin-setup-guide.html#scenario-2-lost-webauthn-key-yubikey","operations/mfa-admin-setup-guide.html#scenario-3-all-mfa-methods-lost","operations/mfa-admin-setup-guide.html#scenario-4-backup-codes-depleted","operations/mfa-admin-setup-guide.html#troubleshooting","operations/mfa-admin-setup-guide.html#issue-1-invalid-totp-code-error","operations/mfa-admin-setup-guide.html#issue-2-webauthn-not-detected","operations/mfa-admin-setup-guide.html#issue-3-mfa-required-despite-verification","operations/mfa-admin-setup-guide.html#issue-4-qr-code-not-displaying","operations/mfa-admin-setup-guide.html#issue-5-backup-code-not-working","operations/mfa-admin-setup-guide.html#best-practices","operations/mfa-admin-setup-guide.html#for-individual-admins","operations/mfa-admin-setup-guide.html#for-security-teams","operations/mfa-admin-setup-guide.html#for-platform-admins","operations/mfa-admin-setup-guide.html#audit-and-compliance","operations/mfa-admin-setup-guide.html#mfa-audit-logging","operations/mfa-admin-setup-guide.html#compliance-reports","operations/mfa-admin-setup-guide.html#mfa-metrics-dashboard","operations/mfa-admin-setup-guide.html#quick-reference-card","operations/mfa-admin-setup-guide.html#daily-admin-operations","operations/mfa-admin-setup-guide.html#mfa-management","operations/mfa-admin-setup-guide.html#emergency-procedures","operations/mfa-admin-setup-guide.html#summary-checklist","operations/mfa-admin-setup-guide.html#for-new-admins","operations/mfa-admin-setup-guide.html#for-security-team","operations/mfa-admin-setup-guide.html#for-platform-team","operations/mfa-admin-setup-guide.html#support-and-resources","operations/mfa-admin-setup-guide.html#documentation","operations/mfa-admin-setup-guide.html#configuration-files","operations/mfa-admin-setup-guide.html#cli-help","operations/mfa-admin-setup-guide.html#contact","operations/orchestrator.html#provisioning-orchestrator","operations/orchestrator.html#architecture","operations/orchestrator.html#key-features","operations/orchestrator.html#quick-start","operations/orchestrator.html#build-and-run","operations/orchestrator.html#submit-workflow","operations/orchestrator.html#api-endpoints","operations/orchestrator.html#core-endpoints","operations/orchestrator.html#workflow-endpoints","operations/orchestrator.html#test-environment-endpoints","operations/orchestrator.html#test-environment-service","operations/orchestrator.html#test-environment-types","operations/orchestrator.html#nushell-cli-integration","operations/orchestrator.html#topology-templates","operations/orchestrator.html#storage-backends","operations/orchestrator.html#related-documentation","operations/orchestrator-system.html#hybrid-orchestrator-architecture-v300","operations/orchestrator-system.html#-orchestrator-implementation-completed-2025-09-25","operations/orchestrator-system.html#architecture-overview","operations/orchestrator-system.html#orchestrator-management","operations/orchestrator-system.html#workflow-system","operations/orchestrator-system.html#server-workflows","operations/orchestrator-system.html#taskserv-workflows","operations/orchestrator-system.html#cluster-workflows","operations/orchestrator-system.html#workflow-management","operations/orchestrator-system.html#rest-api-endpoints","operations/control-center.html#control-center---cedar-policy-engine","operations/control-center.html#key-features","operations/control-center.html#cedar-policy-engine","operations/control-center.html#security--authentication","operations/control-center.html#compliance-framework","operations/control-center.html#anomaly-detection","operations/control-center.html#storage--persistence","operations/control-center.html#quick-start","operations/control-center.html#installation","operations/control-center.html#configuration","operations/control-center.html#start-server","operations/control-center.html#test-policy-evaluation","operations/control-center.html#policy-examples","operations/control-center.html#multi-factor-authentication-policy","operations/control-center.html#production-approval-policy","operations/control-center.html#geographic-restrictions","operations/control-center.html#cli-commands","operations/control-center.html#policy-management","operations/control-center.html#compliance-checking","operations/control-center.html#api-endpoints","operations/control-center.html#policy-evaluation","operations/control-center.html#policy-versions","operations/control-center.html#compliance","operations/control-center.html#anomaly-detection-1","operations/control-center.html#architecture","operations/control-center.html#core-components","operations/control-center.html#configuration-driven-design","operations/control-center.html#deployment","operations/control-center.html#docker","operations/control-center.html#kubernetes","operations/control-center.html#related-documentation","operations/installer.html#provisioning-platform-installer","operations/installer.html#features","operations/installer.html#installation","operations/installer.html#usage","operations/installer.html#interactive-tui-default","operations/installer.html#headless-mode-automation","operations/installer.html#configuration-generation","operations/installer.html#deployment-platforms","operations/installer.html#docker-compose","operations/installer.html#orbstack-macos","operations/installer.html#podman-rootless","operations/installer.html#kubernetes","operations/installer.html#deployment-modes","operations/installer.html#solo-mode-development","operations/installer.html#multi-user-mode-team","operations/installer.html#cicd-mode-automation","operations/installer.html#enterprise-mode-production","operations/installer.html#cli-options","operations/installer.html#cicd-integration","operations/installer.html#gitlab-ci","operations/installer.html#github-actions","operations/installer.html#nushell-scripts-fallback","operations/installer.html#related-documentation","operations/installer-system.html#provisioning-platform-installer-v350","operations/installer-system.html#-flexible-installation-and-configuration-system","operations/installer-system.html#installation-modes","operations/installer-system.html#1--interactive-tui-mode","operations/installer-system.html#2--headless-mode","operations/installer-system.html#3--unattended-mode","operations/installer-system.html#deployment-modes","operations/installer-system.html#configuration-system","operations/installer-system.html#toml-configuration","operations/installer-system.html#configuration-loading-priority","operations/installer-system.html#mcp-integration","operations/installer-system.html#deployment-automation","operations/installer-system.html#nushell-scripts","operations/installer-system.html#self-installation","operations/installer-system.html#command-reference","operations/installer-system.html#integration-examples","operations/installer-system.html#gitops-workflow","operations/installer-system.html#terraform-integration","operations/installer-system.html#ansible-integration","operations/installer-system.html#configuration-templates","operations/installer-system.html#documentation","operations/installer-system.html#help-and-support","operations/installer-system.html#nushell-fallback","operations/provisioning-server.html#provisioning-api-server","operations/provisioning-server.html#features","operations/provisioning-server.html#architecture","operations/provisioning-server.html#installation","operations/provisioning-server.html#configuration","operations/provisioning-server.html#usage","operations/provisioning-server.html#starting-the-server","operations/provisioning-server.html#authentication","operations/provisioning-server.html#api-endpoints","operations/provisioning-server.html#authentication-1","operations/provisioning-server.html#servers","operations/provisioning-server.html#taskservs","operations/provisioning-server.html#workflows","operations/provisioning-server.html#operations","operations/provisioning-server.html#system","operations/provisioning-server.html#rbac-roles","operations/provisioning-server.html#admin-role","operations/provisioning-server.html#operator-role","operations/provisioning-server.html#developer-role","operations/provisioning-server.html#viewer-role","operations/provisioning-server.html#security-best-practices","operations/provisioning-server.html#cicd-integration","operations/provisioning-server.html#github-actions","operations/provisioning-server.html#related-documentation","infrastructure/infrastructure-management.html#infrastructure-management-guide","infrastructure/infrastructure-management.html#what-youll-learn","infrastructure/infrastructure-management.html#infrastructure-concepts","infrastructure/infrastructure-management.html#infrastructure-components","infrastructure/infrastructure-management.html#infrastructure-lifecycle","infrastructure/infrastructure-management.html#server-management","infrastructure/infrastructure-management.html#understanding-server-configuration","infrastructure/infrastructure-management.html#server-lifecycle-commands","infrastructure/infrastructure-management.html#task-service-management","infrastructure/infrastructure-management.html#understanding-task-services","infrastructure/infrastructure-management.html#task-service-configuration","infrastructure/infrastructure-management.html#task-service-commands","infrastructure/infrastructure-management.html#version-management","infrastructure/infrastructure-management.html#cluster-management","infrastructure/infrastructure-management.html#understanding-clusters","infrastructure/infrastructure-management.html#cluster-commands","infrastructure/infrastructure-management.html#network-management","infrastructure/infrastructure-management.html#network-configuration","infrastructure/infrastructure-management.html#network-commands","infrastructure/infrastructure-management.html#storage-management","infrastructure/infrastructure-management.html#storage-configuration","infrastructure/infrastructure-management.html#storage-commands","infrastructure/infrastructure-management.html#monitoring-and-observability","infrastructure/infrastructure-management.html#monitoring-setup","infrastructure/infrastructure-management.html#health-checks","infrastructure/infrastructure-management.html#metrics-and-alerting","infrastructure/infrastructure-management.html#cost-management","infrastructure/infrastructure-management.html#cost-monitoring","infrastructure/infrastructure-management.html#cost-optimization","infrastructure/infrastructure-management.html#scaling-strategies","infrastructure/infrastructure-management.html#manual-scaling","infrastructure/infrastructure-management.html#auto-scaling-configuration","infrastructure/infrastructure-management.html#disaster-recovery","infrastructure/infrastructure-management.html#backup-strategies","infrastructure/infrastructure-management.html#recovery-procedures","infrastructure/infrastructure-management.html#advanced-infrastructure-patterns","infrastructure/infrastructure-management.html#multi-region-deployment","infrastructure/infrastructure-management.html#blue-green-deployment","infrastructure/infrastructure-management.html#canary-deployment","infrastructure/infrastructure-management.html#troubleshooting-infrastructure","infrastructure/infrastructure-management.html#common-issues","infrastructure/infrastructure-management.html#performance-optimization","infrastructure/infrastructure-management.html#testing-infrastructure","infrastructure/infrastructure-management.html#why-test-infrastructure","infrastructure/infrastructure-management.html#test-environment-types","infrastructure/infrastructure-management.html#managing-test-environments","infrastructure/infrastructure-management.html#available-topology-templates","infrastructure/infrastructure-management.html#test-environment-workflow","infrastructure/infrastructure-management.html#cicd-integration","infrastructure/infrastructure-management.html#prerequisites","infrastructure/infrastructure-management.html#advanced-testing","infrastructure/infrastructure-management.html#documentation","infrastructure/infrastructure-management.html#best-practices","infrastructure/infrastructure-management.html#1-infrastructure-design","infrastructure/infrastructure-management.html#2-operational-excellence","infrastructure/infrastructure-management.html#3-security","infrastructure/infrastructure-management.html#4-cost-optimization","infrastructure/infrastructure-management.html#next-steps","infrastructure/infrastructure-from-code-guide.html#infrastructure-from-code-iac-guide","infrastructure/infrastructure-from-code-guide.html#overview","infrastructure/infrastructure-from-code-guide.html#quick-start","infrastructure/infrastructure-from-code-guide.html#1-detect-technologies-in-your-project","infrastructure/infrastructure-from-code-guide.html#2-analyze-infrastructure-gaps","infrastructure/infrastructure-from-code-guide.html#3-run-full-workflow","infrastructure/infrastructure-from-code-guide.html#command-reference","infrastructure/infrastructure-from-code-guide.html#detect","infrastructure/infrastructure-from-code-guide.html#complete","infrastructure/infrastructure-from-code-guide.html#ifc-workflow","infrastructure/infrastructure-from-code-guide.html#organization-specific-inference-rules","infrastructure/infrastructure-from-code-guide.html#understanding-inference-rules","infrastructure/infrastructure-from-code-guide.html#creating-custom-rules","infrastructure/infrastructure-from-code-guide.html#default-rules","infrastructure/infrastructure-from-code-guide.html#output-formats","infrastructure/infrastructure-from-code-guide.html#text-output-default","infrastructure/infrastructure-from-code-guide.html#json-output","infrastructure/infrastructure-from-code-guide.html#yaml-output","infrastructure/infrastructure-from-code-guide.html#practical-examples","infrastructure/infrastructure-from-code-guide.html#example-1-nodejs--postgresql-project","infrastructure/infrastructure-from-code-guide.html#example-2-python-django-project","infrastructure/infrastructure-from-code-guide.html#example-3-microservices-architecture","infrastructure/infrastructure-from-code-guide.html#integration-with-automation","infrastructure/infrastructure-from-code-guide.html#cicd-pipeline-example","infrastructure/infrastructure-from-code-guide.html#configuration-as-code-integration","infrastructure/infrastructure-from-code-guide.html#troubleshooting","infrastructure/infrastructure-from-code-guide.html#detector-binary-not-found","infrastructure/infrastructure-from-code-guide.html#no-technologies-detected","infrastructure/infrastructure-from-code-guide.html#organization-rules-not-being-applied","infrastructure/infrastructure-from-code-guide.html#advanced-usage","infrastructure/infrastructure-from-code-guide.html#custom-rule-template","infrastructure/infrastructure-from-code-guide.html#validate-rule-files","infrastructure/infrastructure-from-code-guide.html#export-rules-for-integration","infrastructure/infrastructure-from-code-guide.html#best-practices","infrastructure/infrastructure-from-code-guide.html#related-commands","infrastructure/infrastructure-from-code-guide.html#support-and-documentation","infrastructure/infrastructure-from-code-guide.html#quick-reference","infrastructure/infrastructure-from-code-guide.html#3-step-workflow","infrastructure/infrastructure-from-code-guide.html#common-commands","infrastructure/infrastructure-from-code-guide.html#output-formats-1","infrastructure/infrastructure-from-code-guide.html#organization-rules","infrastructure/infrastructure-from-code-guide.html#example-nodejs--postgresql","infrastructure/infrastructure-from-code-guide.html#cicd-integration","infrastructure/infrastructure-from-code-guide.html#json-output-examples","infrastructure/infrastructure-from-code-guide.html#flag-reference","infrastructure/infrastructure-from-code-guide.html#troubleshooting-1","infrastructure/infrastructure-from-code-guide.html#environment-variables","infrastructure/infrastructure-from-code-guide.html#default-inference-rules","infrastructure/infrastructure-from-code-guide.html#useful-aliases","infrastructure/infrastructure-from-code-guide.html#tips--tricks","infrastructure/infrastructure-from-code-guide.html#related-guides","infrastructure/batch-workflow-system.html#batch-workflow-system-v310---token-optimized-architecture","infrastructure/batch-workflow-system.html#-batch-workflow-system-completed-2025-09-25","infrastructure/batch-workflow-system.html#key-achievements","infrastructure/batch-workflow-system.html#batch-workflow-commands","infrastructure/batch-workflow-system.html#nickel-workflow-schema","infrastructure/batch-workflow-system.html#rest-api-endpoints-batch-operations","infrastructure/batch-workflow-system.html#system-benefits","infrastructure/batch-workflow-multi-provider.html#multi-provider-batch-workflow-examples","infrastructure/batch-workflow-multi-provider.html#table-of-contents","infrastructure/batch-workflow-multi-provider.html#overview","infrastructure/batch-workflow-multi-provider.html#workflow-1-coordinated-multi-provider-deployment","infrastructure/batch-workflow-multi-provider.html#workflow-definition","infrastructure/batch-workflow-multi-provider.html#execution-flow","infrastructure/batch-workflow-multi-provider.html#workflow-2-multi-provider-disaster-recovery-failover","infrastructure/batch-workflow-multi-provider.html#workflow-definition-1","infrastructure/batch-workflow-multi-provider.html#failover-timeline","infrastructure/batch-workflow-multi-provider.html#workflow-3-cost-optimization-workload-migration","infrastructure/batch-workflow-multi-provider.html#workflow-definition-2","infrastructure/batch-workflow-multi-provider.html#workflow-4-multi-region-database-replication","infrastructure/batch-workflow-multi-provider.html#workflow-definition-3","infrastructure/batch-workflow-multi-provider.html#best-practices","infrastructure/batch-workflow-multi-provider.html#1-workflow-design","infrastructure/batch-workflow-multi-provider.html#2-orchestration","infrastructure/batch-workflow-multi-provider.html#3-cost-management","infrastructure/batch-workflow-multi-provider.html#troubleshooting","infrastructure/batch-workflow-multi-provider.html#issue-workflow-stuck-in-phase","infrastructure/batch-workflow-multi-provider.html#issue-rollback-failed","infrastructure/batch-workflow-multi-provider.html#issue-data-inconsistency-after-failover","infrastructure/batch-workflow-multi-provider.html#summary","infrastructure/cli-architecture.html#modular-cli-architecture-v320---major-refactoring","infrastructure/cli-architecture.html#-cli-refactoring-completed-2025-09-30","infrastructure/cli-architecture.html#architecture-improvements","infrastructure/cli-architecture.html#command-shortcuts-reference","infrastructure/cli-architecture.html#infrastructure","infrastructure/cli-architecture.html#orchestration","infrastructure/cli-architecture.html#development","infrastructure/cli-architecture.html#workspace","infrastructure/cli-architecture.html#configuration","infrastructure/cli-architecture.html#utilities","infrastructure/cli-architecture.html#generation","infrastructure/cli-architecture.html#special-commands","infrastructure/cli-architecture.html#bi-directional-help-system","infrastructure/cli-architecture.html#cli-internal-architecture","infrastructure/configuration-system.html#configuration-system-v200","infrastructure/configuration-system.html#-migration-completed-2025-09-23","infrastructure/configuration-system.html#configuration-files","infrastructure/configuration-system.html#essential-commands","infrastructure/configuration-system.html#configuration-architecture","infrastructure/configuration-system.html#configuration-loading-hierarchy-priority","infrastructure/configuration-system.html#file-type-guidelines","infrastructure/cli-reference.html#cli-reference","infrastructure/cli-reference.html#what-youll-learn","infrastructure/cli-reference.html#command-structure","infrastructure/cli-reference.html#global-options","infrastructure/cli-reference.html#output-formats","infrastructure/cli-reference.html#core-commands","infrastructure/cli-reference.html#help---show-help-information","infrastructure/cli-reference.html#version---show-version-information","infrastructure/cli-reference.html#env---environment-information","infrastructure/cli-reference.html#server-management-commands","infrastructure/cli-reference.html#server-create---create-servers","infrastructure/cli-reference.html#server-delete---delete-servers","infrastructure/cli-reference.html#server-list---list-servers","infrastructure/cli-reference.html#server-ssh---ssh-access","infrastructure/cli-reference.html#server-price---cost-information","infrastructure/cli-reference.html#task-service-commands","infrastructure/cli-reference.html#taskserv-create---install-services","infrastructure/cli-reference.html#taskserv-delete---remove-services","infrastructure/cli-reference.html#taskserv-list---list-services","infrastructure/cli-reference.html#taskserv-generate---generate-configurations","infrastructure/cli-reference.html#taskserv-check-updates---version-management","infrastructure/cli-reference.html#cluster-management-commands","infrastructure/cli-reference.html#cluster-create---deploy-clusters","infrastructure/cli-reference.html#cluster-delete---remove-clusters","infrastructure/cli-reference.html#cluster-list---list-clusters","infrastructure/cli-reference.html#cluster-scale---scale-clusters","infrastructure/cli-reference.html#infrastructure-commands","infrastructure/cli-reference.html#generate---generate-configurations","infrastructure/cli-reference.html#show---display-information","infrastructure/cli-reference.html#list---list-resources","infrastructure/cli-reference.html#validate---validate-configuration","infrastructure/cli-reference.html#configuration-commands","infrastructure/cli-reference.html#init---initialize-configuration","infrastructure/cli-reference.html#template---template-management","infrastructure/cli-reference.html#advanced-commands","infrastructure/cli-reference.html#nu---interactive-shell","infrastructure/cli-reference.html#sops---secret-management","infrastructure/cli-reference.html#context---context-management","infrastructure/cli-reference.html#workflow-commands","infrastructure/cli-reference.html#workflows---batch-operations","infrastructure/cli-reference.html#orchestrator---orchestrator-management","infrastructure/cli-reference.html#scripting-and-automation","infrastructure/cli-reference.html#exit-codes","infrastructure/cli-reference.html#environment-variables","infrastructure/cli-reference.html#batch-operations","infrastructure/cli-reference.html#json-output-processing","infrastructure/cli-reference.html#command-chaining-and-pipelines","infrastructure/cli-reference.html#sequential-operations","infrastructure/cli-reference.html#complex-workflows","infrastructure/cli-reference.html#integration-with-other-tools","infrastructure/cli-reference.html#cicd-integration","infrastructure/cli-reference.html#monitoring-integration","infrastructure/cli-reference.html#backup-automation","infrastructure/dynamic-secrets-guide.html#dynamic-secrets-guide","infrastructure/dynamic-secrets-guide.html#quick-reference","infrastructure/dynamic-secrets-guide.html#quick-commands","infrastructure/dynamic-secrets-guide.html#secret-types","infrastructure/dynamic-secrets-guide.html#rest-api-endpoints","infrastructure/dynamic-secrets-guide.html#aws-sts-example","infrastructure/dynamic-secrets-guide.html#ssh-key-example","infrastructure/dynamic-secrets-guide.html#configuration","infrastructure/dynamic-secrets-guide.html#troubleshooting","infrastructure/dynamic-secrets-guide.html#provider-not-found","infrastructure/dynamic-secrets-guide.html#ttl-exceeds-maximum","infrastructure/dynamic-secrets-guide.html#secret-not-renewable","infrastructure/dynamic-secrets-guide.html#missing-required-parameter","infrastructure/dynamic-secrets-guide.html#security-features","infrastructure/dynamic-secrets-guide.html#support","infrastructure/mode-system-guide.html#mode-system-quick-reference","infrastructure/mode-system-guide.html#quick-start","infrastructure/mode-system-guide.html#available-modes","infrastructure/mode-system-guide.html#mode-comparison","infrastructure/mode-system-guide.html#solo-mode","infrastructure/mode-system-guide.html#multi-user-mode","infrastructure/mode-system-guide.html#cicd-mode","infrastructure/mode-system-guide.html#enterprise-mode","infrastructure/mode-system-guide.html#common-operations","infrastructure/mode-system-guide.html#initialize-mode-system","infrastructure/mode-system-guide.html#check-current-mode","infrastructure/mode-system-guide.html#list-all-modes","infrastructure/mode-system-guide.html#switch-mode","infrastructure/mode-system-guide.html#show-mode-details","infrastructure/mode-system-guide.html#validate-mode","infrastructure/mode-system-guide.html#compare-modes","infrastructure/mode-system-guide.html#oci-registry-management","infrastructure/mode-system-guide.html#solo-mode-only","infrastructure/mode-system-guide.html#mode-specific-workflows","infrastructure/mode-system-guide.html#solo-mode-workflow","infrastructure/mode-system-guide.html#multi-user-mode-workflow","infrastructure/mode-system-guide.html#cicd-mode-workflow","infrastructure/mode-system-guide.html#enterprise-mode-workflow","infrastructure/mode-system-guide.html#configuration-files","infrastructure/mode-system-guide.html#mode-templates","infrastructure/mode-system-guide.html#active-mode-configuration","infrastructure/mode-system-guide.html#oci-registry-namespaces","infrastructure/mode-system-guide.html#troubleshooting","infrastructure/mode-system-guide.html#mode-switch-fails","infrastructure/mode-system-guide.html#cannot-start-oci-registry-solo-mode","infrastructure/mode-system-guide.html#authentication-fails-multi-usercicdenterprise","infrastructure/mode-system-guide.html#workspace-locking-issues-multi-userenterprise","infrastructure/mode-system-guide.html#oci-registry-connection-fails","infrastructure/mode-system-guide.html#environment-variables","infrastructure/mode-system-guide.html#best-practices","infrastructure/mode-system-guide.html#1-use-appropriate-mode","infrastructure/mode-system-guide.html#2-validate-before-switching","infrastructure/mode-system-guide.html#3-backup-active-configuration","infrastructure/mode-system-guide.html#4-use-check-mode","infrastructure/mode-system-guide.html#5-lock-workspaces-in-multi-userenterprise","infrastructure/mode-system-guide.html#6-pull-extensions-from-oci-multi-usercicdenterprise","infrastructure/mode-system-guide.html#security-considerations","infrastructure/mode-system-guide.html#solo-mode-1","infrastructure/mode-system-guide.html#multi-user-mode-1","infrastructure/mode-system-guide.html#cicd-mode-1","infrastructure/mode-system-guide.html#enterprise-mode-1","infrastructure/mode-system-guide.html#support-and-documentation","infrastructure/config-rendering-guide.html#configuration-rendering-guide","infrastructure/config-rendering-guide.html#overview","infrastructure/config-rendering-guide.html#quick-start","infrastructure/config-rendering-guide.html#starting-the-daemon","infrastructure/config-rendering-guide.html#simple-nickel-rendering","infrastructure/config-rendering-guide.html#rest-api-reference","infrastructure/config-rendering-guide.html#post-configrender","infrastructure/config-rendering-guide.html#get-configstats","infrastructure/config-rendering-guide.html#post-configstatsreset","infrastructure/config-rendering-guide.html#nickel-rendering","infrastructure/config-rendering-guide.html#basic-nickel-configuration","infrastructure/config-rendering-guide.html#nickel-with-lazy-evaluation","infrastructure/config-rendering-guide.html#expected-nickel-rendering-time","infrastructure/config-rendering-guide.html#tera-template-rendering","infrastructure/config-rendering-guide.html#basic-tera-template","infrastructure/config-rendering-guide.html#tera-filters-and-functions","infrastructure/config-rendering-guide.html#expected-tera-rendering-time","infrastructure/config-rendering-guide.html#performance-characteristics","infrastructure/config-rendering-guide.html#caching-strategy","infrastructure/config-rendering-guide.html#benchmarks","infrastructure/config-rendering-guide.html#memory-usage","infrastructure/config-rendering-guide.html#error-handling","infrastructure/config-rendering-guide.html#common-errors","infrastructure/config-rendering-guide.html#integration-examples","infrastructure/config-rendering-guide.html#using-with-nushell","infrastructure/config-rendering-guide.html#using-with-python","infrastructure/config-rendering-guide.html#using-with-curl","infrastructure/config-rendering-guide.html#troubleshooting","infrastructure/config-rendering-guide.html#daemon-wont-start","infrastructure/config-rendering-guide.html#very-slow-rendering","infrastructure/config-rendering-guide.html#rendering-hangs","infrastructure/config-rendering-guide.html#out-of-memory","infrastructure/config-rendering-guide.html#best-practices","infrastructure/config-rendering-guide.html#see-also","infrastructure/config-rendering-guide.html#quick-reference","infrastructure/config-rendering-guide.html#api-endpoint","infrastructure/config-rendering-guide.html#request-template","infrastructure/config-rendering-guide.html#quick-examples","infrastructure/config-rendering-guide.html#statistics","infrastructure/config-rendering-guide.html#performance-guide","infrastructure/config-rendering-guide.html#status-codes","infrastructure/config-rendering-guide.html#response-fields","infrastructure/config-rendering-guide.html#languages-comparison","infrastructure/config-rendering-guide.html#caching","infrastructure/config-rendering-guide.html#common-tasks","infrastructure/config-rendering-guide.html#error-examples","infrastructure/config-rendering-guide.html#integration-quick-start","infrastructure/config-rendering-guide.html#environment-variables","infrastructure/config-rendering-guide.html#useful-commands","infrastructure/config-rendering-guide.html#troubleshooting-checklist","infrastructure/configuration.html#configuration-guide","infrastructure/configuration.html#what-youll-learn","infrastructure/configuration.html#configuration-architecture","infrastructure/configuration.html#configuration-hierarchy","infrastructure/configuration.html#configuration-file-types","infrastructure/configuration.html#understanding-configuration-sections","infrastructure/configuration.html#core-system-configuration","infrastructure/configuration.html#path-configuration","infrastructure/configuration.html#debug-and-logging","infrastructure/configuration.html#output-configuration","infrastructure/configuration.html#provider-configuration","infrastructure/configuration.html#encryption-sops-configuration","infrastructure/configuration.html#configuration-interpolation","infrastructure/configuration.html#basic-interpolation-patterns","infrastructure/configuration.html#advanced-interpolation","infrastructure/configuration.html#interpolation-examples","infrastructure/configuration.html#environment-specific-configuration","infrastructure/configuration.html#environment-detection","infrastructure/configuration.html#environment-configuration-files","infrastructure/configuration.html#environment-switching","infrastructure/configuration.html#user-configuration-customization","infrastructure/configuration.html#creating-your-user-configuration","infrastructure/configuration.html#common-user-customizations","infrastructure/configuration.html#project-specific-configuration","infrastructure/configuration.html#project-configuration-file-provisioningtoml","infrastructure/configuration.html#infrastructure-specific-configuration-provisioningtoml","infrastructure/configuration.html#configuration-validation","infrastructure/configuration.html#built-in-validation","infrastructure/configuration.html#custom-validation-rules","infrastructure/configuration.html#troubleshooting-configuration","infrastructure/configuration.html#common-configuration-issues","infrastructure/configuration.html#configuration-debugging","infrastructure/configuration.html#configuration-reset","infrastructure/configuration.html#advanced-configuration-patterns","infrastructure/configuration.html#dynamic-configuration-loading","infrastructure/configuration.html#configuration-templating","infrastructure/configuration.html#multi-region-configuration","infrastructure/configuration.html#configuration-profiles","infrastructure/configuration.html#configuration-management-best-practices","infrastructure/configuration.html#1-version-control","infrastructure/configuration.html#2-documentation","infrastructure/configuration.html#3-validation","infrastructure/configuration.html#4-backup","infrastructure/configuration.html#5-security","infrastructure/configuration.html#configuration-migration","infrastructure/configuration.html#migrating-from-environment-variables","infrastructure/configuration.html#upgrading-configuration-format","infrastructure/configuration.html#next-steps","infrastructure/workspaces/workspace-setup.html#workspace-setup-guide","infrastructure/workspaces/workspace-setup.html#quick-start","infrastructure/workspaces/workspace-setup.html#1-create-a-new-workspace-automatic","infrastructure/workspaces/workspace-setup.html#2-workspace-structure-auto-generated","infrastructure/workspaces/workspace-setup.html#3-understanding-nickel-configuration","infrastructure/workspaces/workspace-setup.html#4-auto-generated-documentation","infrastructure/workspaces/workspace-setup.html#5-customize-your-workspace","infrastructure/workspaces/workspace-setup.html#next-steps-after-workspace-creation","infrastructure/workspaces/workspace-setup.html#1-read-your-auto-generated-documentation","infrastructure/workspaces/workspace-setup.html#2-customize-your-configuration","infrastructure/workspaces/workspace-setup.html#3-validate-your-configuration","infrastructure/workspaces/workspace-setup.html#4-add-multiple-infrastructures","infrastructure/workspaces/workspace-setup.html#5-configure-providers","infrastructure/workspaces/workspace-setup.html#workspace-management-commands","infrastructure/workspaces/workspace-setup.html#list-workspaces","infrastructure/workspaces/workspace-setup.html#activate-a-workspace","infrastructure/workspaces/workspace-setup.html#show-active-workspace","infrastructure/workspaces/workspace-setup.html#deploy-infrastructure","infrastructure/workspaces/workspace-setup.html#troubleshooting","infrastructure/workspaces/workspace-setup.html#invalid-nickel-syntax","infrastructure/workspaces/workspace-setup.html#configuration-issues","infrastructure/workspaces/workspace-setup.html#getting-help","infrastructure/workspaces/workspace-setup.html#next-steps","infrastructure/workspaces/workspace-guide.html#workspace-guide","infrastructure/workspaces/workspace-guide.html#-workspace-switching-guide","infrastructure/workspaces/workspace-guide.html#quick-start","infrastructure/workspaces/workspace-guide.html#additional-workspace-resources","infrastructure/workspaces/workspace-switching-guide.html#workspace-switching-guide","infrastructure/workspaces/workspace-switching-guide.html#overview","infrastructure/workspaces/workspace-switching-guide.html#quick-start","infrastructure/workspaces/workspace-switching-guide.html#list-available-workspaces","infrastructure/workspaces/workspace-switching-system.html#workspace-switching-system-v205","infrastructure/workspaces/workspace-switching-system.html#-workspace-switching-completed-2025-10-02","infrastructure/workspaces/workspace-switching-system.html#key-features","infrastructure/workspaces/workspace-switching-system.html#workspace-management-commands","infrastructure/workspaces/workspace-switching-system.html#central-user-configuration","infrastructure/workspaces/workspace-switching-system.html#usage-example","infrastructure/workspaces/workspace-switching-system.html#integration-with-config-system","infrastructure/workspaces/workspace-switching-system.html#benefits","infrastructure/workspaces/workspace-config-architecture.html#workspace-configuration-architecture","infrastructure/workspaces/workspace-config-architecture.html#overview","infrastructure/workspaces/workspace-config-architecture.html#critical-design-principle","infrastructure/workspaces/workspace-config-architecture.html#configuration-hierarchy","infrastructure/workspaces/workspace-config-architecture.html#workspace-structure","infrastructure/workspaces/workspace-config-architecture.html#template-system","infrastructure/workspaces/workspace-config-architecture.html#available-templates","infrastructure/workspaces/workspace-config-architecture.html#template-variables","infrastructure/workspaces/workspace-config-architecture.html#workspace-initialization","infrastructure/workspaces/workspace-config-architecture.html#command","infrastructure/workspaces/workspace-config-architecture.html#process","infrastructure/workspaces/workspace-config-architecture.html#user-context","infrastructure/workspaces/workspace-config-architecture.html#purpose","infrastructure/workspaces/workspace-config-architecture.html#example","infrastructure/workspaces/workspace-config-architecture.html#configuration-loading-process","infrastructure/workspaces/workspace-config-architecture.html#1-determine-active-workspace","infrastructure/workspaces/workspace-config-architecture.html#2-load-workspace-config","infrastructure/workspaces/workspace-config-architecture.html#3-load-provider-configs","infrastructure/workspaces/workspace-config-architecture.html#4-load-platform-configs","infrastructure/workspaces/workspace-config-architecture.html#5-apply-user-context","infrastructure/workspaces/workspace-config-architecture.html#6-apply-environment-variables","infrastructure/workspaces/workspace-config-architecture.html#migration-from-old-system","infrastructure/workspaces/workspace-config-architecture.html#before-env-based","infrastructure/workspaces/workspace-config-architecture.html#after-workspace-based","infrastructure/workspaces/workspace-config-architecture.html#breaking-changes","infrastructure/workspaces/workspace-config-architecture.html#workspace-management-commands","infrastructure/workspaces/workspace-config-architecture.html#initialize-workspace","infrastructure/workspaces/workspace-config-architecture.html#list-workspaces","infrastructure/workspaces/workspace-config-architecture.html#activate-workspace","infrastructure/workspaces/workspace-config-architecture.html#get-active-workspace","infrastructure/workspaces/workspace-config-architecture.html#implementation-files","infrastructure/workspaces/workspace-config-architecture.html#core-files","infrastructure/workspaces/workspace-config-architecture.html#key-changes-in-config-loader","infrastructure/workspaces/workspace-config-architecture.html#configuration-schema","infrastructure/workspaces/workspace-config-architecture.html#main-workspace-config-provisioningyaml","infrastructure/workspaces/workspace-config-architecture.html#provider-config-providerstoml","infrastructure/workspaces/workspace-config-architecture.html#user-context-ws_nameyaml","infrastructure/workspaces/workspace-config-architecture.html#benefits","infrastructure/workspaces/workspace-config-architecture.html#security-considerations","infrastructure/workspaces/workspace-config-architecture.html#generated-gitignore","infrastructure/workspaces/workspace-config-architecture.html#secret-management","infrastructure/workspaces/workspace-config-architecture.html#troubleshooting","infrastructure/workspaces/workspace-config-architecture.html#no-active-workspace-error","infrastructure/workspaces/workspace-config-architecture.html#config-file-not-found","infrastructure/workspaces/workspace-config-architecture.html#provider-not-configured","infrastructure/workspaces/workspace-config-architecture.html#future-enhancements","infrastructure/workspaces/workspace-config-architecture.html#summary","infrastructure/workspaces/workspace-config-architecture.html#related-documentation","infrastructure/workspaces/workspace-config-commands.html#workspace-configuration-management-commands","infrastructure/workspaces/workspace-config-commands.html#overview","infrastructure/workspaces/workspace-config-commands.html#command-summary","infrastructure/workspaces/workspace-config-commands.html#commands","infrastructure/workspaces/workspace-config-commands.html#show-workspace-configuration","infrastructure/workspaces/workspace-config-commands.html#validate-workspace-configuration","infrastructure/workspaces/workspace-config-commands.html#generate-provider-configuration","infrastructure/workspaces/workspace-config-commands.html#edit-configuration-files","infrastructure/workspaces/workspace-config-commands.html#show-configuration-hierarchy","infrastructure/workspaces/workspace-config-commands.html#list-configuration-files","infrastructure/workspaces/workspace-config-commands.html#workspace-selection","infrastructure/workspaces/workspace-config-commands.html#configuration-file-locations","infrastructure/workspaces/workspace-config-commands.html#configuration-hierarchy","infrastructure/workspaces/workspace-config-commands.html#examples","infrastructure/workspaces/workspace-config-commands.html#complete-workflow","infrastructure/workspaces/workspace-config-commands.html#multi-workspace-management","infrastructure/workspaces/workspace-config-commands.html#configuration-troubleshooting","infrastructure/workspaces/workspace-config-commands.html#integration-with-other-commands","infrastructure/workspaces/workspace-config-commands.html#tips","infrastructure/workspaces/workspace-config-commands.html#see-also","infrastructure/workspaces/workspace-enforcement-guide.html#workspace-enforcement-and-version-tracking-guide","infrastructure/workspaces/workspace-enforcement-guide.html#table-of-contents","infrastructure/workspaces/workspace-enforcement-guide.html#overview","infrastructure/workspaces/workspace-enforcement-guide.html#key-features","infrastructure/workspaces/workspace-enforcement-guide.html#workspace-requirement","infrastructure/workspaces/workspace-enforcement-guide.html#commands-that-require-workspace","infrastructure/workspaces/workspace-enforcement-guide.html#commands-that-dont-require-workspace","infrastructure/workspaces/workspace-enforcement-guide.html#what-happens-without-a-workspace","infrastructure/workspaces/workspace-enforcement-guide.html#version-tracking","infrastructure/workspaces/workspace-enforcement-guide.html#workspace-metadata","infrastructure/workspaces/workspace-enforcement-guide.html#version-components","infrastructure/workspaces/workspace-enforcement-guide.html#checking-workspace-version","infrastructure/workspaces/workspace-enforcement-guide.html#migration-framework","infrastructure/workspaces/workspace-enforcement-guide.html#when-migration-is-needed","infrastructure/workspaces/workspace-enforcement-guide.html#compatibility-scenarios","infrastructure/workspaces/workspace-enforcement-guide.html#running-migrations","infrastructure/workspaces/workspace-enforcement-guide.html#migration-process","infrastructure/workspaces/workspace-enforcement-guide.html#workspace-backups","infrastructure/workspaces/workspace-enforcement-guide.html#command-reference","infrastructure/workspaces/workspace-enforcement-guide.html#workspace-version-commands","infrastructure/workspaces/workspace-enforcement-guide.html#workspace-management-commands","infrastructure/workspaces/workspace-enforcement-guide.html#troubleshooting","infrastructure/workspaces/workspace-enforcement-guide.html#problem-no-active-workspace","infrastructure/workspaces/workspace-enforcement-guide.html#problem-workspace-has-invalid-structure","infrastructure/workspaces/workspace-enforcement-guide.html#problem-workspace-version-is-incompatible","infrastructure/workspaces/workspace-enforcement-guide.html#problem-migration-failed","infrastructure/workspaces/workspace-enforcement-guide.html#problem-cant-activate-workspace-after-migration","infrastructure/workspaces/workspace-enforcement-guide.html#best-practices","infrastructure/workspaces/workspace-enforcement-guide.html#1-always-use-named-workspaces","infrastructure/workspaces/workspace-enforcement-guide.html#2-let-system-create-backups","infrastructure/workspaces/workspace-enforcement-guide.html#3-check-compatibility-before-operations","infrastructure/workspaces/workspace-enforcement-guide.html#4-migrate-after-system-upgrades","infrastructure/workspaces/workspace-enforcement-guide.html#5-keep-backups-for-safety","infrastructure/workspaces/workspace-enforcement-guide.html#6-use-version-control-for-workspace-configs","infrastructure/workspaces/workspace-enforcement-guide.html#7-document-custom-migrations","infrastructure/workspaces/workspace-enforcement-guide.html#migration-history","infrastructure/workspaces/workspace-enforcement-guide.html#summary","infrastructure/workspaces/workspace-infra-reference.html#unified-workspaceinfrastructure-reference-system","infrastructure/workspaces/workspace-infra-reference.html#overview","infrastructure/workspaces/workspace-infra-reference.html#quick-start","infrastructure/workspaces/workspace-infra-reference.html#temporal-override-single-command","infrastructure/workspaces/workspace-infra-reference.html#persistent-activation","infrastructure/workspaces/workspace-infra-reference.html#notation-syntax","infrastructure/workspaces/workspace-infra-reference.html#basic-format","infrastructure/workspaces/workspace-infra-reference.html#examples","infrastructure/workspaces/workspace-infra-reference.html#resolution-priority","infrastructure/workspaces/workspace-infra-reference.html#usage-patterns","infrastructure/workspaces/workspace-infra-reference.html#pattern-1-temporal-override-for-commands","infrastructure/workspaces/workspace-infra-reference.html#pattern-2-persistent-workspace-activation","infrastructure/workspaces/workspace-infra-reference.html#pattern-3-pwd-based-inference","infrastructure/workspaces/workspace-infra-reference.html#pattern-4-default-infrastructure-management","infrastructure/workspaces/workspace-infra-reference.html#command-reference","infrastructure/workspaces/workspace-infra-reference.html#workspace-commands","infrastructure/workspaces/workspace-infra-reference.html#common-commands-with--ws","infrastructure/workspaces/workspace-infra-reference.html#features","infrastructure/workspaces/workspace-infra-reference.html#-unified-notation","infrastructure/workspaces/workspace-infra-reference.html#-temporal-override","infrastructure/workspaces/workspace-infra-reference.html#-persistent-defaults","infrastructure/workspaces/workspace-infra-reference.html#-smart-detection","infrastructure/workspaces/workspace-infra-reference.html#-error-handling","infrastructure/workspaces/workspace-infra-reference.html#environment-context","infrastructure/workspaces/workspace-infra-reference.html#temp_workspace-variable","infrastructure/workspaces/workspace-infra-reference.html#validation","infrastructure/workspaces/workspace-infra-reference.html#validating-notation","infrastructure/workspaces/workspace-infra-reference.html#error-cases","infrastructure/workspaces/workspace-infra-reference.html#configuration","infrastructure/workspaces/workspace-infra-reference.html#user-configuration","infrastructure/workspaces/workspace-infra-reference.html#workspace-schema","infrastructure/workspaces/workspace-infra-reference.html#best-practices","infrastructure/workspaces/workspace-infra-reference.html#1-use-persistent-activation-for-long-sessions","infrastructure/workspaces/workspace-infra-reference.html#2-use-temporal-override-for-ad-hoc-operations","infrastructure/workspaces/workspace-infra-reference.html#3-navigate-with-pwd-for-context-awareness","infrastructure/workspaces/workspace-infra-reference.html#4-set-meaningful-defaults","infrastructure/workspaces/workspace-infra-reference.html#troubleshooting","infrastructure/workspaces/workspace-infra-reference.html#issue-workspace-not-found-in-registry","infrastructure/workspaces/workspace-infra-reference.html#issue-infrastructure-not-found","infrastructure/workspaces/workspace-infra-reference.html#issue-temporal-override-not-working","infrastructure/workspaces/workspace-infra-reference.html#issue-pwd-detection-not-working","infrastructure/workspaces/workspace-infra-reference.html#migration-from-old-system","infrastructure/workspaces/workspace-infra-reference.html#old-way","infrastructure/workspaces/workspace-infra-reference.html#new-way","infrastructure/workspaces/workspace-infra-reference.html#performance-notes","infrastructure/workspaces/workspace-infra-reference.html#backward-compatibility","infrastructure/workspaces/workspace-infra-reference.html#see-also","security/authentication-layer-guide.html#authentication-layer-implementation-guide","security/authentication-layer-guide.html#overview","security/authentication-layer-guide.html#key-features","security/authentication-layer-guide.html#--jwt-authentication","security/authentication-layer-guide.html#--mfa-support","security/authentication-layer-guide.html#--security-policies","security/authentication-layer-guide.html#--audit-logging","security/authentication-layer-guide.html#--user-friendly-error-messages","security/authentication-layer-guide.html#quick-start","security/authentication-layer-guide.html#1-login-to-platform","security/authentication-layer-guide.html#2-enroll-mfa-first-time","security/authentication-layer-guide.html#3-verify-mfa-for-sensitive-operations","security/authentication-layer-guide.html#4-check-authentication-status","security/authentication-layer-guide.html#protected-operations","security/authentication-layer-guide.html#server-operations","security/authentication-layer-guide.html#task-service-operations","security/authentication-layer-guide.html#cluster-operations","security/authentication-layer-guide.html#batch-workflows","security/authentication-layer-guide.html#configuration","security/authentication-layer-guide.html#security-settings-configdefaultstoml","security/authentication-layer-guide.html#environment-specific-configuration","security/authentication-layer-guide.html#authentication-bypass-devtest-only","security/authentication-layer-guide.html#environment-variable-method","security/authentication-layer-guide.html#per-command-flag","security/authentication-layer-guide.html#check-mode-always-bypasses-auth","security/authentication-layer-guide.html#error-messages","security/authentication-layer-guide.html#not-authenticated","security/authentication-layer-guide.html#mfa-required","security/authentication-layer-guide.html#token-expired","security/authentication-layer-guide.html#audit-logging","security/authentication-layer-guide.html#viewing-audit-logs","security/authentication-layer-guide.html#integration-with-control-center","security/authentication-layer-guide.html#starting-control-center","security/authentication-layer-guide.html#testing-authentication","security/authentication-layer-guide.html#manual-testing","security/authentication-layer-guide.html#automated-testing","security/authentication-layer-guide.html#troubleshooting","security/authentication-layer-guide.html#plugin-not-available","security/authentication-layer-guide.html#control-center-not-running","security/authentication-layer-guide.html#mfa-not-working","security/authentication-layer-guide.html#keyring-access-issues","security/authentication-layer-guide.html#architecture","security/authentication-layer-guide.html#authentication-flow","security/authentication-layer-guide.html#file-structure","security/authentication-layer-guide.html#related-documentation","security/authentication-layer-guide.html#summary-of-changes","security/authentication-layer-guide.html#best-practices","security/authentication-layer-guide.html#for-users","security/authentication-layer-guide.html#for-developers","security/authentication-layer-guide.html#for-operators","security/authentication-layer-guide.html#license","security/authentication-layer-guide.html#quick-reference","security/authentication-layer-guide.html#quick-commands","security/authentication-layer-guide.html#protected-operations-1","security/authentication-layer-guide.html#bypass-authentication-devtest-only","security/authentication-layer-guide.html#configuration-1","security/authentication-layer-guide.html#error-messages-1","security/authentication-layer-guide.html#troubleshooting-1","security/authentication-layer-guide.html#audit-logs","security/authentication-layer-guide.html#cicd-integration","security/authentication-layer-guide.html#performance","security/authentication-layer-guide.html#related-docs","security/authentication-layer-guide.html#setup-guide","security/authentication-layer-guide.html#complete-authentication-setup-guide","security/authentication-layer-guide.html#step-1-start-control-center","security/authentication-layer-guide.html#step-2-find-default-credentials","security/authentication-layer-guide.html#step-3-log-in","security/authentication-layer-guide.html#step-4-now-create-your-server","security/authentication-layer-guide.html#-alternative-skip-auth-for-development","security/authentication-layer-guide.html#-troubleshooting","security/config-encryption-guide.html#configuration-encryption-guide","security/config-encryption-guide.html#overview","security/config-encryption-guide.html#table-of-contents","security/config-encryption-guide.html#prerequisites","security/config-encryption-guide.html#required-tools","security/config-encryption-guide.html#verify-installation","security/config-encryption-guide.html#quick-start","security/config-encryption-guide.html#1-initialize-encryption","security/config-encryption-guide.html#2-set-environment-variables","security/config-encryption-guide.html#3-validate-setup","security/config-encryption-guide.html#4-encrypt-your-first-config","security/config-encryption-guide.html#configuration-encryption","security/config-encryption-guide.html#file-naming-conventions","security/config-encryption-guide.html#encrypt-a-configuration-file","security/config-encryption-guide.html#decrypt-a-configuration-file","security/config-encryption-guide.html#edit-encrypted-files","security/config-encryption-guide.html#check-encryption-status","security/config-encryption-guide.html#kms-backends","security/config-encryption-guide.html#age-recommended-for-development","security/config-encryption-guide.html#aws-kms-production","security/config-encryption-guide.html#hashicorp-vault-enterprise","security/config-encryption-guide.html#cosmian-kms-confidential-computing","security/config-encryption-guide.html#cli-commands","security/config-encryption-guide.html#configuration-encryption-commands","security/config-encryption-guide.html#examples","security/config-encryption-guide.html#integration-with-config-loader","security/config-encryption-guide.html#automatic-decryption","security/config-encryption-guide.html#manual-loading","security/config-encryption-guide.html#configuration-hierarchy-with-encryption","security/config-encryption-guide.html#best-practices","security/config-encryption-guide.html#1-encrypt-all-sensitive-data","security/config-encryption-guide.html#2-use-appropriate-kms-backend","security/config-encryption-guide.html#3-key-management","security/config-encryption-guide.html#4-file-organization","security/config-encryption-guide.html#5-git-integration","security/config-encryption-guide.html#6-rotation-strategy","security/config-encryption-guide.html#7-audit-and-monitoring","security/config-encryption-guide.html#troubleshooting","security/config-encryption-guide.html#sops-not-found","security/config-encryption-guide.html#age-key-not-found","security/config-encryption-guide.html#sops_age_recipients-not-set","security/config-encryption-guide.html#decryption-failed","security/config-encryption-guide.html#aws-kms-access-denied","security/config-encryption-guide.html#vault-connection-failed","security/config-encryption-guide.html#security-considerations","security/config-encryption-guide.html#threat-model","security/config-encryption-guide.html#security-best-practices","security/config-encryption-guide.html#additional-resources","security/config-encryption-guide.html#support","security/config-encryption-guide.html#quick-reference","security/config-encryption-guide.html#setup-one-time","security/config-encryption-guide.html#common-commands","security/config-encryption-guide.html#file-naming-conventions-1","security/config-encryption-guide.html#quick-workflow","security/config-encryption-guide.html#kms-backends-1","security/config-encryption-guide.html#security-checklist","security/config-encryption-guide.html#troubleshooting-1","security/config-encryption-guide.html#testing","security/config-encryption-guide.html#integration","security/config-encryption-guide.html#emergency-key-recovery","security/config-encryption-guide.html#advanced","security/config-encryption-guide.html#documentation","security/security-system.html#complete-security-system-v400","security/security-system.html#-enterprise-grade-security-implementation","security/security-system.html#core-security-components","security/security-system.html#1--authentication--jwt","security/security-system.html#2--authorization--cedar","security/security-system.html#3--multi-factor-authentication--mfa","security/security-system.html#4--secrets-management","security/security-system.html#5--key-management-system--kms","security/security-system.html#6--audit-logging","security/security-system.html#7--break-glass-emergency-access","security/security-system.html#8--compliance-management","security/security-system.html#9--audit-query-system","security/security-system.html#10--token-management","security/security-system.html#11--access-control","security/security-system.html#12--encryption","security/security-system.html#performance-characteristics","security/security-system.html#quick-reference","security/security-system.html#architecture","security/security-system.html#configuration","security/security-system.html#documentation","security/security-system.html#help-commands","security/rustyvault-kms-guide.html#rustyvault-kms-backend-guide","security/rustyvault-kms-guide.html#overview","security/rustyvault-kms-guide.html#why-rustyvault","security/rustyvault-kms-guide.html#architecture-position","security/rustyvault-kms-guide.html#installation","security/rustyvault-kms-guide.html#option-1-standalone-rustyvault-server","security/rustyvault-kms-guide.html#option-2-docker-deployment","security/rustyvault-kms-guide.html#option-3-from-source","security/rustyvault-kms-guide.html#configuration","security/rustyvault-kms-guide.html#rustyvault-server-configuration","security/rustyvault-kms-guide.html#initialize-rustyvault","security/rustyvault-kms-guide.html#enable-transit-engine","security/rustyvault-kms-guide.html#kms-service-configuration","security/rustyvault-kms-guide.html#update-provisioningconfigkmstoml","security/rustyvault-kms-guide.html#environment-variables","security/rustyvault-kms-guide.html#usage","security/rustyvault-kms-guide.html#start-kms-service","security/rustyvault-kms-guide.html#cli-operations","security/rustyvault-kms-guide.html#rest-api-usage","security/rustyvault-kms-guide.html#advanced-features","security/rustyvault-kms-guide.html#context-based-encryption-aad","security/rustyvault-kms-guide.html#envelope-encryption","security/rustyvault-kms-guide.html#key-rotation","security/rustyvault-kms-guide.html#production-deployment","security/rustyvault-kms-guide.html#high-availability-setup","security/rustyvault-kms-guide.html#tls-configuration","security/rustyvault-kms-guide.html#auto-unseal-aws-kms","security/rustyvault-kms-guide.html#monitoring","security/rustyvault-kms-guide.html#health-checks","security/rustyvault-kms-guide.html#audit-logging","security/rustyvault-kms-guide.html#troubleshooting","security/rustyvault-kms-guide.html#common-issues","security/rustyvault-kms-guide.html#migration-from-other-backends","security/rustyvault-kms-guide.html#from-hashicorp-vault","security/rustyvault-kms-guide.html#from-age","security/rustyvault-kms-guide.html#security-considerations","security/rustyvault-kms-guide.html#best-practices","security/rustyvault-kms-guide.html#token-policies","security/rustyvault-kms-guide.html#performance","security/rustyvault-kms-guide.html#benchmarks-estimated","security/rustyvault-kms-guide.html#optimization-tips","security/rustyvault-kms-guide.html#related-documentation","security/rustyvault-kms-guide.html#support","security/secretumvault-kms-guide.html#secretumvault-kms-backend-guide","security/secretumvault-kms-guide.html#overview","security/secretumvault-kms-guide.html#what-is-secretumvault","security/secretumvault-kms-guide.html#when-to-use-secretumvault","security/secretumvault-kms-guide.html#deployment-modes","security/secretumvault-kms-guide.html#development-mode-embedded","security/secretumvault-kms-guide.html#staging-mode-service--surrealdb","security/secretumvault-kms-guide.html#production-mode-service--etcd","security/secretumvault-kms-guide.html#configuration","security/secretumvault-kms-guide.html#environment-variables","security/secretumvault-kms-guide.html#configuration-files","security/secretumvault-kms-guide.html#operations","security/secretumvault-kms-guide.html#encrypt-data","security/secretumvault-kms-guide.html#decrypt-data","security/secretumvault-kms-guide.html#generate-data-keys","security/secretumvault-kms-guide.html#health-and-status","security/secretumvault-kms-guide.html#key-rotation","security/secretumvault-kms-guide.html#storage-backends","security/secretumvault-kms-guide.html#filesystem-development","security/secretumvault-kms-guide.html#surrealdb-staging","security/secretumvault-kms-guide.html#etcd-production","security/secretumvault-kms-guide.html#postgresql-enterprise","security/secretumvault-kms-guide.html#troubleshooting","security/secretumvault-kms-guide.html#connection-errors","security/secretumvault-kms-guide.html#authentication-failures","security/secretumvault-kms-guide.html#storage-backend-errors","security/secretumvault-kms-guide.html#performance-issues","security/secretumvault-kms-guide.html#debugging","security/secretumvault-kms-guide.html#security-best-practices","security/secretumvault-kms-guide.html#token-management","security/secretumvault-kms-guide.html#tlsssl","security/secretumvault-kms-guide.html#access-control","security/secretumvault-kms-guide.html#key-rotation-1","security/secretumvault-kms-guide.html#backup-and-recovery","security/secretumvault-kms-guide.html#migration-guide","security/secretumvault-kms-guide.html#from-age-to-secretumvault","security/secretumvault-kms-guide.html#from-rustyvault-to-secretumvault","security/secretumvault-kms-guide.html#from-cosmian-to-secretumvault","security/secretumvault-kms-guide.html#performance-tuning","security/secretumvault-kms-guide.html#development-filesystem","security/secretumvault-kms-guide.html#staging-surrealdb","security/secretumvault-kms-guide.html#production-etcd","security/secretumvault-kms-guide.html#compliance-and-audit","security/secretumvault-kms-guide.html#audit-logging","security/secretumvault-kms-guide.html#compliance-reports","security/secretumvault-kms-guide.html#advanced-topics","security/secretumvault-kms-guide.html#cedar-authorization-policies","security/secretumvault-kms-guide.html#key-encryption-keys-kek","security/secretumvault-kms-guide.html#multi-region-setup","security/secretumvault-kms-guide.html#support-and-resources","security/secretumvault-kms-guide.html#see-also","security/ssh-temporal-keys-user-guide.html#ssh-temporal-keys---user-guide","security/ssh-temporal-keys-user-guide.html#quick-start","security/ssh-temporal-keys-user-guide.html#generate-and-connect-with-temporary-key","security/ssh-temporal-keys-user-guide.html#manual-key-management","security/ssh-temporal-keys-user-guide.html#key-features","security/ssh-temporal-keys-user-guide.html#automatic-expiration","security/ssh-temporal-keys-user-guide.html#multiple-key-types","security/ssh-temporal-keys-user-guide.html#security-benefits","security/ssh-temporal-keys-user-guide.html#common-usage-patterns","security/ssh-temporal-keys-user-guide.html#development-workflow","security/ssh-temporal-keys-user-guide.html#production-deployment","security/ssh-temporal-keys-user-guide.html#multi-server-access","security/ssh-temporal-keys-user-guide.html#command-reference","security/ssh-temporal-keys-user-guide.html#ssh-generate-key","security/ssh-temporal-keys-user-guide.html#ssh-deploy-key","security/ssh-temporal-keys-user-guide.html#ssh-list-keys","security/ssh-temporal-keys-user-guide.html#ssh-get-key","security/ssh-temporal-keys-user-guide.html#ssh-revoke-key","security/ssh-temporal-keys-user-guide.html#ssh-connect","security/ssh-temporal-keys-user-guide.html#ssh-stats","security/ssh-temporal-keys-user-guide.html#ssh-cleanup","security/ssh-temporal-keys-user-guide.html#ssh-test","security/ssh-temporal-keys-user-guide.html#ssh-help","security/ssh-temporal-keys-user-guide.html#duration-formats","security/ssh-temporal-keys-user-guide.html#working-with-private-keys","security/ssh-temporal-keys-user-guide.html#saving-private-keys","security/ssh-temporal-keys-user-guide.html#using-ssh-agent","security/ssh-temporal-keys-user-guide.html#troubleshooting","security/ssh-temporal-keys-user-guide.html#key-deployment-fails","security/ssh-temporal-keys-user-guide.html#private-key-not-working","security/ssh-temporal-keys-user-guide.html#cleanup-not-running","security/ssh-temporal-keys-user-guide.html#best-practices","security/ssh-temporal-keys-user-guide.html#security","security/ssh-temporal-keys-user-guide.html#workflow-integration","security/ssh-temporal-keys-user-guide.html#advanced-usage","security/ssh-temporal-keys-user-guide.html#vault-integration","security/ssh-temporal-keys-user-guide.html#scripting","security/ssh-temporal-keys-user-guide.html#api-integration","security/ssh-temporal-keys-user-guide.html#faq","security/ssh-temporal-keys-user-guide.html#support","security/ssh-temporal-keys-user-guide.html#see-also","security/plugin-integration-guide.html#nushell-plugin-integration-guide","security/plugin-integration-guide.html#table-of-contents","security/plugin-integration-guide.html#overview","security/plugin-integration-guide.html#architecture-benefits","security/plugin-integration-guide.html#key-features","security/plugin-integration-guide.html#why-native-plugins","security/plugin-integration-guide.html#performance-comparison","security/plugin-integration-guide.html#use-case-batch-processing","security/plugin-integration-guide.html#developer-experience-benefits","security/plugin-integration-guide.html#prerequisites","security/plugin-integration-guide.html#required-software","security/plugin-integration-guide.html#optional-dependencies","security/plugin-integration-guide.html#platform-support","security/plugin-integration-guide.html#installation","security/plugin-integration-guide.html#step-1-clone-or-navigate-to-plugin-directory","security/plugin-integration-guide.html#step-2-build-all-plugins","security/plugin-integration-guide.html#step-3-register-plugins-with-nushell","security/plugin-integration-guide.html#step-4-verify-installation","security/plugin-integration-guide.html#step-5-configure-environment-optional","security/plugin-integration-guide.html#quick-start-5-minutes","security/plugin-integration-guide.html#1-authentication-workflow","security/plugin-integration-guide.html#2-kms-operations","security/plugin-integration-guide.html#3-orchestrator-operations","security/plugin-integration-guide.html#4-combined-workflow","security/plugin-integration-guide.html#authentication-plugin-nu_plugin_auth","security/plugin-integration-guide.html#available-commands","security/plugin-integration-guide.html#command-reference","security/plugin-integration-guide.html#environment-variables","security/plugin-integration-guide.html#troubleshooting-authentication","security/plugin-integration-guide.html#kms-plugin-nu_plugin_kms","security/plugin-integration-guide.html#supported-backends","security/plugin-integration-guide.html#backend-selection-guide","security/plugin-integration-guide.html#available-commands-1","security/plugin-integration-guide.html#command-reference-1","security/plugin-integration-guide.html#backend-configuration","security/plugin-integration-guide.html#performance-benchmarks","security/plugin-integration-guide.html#troubleshooting-kms","security/plugin-integration-guide.html#orchestrator-plugin-nu_plugin_orchestrator","security/plugin-integration-guide.html#available-commands-2","security/plugin-integration-guide.html#command-reference-2","security/plugin-integration-guide.html#environment-variables-1","security/plugin-integration-guide.html#performance-comparison-1","security/plugin-integration-guide.html#troubleshooting-orchestrator","security/plugin-integration-guide.html#integration-examples","security/plugin-integration-guide.html#example-1-complete-authenticated-deployment","security/plugin-integration-guide.html#example-2-batch-secret-rotation","security/plugin-integration-guide.html#example-3-multi-environment-deployment","security/plugin-integration-guide.html#example-4-automated-backup-and-encryption","security/plugin-integration-guide.html#example-5-health-monitoring-dashboard","security/plugin-integration-guide.html#best-practices","security/plugin-integration-guide.html#when-to-use-plugins-vs-http","security/plugin-integration-guide.html#performance-optimization","security/plugin-integration-guide.html#error-handling","security/plugin-integration-guide.html#security-best-practices","security/plugin-integration-guide.html#troubleshooting","security/plugin-integration-guide.html#common-issues-across-plugins","security/plugin-integration-guide.html#platform-specific-issues","security/plugin-integration-guide.html#debugging-techniques","security/plugin-integration-guide.html#migration-guide","security/plugin-integration-guide.html#migrating-from-http-to-plugin-based","security/plugin-integration-guide.html#rollback-strategy","security/plugin-integration-guide.html#advanced-configuration","security/plugin-integration-guide.html#custom-plugin-paths","security/plugin-integration-guide.html#environment-specific-configuration","security/plugin-integration-guide.html#plugin-aliases","security/plugin-integration-guide.html#custom-commands","security/plugin-integration-guide.html#security-considerations","security/plugin-integration-guide.html#threat-model","security/plugin-integration-guide.html#secure-deployment","security/plugin-integration-guide.html#faq","security/plugin-integration-guide.html#related-documentation","security/nushell-plugins-guide.html#nushell-plugins-for-provisioning-platform","security/nushell-plugins-guide.html#overview","security/nushell-plugins-guide.html#why-native-plugins","security/nushell-plugins-guide.html#installation","security/nushell-plugins-guide.html#prerequisites","security/nushell-plugins-guide.html#build-from-source","security/nushell-plugins-guide.html#register-with-nushell","security/nushell-plugins-guide.html#verify-installation","security/nushell-plugins-guide.html#plugin-nu_plugin_auth","security/nushell-plugins-guide.html#commands","security/nushell-plugins-guide.html#environment-variables","security/nushell-plugins-guide.html#error-handling","security/nushell-plugins-guide.html#plugin-nu_plugin_kms","security/nushell-plugins-guide.html#supported-backends","security/nushell-plugins-guide.html#commands-1","security/nushell-plugins-guide.html#environment-variables-1","security/nushell-plugins-guide.html#performance-comparison","security/nushell-plugins-guide.html#plugin-nu_plugin_orchestrator","security/nushell-plugins-guide.html#commands-2","security/nushell-plugins-guide.html#environment-variables-2","security/nushell-plugins-guide.html#performance-comparison-1","security/nushell-plugins-guide.html#pipeline-examples","security/nushell-plugins-guide.html#authentication-flow","security/nushell-plugins-guide.html#kms-operations","security/nushell-plugins-guide.html#orchestrator-monitoring","security/nushell-plugins-guide.html#combined-workflow","security/nushell-plugins-guide.html#troubleshooting","security/nushell-plugins-guide.html#auth-plugin","security/nushell-plugins-guide.html#kms-plugin","security/nushell-plugins-guide.html#orchestrator-plugin","security/nushell-plugins-guide.html#development","security/nushell-plugins-guide.html#building-from-source","security/nushell-plugins-guide.html#adding-to-cicd","security/nushell-plugins-guide.html#advanced-usage","security/nushell-plugins-guide.html#custom-plugin-configuration","security/nushell-plugins-guide.html#plugin-aliases","security/nushell-plugins-guide.html#security-best-practices","security/nushell-plugins-guide.html#authentication","security/nushell-plugins-guide.html#kms-operations-1","security/nushell-plugins-guide.html#orchestrator","security/nushell-plugins-guide.html#faq","security/nushell-plugins-guide.html#related-documentation","security/nushell-plugins-system.html#nushell-plugins-integration-v100---see-detailed-guide-for-complete-reference","security/nushell-plugins-system.html#overview","security/nushell-plugins-system.html#performance-improvements","security/nushell-plugins-system.html#three-native-plugins","security/nushell-plugins-system.html#quick-commands","security/nushell-plugins-system.html#installation","security/nushell-plugins-system.html#benefits","security/plugin-usage-guide.html#provisioning-plugins-usage-guide","security/plugin-usage-guide.html#overview","security/plugin-usage-guide.html#installation","security/plugin-usage-guide.html#prerequisites","security/plugin-usage-guide.html#quick-install","security/plugin-usage-guide.html#manual-installation","security/plugin-usage-guide.html#usage","security/plugin-usage-guide.html#authentication-plugin","security/plugin-usage-guide.html#kms-plugin","security/plugin-usage-guide.html#orchestrator-plugin","security/plugin-usage-guide.html#plugin-status","security/plugin-usage-guide.html#testing-plugins","security/plugin-usage-guide.html#list-registered-plugins","security/plugin-usage-guide.html#performance-comparison","security/plugin-usage-guide.html#graceful-fallback","security/plugin-usage-guide.html#troubleshooting","security/plugin-usage-guide.html#plugins-not-found-after-installation","security/plugin-usage-guide.html#command-not-found-errors","security/plugin-usage-guide.html#plugins-crash-or-are-unresponsive","security/plugin-usage-guide.html#integration-with-provisioning-cli","security/plugin-usage-guide.html#advanced-configuration","security/plugin-usage-guide.html#custom-data-directory","security/plugin-usage-guide.html#custom-auth-url","security/plugin-usage-guide.html#kms-backend-selection","security/plugin-usage-guide.html#building-plugins-from-source","security/plugin-usage-guide.html#architecture","security/plugin-usage-guide.html#security-notes","security/plugin-usage-guide.html#support","security/secrets-management-guide.html#secrets-management-system---configuration-guide","security/secrets-management-guide.html#overview","security/secrets-management-guide.html#secret-sources","security/secrets-management-guide.html#1-sops-secrets-operations","security/secrets-management-guide.html#2-kms-key-management-service","security/secrets-management-guide.html#3-rustyvault-hashicorp-vault-compatible","security/secrets-management-guide.html#4-local-dev-fallback","security/secrets-management-guide.html#auto-detection-logic","security/secrets-management-guide.html#configuration-matrix","security/secrets-management-guide.html#production-recommended-setup","security/secrets-management-guide.html#minimal-setup-single-source","security/secrets-management-guide.html#enhanced-setup-fallback-chain","security/secrets-management-guide.html#high-availability-setup","security/secrets-management-guide.html#validation--testing","security/secrets-management-guide.html#check-configuration","security/secrets-management-guide.html#test-ssh-key-retrieval","security/secrets-management-guide.html#migration-path","security/secrets-management-guide.html#from-local-dev-to-sops","security/secrets-management-guide.html#from-sops-to-vault","security/secrets-management-guide.html#security-best-practices","security/secrets-management-guide.html#1-never-commit-secrets","security/secrets-management-guide.html#2-rotate-keys-regularly","security/secrets-management-guide.html#3-restrict-access","security/secrets-management-guide.html#4-audit-logging","security/secrets-management-guide.html#troubleshooting","security/secrets-management-guide.html#sops-issues","security/secrets-management-guide.html#kms-issues","security/secrets-management-guide.html#vault-issues","security/secrets-management-guide.html#faq","security/secrets-management-guide.html#architecture","security/secrets-management-guide.html#integration-with-ssh-utilities","security/kms-service.html#kms-service---key-management-service","security/kms-service.html#supported-backends","security/kms-service.html#architecture","security/kms-service.html#quick-start","security/kms-service.html#development-setup-age","security/kms-service.html#production-setup-cosmian","security/kms-service.html#rest-api-examples","security/kms-service.html#encrypt-data","security/kms-service.html#decrypt-data","security/kms-service.html#nushell-cli-integration","security/kms-service.html#backend-comparison","security/kms-service.html#integration-points","security/kms-service.html#deployment","security/kms-service.html#docker","security/kms-service.html#kubernetes","security/kms-service.html#security-best-practices","security/kms-service.html#related-documentation","integration/gitea-integration-guide.html#gitea-integration-guide","integration/gitea-integration-guide.html#table-of-contents","integration/gitea-integration-guide.html#overview","integration/gitea-integration-guide.html#architecture","integration/gitea-integration-guide.html#setup","integration/gitea-integration-guide.html#prerequisites","integration/gitea-integration-guide.html#configuration","integration/gitea-integration-guide.html#workspace-git-integration","integration/gitea-integration-guide.html#initialize-workspace-with-git","integration/gitea-integration-guide.html#clone-existing-workspace","integration/gitea-integration-guide.html#pushpull-changes","integration/gitea-integration-guide.html#branch-management","integration/gitea-integration-guide.html#git-status","integration/gitea-integration-guide.html#workspace-locking","integration/gitea-integration-guide.html#lock-types","integration/gitea-integration-guide.html#acquire-lock","integration/gitea-integration-guide.html#check-lock-status","integration/gitea-integration-guide.html#release-lock","integration/gitea-integration-guide.html#force-release-lock-admin","integration/gitea-integration-guide.html#automatic-locking","integration/gitea-integration-guide.html#lock-cleanup","integration/gitea-integration-guide.html#extension-publishing","integration/gitea-integration-guide.html#publish-extension","integration/gitea-integration-guide.html#list-published-extensions","integration/gitea-integration-guide.html#download-extension","integration/gitea-integration-guide.html#extension-metadata","integration/gitea-integration-guide.html#publishing-workflow","integration/gitea-integration-guide.html#service-management","integration/gitea-integration-guide.html#startstop-gitea","integration/gitea-integration-guide.html#check-status","integration/gitea-integration-guide.html#view-logs","integration/gitea-integration-guide.html#install-gitea-binary","integration/gitea-integration-guide.html#api-reference","integration/gitea-integration-guide.html#repository-operations","integration/gitea-integration-guide.html#release-operations","integration/gitea-integration-guide.html#workspace-operations","integration/gitea-integration-guide.html#locking-operations","integration/gitea-integration-guide.html#troubleshooting","integration/gitea-integration-guide.html#gitea-not-starting","integration/gitea-integration-guide.html#token-authentication-failed","integration/gitea-integration-guide.html#cannot-push-to-repository","integration/gitea-integration-guide.html#lock-already-exists","integration/gitea-integration-guide.html#extension-validation-failed","integration/gitea-integration-guide.html#docker-volume-permissions","integration/gitea-integration-guide.html#best-practices","integration/gitea-integration-guide.html#workspace-management","integration/gitea-integration-guide.html#extension-publishing-1","integration/gitea-integration-guide.html#security","integration/gitea-integration-guide.html#performance","integration/gitea-integration-guide.html#advanced-usage","integration/gitea-integration-guide.html#custom-gitea-deployment","integration/gitea-integration-guide.html#webhooks-integration","integration/gitea-integration-guide.html#batch-extension-publishing","integration/gitea-integration-guide.html#references","integration/service-mesh-ingress-guide.html#service-mesh--ingress-guide","integration/service-mesh-ingress-guide.html#comparison","integration/service-mesh-ingress-guide.html#understanding-the-difference","integration/service-mesh-ingress-guide.html#service-mesh-options","integration/service-mesh-ingress-guide.html#ingress-controller-options","integration/service-mesh-ingress-guide.html#recommended-combinations","integration/service-mesh-ingress-guide.html#1-linkerd--nginx-ingress-recommended-for-most-users","integration/service-mesh-ingress-guide.html#2-istio-standalone","integration/service-mesh-ingress-guide.html#3-linkerd--traefik","integration/service-mesh-ingress-guide.html#4-no-mesh--nginx-ingress-simple-deployments","integration/service-mesh-ingress-guide.html#decision-matrix","integration/service-mesh-ingress-guide.html#migration-paths","integration/service-mesh-ingress-guide.html#from-istio-to-linkerd","integration/service-mesh-ingress-guide.html#between-ingress-controllers","integration/service-mesh-ingress-guide.html#examples","integration/service-mesh-ingress-guide.html#example-1-linkerd--nginx-ingress-deployment","integration/service-mesh-ingress-guide.html#example-2-istio-standalone-deployment","integration/service-mesh-ingress-guide.html#example-3-linkerd--traefik-modern-cloud-native","integration/service-mesh-ingress-guide.html#example-4-minimal-setup-just-nginx-no-service-mesh","integration/service-mesh-ingress-guide.html#enable-sidecar-injection-for-services","integration/service-mesh-ingress-guide.html#for-linkerd","integration/service-mesh-ingress-guide.html#for-istio","integration/service-mesh-ingress-guide.html#monitoring-and-observability","integration/service-mesh-ingress-guide.html#linkerd-dashboard","integration/service-mesh-ingress-guide.html#istio-dashboards","integration/service-mesh-ingress-guide.html#traefik-dashboard","integration/service-mesh-ingress-guide.html#quick-reference","integration/service-mesh-ingress-guide.html#installation-commands","integration/service-mesh-ingress-guide.html#common-installation-combinations","integration/service-mesh-ingress-guide.html#verification-commands","integration/service-mesh-ingress-guide.html#troubleshooting","integration/service-mesh-ingress-guide.html#uninstallation","integration/service-mesh-ingress-guide.html#performance-tuning","integration/service-mesh-ingress-guide.html#complete-workspace-directory-structure","integration/service-mesh-ingress-guide.html#next-steps","integration/service-mesh-ingress-guide.html#additional-resources","integration/oci-registry-guide.html#oci-registry-user-guide","integration/oci-registry-guide.html#table-of-contents","integration/oci-registry-guide.html#overview","integration/oci-registry-guide.html#what-are-oci-artifacts","integration/oci-registry-guide.html#quick-start","integration/oci-registry-guide.html#prerequisites","integration/oci-registry-guide.html#1-start-local-oci-registry-development","integration/oci-registry-guide.html#2-pull-an-extension","integration/oci-registry-guide.html#3-list-available-extensions","integration/oci-registry-guide.html#4-configure-workspace-to-use-oci","integration/oci-registry-guide.html#5-resolve-dependencies","integration/oci-registry-guide.html#oci-commands-reference","integration/oci-registry-guide.html#pull-extension","integration/oci-registry-guide.html#push-extension","integration/oci-registry-guide.html#list-extensions","integration/oci-registry-guide.html#search-extensions","integration/oci-registry-guide.html#show-tags-versions","integration/oci-registry-guide.html#inspect-extension","integration/oci-registry-guide.html#login-to-registry","integration/oci-registry-guide.html#logout-from-registry","integration/oci-registry-guide.html#delete-extension","integration/oci-registry-guide.html#copy-extension","integration/oci-registry-guide.html#show-oci-configuration","integration/oci-registry-guide.html#dependency-management","integration/oci-registry-guide.html#dependency-configuration","integration/oci-registry-guide.html#resolve-dependencies","integration/oci-registry-guide.html#check-for-updates","integration/oci-registry-guide.html#update-dependency","integration/oci-registry-guide.html#dependency-tree","integration/oci-registry-guide.html#validate-dependencies","integration/oci-registry-guide.html#extension-development","integration/oci-registry-guide.html#create-new-extension","integration/oci-registry-guide.html#extension-manifest","integration/oci-registry-guide.html#test-extension-locally","integration/oci-registry-guide.html#validate-extension","integration/oci-registry-guide.html#package-extension","integration/oci-registry-guide.html#publish-extension","integration/oci-registry-guide.html#registry-setup","integration/oci-registry-guide.html#local-registry-development","integration/oci-registry-guide.html#remote-registry-production","integration/oci-registry-guide.html#troubleshooting","integration/oci-registry-guide.html#no-oci-tool-found","integration/oci-registry-guide.html#connection-refused","integration/oci-registry-guide.html#tls-certificate-error","integration/oci-registry-guide.html#authentication-failed","integration/oci-registry-guide.html#extension-not-found","integration/oci-registry-guide.html#dependency-resolution-failed","integration/oci-registry-guide.html#best-practices","integration/oci-registry-guide.html#version-pinning","integration/oci-registry-guide.html#semantic-versioning","integration/oci-registry-guide.html#dependency-management-1","integration/oci-registry-guide.html#security","integration/oci-registry-guide.html#related-documentation","integration/integrations-quickstart.html#prov-ecosystem--provctl-integrations---quick-start-guide","integration/integrations-quickstart.html#overview","integration/integrations-quickstart.html#quick-start-commands","integration/integrations-quickstart.html#-30-second-test","integration/integrations-quickstart.html#1-runtime-abstraction","integration/integrations-quickstart.html#what-it-does","integration/integrations-quickstart.html#commands","integration/integrations-quickstart.html#examples","integration/integrations-quickstart.html#configuration","integration/integrations-quickstart.html#2-ssh-advanced-operations","integration/integrations-quickstart.html#what-it-does-1","integration/integrations-quickstart.html#commands-1","integration/integrations-quickstart.html#deployment-strategies","integration/integrations-quickstart.html#example-multi-host-deployment","integration/integrations-quickstart.html#retry-strategies","integration/integrations-quickstart.html#3-backup-system","integration/integrations-quickstart.html#what-it-does-2","integration/integrations-quickstart.html#commands-2","integration/integrations-quickstart.html#backend-comparison","integration/integrations-quickstart.html#example-automated-daily-backups-to-s3","integration/integrations-quickstart.html#dry-run-test-first","integration/integrations-quickstart.html#4-gitops-event-driven-deployments","integration/integrations-quickstart.html#what-it-does-3","integration/integrations-quickstart.html#commands-3","integration/integrations-quickstart.html#example-gitops-configuration","integration/integrations-quickstart.html#5-service-management","integration/integrations-quickstart.html#what-it-does-4","integration/integrations-quickstart.html#commands-4","integration/integrations-quickstart.html#example-install-custom-service","integration/integrations-quickstart.html#-common-workflows","integration/integrations-quickstart.html#workflow-1-multi-platform-deployment","integration/integrations-quickstart.html#workflow-2-large-scale-ssh-operations","integration/integrations-quickstart.html#workflow-3-automated-backups","integration/integrations-quickstart.html#workflow-4-continuous-deployment-from-git","integration/integrations-quickstart.html#-advanced-configuration","integration/integrations-quickstart.html#using-with-nickel-configuration","integration/integrations-quickstart.html#-tips--tricks","integration/integrations-quickstart.html#tip-1-dry-run-mode","integration/integrations-quickstart.html#tip-2-output-formats","integration/integrations-quickstart.html#tip-3-integration-with-scripts","integration/integrations-quickstart.html#-troubleshooting","integration/integrations-quickstart.html#problem-no-container-runtime-detected","integration/integrations-quickstart.html#problem-ssh-connection-timeout","integration/integrations-quickstart.html#problem-backup-fails-with-permission-denied","integration/integrations-quickstart.html#-learn-more","integration/integrations-quickstart.html#-need-help","integration/secrets-service-layer-complete.html#secrets-service-layer-sst---complete-user-guide","integration/secrets-service-layer-complete.html#-executive-summary","integration/secrets-service-layer-complete.html#-key-features","integration/secrets-service-layer-complete.html#-quick-start-5-minutes","integration/secrets-service-layer-complete.html#1-register-the-workspace-librecloud","integration/secrets-service-layer-complete.html#2-create-your-first-database-secret","integration/secrets-service-layer-complete.html#3-retrieve-the-secret","integration/secrets-service-layer-complete.html#4-list-secrets-by-domain","integration/secrets-service-layer-complete.html#-complete-guide-by-phases","integration/secrets-service-layer-complete.html#phase-1-database-and-application-secrets","integration/secrets-service-layer-complete.html#phase-2-ssh-keys-and-provider-credentials","integration/secrets-service-layer-complete.html#phase-3-auto-rotation","integration/secrets-service-layer-complete.html#phase-32-share-secrets-across-workspaces","integration/secrets-service-layer-complete.html#phase-34-monitoring-and-alerts","integration/secrets-service-layer-complete.html#-cedar-authorization","integration/secrets-service-layer-complete.html#example-policy-production-secret-access","integration/secrets-service-layer-complete.html#verify-authorization","integration/secrets-service-layer-complete.html#-data-structure","integration/secrets-service-layer-complete.html#secret-in-database","integration/secrets-service-layer-complete.html#secret-hierarchy","integration/secrets-service-layer-complete.html#-complete-workflows","integration/secrets-service-layer-complete.html#workflow-1-create-and-rotate-database-credential","integration/secrets-service-layer-complete.html#workflow-2-share-secret-between-workspaces","integration/secrets-service-layer-complete.html#workflow-3-access-temporal-ssh-secret","integration/secrets-service-layer-complete.html#-practical-examples","integration/secrets-service-layer-complete.html#example-1-manage-postgresql-secrets","integration/secrets-service-layer-complete.html#example-2-temporary-ssh-access","integration/secrets-service-layer-complete.html#example-3-cicd-integration","integration/secrets-service-layer-complete.html#-security","integration/secrets-service-layer-complete.html#encryption","integration/secrets-service-layer-complete.html#access-control","integration/secrets-service-layer-complete.html#audit","integration/secrets-service-layer-complete.html#-test-results","integration/secrets-service-layer-complete.html#all-25-integration-tests-passing","integration/secrets-service-layer-complete.html#-troubleshooting","integration/secrets-service-layer-complete.html#problem-authorization-denied-by-cedar-policy","integration/secrets-service-layer-complete.html#problem-secret-not-found","integration/secrets-service-layer-complete.html#problem-mfa-required","integration/secrets-service-layer-complete.html#-complete-documentation","integration/secrets-service-layer-complete.html#-next-steps-future","integration/oci-registry-platform.html#oci-registry-service","integration/oci-registry-platform.html#supported-registries","integration/oci-registry-platform.html#features","integration/oci-registry-platform.html#quick-start","integration/oci-registry-platform.html#start-zot-registry-default","integration/oci-registry-platform.html#start-harbor-registry","integration/oci-registry-platform.html#default-namespaces","integration/oci-registry-platform.html#management","integration/oci-registry-platform.html#nushell-commands","integration/oci-registry-platform.html#docker-compose","integration/oci-registry-platform.html#registry-comparison","integration/oci-registry-platform.html#security","integration/oci-registry-platform.html#authentication","integration/oci-registry-platform.html#monitoring","integration/oci-registry-platform.html#health-checks","integration/oci-registry-platform.html#metrics","integration/oci-registry-platform.html#related-documentation","testing/test-environment-guide.html#test-environment-guide","testing/test-environment-guide.html#overview","testing/test-environment-guide.html#architecture","testing/test-environment-guide.html#test-environment-types","testing/test-environment-guide.html#1-single-taskserv-test","testing/test-environment-guide.html#2-server-simulation","testing/test-environment-guide.html#3-cluster-topology","testing/test-environment-guide.html#quick-start","testing/test-environment-guide.html#prerequisites","testing/test-environment-guide.html#basic-workflow","testing/test-environment-guide.html#topology-templates","testing/test-environment-guide.html#available-templates","testing/test-environment-guide.html#using-templates","testing/test-environment-guide.html#custom-topology","testing/test-environment-guide.html#commands-reference","testing/test-environment-guide.html#environment-management","testing/test-environment-guide.html#test-execution","testing/test-environment-guide.html#quick-test","testing/test-environment-guide.html#rest-api","testing/test-environment-guide.html#create-environment","testing/test-environment-guide.html#list-environments","testing/test-environment-guide.html#run-tests","testing/test-environment-guide.html#cleanup","testing/test-environment-guide.html#use-cases","testing/test-environment-guide.html#1-taskserv-development","testing/test-environment-guide.html#2-multi-taskserv-integration","testing/test-environment-guide.html#3-cluster-validation","testing/test-environment-guide.html#4-cicd-integration","testing/test-environment-guide.html#advanced-features","testing/test-environment-guide.html#resource-limits","testing/test-environment-guide.html#network-isolation","testing/test-environment-guide.html#auto-cleanup","testing/test-environment-guide.html#multiple-environments","testing/test-environment-guide.html#troubleshooting","testing/test-environment-guide.html#docker-not-running","testing/test-environment-guide.html#orchestrator-not-running","testing/test-environment-guide.html#environment-creation-fails","testing/test-environment-guide.html#out-of-resources","testing/test-environment-guide.html#best-practices","testing/test-environment-guide.html#1-use-templates","testing/test-environment-guide.html#2-auto-cleanup","testing/test-environment-guide.html#3-resource-planning","testing/test-environment-guide.html#4-parallel-testing","testing/test-environment-guide.html#configuration","testing/test-environment-guide.html#default-settings","testing/test-environment-guide.html#custom-config","testing/test-environment-guide.html#related-documentation","testing/test-environment-guide.html#version-history","testing/test-environment-system.html#test-environment-service-v340","testing/test-environment-system.html#-test-environment-service-completed-2025-10-06","testing/test-environment-system.html#key-features","testing/test-environment-system.html#test-environment-types","testing/test-environment-system.html#1-single-taskserv-testing","testing/test-environment-system.html#2-server-simulation","testing/test-environment-system.html#3-multi-node-cluster-topology","testing/test-environment-system.html#test-environment-management","testing/test-environment-system.html#available-topology-templates","testing/test-environment-system.html#rest-api-endpoints","testing/test-environment-system.html#prerequisites","testing/test-environment-system.html#architecture","testing/test-environment-system.html#configuration","testing/test-environment-system.html#use-cases","testing/test-environment-system.html#cicd-integration-example","testing/test-environment-system.html#documentation","testing/test-environment-system.html#command-shortcuts","testing/taskserv-validation-guide.html#taskserv-validation-and-testing-guide","testing/taskserv-validation-guide.html#overview","testing/taskserv-validation-guide.html#validation-levels","testing/taskserv-validation-guide.html#1-static-validation","testing/taskserv-validation-guide.html#2-dependency-validation","testing/taskserv-validation-guide.html#3-check-mode-dry-run","testing/taskserv-validation-guide.html#4-sandbox-testing","testing/taskserv-validation-guide.html#complete-validation-workflow","testing/taskserv-validation-guide.html#recommended-validation-sequence","testing/taskserv-validation-guide.html#quick-validation-all-levels","testing/taskserv-validation-guide.html#validation-commands-reference","testing/taskserv-validation-guide.html#provisioning-taskserv-validate","testing/taskserv-validation-guide.html#provisioning-taskserv-check-deps","testing/taskserv-validation-guide.html#provisioning-taskserv-create----check","testing/taskserv-validation-guide.html#provisioning-taskserv-test","testing/taskserv-validation-guide.html#validation-output","testing/taskserv-validation-guide.html#static-validation","testing/taskserv-validation-guide.html#dependency-validation","testing/taskserv-validation-guide.html#check-mode-output","testing/taskserv-validation-guide.html#test-output","testing/taskserv-validation-guide.html#integration-with-cicd","testing/taskserv-validation-guide.html#gitlab-ci-example","testing/taskserv-validation-guide.html#github-actions-example","testing/taskserv-validation-guide.html#troubleshooting","testing/taskserv-validation-guide.html#shellcheck-not-found","testing/taskserv-validation-guide.html#dockerpodman-not-available","testing/taskserv-validation-guide.html#nickel-type-checking-errors","testing/taskserv-validation-guide.html#dependency-conflicts","testing/taskserv-validation-guide.html#advanced-usage","testing/taskserv-validation-guide.html#custom-validation-scripts","testing/taskserv-validation-guide.html#batch-validation","testing/taskserv-validation-guide.html#automated-testing","testing/taskserv-validation-guide.html#best-practices","testing/taskserv-validation-guide.html#before-deployment","testing/taskserv-validation-guide.html#during-development","testing/taskserv-validation-guide.html#in-cicd","testing/taskserv-validation-guide.html#related-documentation","testing/taskserv-validation-guide.html#version-history","troubleshooting/troubleshooting-guide.html#troubleshooting-guide","troubleshooting/troubleshooting-guide.html#what-youll-learn","troubleshooting/troubleshooting-guide.html#general-troubleshooting-approach","troubleshooting/troubleshooting-guide.html#1-identify-the-problem","troubleshooting/troubleshooting-guide.html#2-gather-information","troubleshooting/troubleshooting-guide.html#3-use-diagnostic-commands","troubleshooting/troubleshooting-guide.html#installation-and-setup-issues","troubleshooting/troubleshooting-guide.html#issue-installation-fails","troubleshooting/troubleshooting-guide.html#issue-command-not-found","troubleshooting/troubleshooting-guide.html#issue-nushell-plugin-errors","troubleshooting/troubleshooting-guide.html#configuration-issues","troubleshooting/troubleshooting-guide.html#issue-configuration-not-found","troubleshooting/troubleshooting-guide.html#issue-configuration-validation-errors","troubleshooting/troubleshooting-guide.html#issue-interpolation-failures","troubleshooting/troubleshooting-guide.html#server-management-issues","troubleshooting/troubleshooting-guide.html#issue-server-creation-fails","troubleshooting/troubleshooting-guide.html#issue-ssh-access-fails","troubleshooting/troubleshooting-guide.html#task-service-issues","troubleshooting/troubleshooting-guide.html#issue-service-installation-fails","troubleshooting/troubleshooting-guide.html#issue-service-not-running","troubleshooting/troubleshooting-guide.html#cluster-management-issues","troubleshooting/troubleshooting-guide.html#issue-cluster-deployment-fails","troubleshooting/troubleshooting-guide.html#performance-issues","troubleshooting/troubleshooting-guide.html#issue-slow-operations","troubleshooting/troubleshooting-guide.html#issue-high-memory-usage","troubleshooting/troubleshooting-guide.html#network-and-connectivity-issues","troubleshooting/troubleshooting-guide.html#issue-api-connectivity-problems","troubleshooting/troubleshooting-guide.html#security-and-encryption-issues","troubleshooting/troubleshooting-guide.html#issue-sops-decryption-fails","troubleshooting/troubleshooting-guide.html#issue-access-denied-errors","troubleshooting/troubleshooting-guide.html#data-and-storage-issues","troubleshooting/troubleshooting-guide.html#issue-disk-space-problems","troubleshooting/troubleshooting-guide.html#recovery-procedures","troubleshooting/troubleshooting-guide.html#configuration-recovery","troubleshooting/troubleshooting-guide.html#infrastructure-recovery","troubleshooting/troubleshooting-guide.html#service-recovery","troubleshooting/troubleshooting-guide.html#prevention-strategies","troubleshooting/troubleshooting-guide.html#regular-maintenance","troubleshooting/troubleshooting-guide.html#monitoring-setup","troubleshooting/troubleshooting-guide.html#best-practices","troubleshooting/troubleshooting-guide.html#getting-additional-help","troubleshooting/troubleshooting-guide.html#debug-information-collection","troubleshooting/troubleshooting-guide.html#support-channels","guides/from-scratch.html#complete-deployment-guide-from-scratch-to-production","guides/from-scratch.html#table-of-contents","guides/from-scratch.html#prerequisites","guides/from-scratch.html#recommended-hardware","guides/from-scratch.html#step-1-install-nushell","guides/from-scratch.html#macos-via-homebrew","guides/from-scratch.html#linux-via-package-manager","guides/from-scratch.html#linuxmacos-via-cargo","guides/from-scratch.html#windows-via-winget","guides/from-scratch.html#configure-nushell","guides/from-scratch.html#step-2-install-nushell-plugins-recommended","guides/from-scratch.html#why-install-plugins","guides/from-scratch.html#prerequisites-for-building-plugins","guides/from-scratch.html#build-plugins","guides/from-scratch.html#register-plugins-with-nushell","guides/from-scratch.html#verify-plugin-installation","guides/from-scratch.html#configure-plugin-environments","guides/from-scratch.html#test-plugins-quick-smoke-test","guides/from-scratch.html#skip-plugins-not-recommended","guides/from-scratch.html#step-3-install-required-tools","guides/from-scratch.html#essential-tools","guides/from-scratch.html#optional-but-recommended-tools","guides/from-scratch.html#step-4-clone-and-setup-project","guides/from-scratch.html#clone-repository","guides/from-scratch.html#add-cli-to-path-optional","guides/from-scratch.html#step-5-initialize-workspace","guides/from-scratch.html#create-new-workspace","guides/from-scratch.html#workspace-configuration-format","guides/from-scratch.html#verify-workspace","guides/from-scratch.html#view-and-validate-workspace-configuration","guides/from-scratch.html#step-6-configure-environment","guides/from-scratch.html#set-provider-credentials","guides/from-scratch.html#encrypt-sensitive-data","guides/from-scratch.html#configure-local-overrides","guides/from-scratch.html#step-7-discover-and-load-modules","guides/from-scratch.html#discover-available-modules","guides/from-scratch.html#load-modules-into-workspace","guides/from-scratch.html#step-8-validate-configuration","guides/from-scratch.html#step-9-deploy-servers","guides/from-scratch.html#preview-server-creation-dry-run","guides/from-scratch.html#create-servers","guides/from-scratch.html#verify-server-creation","guides/from-scratch.html#step-10-install-task-services","guides/from-scratch.html#install-kubernetes-check-mode-first","guides/from-scratch.html#install-kubernetes","guides/from-scratch.html#install-additional-services","guides/from-scratch.html#step-11-create-clusters","guides/from-scratch.html#create-buildkit-cluster-check-mode","guides/from-scratch.html#create-buildkit-cluster","guides/from-scratch.html#verify-cluster","guides/from-scratch.html#step-12-verify-deployment","guides/from-scratch.html#comprehensive-health-check","guides/from-scratch.html#run-validation-tests","guides/from-scratch.html#expected-results","guides/from-scratch.html#step-13-post-deployment","guides/from-scratch.html#configure-kubectl-access","guides/from-scratch.html#set-up-monitoring-optional","guides/from-scratch.html#configure-cicd-integration-optional","guides/from-scratch.html#backup-configuration","guides/from-scratch.html#troubleshooting","guides/from-scratch.html#server-creation-fails","guides/from-scratch.html#taskserv-installation-fails","guides/from-scratch.html#plugin-commands-dont-work","guides/from-scratch.html#kms-encryption-fails","guides/from-scratch.html#orchestrator-not-running","guides/from-scratch.html#configuration-validation-errors","guides/from-scratch.html#next-steps","guides/from-scratch.html#explore-advanced-features","guides/from-scratch.html#learn-more","guides/from-scratch.html#get-help","guides/from-scratch.html#summary","guides/update-infrastructure.html#update-existing-infrastructure","guides/update-infrastructure.html#overview","guides/update-infrastructure.html#update-strategies","guides/update-infrastructure.html#strategy-1-in-place-updates-fastest","guides/update-infrastructure.html#strategy-2-rolling-updates-recommended","guides/update-infrastructure.html#strategy-3-blue-green-deployment-safest","guides/update-infrastructure.html#step-1-check-for-updates","guides/update-infrastructure.html#11-check-all-task-services","guides/update-infrastructure.html#12-check-specific-task-service","guides/update-infrastructure.html#13-check-version-status","guides/update-infrastructure.html#14-check-for-security-updates","guides/update-infrastructure.html#step-2-plan-your-update","guides/update-infrastructure.html#21-review-current-configuration","guides/update-infrastructure.html#22-backup-configuration","guides/update-infrastructure.html#23-create-update-plan","guides/update-infrastructure.html#step-3-update-task-services","guides/update-infrastructure.html#31-update-non-critical-service-cilium-example","guides/update-infrastructure.html#32-update-critical-service-kubernetes-example","guides/update-infrastructure.html#33-update-database-postgresql-example","guides/update-infrastructure.html#step-4-update-multiple-services","guides/update-infrastructure.html#41-batch-update-sequentially","guides/update-infrastructure.html#42-parallel-update-non-dependent-services","guides/update-infrastructure.html#step-5-update-server-configuration","guides/update-infrastructure.html#51-update-server-resources","guides/update-infrastructure.html#52-update-server-os","guides/update-infrastructure.html#step-6-rollback-procedures","guides/update-infrastructure.html#61-rollback-task-service","guides/update-infrastructure.html#62-rollback-from-backup","guides/update-infrastructure.html#63-emergency-rollback","guides/update-infrastructure.html#step-7-post-update-verification","guides/update-infrastructure.html#71-verify-all-components","guides/update-infrastructure.html#72-verify-version-updates","guides/update-infrastructure.html#73-run-integration-tests","guides/update-infrastructure.html#74-monitor-for-issues","guides/update-infrastructure.html#update-checklist","guides/update-infrastructure.html#common-update-scenarios","guides/update-infrastructure.html#scenario-1-minor-security-patch","guides/update-infrastructure.html#scenario-2-major-version-upgrade","guides/update-infrastructure.html#scenario-3-emergency-hotfix","guides/update-infrastructure.html#troubleshooting-updates","guides/update-infrastructure.html#issue-update-fails-mid-process","guides/update-infrastructure.html#issue-service-not-starting-after-update","guides/update-infrastructure.html#issue-data-migration-fails","guides/update-infrastructure.html#best-practices","guides/update-infrastructure.html#next-steps","guides/update-infrastructure.html#quick-reference","guides/customize-infrastructure.html#customize-infrastructure","guides/customize-infrastructure.html#overview","guides/customize-infrastructure.html#the-layer-system","guides/customize-infrastructure.html#understanding-layers","guides/customize-infrastructure.html#view-layer-resolution","guides/customize-infrastructure.html#test-layer-resolution","guides/customize-infrastructure.html#using-templates","guides/customize-infrastructure.html#list-available-templates","guides/customize-infrastructure.html#view-template-details","guides/customize-infrastructure.html#apply-template","guides/customize-infrastructure.html#validate-template-usage","guides/customize-infrastructure.html#creating-custom-templates","guides/customize-infrastructure.html#step-1-create-template-structure","guides/customize-infrastructure.html#step-2-write-template-configuration","guides/customize-infrastructure.html#step-3-create-template-metadata","guides/customize-infrastructure.html#step-4-test-custom-template","guides/customize-infrastructure.html#configuration-inheritance-examples","guides/customize-infrastructure.html#example-1-override-single-value","guides/customize-infrastructure.html#example-2-add-custom-configuration","guides/customize-infrastructure.html#example-3-environment-specific-configuration","guides/customize-infrastructure.html#advanced-customization-patterns","guides/customize-infrastructure.html#pattern-1-multi-environment-setup","guides/customize-infrastructure.html#pattern-2-shared-configuration-library","guides/customize-infrastructure.html#pattern-3-dynamic-configuration","guides/customize-infrastructure.html#pattern-4-conditional-configuration","guides/customize-infrastructure.html#layer-statistics","guides/customize-infrastructure.html#customization-workflow","guides/customize-infrastructure.html#complete-customization-example","guides/customize-infrastructure.html#best-practices","guides/customize-infrastructure.html#1-use-layers-correctly","guides/customize-infrastructure.html#2-template-organization","guides/customize-infrastructure.html#3-documentation","guides/customize-infrastructure.html#4-version-control","guides/customize-infrastructure.html#troubleshooting-customizations","guides/customize-infrastructure.html#issue-configuration-not-applied","guides/customize-infrastructure.html#issue-conflicting-configurations","guides/customize-infrastructure.html#issue-template-not-found","guides/customize-infrastructure.html#next-steps","guides/customize-infrastructure.html#quick-reference","guides/infrastructure-setup.html#infrastructure-setup-quick-reference","guides/infrastructure-setup.html#quick-start","guides/infrastructure-setup.html#1-generate-infrastructure-configs-solo-mode","guides/infrastructure-setup.html#2-validate-generated-configs","guides/infrastructure-setup.html#3-compare-solo-vs-enterprise","guides/infrastructure-setup.html#infrastructure-components","guides/infrastructure-setup.html#available-schemas-6","guides/infrastructure-setup.html#configuration-examples-2","guides/infrastructure-setup.html#automation-scripts-3","guides/infrastructure-setup.html#workflow-platform-config--infrastructure-config","guides/infrastructure-setup.html#two-tier-configuration-system","guides/infrastructure-setup.html#complete-deployment-workflow","guides/infrastructure-setup.html#resource-allocation-reference","guides/infrastructure-setup.html#solo-mode-development","guides/infrastructure-setup.html#enterprise-mode-production","guides/infrastructure-setup.html#common-tasks","guides/infrastructure-setup.html#generate-solo-infrastructure","guides/infrastructure-setup.html#generate-enterprise-infrastructure","guides/infrastructure-setup.html#validate-json-structure","guides/infrastructure-setup.html#check-resource-limits","guides/infrastructure-setup.html#compare-modes","guides/infrastructure-setup.html#validation-commands","guides/infrastructure-setup.html#type-check-schemas","guides/infrastructure-setup.html#validate-examples","guides/infrastructure-setup.html#test-export","guides/infrastructure-setup.html#platform-config-examples","guides/infrastructure-setup.html#solo-platform-config","guides/infrastructure-setup.html#enterprise-platform-config","guides/infrastructure-setup.html#configuration-files-reference","guides/infrastructure-setup.html#platform-configs-services-internally","guides/infrastructure-setup.html#infrastructure-schemas","guides/infrastructure-setup.html#typedialog-integration","guides/infrastructure-setup.html#automation-scripts","guides/infrastructure-setup.html#integration-status","guides/infrastructure-setup.html#next-steps","guides/infrastructure-setup.html#now-available","guides/infrastructure-setup.html#when-typedialog-binary-becomes-available","guides/infrastructure-setup.html#key-files","guides/extension-development-quickstart.html#extension-development-quick-start-guide","guides/extension-development-quickstart.html#prerequisites","guides/extension-development-quickstart.html#quick-start-creating-your-first-extension","guides/extension-development-quickstart.html#step-1-create-extension-from-template","guides/extension-development-quickstart.html#step-2-navigate-and-customize","guides/extension-development-quickstart.html#step-3-customize-configuration","guides/extension-development-quickstart.html#step-4-test-your-extension","guides/extension-development-quickstart.html#step-5-use-in-workspace","guides/extension-development-quickstart.html#common-extension-patterns","guides/extension-development-quickstart.html#database-service-extension","guides/extension-development-quickstart.html#monitoring-service-extension","guides/extension-development-quickstart.html#legacy-system-integration","guides/extension-development-quickstart.html#advanced-customization","guides/extension-development-quickstart.html#custom-provider-development","guides/extension-development-quickstart.html#complete-infrastructure-stack","guides/extension-development-quickstart.html#testing-and-validation","guides/extension-development-quickstart.html#local-testing-workflow","guides/extension-development-quickstart.html#continuous-integration-testing","guides/extension-development-quickstart.html#best-practices-summary","guides/extension-development-quickstart.html#1-extension-design","guides/extension-development-quickstart.html#2-dependencies","guides/extension-development-quickstart.html#3-security","guides/extension-development-quickstart.html#4-documentation","guides/extension-development-quickstart.html#5-testing","guides/extension-development-quickstart.html#common-issues-and-solutions","guides/extension-development-quickstart.html#extension-not-discovered","guides/extension-development-quickstart.html#nickel-type-errors","guides/extension-development-quickstart.html#loading-failures","guides/extension-development-quickstart.html#next-steps","guides/extension-development-quickstart.html#support","guides/guide-system.html#interactive-guides-and-quick-reference-v330","guides/guide-system.html#-guide-system-added-2025-09-30","guides/guide-system.html#available-guides","guides/guide-system.html#guide-features","guides/guide-system.html#recommended-setup","guides/guide-system.html#quick-start-with-guides","guides/guide-system.html#guide-content","guides/guide-system.html#access-from-help-system","guides/guide-system.html#guide-shortcuts","guides/guide-system.html#documentation-location","guides/workspace-generation-quick-reference.html#workspace-generation---quick-reference","guides/workspace-generation-quick-reference.html#quick-start-create-a-workspace","guides/workspace-generation-quick-reference.html#what-gets-created-automatically","guides/workspace-generation-quick-reference.html#key-files-created","guides/workspace-generation-quick-reference.html#master-configuration-configconfigncl","guides/workspace-generation-quick-reference.html#infrastructure-infradefaultmainncl","guides/workspace-generation-quick-reference.html#auto-generated-guides","guides/workspace-generation-quick-reference.html#initialization-process-8-steps","guides/workspace-generation-quick-reference.html#common-commands","guides/workspace-generation-quick-reference.html#workspace-management","guides/workspace-generation-quick-reference.html#configuration","guides/workspace-generation-quick-reference.html#deployment","guides/workspace-generation-quick-reference.html#workspace-directory-structure","guides/workspace-generation-quick-reference.html#auto-generated-structure","guides/workspace-generation-quick-reference.html#customization-guide","guides/workspace-generation-quick-reference.html#edit-configuration","guides/workspace-generation-quick-reference.html#add-multiple-infrastructures","guides/workspace-generation-quick-reference.html#configure-providers","guides/workspace-generation-quick-reference.html#next-steps","guides/workspace-generation-quick-reference.html#documentation-references","guides/multi-provider-deployment.html#multi-provider-deployment-guide","guides/multi-provider-deployment.html#table-of-contents","guides/multi-provider-deployment.html#overview","guides/multi-provider-deployment.html#supported-providers","guides/multi-provider-deployment.html#key-concepts","guides/multi-provider-deployment.html#why-multiple-providers","guides/multi-provider-deployment.html#cost-optimization","guides/multi-provider-deployment.html#high-availability-and-disaster-recovery","guides/multi-provider-deployment.html#compliance-and-data-residency","guides/multi-provider-deployment.html#vendor-lock-in-avoidance","guides/multi-provider-deployment.html#performance-and-latency","guides/multi-provider-deployment.html#provider-selection-strategy","guides/multi-provider-deployment.html#decision-framework","guides/multi-provider-deployment.html#regional-availability","guides/multi-provider-deployment.html#cost-analysis","guides/multi-provider-deployment.html#compliance-and-certifications","guides/multi-provider-deployment.html#workspace-configuration","guides/multi-provider-deployment.html#multi-provider-workspace-structure","guides/multi-provider-deployment.html#provider-credential-management","guides/multi-provider-deployment.html#multi-provider-workspace-definition","guides/multi-provider-deployment.html#architecture-patterns","guides/multi-provider-deployment.html#pattern-1-compute--storage-split","guides/multi-provider-deployment.html#pattern-2-primary--backup","guides/multi-provider-deployment.html#pattern-3-multi-region-high-availability","guides/multi-provider-deployment.html#pattern-4-hybrid-cloud","guides/multi-provider-deployment.html#implementation-examples","guides/multi-provider-deployment.html#example-1-three-provider-web-application","guides/multi-provider-deployment.html#example-2-multi-region-disaster-recovery","guides/multi-provider-deployment.html#example-3-cost-optimized-deployment","guides/multi-provider-deployment.html#best-practices","guides/multi-provider-deployment.html#1-provider-selection","guides/multi-provider-deployment.html#2-network-security","guides/multi-provider-deployment.html#3-data-consistency","guides/multi-provider-deployment.html#4-disaster-recovery","guides/multi-provider-deployment.html#5-compliance-and-governance","guides/multi-provider-deployment.html#6-monitoring-and-alerting","guides/multi-provider-deployment.html#7-cost-management","guides/multi-provider-deployment.html#troubleshooting","guides/multi-provider-deployment.html#issue-network-connectivity-between-providers","guides/multi-provider-deployment.html#issue-database-replication-lag","guides/multi-provider-deployment.html#issue-failover-not-working","guides/multi-provider-deployment.html#issue-cost-spike-across-providers","guides/multi-provider-deployment.html#conclusion","guides/multi-provider-networking.html#multi-provider-networking-guide","guides/multi-provider-networking.html#table-of-contents","guides/multi-provider-networking.html#overview","guides/multi-provider-networking.html#architecture","guides/multi-provider-networking.html#provider-sdnprivate-network-solutions","guides/multi-provider-networking.html#hetzner-vswitch","guides/multi-provider-networking.html#upcloud-vlan-virtual-lan","guides/multi-provider-networking.html#aws-vpc-virtual-private-cloud","guides/multi-provider-networking.html#digitalocean-vpc-virtual-private-cloud","guides/multi-provider-networking.html#private-network-configuration","guides/multi-provider-networking.html#hetzner-vswitch-configuration-nickel","guides/multi-provider-networking.html#aws-vpc-configuration-nickel","guides/multi-provider-networking.html#digitalocean-vpc-configuration-nickel","guides/multi-provider-networking.html#vpn-tunnel-setup","guides/multi-provider-networking.html#ipsec-vpn-between-providers","guides/multi-provider-networking.html#wireguard-vpn-alternative-simpler","guides/multi-provider-networking.html#multi-provider-routing","guides/multi-provider-networking.html#define-cross-provider-routes-nickel","guides/multi-provider-networking.html#static-routes-on-hetzner","guides/multi-provider-networking.html#aws-route-tables","guides/multi-provider-networking.html#security-considerations","guides/multi-provider-networking.html#1-encryption","guides/multi-provider-networking.html#2-firewall-rules","guides/multi-provider-networking.html#3-network-segmentation","guides/multi-provider-networking.html#4-dns-security","guides/multi-provider-networking.html#implementation-examples","guides/multi-provider-networking.html#complete-multi-provider-network-setup-nushell","guides/multi-provider-networking.html#troubleshooting","guides/multi-provider-networking.html#issue-no-connectivity-between-providers","guides/multi-provider-networking.html#issue-high-latency-between-providers","guides/multi-provider-networking.html#issue-dns-not-resolving-across-providers","guides/multi-provider-networking.html#issue-vpn-tunnel-drops","guides/multi-provider-networking.html#summary","guides/provider-digitalocean.html#digitalocean-provider-guide","guides/provider-digitalocean.html#table-of-contents","guides/provider-digitalocean.html#overview","guides/provider-digitalocean.html#digitalocean-pricing-model","guides/provider-digitalocean.html#supported-resources","guides/provider-digitalocean.html#why-digitalocean","guides/provider-digitalocean.html#when-to-choose-digitalocean","guides/provider-digitalocean.html#cost-comparison","guides/provider-digitalocean.html#setup-and-configuration","guides/provider-digitalocean.html#prerequisites","guides/provider-digitalocean.html#step-1-create-digitalocean-api-token","guides/provider-digitalocean.html#step-2-configure-environment-variables","guides/provider-digitalocean.html#step-3-verify-configuration","guides/provider-digitalocean.html#step-4-configure-workspace","guides/provider-digitalocean.html#available-resources","guides/provider-digitalocean.html#1-droplets-compute","guides/provider-digitalocean.html#2-volumes-block-storage","guides/provider-digitalocean.html#3-spaces-object-storage","guides/provider-digitalocean.html#4-load-balancer","guides/provider-digitalocean.html#5-managed-databases","guides/provider-digitalocean.html#6-kubernetes-doks","guides/provider-digitalocean.html#7-cdn","guides/provider-digitalocean.html#8-domains-and-dns","guides/provider-digitalocean.html#9-vpc-virtual-private-cloud","guides/provider-digitalocean.html#10-firewall","guides/provider-digitalocean.html#nickel-schema-reference","guides/provider-digitalocean.html#droplet-configuration","guides/provider-digitalocean.html#load-balancer-configuration","guides/provider-digitalocean.html#volume-configuration","guides/provider-digitalocean.html#managed-database-configuration","guides/provider-digitalocean.html#configuration-examples","guides/provider-digitalocean.html#example-1-simple-web-server","guides/provider-digitalocean.html#example-2-web-application-with-database","guides/provider-digitalocean.html#example-3-high-performance-storage","guides/provider-digitalocean.html#best-practices","guides/provider-digitalocean.html#1-droplet-management","guides/provider-digitalocean.html#2-firewall-configuration","guides/provider-digitalocean.html#3-database-best-practices","guides/provider-digitalocean.html#4-volume-management","guides/provider-digitalocean.html#5-load-balancer-configuration","guides/provider-digitalocean.html#6-cost-optimization","guides/provider-digitalocean.html#troubleshooting","guides/provider-digitalocean.html#issue-droplet-not-accessible","guides/provider-digitalocean.html#issue-volume-not-mounting","guides/provider-digitalocean.html#issue-load-balancer-health-checks-failing","guides/provider-digitalocean.html#issue-database-connection-issues","guides/provider-digitalocean.html#summary","guides/provider-hetzner.html#hetzner-provider-guide","guides/provider-hetzner.html#table-of-contents","guides/provider-hetzner.html#overview","guides/provider-hetzner.html#hetzner-pricing-model","guides/provider-hetzner.html#price-comparison-2-vcpu-4-gb-ram","guides/provider-hetzner.html#supported-resources","guides/provider-hetzner.html#why-hetzner","guides/provider-hetzner.html#when-to-choose-hetzner","guides/provider-hetzner.html#cost-advantages","guides/provider-hetzner.html#setup-and-configuration","guides/provider-hetzner.html#prerequisites","guides/provider-hetzner.html#step-1-create-hetzner-api-token","guides/provider-hetzner.html#step-2-configure-environment-variables","guides/provider-hetzner.html#step-3-install-hcloud-cli-optional","guides/provider-hetzner.html#step-4-configure-ssh-key","guides/provider-hetzner.html#step-5-configure-workspace","guides/provider-hetzner.html#available-resources","guides/provider-hetzner.html#1-cloud-servers-compute","guides/provider-hetzner.html#2-volumes-block-storage","guides/provider-hetzner.html#3-object-storage","guides/provider-hetzner.html#4-floating-ips","guides/provider-hetzner.html#5-load-balancer","guides/provider-hetzner.html#6-networkvswitch","guides/provider-hetzner.html#7-firewall","guides/provider-hetzner.html#nickel-schema-reference","guides/provider-hetzner.html#cloud-server-configuration","guides/provider-hetzner.html#volume-configuration","guides/provider-hetzner.html#load-balancer-configuration","guides/provider-hetzner.html#firewall-configuration","guides/provider-hetzner.html#configuration-examples","guides/provider-hetzner.html#example-1-single-server-web-server","guides/provider-hetzner.html#example-2-web-application-with-load-balancer-and-storage","guides/provider-hetzner.html#example-3-high-performance-compute-cluster","guides/provider-hetzner.html#best-practices","guides/provider-hetzner.html#1-server-selection-and-sizing","guides/provider-hetzner.html#2-network-architecture","guides/provider-hetzner.html#3-storage-strategy","guides/provider-hetzner.html#4-firewall-configuration","guides/provider-hetzner.html#5-monitoring-and-health-checks","guides/provider-hetzner.html#6-cost-optimization","guides/provider-hetzner.html#troubleshooting","guides/provider-hetzner.html#issue-cannot-connect-to-server","guides/provider-hetzner.html#issue-volume-attachment-failed","guides/provider-hetzner.html#issue-high-data-transfer-costs","guides/provider-hetzner.html#issue-load-balancer-not-routing-traffic","guides/provider-hetzner.html#summary","../examples/workspaces/multi-provider-web-app/index.html#multi-provider-web-app-workspace","../examples/workspaces/multi-region-ha/index.html#multi-region-high-availability-workspace","../examples/workspaces/cost-optimized/index.html#cost-optimized-multi-provider-workspace","quick-reference/master.html#quick-reference-master-index","quick-reference/master.html#available-quick-references","quick-reference/master.html#topic-specific-guides-with-embedded-quick-references","quick-reference/master.html#using-quick-references","quick-reference/platform-operations-cheatsheet.html#platform-operations-cheatsheet","quick-reference/platform-operations-cheatsheet.html#mode-selection-one-command","quick-reference/platform-operations-cheatsheet.html#service-ports--endpoints","quick-reference/platform-operations-cheatsheet.html#service-startup-order-matters","quick-reference/platform-operations-cheatsheet.html#quick-checks-all-services","quick-reference/platform-operations-cheatsheet.html#configuration-management","quick-reference/platform-operations-cheatsheet.html#view-config-files","quick-reference/platform-operations-cheatsheet.html#apply-config-changes","quick-reference/platform-operations-cheatsheet.html#service-control","quick-reference/platform-operations-cheatsheet.html#stop-services","quick-reference/platform-operations-cheatsheet.html#restart-services","quick-reference/platform-operations-cheatsheet.html#check-logs","quick-reference/platform-operations-cheatsheet.html#database-management","quick-reference/platform-operations-cheatsheet.html#surrealdb-multiuserenterprise","quick-reference/platform-operations-cheatsheet.html#etcd-enterprise-ha","quick-reference/platform-operations-cheatsheet.html#environment-variable-overrides","quick-reference/platform-operations-cheatsheet.html#override-individual-settings","quick-reference/platform-operations-cheatsheet.html#health--status-checks","quick-reference/platform-operations-cheatsheet.html#quick-status-30-seconds","quick-reference/platform-operations-cheatsheet.html#detailed-status","quick-reference/platform-operations-cheatsheet.html#performance--monitoring","quick-reference/platform-operations-cheatsheet.html#system-resources","quick-reference/platform-operations-cheatsheet.html#service-performance","quick-reference/platform-operations-cheatsheet.html#troubleshooting-quick-fixes","quick-reference/platform-operations-cheatsheet.html#service-wont-start","quick-reference/platform-operations-cheatsheet.html#high-memory-usage","quick-reference/platform-operations-cheatsheet.html#database-connection-error","quick-reference/platform-operations-cheatsheet.html#services-not-communicating","quick-reference/platform-operations-cheatsheet.html#emergency-procedures","quick-reference/platform-operations-cheatsheet.html#full-service-recovery","quick-reference/platform-operations-cheatsheet.html#rollback-to-previous-configuration","quick-reference/platform-operations-cheatsheet.html#data-recovery","quick-reference/platform-operations-cheatsheet.html#file-locations","quick-reference/platform-operations-cheatsheet.html#mode-quick-reference-matrix","quick-reference/platform-operations-cheatsheet.html#common-command-patterns","quick-reference/platform-operations-cheatsheet.html#deploy-mode-change","quick-reference/platform-operations-cheatsheet.html#restart-single-service-without-downtime","quick-reference/platform-operations-cheatsheet.html#scale-workers-for-load","quick-reference/platform-operations-cheatsheet.html#diagnostic-bundle","quick-reference/platform-operations-cheatsheet.html#essential-references","quick-reference/general.html#rag-system---quick-reference-guide","quick-reference/general.html#-what-you-have","quick-reference/general.html#complete-rag-system","quick-reference/general.html#key-files","quick-reference/general.html#-quick-start","quick-reference/general.html#build--test","quick-reference/general.html#run-example","quick-reference/general.html#check-tests","quick-reference/general.html#-documentation-files","quick-reference/general.html#-configuration","quick-reference/general.html#environment-variables","quick-reference/general.html#surrealdb","quick-reference/general.html#model","quick-reference/general.html#-key-capabilities","quick-reference/general.html#1-ask-questions","quick-reference/general.html#2-semantic-search","quick-reference/general.html#3-workspace-awareness","quick-reference/general.html#4-mcp-integration","quick-reference/general.html#-performance","quick-reference/general.html#-whats-working","quick-reference/general.html#-whats-not-implemented-phase-7","quick-reference/general.html#-next-steps","quick-reference/general.html#this-week","quick-reference/general.html#next-week-phase-7a","quick-reference/general.html#weeks-3-4-phase-7b","quick-reference/general.html#-how-to-use","quick-reference/general.html#as-a-library","quick-reference/general.html#via-mcp-server-when-enabled","quick-reference/general.html#from-cli-via-example","quick-reference/general.html#-integration-points","quick-reference/general.html#current","quick-reference/general.html#future-phase-7","quick-reference/general.html#-known-issues","quick-reference/general.html#-metrics","quick-reference/general.html#code-quality","quick-reference/general.html#performance","quick-reference/general.html#-tips","quick-reference/general.html#for-development","quick-reference/general.html#for-deployment","quick-reference/general.html#for-debugging","quick-reference/general.html#-learning-resources","quick-reference/general.html#-architecture-overview","quick-reference/general.html#-security","quick-reference/general.html#-support","quick-reference/justfile-recipes.html#justfile-recipes---quick-reference","quick-reference/justfile-recipes.html#authentication-authjust","quick-reference/justfile-recipes.html#kms-kmsjust","quick-reference/justfile-recipes.html#orchestrator-orchestratorjust","quick-reference/justfile-recipes.html#plugin-testing","quick-reference/justfile-recipes.html#common-workflows","quick-reference/justfile-recipes.html#complete-authentication-setup","quick-reference/justfile-recipes.html#production-deployment-workflow","quick-reference/justfile-recipes.html#kms-setup-and-testing","quick-reference/justfile-recipes.html#monitoring-operations","quick-reference/justfile-recipes.html#cleanup-operations","quick-reference/justfile-recipes.html#tips","quick-reference/justfile-recipes.html#recipe-count","quick-reference/justfile-recipes.html#documentation","quick-reference/oci.html#oci-registry-quick-reference","quick-reference/oci.html#prerequisites","quick-reference/oci.html#quick-start-5-minutes","quick-reference/oci.html#common-commands","quick-reference/oci.html#extension-discovery","quick-reference/oci.html#extension-installation","quick-reference/oci.html#extension-publishing","quick-reference/oci.html#dependency-management","quick-reference/oci.html#configuration-templates","quick-reference/oci.html#workspace-oci-configuration","quick-reference/oci.html#extension-manifest","quick-reference/oci.html#extension-development-workflow","quick-reference/oci.html#registry-management","quick-reference/oci.html#local-registry-development","quick-reference/oci.html#remote-registry-production","quick-reference/oci.html#migration-from-monorepo","quick-reference/oci.html#troubleshooting","quick-reference/oci.html#registry-not-running","quick-reference/oci.html#authentication-failed","quick-reference/oci.html#extension-not-found","quick-reference/oci.html#dependency-resolution-failed","quick-reference/oci.html#best-practices","quick-reference/oci.html#versioning","quick-reference/oci.html#dependencies","quick-reference/oci.html#security","quick-reference/oci.html#common-patterns","quick-reference/oci.html#pull-and-install","quick-reference/oci.html#update-extensions","quick-reference/oci.html#copy-between-registries","quick-reference/oci.html#publish-multiple-extensions","quick-reference/oci.html#environment-variables","quick-reference/oci.html#file-locations","quick-reference/oci.html#reference-links","quick-reference/sudo-password-handling.html#sudo-password-handling---quick-reference","quick-reference/sudo-password-handling.html#when-sudo-is-required","quick-reference/sudo-password-handling.html#quick-solutions","quick-reference/sudo-password-handling.html#-best-cache-credentials-first","quick-reference/sudo-password-handling.html#-alternative-disable-host-fixing","quick-reference/sudo-password-handling.html#-manual-enter-password-when-prompted","quick-reference/sudo-password-handling.html#ctrl-c-handling","quick-reference/sudo-password-handling.html#ctrl-c-behavior","quick-reference/sudo-password-handling.html#graceful-handling-non-ctrl-c-cancellation","quick-reference/sudo-password-handling.html#recommended-approach","quick-reference/sudo-password-handling.html#common-commands","quick-reference/sudo-password-handling.html#troubleshooting","quick-reference/sudo-password-handling.html#environment-specific-settings","quick-reference/sudo-password-handling.html#development-local","quick-reference/sudo-password-handling.html#cicd-automation","quick-reference/sudo-password-handling.html#production-servers","quick-reference/sudo-password-handling.html#what-fix_local_hosts-does","quick-reference/sudo-password-handling.html#security-note","configuration/config-validation.html#configuration-validation-guide","configuration/config-validation.html#overview","configuration/config-validation.html#schema-validation-features","configuration/config-validation.html#1-required-fields-validation","configuration/config-validation.html#2-type-validation","configuration/config-validation.html#3-enum-validation","configuration/config-validation.html#4-range-validation","configuration/config-validation.html#5-pattern-validation","configuration/config-validation.html#6-deprecated-fields","configuration/config-validation.html#using-schema-validator","configuration/config-validation.html#command-line","configuration/config-validation.html#programmatic-usage","configuration/config-validation.html#pretty-print-results","configuration/config-validation.html#schema-examples","configuration/config-validation.html#workspace-schema","configuration/config-validation.html#provider-schema-aws","configuration/config-validation.html#platform-service-schema-orchestrator","configuration/config-validation.html#kms-service-schema","configuration/config-validation.html#validation-workflow","configuration/config-validation.html#1-development","configuration/config-validation.html#2-cicd-pipeline","configuration/config-validation.html#3-pre-deployment","configuration/config-validation.html#error-messages","configuration/config-validation.html#clear-error-format","configuration/config-validation.html#error-details","configuration/config-validation.html#common-validation-patterns","configuration/config-validation.html#pattern-1-hostname-validation","configuration/config-validation.html#pattern-2-email-validation","configuration/config-validation.html#pattern-3-semantic-version","configuration/config-validation.html#pattern-4-url-validation","configuration/config-validation.html#pattern-5-ipv4-address","configuration/config-validation.html#pattern-6-aws-resource-id","configuration/config-validation.html#testing-validation","configuration/config-validation.html#unit-tests","configuration/config-validation.html#integration-tests","configuration/config-validation.html#custom-validation","configuration/config-validation.html#best-practices","configuration/config-validation.html#1-validate-early","configuration/config-validation.html#2-use-strict-schemas","configuration/config-validation.html#3-document-patterns","configuration/config-validation.html#4-handle-deprecation","configuration/config-validation.html#5-test-schemas","configuration/config-validation.html#troubleshooting","configuration/config-validation.html#schema-file-not-found","configuration/config-validation.html#pattern-not-matching","configuration/config-validation.html#type-mismatch","configuration/config-validation.html#additional-resources"],"index":{"documentStore":{"docInfo":{"0":{"body":47,"breadcrumbs":4,"title":3},"1":{"body":0,"breadcrumbs":3,"title":2},"10":{"body":7,"breadcrumbs":2,"title":1},"100":{"body":73,"breadcrumbs":8,"title":5},"1000":{"body":0,"breadcrumbs":4,"title":1},"1001":{"body":13,"breadcrumbs":6,"title":3},"1002":{"body":11,"breadcrumbs":5,"title":2},"1003":{"body":14,"breadcrumbs":5,"title":2},"1004":{"body":31,"breadcrumbs":5,"title":2},"1005":{"body":9,"breadcrumbs":6,"title":4},"1006":{"body":43,"breadcrumbs":3,"title":1},"1007":{"body":0,"breadcrumbs":3,"title":1},"1008":{"body":76,"breadcrumbs":5,"title":3},"1009":{"body":0,"breadcrumbs":3,"title":1},"101":{"body":60,"breadcrumbs":9,"title":6},"1010":{"body":74,"breadcrumbs":5,"title":3},"1011":{"body":94,"breadcrumbs":5,"title":3},"1012":{"body":88,"breadcrumbs":5,"title":3},"1013":{"body":90,"breadcrumbs":5,"title":3},"1014":{"body":76,"breadcrumbs":5,"title":3},"1015":{"body":4,"breadcrumbs":5,"title":3},"1016":{"body":28,"breadcrumbs":5,"title":3},"1017":{"body":35,"breadcrumbs":3,"title":1},"1018":{"body":27,"breadcrumbs":3,"title":1},"1019":{"body":72,"breadcrumbs":4,"title":2},"102":{"body":33,"breadcrumbs":6,"title":3},"1020":{"body":0,"breadcrumbs":3,"title":1},"1021":{"body":24,"breadcrumbs":4,"title":2},"1022":{"body":31,"breadcrumbs":4,"title":2},"1023":{"body":36,"breadcrumbs":4,"title":2},"1024":{"body":27,"breadcrumbs":4,"title":2},"1025":{"body":34,"breadcrumbs":4,"title":2},"1026":{"body":0,"breadcrumbs":4,"title":2},"1027":{"body":24,"breadcrumbs":4,"title":2},"1028":{"body":13,"breadcrumbs":3,"title":1},"1029":{"body":14,"breadcrumbs":3,"title":1},"103":{"body":5,"breadcrumbs":6,"title":3},"1030":{"body":0,"breadcrumbs":3,"title":1},"1031":{"body":17,"breadcrumbs":4,"title":2},"1032":{"body":4,"breadcrumbs":4,"title":2},"1033":{"body":31,"breadcrumbs":3,"title":1},"1034":{"body":44,"breadcrumbs":4,"title":2},"1035":{"body":23,"breadcrumbs":4,"title":2},"1036":{"body":32,"breadcrumbs":3,"title":1},"1037":{"body":22,"breadcrumbs":8,"title":5},"1038":{"body":21,"breadcrumbs":5,"title":2},"1039":{"body":40,"breadcrumbs":4,"title":1},"104":{"body":20,"breadcrumbs":6,"title":3},"1040":{"body":0,"breadcrumbs":4,"title":1},"1041":{"body":40,"breadcrumbs":8,"title":5},"1042":{"body":51,"breadcrumbs":7,"title":4},"1043":{"body":55,"breadcrumbs":7,"title":4},"1044":{"body":0,"breadcrumbs":5,"title":2},"1045":{"body":59,"breadcrumbs":6,"title":3},"1046":{"body":28,"breadcrumbs":5,"title":2},"1047":{"body":0,"breadcrumbs":5,"title":2},"1048":{"body":24,"breadcrumbs":6,"title":3},"1049":{"body":35,"breadcrumbs":6,"title":3},"105":{"body":30,"breadcrumbs":8,"title":5},"1050":{"body":0,"breadcrumbs":5,"title":2},"1051":{"body":41,"breadcrumbs":7,"title":4},"1052":{"body":32,"breadcrumbs":6,"title":3},"1053":{"body":22,"breadcrumbs":6,"title":3},"1054":{"body":35,"breadcrumbs":7,"title":4},"1055":{"body":0,"breadcrumbs":5,"title":2},"1056":{"body":26,"breadcrumbs":5,"title":2},"1057":{"body":17,"breadcrumbs":5,"title":2},"1058":{"body":18,"breadcrumbs":5,"title":2},"1059":{"body":0,"breadcrumbs":5,"title":2},"106":{"body":16,"breadcrumbs":6,"title":3},"1060":{"body":36,"breadcrumbs":5,"title":2},"1061":{"body":0,"breadcrumbs":4,"title":1},"1062":{"body":53,"breadcrumbs":5,"title":2},"1063":{"body":25,"breadcrumbs":5,"title":2},"1064":{"body":0,"breadcrumbs":5,"title":2},"1065":{"body":13,"breadcrumbs":6,"title":3},"1066":{"body":14,"breadcrumbs":6,"title":3},"1067":{"body":15,"breadcrumbs":5,"title":2},"1068":{"body":14,"breadcrumbs":5,"title":2},"1069":{"body":4,"breadcrumbs":5,"title":2},"107":{"body":34,"breadcrumbs":7,"title":4},"1070":{"body":5,"breadcrumbs":7,"title":4},"1071":{"body":5,"breadcrumbs":7,"title":4},"1072":{"body":3,"breadcrumbs":6,"title":3},"1073":{"body":13,"breadcrumbs":7,"title":4},"1074":{"body":6,"breadcrumbs":6,"title":3},"1075":{"body":5,"breadcrumbs":5,"title":2},"1076":{"body":17,"breadcrumbs":5,"title":2},"1077":{"body":0,"breadcrumbs":7,"title":4},"1078":{"body":17,"breadcrumbs":4,"title":1},"1079":{"body":0,"breadcrumbs":5,"title":2},"108":{"body":45,"breadcrumbs":7,"title":4},"1080":{"body":69,"breadcrumbs":8,"title":5},"1081":{"body":29,"breadcrumbs":9,"title":6},"1082":{"body":30,"breadcrumbs":8,"title":5},"1083":{"body":0,"breadcrumbs":5,"title":2},"1084":{"body":36,"breadcrumbs":4,"title":1},"1085":{"body":22,"breadcrumbs":5,"title":2},"1086":{"body":29,"breadcrumbs":6,"title":3},"1087":{"body":0,"breadcrumbs":5,"title":2},"1088":{"body":12,"breadcrumbs":7,"title":4},"1089":{"body":14,"breadcrumbs":7,"title":4},"109":{"body":24,"breadcrumbs":9,"title":6},"1090":{"body":17,"breadcrumbs":7,"title":4},"1091":{"body":0,"breadcrumbs":5,"title":2},"1092":{"body":23,"breadcrumbs":7,"title":4},"1093":{"body":28,"breadcrumbs":6,"title":3},"1094":{"body":18,"breadcrumbs":6,"title":3},"1095":{"body":0,"breadcrumbs":5,"title":2},"1096":{"body":25,"breadcrumbs":5,"title":2},"1097":{"body":17,"breadcrumbs":5,"title":2},"1098":{"body":0,"breadcrumbs":5,"title":2},"1099":{"body":13,"breadcrumbs":9,"title":6},"11":{"body":11,"breadcrumbs":3,"title":2},"110":{"body":15,"breadcrumbs":7,"title":4},"1100":{"body":15,"breadcrumbs":7,"title":4},"1101":{"body":14,"breadcrumbs":8,"title":5},"1102":{"body":0,"breadcrumbs":5,"title":2},"1103":{"body":11,"breadcrumbs":8,"title":5},"1104":{"body":12,"breadcrumbs":6,"title":3},"1105":{"body":28,"breadcrumbs":5,"title":2},"1106":{"body":20,"breadcrumbs":4,"title":1},"1107":{"body":19,"breadcrumbs":8,"title":5},"1108":{"body":0,"breadcrumbs":7,"title":4},"1109":{"body":14,"breadcrumbs":4,"title":1},"111":{"body":3,"breadcrumbs":7,"title":4},"1110":{"body":7,"breadcrumbs":6,"title":3},"1111":{"body":0,"breadcrumbs":9,"title":6},"1112":{"body":13,"breadcrumbs":8,"title":5},"1113":{"body":34,"breadcrumbs":8,"title":5},"1114":{"body":63,"breadcrumbs":10,"title":7},"1115":{"body":40,"breadcrumbs":7,"title":4},"1116":{"body":36,"breadcrumbs":6,"title":3},"1117":{"body":0,"breadcrumbs":10,"title":7},"1118":{"body":5,"breadcrumbs":6,"title":3},"1119":{"body":27,"breadcrumbs":5,"title":2},"112":{"body":17,"breadcrumbs":7,"title":4},"1120":{"body":23,"breadcrumbs":5,"title":2},"1121":{"body":93,"breadcrumbs":6,"title":3},"1122":{"body":37,"breadcrumbs":6,"title":3},"1123":{"body":66,"breadcrumbs":6,"title":3},"1124":{"body":0,"breadcrumbs":10,"title":7},"1125":{"body":18,"breadcrumbs":6,"title":3},"1126":{"body":64,"breadcrumbs":5,"title":2},"1127":{"body":80,"breadcrumbs":5,"title":2},"1128":{"body":52,"breadcrumbs":6,"title":3},"1129":{"body":0,"breadcrumbs":8,"title":5},"113":{"body":11,"breadcrumbs":8,"title":5},"1130":{"body":87,"breadcrumbs":5,"title":2},"1131":{"body":60,"breadcrumbs":7,"title":4},"1132":{"body":0,"breadcrumbs":8,"title":5},"1133":{"body":41,"breadcrumbs":5,"title":2},"1134":{"body":66,"breadcrumbs":5,"title":2},"1135":{"body":0,"breadcrumbs":7,"title":4},"1136":{"body":109,"breadcrumbs":5,"title":2},"1137":{"body":0,"breadcrumbs":6,"title":3},"1138":{"body":68,"breadcrumbs":6,"title":3},"1139":{"body":0,"breadcrumbs":5,"title":2},"114":{"body":26,"breadcrumbs":8,"title":5},"1140":{"body":16,"breadcrumbs":7,"title":4},"1141":{"body":25,"breadcrumbs":7,"title":4},"1142":{"body":13,"breadcrumbs":9,"title":6},"1143":{"body":48,"breadcrumbs":4,"title":1},"1144":{"body":77,"breadcrumbs":4,"title":2},"1145":{"body":264,"breadcrumbs":6,"title":4},"1146":{"body":8,"breadcrumbs":7,"title":4},"1147":{"body":24,"breadcrumbs":4,"title":1},"1148":{"body":0,"breadcrumbs":4,"title":1},"1149":{"body":95,"breadcrumbs":6,"title":3},"115":{"body":82,"breadcrumbs":7,"title":4},"1150":{"body":0,"breadcrumbs":5,"title":2},"1151":{"body":88,"breadcrumbs":8,"title":5},"1152":{"body":59,"breadcrumbs":8,"title":5},"1153":{"body":72,"breadcrumbs":8,"title":5},"1154":{"body":68,"breadcrumbs":8,"title":5},"1155":{"body":87,"breadcrumbs":8,"title":5},"1156":{"body":72,"breadcrumbs":8,"title":5},"1157":{"body":0,"breadcrumbs":5,"title":2},"1158":{"body":54,"breadcrumbs":6,"title":3},"1159":{"body":92,"breadcrumbs":5,"title":2},"116":{"body":36,"breadcrumbs":5,"title":2},"1160":{"body":0,"breadcrumbs":5,"title":2},"1161":{"body":45,"breadcrumbs":5,"title":2},"1162":{"body":0,"breadcrumbs":6,"title":3},"1163":{"body":225,"breadcrumbs":6,"title":3},"1164":{"body":0,"breadcrumbs":4,"title":1},"1165":{"body":23,"breadcrumbs":5,"title":2},"1166":{"body":10,"breadcrumbs":5,"title":2},"1167":{"body":0,"breadcrumbs":4,"title":1},"1168":{"body":41,"breadcrumbs":5,"title":2},"1169":{"body":48,"breadcrumbs":5,"title":2},"117":{"body":33,"breadcrumbs":4,"title":1},"1170":{"body":0,"breadcrumbs":4,"title":1},"1171":{"body":23,"breadcrumbs":4,"title":1},"1172":{"body":15,"breadcrumbs":4,"title":1},"1173":{"body":17,"breadcrumbs":4,"title":1},"1174":{"body":55,"breadcrumbs":5,"title":2},"1175":{"body":16,"breadcrumbs":5,"title":2},"1176":{"body":20,"breadcrumbs":5,"title":2},"1177":{"body":12,"breadcrumbs":7,"title":4},"1178":{"body":29,"breadcrumbs":5,"title":2},"1179":{"body":0,"breadcrumbs":6,"title":3},"118":{"body":15,"breadcrumbs":5,"title":3},"1180":{"body":58,"breadcrumbs":4,"title":1},"1181":{"body":81,"breadcrumbs":5,"title":2},"1182":{"body":0,"breadcrumbs":5,"title":2},"1183":{"body":380,"breadcrumbs":6,"title":3},"1184":{"body":33,"breadcrumbs":5,"title":2},"1185":{"body":0,"breadcrumbs":5,"title":2},"1186":{"body":115,"breadcrumbs":5,"title":2},"1187":{"body":89,"breadcrumbs":5,"title":2},"1188":{"body":25,"breadcrumbs":5,"title":2},"1189":{"body":0,"breadcrumbs":5,"title":2},"119":{"body":17,"breadcrumbs":4,"title":2},"1190":{"body":35,"breadcrumbs":6,"title":3},"1191":{"body":411,"breadcrumbs":6,"title":3},"1192":{"body":309,"breadcrumbs":5,"title":2},"1193":{"body":0,"breadcrumbs":5,"title":2},"1194":{"body":641,"breadcrumbs":5,"title":2},"1195":{"body":204,"breadcrumbs":8,"title":5},"1196":{"body":0,"breadcrumbs":5,"title":2},"1197":{"body":158,"breadcrumbs":10,"title":7},"1198":{"body":118,"breadcrumbs":11,"title":8},"1199":{"body":86,"breadcrumbs":10,"title":7},"12":{"body":108,"breadcrumbs":3,"title":2},"120":{"body":16,"breadcrumbs":3,"title":1},"1200":{"body":88,"breadcrumbs":11,"title":8},"1201":{"body":50,"breadcrumbs":9,"title":6},"1202":{"body":0,"breadcrumbs":5,"title":2},"1203":{"body":41,"breadcrumbs":5,"title":2},"1204":{"body":18,"breadcrumbs":4,"title":1},"1205":{"body":0,"breadcrumbs":5,"title":2},"1206":{"body":25,"breadcrumbs":5,"title":2},"1207":{"body":20,"breadcrumbs":5,"title":2},"1208":{"body":19,"breadcrumbs":4,"title":1},"1209":{"body":15,"breadcrumbs":4,"title":1},"121":{"body":0,"breadcrumbs":4,"title":2},"1210":{"body":13,"breadcrumbs":4,"title":1},"1211":{"body":0,"breadcrumbs":5,"title":2},"1212":{"body":23,"breadcrumbs":9,"title":6},"1213":{"body":14,"breadcrumbs":8,"title":5},"1214":{"body":17,"breadcrumbs":8,"title":5},"1215":{"body":18,"breadcrumbs":8,"title":5},"1216":{"body":19,"breadcrumbs":7,"title":4},"1217":{"body":64,"breadcrumbs":5,"title":2},"1218":{"body":46,"breadcrumbs":5,"title":2},"1219":{"body":62,"breadcrumbs":4,"title":1},"122":{"body":22,"breadcrumbs":5,"title":3},"1220":{"body":24,"breadcrumbs":4,"title":1},"1221":{"body":18,"breadcrumbs":7,"title":4},"1222":{"body":26,"breadcrumbs":4,"title":1},"1223":{"body":0,"breadcrumbs":4,"title":1},"1224":{"body":9,"breadcrumbs":5,"title":2},"1225":{"body":16,"breadcrumbs":5,"title":2},"1226":{"body":0,"breadcrumbs":4,"title":1},"1227":{"body":17,"breadcrumbs":4,"title":1},"1228":{"body":4,"breadcrumbs":5,"title":2},"1229":{"body":0,"breadcrumbs":5,"title":2},"123":{"body":29,"breadcrumbs":4,"title":2},"1230":{"body":17,"breadcrumbs":8,"title":5},"1231":{"body":106,"breadcrumbs":9,"title":6},"1232":{"body":41,"breadcrumbs":8,"title":5},"1233":{"body":9,"breadcrumbs":9,"title":6},"1234":{"body":24,"breadcrumbs":7,"title":4},"1235":{"body":0,"breadcrumbs":9,"title":6},"1236":{"body":6,"breadcrumbs":4,"title":1},"1237":{"body":36,"breadcrumbs":9,"title":6},"1238":{"body":234,"breadcrumbs":8,"title":5},"1239":{"body":31,"breadcrumbs":8,"title":5},"124":{"body":19,"breadcrumbs":4,"title":2},"1240":{"body":60,"breadcrumbs":8,"title":5},"1241":{"body":0,"breadcrumbs":10,"title":7},"1242":{"body":258,"breadcrumbs":6,"title":3},"1243":{"body":35,"breadcrumbs":5,"title":2},"1244":{"body":0,"breadcrumbs":5,"title":2},"1245":{"body":21,"breadcrumbs":6,"title":3},"1246":{"body":17,"breadcrumbs":5,"title":2},"1247":{"body":22,"breadcrumbs":5,"title":2},"1248":{"body":20,"breadcrumbs":4,"title":1},"1249":{"body":0,"breadcrumbs":4,"title":1},"125":{"body":0,"breadcrumbs":5,"title":3},"1250":{"body":29,"breadcrumbs":6,"title":3},"1251":{"body":18,"breadcrumbs":6,"title":3},"1252":{"body":0,"breadcrumbs":6,"title":3},"1253":{"body":34,"breadcrumbs":7,"title":4},"1254":{"body":41,"breadcrumbs":5,"title":2},"1255":{"body":0,"breadcrumbs":5,"title":2},"1256":{"body":20,"breadcrumbs":5,"title":2},"1257":{"body":27,"breadcrumbs":5,"title":2},"1258":{"body":16,"breadcrumbs":5,"title":2},"1259":{"body":42,"breadcrumbs":5,"title":2},"126":{"body":11,"breadcrumbs":6,"title":4},"1260":{"body":5,"breadcrumbs":4,"title":1},"1261":{"body":14,"breadcrumbs":8,"title":5},"1262":{"body":16,"breadcrumbs":9,"title":6},"1263":{"body":21,"breadcrumbs":8,"title":5},"1264":{"body":0,"breadcrumbs":4,"title":1},"1265":{"body":28,"breadcrumbs":9,"title":6},"1266":{"body":22,"breadcrumbs":6,"title":3},"1267":{"body":17,"breadcrumbs":7,"title":4},"1268":{"body":0,"breadcrumbs":8,"title":5},"1269":{"body":10,"breadcrumbs":8,"title":5},"127":{"body":24,"breadcrumbs":6,"title":4},"1270":{"body":36,"breadcrumbs":7,"title":4},"1271":{"body":10,"breadcrumbs":7,"title":4},"1272":{"body":11,"breadcrumbs":6,"title":3},"1273":{"body":15,"breadcrumbs":7,"title":4},"1274":{"body":57,"breadcrumbs":4,"title":1},"1275":{"body":0,"breadcrumbs":9,"title":5},"1276":{"body":1,"breadcrumbs":5,"title":1},"1277":{"body":105,"breadcrumbs":5,"title":1},"1278":{"body":47,"breadcrumbs":5,"title":1},"1279":{"body":45,"breadcrumbs":7,"title":3},"128":{"body":19,"breadcrumbs":7,"title":5},"1280":{"body":38,"breadcrumbs":6,"title":2},"1281":{"body":0,"breadcrumbs":5,"title":1},"1282":{"body":58,"breadcrumbs":5,"title":1},"1283":{"body":40,"breadcrumbs":5,"title":1},"1284":{"body":25,"breadcrumbs":5,"title":1},"1285":{"body":0,"breadcrumbs":6,"title":2},"1286":{"body":16,"breadcrumbs":8,"title":4},"1287":{"body":17,"breadcrumbs":8,"title":4},"1288":{"body":14,"breadcrumbs":9,"title":5},"1289":{"body":15,"breadcrumbs":9,"title":5},"129":{"body":7,"breadcrumbs":4,"title":2},"1290":{"body":15,"breadcrumbs":9,"title":5},"1291":{"body":20,"breadcrumbs":5,"title":1},"1292":{"body":0,"breadcrumbs":8,"title":4},"1293":{"body":1,"breadcrumbs":5,"title":1},"1294":{"body":103,"breadcrumbs":5,"title":1},"1295":{"body":10,"breadcrumbs":5,"title":1},"1296":{"body":59,"breadcrumbs":6,"title":2},"1297":{"body":64,"breadcrumbs":6,"title":2},"1298":{"body":50,"breadcrumbs":7,"title":3},"1299":{"body":0,"breadcrumbs":5,"title":1},"13":{"body":0,"breadcrumbs":3,"title":2},"130":{"body":38,"breadcrumbs":6,"title":4},"1300":{"body":65,"breadcrumbs":5,"title":1},"1301":{"body":43,"breadcrumbs":5,"title":1},"1302":{"body":24,"breadcrumbs":5,"title":1},"1303":{"body":0,"breadcrumbs":6,"title":2},"1304":{"body":18,"breadcrumbs":8,"title":4},"1305":{"body":16,"breadcrumbs":8,"title":4},"1306":{"body":16,"breadcrumbs":8,"title":4},"1307":{"body":16,"breadcrumbs":9,"title":5},"1308":{"body":14,"breadcrumbs":9,"title":5},"1309":{"body":0,"breadcrumbs":6,"title":2},"131":{"body":9,"breadcrumbs":6,"title":4},"1310":{"body":46,"breadcrumbs":7,"title":3},"1311":{"body":16,"breadcrumbs":6,"title":2},"1312":{"body":33,"breadcrumbs":6,"title":2},"1313":{"body":22,"breadcrumbs":5,"title":1},"1314":{"body":0,"breadcrumbs":8,"title":4},"1315":{"body":1,"breadcrumbs":5,"title":1},"1316":{"body":128,"breadcrumbs":5,"title":1},"1317":{"body":8,"breadcrumbs":5,"title":1},"1318":{"body":86,"breadcrumbs":6,"title":2},"1319":{"body":25,"breadcrumbs":8,"title":4},"132":{"body":13,"breadcrumbs":6,"title":4},"1320":{"body":57,"breadcrumbs":7,"title":3},"1321":{"body":0,"breadcrumbs":5,"title":1},"1322":{"body":65,"breadcrumbs":5,"title":1},"1323":{"body":44,"breadcrumbs":5,"title":1},"1324":{"body":26,"breadcrumbs":5,"title":1},"1325":{"body":0,"breadcrumbs":6,"title":2},"1326":{"body":19,"breadcrumbs":9,"title":5},"1327":{"body":16,"breadcrumbs":9,"title":5},"1328":{"body":16,"breadcrumbs":10,"title":6},"1329":{"body":16,"breadcrumbs":9,"title":5},"133":{"body":16,"breadcrumbs":8,"title":6},"1330":{"body":16,"breadcrumbs":9,"title":5},"1331":{"body":0,"breadcrumbs":6,"title":2},"1332":{"body":25,"breadcrumbs":6,"title":2},"1333":{"body":36,"breadcrumbs":7,"title":3},"1334":{"body":29,"breadcrumbs":6,"title":2},"1335":{"body":35,"breadcrumbs":6,"title":2},"1336":{"body":20,"breadcrumbs":5,"title":1},"1337":{"body":0,"breadcrumbs":8,"title":4},"1338":{"body":1,"breadcrumbs":5,"title":1},"1339":{"body":126,"breadcrumbs":5,"title":1},"134":{"body":17,"breadcrumbs":6,"title":4},"1340":{"body":7,"breadcrumbs":5,"title":1},"1341":{"body":97,"breadcrumbs":6,"title":2},"1342":{"body":70,"breadcrumbs":6,"title":2},"1343":{"body":49,"breadcrumbs":7,"title":3},"1344":{"body":0,"breadcrumbs":5,"title":1},"1345":{"body":68,"breadcrumbs":5,"title":1},"1346":{"body":43,"breadcrumbs":5,"title":1},"1347":{"body":29,"breadcrumbs":5,"title":1},"1348":{"body":0,"breadcrumbs":6,"title":2},"1349":{"body":20,"breadcrumbs":9,"title":5},"135":{"body":0,"breadcrumbs":4,"title":2},"1350":{"body":19,"breadcrumbs":9,"title":5},"1351":{"body":21,"breadcrumbs":9,"title":5},"1352":{"body":19,"breadcrumbs":8,"title":4},"1353":{"body":17,"breadcrumbs":9,"title":5},"1354":{"body":0,"breadcrumbs":6,"title":2},"1355":{"body":37,"breadcrumbs":6,"title":2},"1356":{"body":28,"breadcrumbs":6,"title":2},"1357":{"body":28,"breadcrumbs":6,"title":2},"1358":{"body":28,"breadcrumbs":6,"title":2},"1359":{"body":0,"breadcrumbs":6,"title":2},"136":{"body":21,"breadcrumbs":5,"title":3},"1360":{"body":13,"breadcrumbs":9,"title":5},"1361":{"body":12,"breadcrumbs":9,"title":5},"1362":{"body":10,"breadcrumbs":9,"title":5},"1363":{"body":24,"breadcrumbs":5,"title":1},"1364":{"body":0,"breadcrumbs":8,"title":4},"1365":{"body":1,"breadcrumbs":5,"title":1},"1366":{"body":111,"breadcrumbs":5,"title":1},"1367":{"body":8,"breadcrumbs":5,"title":1},"1368":{"body":42,"breadcrumbs":6,"title":2},"1369":{"body":52,"breadcrumbs":6,"title":2},"137":{"body":14,"breadcrumbs":4,"title":2},"1370":{"body":48,"breadcrumbs":7,"title":3},"1371":{"body":53,"breadcrumbs":7,"title":3},"1372":{"body":0,"breadcrumbs":5,"title":1},"1373":{"body":61,"breadcrumbs":5,"title":1},"1374":{"body":42,"breadcrumbs":5,"title":1},"1375":{"body":28,"breadcrumbs":5,"title":1},"1376":{"body":0,"breadcrumbs":6,"title":2},"1377":{"body":14,"breadcrumbs":9,"title":5},"1378":{"body":15,"breadcrumbs":9,"title":5},"1379":{"body":17,"breadcrumbs":9,"title":5},"138":{"body":23,"breadcrumbs":5,"title":3},"1380":{"body":13,"breadcrumbs":9,"title":5},"1381":{"body":15,"breadcrumbs":8,"title":4},"1382":{"body":0,"breadcrumbs":6,"title":2},"1383":{"body":38,"breadcrumbs":7,"title":3},"1384":{"body":51,"breadcrumbs":7,"title":3},"1385":{"body":32,"breadcrumbs":6,"title":2},"1386":{"body":34,"breadcrumbs":6,"title":2},"1387":{"body":26,"breadcrumbs":6,"title":2},"1388":{"body":0,"breadcrumbs":7,"title":3},"1389":{"body":35,"breadcrumbs":7,"title":3},"139":{"body":0,"breadcrumbs":4,"title":2},"1390":{"body":34,"breadcrumbs":8,"title":4},"1391":{"body":23,"breadcrumbs":5,"title":1},"1392":{"body":18,"breadcrumbs":12,"title":7},"1393":{"body":21,"breadcrumbs":6,"title":1},"1394":{"body":88,"breadcrumbs":7,"title":2},"1395":{"body":54,"breadcrumbs":6,"title":1},"1396":{"body":184,"breadcrumbs":7,"title":2},"1397":{"body":0,"breadcrumbs":7,"title":2},"1398":{"body":12,"breadcrumbs":8,"title":3},"1399":{"body":18,"breadcrumbs":8,"title":3},"14":{"body":19,"breadcrumbs":4,"title":3},"140":{"body":10,"breadcrumbs":4,"title":2},"1400":{"body":18,"breadcrumbs":10,"title":5},"1401":{"body":11,"breadcrumbs":8,"title":3},"1402":{"body":21,"breadcrumbs":8,"title":3},"1403":{"body":0,"breadcrumbs":7,"title":2},"1404":{"body":62,"breadcrumbs":10,"title":5},"1405":{"body":48,"breadcrumbs":9,"title":4},"1406":{"body":33,"breadcrumbs":7,"title":2},"1407":{"body":5,"breadcrumbs":6,"title":1},"1408":{"body":22,"breadcrumbs":7,"title":2},"1409":{"body":26,"breadcrumbs":7,"title":2},"141":{"body":21,"breadcrumbs":4,"title":2},"1410":{"body":0,"breadcrumbs":6,"title":1},"1411":{"body":59,"breadcrumbs":7,"title":2},"1412":{"body":65,"breadcrumbs":7,"title":2},"1413":{"body":0,"breadcrumbs":7,"title":2},"1414":{"body":33,"breadcrumbs":6,"title":1},"1415":{"body":29,"breadcrumbs":6,"title":1},"1416":{"body":0,"breadcrumbs":6,"title":1},"1417":{"body":37,"breadcrumbs":9,"title":4},"1418":{"body":21,"breadcrumbs":7,"title":2},"1419":{"body":0,"breadcrumbs":7,"title":2},"142":{"body":31,"breadcrumbs":4,"title":2},"1420":{"body":34,"breadcrumbs":7,"title":2},"1421":{"body":13,"breadcrumbs":8,"title":3},"1422":{"body":17,"breadcrumbs":7,"title":2},"1423":{"body":83,"breadcrumbs":6,"title":1},"1424":{"body":15,"breadcrumbs":12,"title":8},"1425":{"body":22,"breadcrumbs":5,"title":1},"1426":{"body":50,"breadcrumbs":8,"title":4},"1427":{"body":42,"breadcrumbs":6,"title":2},"1428":{"body":62,"breadcrumbs":5,"title":1},"1429":{"body":0,"breadcrumbs":5,"title":1},"143":{"body":0,"breadcrumbs":5,"title":3},"1430":{"body":57,"breadcrumbs":5,"title":1},"1431":{"body":26,"breadcrumbs":5,"title":1},"1432":{"body":20,"breadcrumbs":5,"title":1},"1433":{"body":0,"breadcrumbs":5,"title":1},"1434":{"body":29,"breadcrumbs":6,"title":2},"1435":{"body":36,"breadcrumbs":6,"title":2},"1436":{"body":17,"breadcrumbs":6,"title":2},"1437":{"body":39,"breadcrumbs":6,"title":2},"1438":{"body":0,"breadcrumbs":6,"title":2},"1439":{"body":32,"breadcrumbs":5,"title":1},"144":{"body":13,"breadcrumbs":4,"title":2},"1440":{"body":31,"breadcrumbs":5,"title":1},"1441":{"body":0,"breadcrumbs":6,"title":2},"1442":{"body":16,"breadcrumbs":9,"title":5},"1443":{"body":22,"breadcrumbs":8,"title":4},"1444":{"body":20,"breadcrumbs":9,"title":5},"1445":{"body":24,"breadcrumbs":9,"title":5},"1446":{"body":0,"breadcrumbs":5,"title":1},"1447":{"body":20,"breadcrumbs":6,"title":2},"1448":{"body":21,"breadcrumbs":6,"title":2},"1449":{"body":13,"breadcrumbs":6,"title":2},"145":{"body":15,"breadcrumbs":4,"title":2},"1450":{"body":0,"breadcrumbs":5,"title":1},"1451":{"body":28,"breadcrumbs":6,"title":2},"1452":{"body":29,"breadcrumbs":6,"title":2},"1453":{"body":21,"breadcrumbs":5,"title":1},"1454":{"body":33,"breadcrumbs":5,"title":1},"1455":{"body":15,"breadcrumbs":11,"title":7},"1456":{"body":63,"breadcrumbs":7,"title":3},"1457":{"body":39,"breadcrumbs":6,"title":2},"1458":{"body":0,"breadcrumbs":6,"title":2},"1459":{"body":33,"breadcrumbs":11,"title":7},"146":{"body":15,"breadcrumbs":5,"title":3},"1460":{"body":27,"breadcrumbs":10,"title":6},"1461":{"body":44,"breadcrumbs":10,"title":6},"1462":{"body":24,"breadcrumbs":7,"title":3},"1463":{"body":7,"breadcrumbs":6,"title":2},"1464":{"body":45,"breadcrumbs":5,"title":1},"1465":{"body":206,"breadcrumbs":6,"title":2},"1466":{"body":23,"breadcrumbs":6,"title":2},"1467":{"body":39,"breadcrumbs":7,"title":3},"1468":{"body":0,"breadcrumbs":5,"title":1},"1469":{"body":36,"breadcrumbs":5,"title":1},"147":{"body":0,"breadcrumbs":4,"title":2},"1470":{"body":23,"breadcrumbs":5,"title":1},"1471":{"body":20,"breadcrumbs":5,"title":1},"1472":{"body":0,"breadcrumbs":5,"title":1},"1473":{"body":23,"breadcrumbs":6,"title":2},"1474":{"body":17,"breadcrumbs":6,"title":2},"1475":{"body":0,"breadcrumbs":6,"title":2},"1476":{"body":15,"breadcrumbs":8,"title":4},"1477":{"body":18,"breadcrumbs":8,"title":4},"1478":{"body":22,"breadcrumbs":8,"title":4},"1479":{"body":0,"breadcrumbs":6,"title":2},"148":{"body":60,"breadcrumbs":6,"title":4},"1480":{"body":18,"breadcrumbs":10,"title":6},"1481":{"body":19,"breadcrumbs":8,"title":4},"1482":{"body":17,"breadcrumbs":5,"title":1},"1483":{"body":16,"breadcrumbs":6,"title":2},"1484":{"body":30,"breadcrumbs":5,"title":1},"1485":{"body":10,"breadcrumbs":11,"title":6},"1486":{"body":27,"breadcrumbs":6,"title":1},"1487":{"body":12,"breadcrumbs":6,"title":1},"1488":{"body":0,"breadcrumbs":7,"title":2},"1489":{"body":19,"breadcrumbs":7,"title":2},"149":{"body":23,"breadcrumbs":6,"title":4},"1490":{"body":0,"breadcrumbs":7,"title":2},"1491":{"body":155,"breadcrumbs":10,"title":5},"1492":{"body":113,"breadcrumbs":11,"title":6},"1493":{"body":128,"breadcrumbs":11,"title":6},"1494":{"body":102,"breadcrumbs":11,"title":6},"1495":{"body":0,"breadcrumbs":8,"title":3},"1496":{"body":53,"breadcrumbs":9,"title":4},"1497":{"body":31,"breadcrumbs":8,"title":3},"1498":{"body":0,"breadcrumbs":7,"title":2},"1499":{"body":30,"breadcrumbs":7,"title":2},"15":{"body":25,"breadcrumbs":4,"title":3},"150":{"body":28,"breadcrumbs":6,"title":4},"1500":{"body":15,"breadcrumbs":7,"title":2},"1501":{"body":7,"breadcrumbs":7,"title":2},"1502":{"body":18,"breadcrumbs":6,"title":1},"1503":{"body":0,"breadcrumbs":7,"title":2},"1504":{"body":23,"breadcrumbs":6,"title":1},"1505":{"body":17,"breadcrumbs":6,"title":1},"1506":{"body":16,"breadcrumbs":7,"title":2},"1507":{"body":19,"breadcrumbs":7,"title":2},"1508":{"body":15,"breadcrumbs":7,"title":2},"1509":{"body":60,"breadcrumbs":7,"title":2},"151":{"body":0,"breadcrumbs":4,"title":2},"1510":{"body":0,"breadcrumbs":7,"title":2},"1511":{"body":16,"breadcrumbs":6,"title":1},"1512":{"body":32,"breadcrumbs":6,"title":1},"1513":{"body":0,"breadcrumbs":6,"title":1},"1514":{"body":28,"breadcrumbs":7,"title":2},"1515":{"body":47,"breadcrumbs":7,"title":2},"1516":{"body":0,"breadcrumbs":6,"title":1},"1517":{"body":33,"breadcrumbs":7,"title":2},"1518":{"body":13,"breadcrumbs":7,"title":2},"1519":{"body":0,"breadcrumbs":7,"title":2},"152":{"body":26,"breadcrumbs":5,"title":3},"1520":{"body":29,"breadcrumbs":7,"title":2},"1521":{"body":27,"breadcrumbs":7,"title":2},"1522":{"body":0,"breadcrumbs":6,"title":1},"1523":{"body":13,"breadcrumbs":6,"title":1},"1524":{"body":15,"breadcrumbs":6,"title":1},"1525":{"body":17,"breadcrumbs":6,"title":1},"1526":{"body":11,"breadcrumbs":6,"title":1},"1527":{"body":0,"breadcrumbs":7,"title":2},"1528":{"body":64,"breadcrumbs":7,"title":2},"1529":{"body":0,"breadcrumbs":7,"title":2},"153":{"body":21,"breadcrumbs":5,"title":3},"1530":{"body":27,"breadcrumbs":7,"title":2},"1531":{"body":22,"breadcrumbs":7,"title":2},"1532":{"body":0,"breadcrumbs":6,"title":1},"1533":{"body":38,"breadcrumbs":6,"title":1},"1534":{"body":30,"breadcrumbs":6,"title":1},"1535":{"body":19,"breadcrumbs":6,"title":1},"1536":{"body":25,"breadcrumbs":7,"title":2},"1537":{"body":27,"breadcrumbs":6,"title":1},"1538":{"body":19,"breadcrumbs":11,"title":6},"1539":{"body":66,"breadcrumbs":6,"title":1},"154":{"body":0,"breadcrumbs":4,"title":2},"1540":{"body":45,"breadcrumbs":6,"title":1},"1541":{"body":0,"breadcrumbs":7,"title":2},"1542":{"body":21,"breadcrumbs":9,"title":4},"1543":{"body":50,"breadcrumbs":11,"title":6},"1544":{"body":37,"breadcrumbs":11,"title":6},"1545":{"body":0,"breadcrumbs":8,"title":3},"1546":{"body":121,"breadcrumbs":8,"title":3},"1547":{"body":64,"breadcrumbs":8,"title":3},"1548":{"body":60,"breadcrumbs":9,"title":4},"1549":{"body":65,"breadcrumbs":8,"title":3},"155":{"body":21,"breadcrumbs":5,"title":3},"1550":{"body":0,"breadcrumbs":7,"title":2},"1551":{"body":51,"breadcrumbs":7,"title":2},"1552":{"body":26,"breadcrumbs":7,"title":2},"1553":{"body":0,"breadcrumbs":9,"title":4},"1554":{"body":57,"breadcrumbs":8,"title":3},"1555":{"body":0,"breadcrumbs":6,"title":1},"1556":{"body":51,"breadcrumbs":6,"title":1},"1557":{"body":20,"breadcrumbs":7,"title":2},"1558":{"body":25,"breadcrumbs":7,"title":2},"1559":{"body":0,"breadcrumbs":8,"title":3},"156":{"body":11,"breadcrumbs":4,"title":2},"1560":{"body":36,"breadcrumbs":6,"title":1},"1561":{"body":26,"breadcrumbs":6,"title":1},"1562":{"body":14,"breadcrumbs":6,"title":1},"1563":{"body":0,"breadcrumbs":6,"title":1},"1564":{"body":45,"breadcrumbs":8,"title":3},"1565":{"body":18,"breadcrumbs":7,"title":2},"1566":{"body":38,"breadcrumbs":7,"title":2},"1567":{"body":15,"breadcrumbs":9,"title":5},"1568":{"body":28,"breadcrumbs":5,"title":1},"1569":{"body":116,"breadcrumbs":6,"title":2},"157":{"body":37,"breadcrumbs":4,"title":2},"1570":{"body":26,"breadcrumbs":6,"title":2},"1571":{"body":12,"breadcrumbs":5,"title":1},"1572":{"body":116,"breadcrumbs":6,"title":2},"1573":{"body":0,"breadcrumbs":6,"title":2},"1574":{"body":29,"breadcrumbs":6,"title":2},"1575":{"body":19,"breadcrumbs":7,"title":3},"1576":{"body":52,"breadcrumbs":6,"title":2},"1577":{"body":26,"breadcrumbs":7,"title":3},"1578":{"body":26,"breadcrumbs":6,"title":2},"1579":{"body":69,"breadcrumbs":8,"title":4},"158":{"body":0,"breadcrumbs":4,"title":2},"1580":{"body":0,"breadcrumbs":6,"title":2},"1581":{"body":55,"breadcrumbs":7,"title":3},"1582":{"body":25,"breadcrumbs":7,"title":3},"1583":{"body":55,"breadcrumbs":7,"title":3},"1584":{"body":0,"breadcrumbs":7,"title":3},"1585":{"body":118,"breadcrumbs":7,"title":3},"1586":{"body":0,"breadcrumbs":6,"title":2},"1587":{"body":68,"breadcrumbs":8,"title":4},"1588":{"body":0,"breadcrumbs":6,"title":2},"1589":{"body":11,"breadcrumbs":7,"title":3},"159":{"body":21,"breadcrumbs":5,"title":3},"1590":{"body":6,"breadcrumbs":8,"title":4},"1591":{"body":7,"breadcrumbs":6,"title":2},"1592":{"body":12,"breadcrumbs":6,"title":2},"1593":{"body":8,"breadcrumbs":6,"title":2},"1594":{"body":33,"breadcrumbs":6,"title":2},"1595":{"body":0,"breadcrumbs":5,"title":1},"1596":{"body":77,"breadcrumbs":5,"title":1},"1597":{"body":42,"breadcrumbs":5,"title":1},"1598":{"body":49,"breadcrumbs":5,"title":1},"1599":{"body":0,"breadcrumbs":6,"title":2},"16":{"body":20,"breadcrumbs":3,"title":2},"160":{"body":22,"breadcrumbs":4,"title":2},"1600":{"body":48,"breadcrumbs":8,"title":4},"1601":{"body":18,"breadcrumbs":7,"title":3},"1602":{"body":14,"breadcrumbs":6,"title":2},"1603":{"body":0,"breadcrumbs":6,"title":2},"1604":{"body":21,"breadcrumbs":6,"title":2},"1605":{"body":22,"breadcrumbs":6,"title":2},"1606":{"body":14,"breadcrumbs":6,"title":2},"1607":{"body":43,"breadcrumbs":5,"title":1},"1608":{"body":0,"breadcrumbs":15,"title":8},"1609":{"body":4,"breadcrumbs":8,"title":1},"161":{"body":18,"breadcrumbs":4,"title":2},"1610":{"body":41,"breadcrumbs":8,"title":1},"1611":{"body":37,"breadcrumbs":9,"title":2},"1612":{"body":9,"breadcrumbs":8,"title":1},"1613":{"body":50,"breadcrumbs":9,"title":2},"1614":{"body":55,"breadcrumbs":9,"title":2},"1615":{"body":0,"breadcrumbs":8,"title":1},"1616":{"body":56,"breadcrumbs":11,"title":4},"1617":{"body":71,"breadcrumbs":10,"title":3},"1618":{"body":29,"breadcrumbs":9,"title":2},"1619":{"body":37,"breadcrumbs":12,"title":5},"162":{"body":0,"breadcrumbs":4,"title":2},"1620":{"body":0,"breadcrumbs":8,"title":1},"1621":{"body":42,"breadcrumbs":8,"title":1},"1622":{"body":28,"breadcrumbs":8,"title":1},"1623":{"body":53,"breadcrumbs":9,"title":2},"1624":{"body":0,"breadcrumbs":9,"title":2},"1625":{"body":13,"breadcrumbs":14,"title":7},"1626":{"body":14,"breadcrumbs":14,"title":7},"1627":{"body":11,"breadcrumbs":11,"title":4},"1628":{"body":14,"breadcrumbs":12,"title":5},"1629":{"body":0,"breadcrumbs":9,"title":2},"163":{"body":20,"breadcrumbs":5,"title":3},"1630":{"body":39,"breadcrumbs":9,"title":2},"1631":{"body":29,"breadcrumbs":12,"title":5},"1632":{"body":36,"breadcrumbs":9,"title":2},"1633":{"body":34,"breadcrumbs":10,"title":3},"1634":{"body":51,"breadcrumbs":9,"title":2},"1635":{"body":19,"breadcrumbs":9,"title":2},"1636":{"body":36,"breadcrumbs":8,"title":1},"1637":{"body":0,"breadcrumbs":16,"title":9},"1638":{"body":4,"breadcrumbs":8,"title":1},"1639":{"body":35,"breadcrumbs":8,"title":1},"164":{"body":16,"breadcrumbs":4,"title":2},"1640":{"body":82,"breadcrumbs":10,"title":3},"1641":{"body":66,"breadcrumbs":12,"title":5},"1642":{"body":56,"breadcrumbs":11,"title":4},"1643":{"body":29,"breadcrumbs":8,"title":1},"1644":{"body":72,"breadcrumbs":9,"title":2},"1645":{"body":58,"breadcrumbs":9,"title":2},"1646":{"body":0,"breadcrumbs":8,"title":1},"1647":{"body":61,"breadcrumbs":11,"title":4},"1648":{"body":32,"breadcrumbs":9,"title":2},"1649":{"body":28,"breadcrumbs":9,"title":2},"165":{"body":13,"breadcrumbs":5,"title":3},"1650":{"body":61,"breadcrumbs":11,"title":4},"1651":{"body":0,"breadcrumbs":8,"title":1},"1652":{"body":54,"breadcrumbs":8,"title":1},"1653":{"body":34,"breadcrumbs":8,"title":1},"1654":{"body":77,"breadcrumbs":9,"title":2},"1655":{"body":0,"breadcrumbs":9,"title":2},"1656":{"body":14,"breadcrumbs":14,"title":7},"1657":{"body":18,"breadcrumbs":12,"title":5},"1658":{"body":14,"breadcrumbs":14,"title":7},"1659":{"body":17,"breadcrumbs":14,"title":7},"166":{"body":38,"breadcrumbs":5,"title":3},"1660":{"body":18,"breadcrumbs":13,"title":6},"1661":{"body":0,"breadcrumbs":9,"title":2},"1662":{"body":58,"breadcrumbs":10,"title":3},"1663":{"body":30,"breadcrumbs":9,"title":2},"1664":{"body":41,"breadcrumbs":10,"title":3},"1665":{"body":51,"breadcrumbs":9,"title":2},"1666":{"body":23,"breadcrumbs":10,"title":3},"1667":{"body":72,"breadcrumbs":9,"title":2},"1668":{"body":28,"breadcrumbs":9,"title":2},"1669":{"body":68,"breadcrumbs":9,"title":2},"167":{"body":0,"breadcrumbs":4,"title":2},"1670":{"body":77,"breadcrumbs":9,"title":2},"1671":{"body":56,"breadcrumbs":8,"title":1},"1672":{"body":0,"breadcrumbs":10,"title":6},"1673":{"body":4,"breadcrumbs":5,"title":1},"1674":{"body":45,"breadcrumbs":5,"title":1},"1675":{"body":52,"breadcrumbs":8,"title":4},"1676":{"body":76,"breadcrumbs":9,"title":5},"1677":{"body":76,"breadcrumbs":10,"title":6},"1678":{"body":62,"breadcrumbs":8,"title":4},"1679":{"body":8,"breadcrumbs":5,"title":1},"168":{"body":22,"breadcrumbs":5,"title":3},"1680":{"body":124,"breadcrumbs":6,"title":2},"1681":{"body":80,"breadcrumbs":6,"title":2},"1682":{"body":0,"breadcrumbs":5,"title":1},"1683":{"body":78,"breadcrumbs":6,"title":2},"1684":{"body":61,"breadcrumbs":7,"title":3},"1685":{"body":68,"breadcrumbs":8,"title":4},"1686":{"body":60,"breadcrumbs":8,"title":4},"1687":{"body":0,"breadcrumbs":5,"title":1},"1688":{"body":50,"breadcrumbs":5,"title":1},"1689":{"body":37,"breadcrumbs":5,"title":1},"169":{"body":6,"breadcrumbs":4,"title":2},"1690":{"body":97,"breadcrumbs":6,"title":2},"1691":{"body":0,"breadcrumbs":6,"title":2},"1692":{"body":16,"breadcrumbs":8,"title":4},"1693":{"body":21,"breadcrumbs":8,"title":4},"1694":{"body":21,"breadcrumbs":15,"title":11},"1695":{"body":14,"breadcrumbs":9,"title":5},"1696":{"body":18,"breadcrumbs":10,"title":6},"1697":{"body":0,"breadcrumbs":6,"title":2},"1698":{"body":33,"breadcrumbs":6,"title":2},"1699":{"body":57,"breadcrumbs":7,"title":3},"17":{"body":18,"breadcrumbs":4,"title":3},"170":{"body":5,"breadcrumbs":3,"title":1},"1700":{"body":28,"breadcrumbs":6,"title":2},"1701":{"body":19,"breadcrumbs":7,"title":3},"1702":{"body":36,"breadcrumbs":8,"title":4},"1703":{"body":31,"breadcrumbs":7,"title":3},"1704":{"body":26,"breadcrumbs":7,"title":3},"1705":{"body":36,"breadcrumbs":7,"title":3},"1706":{"body":90,"breadcrumbs":6,"title":2},"1707":{"body":46,"breadcrumbs":6,"title":2},"1708":{"body":86,"breadcrumbs":6,"title":2},"1709":{"body":69,"breadcrumbs":6,"title":2},"171":{"body":10,"breadcrumbs":5,"title":3},"1710":{"body":51,"breadcrumbs":5,"title":1},"1711":{"body":0,"breadcrumbs":13,"title":8},"1712":{"body":4,"breadcrumbs":6,"title":1},"1713":{"body":37,"breadcrumbs":6,"title":1},"1714":{"body":87,"breadcrumbs":8,"title":3},"1715":{"body":80,"breadcrumbs":8,"title":3},"1716":{"body":41,"breadcrumbs":8,"title":3},"1717":{"body":61,"breadcrumbs":8,"title":3},"1718":{"body":37,"breadcrumbs":6,"title":1},"1719":{"body":184,"breadcrumbs":7,"title":2},"172":{"body":17,"breadcrumbs":6,"title":4},"1720":{"body":182,"breadcrumbs":7,"title":2},"1721":{"body":0,"breadcrumbs":6,"title":1},"1722":{"body":43,"breadcrumbs":8,"title":3},"1723":{"body":78,"breadcrumbs":9,"title":4},"1724":{"body":64,"breadcrumbs":10,"title":5},"1725":{"body":95,"breadcrumbs":9,"title":4},"1726":{"body":54,"breadcrumbs":9,"title":4},"1727":{"body":0,"breadcrumbs":6,"title":1},"1728":{"body":53,"breadcrumbs":6,"title":1},"1729":{"body":44,"breadcrumbs":6,"title":1},"173":{"body":14,"breadcrumbs":6,"title":4},"1730":{"body":99,"breadcrumbs":7,"title":2},"1731":{"body":0,"breadcrumbs":7,"title":2},"1732":{"body":23,"breadcrumbs":9,"title":4},"1733":{"body":19,"breadcrumbs":14,"title":9},"1734":{"body":16,"breadcrumbs":9,"title":4},"1735":{"body":13,"breadcrumbs":12,"title":7},"1736":{"body":17,"breadcrumbs":11,"title":6},"1737":{"body":0,"breadcrumbs":7,"title":2},"1738":{"body":146,"breadcrumbs":8,"title":3},"1739":{"body":114,"breadcrumbs":8,"title":3},"174":{"body":22,"breadcrumbs":7,"title":5},"1740":{"body":118,"breadcrumbs":8,"title":3},"1741":{"body":77,"breadcrumbs":8,"title":3},"1742":{"body":98,"breadcrumbs":8,"title":3},"1743":{"body":71,"breadcrumbs":8,"title":3},"1744":{"body":114,"breadcrumbs":7,"title":2},"1745":{"body":87,"breadcrumbs":7,"title":2},"1746":{"body":102,"breadcrumbs":7,"title":2},"1747":{"body":71,"breadcrumbs":6,"title":1},"1748":{"body":10,"breadcrumbs":4,"title":3},"1749":{"body":28,"breadcrumbs":3,"title":2},"175":{"body":31,"breadcrumbs":6,"title":4},"1750":{"body":0,"breadcrumbs":4,"title":3},"1751":{"body":153,"breadcrumbs":4,"title":3},"1752":{"body":109,"breadcrumbs":4,"title":3},"1753":{"body":112,"breadcrumbs":4,"title":3},"1754":{"body":56,"breadcrumbs":3,"title":2},"1755":{"body":0,"breadcrumbs":4,"title":3},"1756":{"body":9,"breadcrumbs":3,"title":2},"1757":{"body":21,"breadcrumbs":4,"title":3},"1758":{"body":9,"breadcrumbs":4,"title":3},"1759":{"body":24,"breadcrumbs":4,"title":3},"176":{"body":21,"breadcrumbs":6,"title":4},"1760":{"body":19,"breadcrumbs":7,"title":4},"1761":{"body":29,"breadcrumbs":4,"title":1},"1762":{"body":0,"breadcrumbs":5,"title":2},"1763":{"body":55,"breadcrumbs":7,"title":4},"1764":{"body":37,"breadcrumbs":7,"title":4},"1765":{"body":36,"breadcrumbs":9,"title":6},"1766":{"body":31,"breadcrumbs":6,"title":3},"1767":{"body":31,"breadcrumbs":7,"title":4},"1768":{"body":28,"breadcrumbs":7,"title":4},"1769":{"body":28,"breadcrumbs":6,"title":3},"177":{"body":31,"breadcrumbs":4,"title":2},"1770":{"body":33,"breadcrumbs":6,"title":3},"1771":{"body":41,"breadcrumbs":4,"title":1},"1772":{"body":0,"breadcrumbs":5,"title":2},"1773":{"body":14,"breadcrumbs":8,"title":5},"1774":{"body":9,"breadcrumbs":8,"title":5},"1775":{"body":9,"breadcrumbs":8,"title":5},"1776":{"body":8,"breadcrumbs":8,"title":5},"1777":{"body":31,"breadcrumbs":5,"title":2},"1778":{"body":20,"breadcrumbs":4,"title":1},"1779":{"body":25,"breadcrumbs":5,"title":2},"178":{"body":7,"breadcrumbs":7,"title":4},"1780":{"body":16,"breadcrumbs":8,"title":5},"1781":{"body":0,"breadcrumbs":5,"title":2},"1782":{"body":30,"breadcrumbs":4,"title":1},"1783":{"body":139,"breadcrumbs":5,"title":2},"1784":{"body":0,"breadcrumbs":5,"title":2},"1785":{"body":40,"breadcrumbs":6,"title":3},"1786":{"body":42,"breadcrumbs":5,"title":2},"1787":{"body":0,"breadcrumbs":5,"title":2},"1788":{"body":8,"breadcrumbs":8,"title":5},"1789":{"body":10,"breadcrumbs":8,"title":5},"179":{"body":55,"breadcrumbs":5,"title":2},"1790":{"body":6,"breadcrumbs":8,"title":5},"1791":{"body":9,"breadcrumbs":7,"title":4},"1792":{"body":0,"breadcrumbs":6,"title":3},"1793":{"body":16,"breadcrumbs":4,"title":1},"1794":{"body":23,"breadcrumbs":6,"title":3},"1795":{"body":12,"breadcrumbs":6,"title":3},"1796":{"body":40,"breadcrumbs":6,"title":3},"1797":{"body":0,"breadcrumbs":4,"title":1},"1798":{"body":22,"breadcrumbs":5,"title":2},"1799":{"body":27,"breadcrumbs":5,"title":2},"18":{"body":0,"breadcrumbs":3,"title":2},"180":{"body":11,"breadcrumbs":5,"title":2},"1800":{"body":26,"breadcrumbs":5,"title":2},"1801":{"body":18,"breadcrumbs":5,"title":2},"1802":{"body":36,"breadcrumbs":5,"title":2},"1803":{"body":17,"breadcrumbs":8,"title":5},"1804":{"body":0,"breadcrumbs":5,"title":2},"1805":{"body":50,"breadcrumbs":7,"title":4},"1806":{"body":0,"breadcrumbs":6,"title":3},"1807":{"body":18,"breadcrumbs":4,"title":1},"1808":{"body":106,"breadcrumbs":5,"title":2},"1809":{"body":43,"breadcrumbs":8,"title":5},"181":{"body":87,"breadcrumbs":6,"title":3},"1810":{"body":37,"breadcrumbs":4,"title":1},"1811":{"body":0,"breadcrumbs":5,"title":2},"1812":{"body":42,"breadcrumbs":5,"title":2},"1813":{"body":10,"breadcrumbs":5,"title":2},"1814":{"body":35,"breadcrumbs":5,"title":2},"1815":{"body":37,"breadcrumbs":5,"title":2},"1816":{"body":67,"breadcrumbs":7,"title":4},"1817":{"body":19,"breadcrumbs":5,"title":2},"1818":{"body":44,"breadcrumbs":4,"title":1},"1819":{"body":8,"breadcrumbs":5,"title":3},"182":{"body":140,"breadcrumbs":6,"title":3},"1820":{"body":24,"breadcrumbs":3,"title":1},"1821":{"body":5,"breadcrumbs":4,"title":2},"1822":{"body":0,"breadcrumbs":3,"title":1},"1823":{"body":14,"breadcrumbs":4,"title":2},"1824":{"body":11,"breadcrumbs":5,"title":3},"1825":{"body":0,"breadcrumbs":5,"title":3},"1826":{"body":11,"breadcrumbs":4,"title":2},"1827":{"body":108,"breadcrumbs":4,"title":2},"1828":{"body":76,"breadcrumbs":4,"title":2},"1829":{"body":147,"breadcrumbs":4,"title":2},"183":{"body":72,"breadcrumbs":6,"title":3},"1830":{"body":112,"breadcrumbs":4,"title":2},"1831":{"body":146,"breadcrumbs":4,"title":2},"1832":{"body":0,"breadcrumbs":6,"title":4},"1833":{"body":72,"breadcrumbs":3,"title":1},"1834":{"body":112,"breadcrumbs":4,"title":2},"1835":{"body":76,"breadcrumbs":4,"title":2},"1836":{"body":58,"breadcrumbs":4,"title":2},"1837":{"body":11,"breadcrumbs":4,"title":2},"1838":{"body":39,"breadcrumbs":5,"title":3},"1839":{"body":44,"breadcrumbs":4,"title":2},"184":{"body":47,"breadcrumbs":6,"title":3},"1840":{"body":0,"breadcrumbs":4,"title":2},"1841":{"body":31,"breadcrumbs":3,"title":1},"1842":{"body":40,"breadcrumbs":4,"title":2},"1843":{"body":0,"breadcrumbs":4,"title":2},"1844":{"body":58,"breadcrumbs":5,"title":3},"1845":{"body":58,"breadcrumbs":5,"title":3},"1846":{"body":5,"breadcrumbs":4,"title":2},"1847":{"body":18,"breadcrumbs":4,"title":2},"1848":{"body":19,"breadcrumbs":4,"title":2},"1849":{"body":38,"breadcrumbs":3,"title":1},"185":{"body":0,"breadcrumbs":5,"title":2},"1850":{"body":10,"breadcrumbs":4,"title":2},"1851":{"body":25,"breadcrumbs":3,"title":1},"1852":{"body":15,"breadcrumbs":4,"title":3},"1853":{"body":26,"breadcrumbs":2,"title":1},"1854":{"body":0,"breadcrumbs":3,"title":2},"1855":{"body":44,"breadcrumbs":4,"title":3},"1856":{"body":35,"breadcrumbs":4,"title":3},"1857":{"body":0,"breadcrumbs":2,"title":1},"1858":{"body":26,"breadcrumbs":4,"title":3},"1859":{"body":28,"breadcrumbs":4,"title":3},"186":{"body":95,"breadcrumbs":5,"title":2},"1860":{"body":0,"breadcrumbs":4,"title":3},"1861":{"body":252,"breadcrumbs":4,"title":3},"1862":{"body":18,"breadcrumbs":4,"title":3},"1863":{"body":0,"breadcrumbs":5,"title":4},"1864":{"body":178,"breadcrumbs":3,"title":2},"1865":{"body":174,"breadcrumbs":5,"title":4},"1866":{"body":0,"breadcrumbs":4,"title":3},"1867":{"body":266,"breadcrumbs":4,"title":3},"1868":{"body":0,"breadcrumbs":4,"title":3},"1869":{"body":39,"breadcrumbs":4,"title":3},"187":{"body":86,"breadcrumbs":5,"title":2},"1870":{"body":16,"breadcrumbs":5,"title":4},"1871":{"body":0,"breadcrumbs":4,"title":3},"1872":{"body":66,"breadcrumbs":3,"title":2},"1873":{"body":54,"breadcrumbs":4,"title":3},"1874":{"body":0,"breadcrumbs":3,"title":2},"1875":{"body":22,"breadcrumbs":3,"title":2},"1876":{"body":9,"breadcrumbs":2,"title":1},"1877":{"body":20,"breadcrumbs":3,"title":2},"1878":{"body":0,"breadcrumbs":3,"title":2},"1879":{"body":16,"breadcrumbs":3,"title":2},"188":{"body":96,"breadcrumbs":5,"title":2},"1880":{"body":13,"breadcrumbs":3,"title":2},"1881":{"body":30,"breadcrumbs":3,"title":2},"1882":{"body":13,"breadcrumbs":4,"title":3},"1883":{"body":35,"breadcrumbs":2,"title":1},"1884":{"body":0,"breadcrumbs":3,"title":2},"1885":{"body":59,"breadcrumbs":4,"title":3},"1886":{"body":0,"breadcrumbs":4,"title":3},"1887":{"body":53,"breadcrumbs":3,"title":2},"1888":{"body":573,"breadcrumbs":4,"title":3},"1889":{"body":63,"breadcrumbs":3,"title":2},"189":{"body":70,"breadcrumbs":5,"title":2},"1890":{"body":0,"breadcrumbs":5,"title":4},"1891":{"body":32,"breadcrumbs":4,"title":3},"1892":{"body":429,"breadcrumbs":5,"title":4},"1893":{"body":0,"breadcrumbs":4,"title":3},"1894":{"body":33,"breadcrumbs":3,"title":2},"1895":{"body":550,"breadcrumbs":4,"title":3},"1896":{"body":0,"breadcrumbs":4,"title":3},"1897":{"body":24,"breadcrumbs":3,"title":2},"1898":{"body":41,"breadcrumbs":3,"title":2},"1899":{"body":37,"breadcrumbs":3,"title":2},"19":{"body":12,"breadcrumbs":3,"title":2},"190":{"body":45,"breadcrumbs":5,"title":2},"1900":{"body":0,"breadcrumbs":3,"title":2},"1901":{"body":127,"breadcrumbs":3,"title":2},"1902":{"body":14,"breadcrumbs":3,"title":2},"1903":{"body":0,"breadcrumbs":3,"title":2},"1904":{"body":19,"breadcrumbs":3,"title":2},"1905":{"body":32,"breadcrumbs":4,"title":3},"1906":{"body":0,"breadcrumbs":3,"title":2},"1907":{"body":49,"breadcrumbs":3,"title":2},"1908":{"body":30,"breadcrumbs":3,"title":2},"1909":{"body":40,"breadcrumbs":4,"title":3},"191":{"body":84,"breadcrumbs":5,"title":2},"1910":{"body":10,"breadcrumbs":3,"title":2},"1911":{"body":7,"breadcrumbs":3,"title":2},"1912":{"body":29,"breadcrumbs":3,"title":2},"1913":{"body":16,"breadcrumbs":3,"title":2},"1914":{"body":0,"breadcrumbs":3,"title":2},"1915":{"body":14,"breadcrumbs":2,"title":1},"1916":{"body":64,"breadcrumbs":3,"title":2},"1917":{"body":227,"breadcrumbs":3,"title":2},"1918":{"body":113,"breadcrumbs":3,"title":2},"1919":{"body":0,"breadcrumbs":3,"title":2},"192":{"body":33,"breadcrumbs":5,"title":2},"1920":{"body":12,"breadcrumbs":2,"title":1},"1921":{"body":55,"breadcrumbs":3,"title":2},"1922":{"body":186,"breadcrumbs":3,"title":2},"1923":{"body":256,"breadcrumbs":4,"title":3},"1924":{"body":78,"breadcrumbs":3,"title":2},"1925":{"body":0,"breadcrumbs":3,"title":2},"1926":{"body":4,"breadcrumbs":2,"title":1},"1927":{"body":103,"breadcrumbs":3,"title":2},"1928":{"body":97,"breadcrumbs":3,"title":2},"1929":{"body":135,"breadcrumbs":5,"title":4},"193":{"body":51,"breadcrumbs":5,"title":2},"1930":{"body":0,"breadcrumbs":3,"title":2},"1931":{"body":11,"breadcrumbs":2,"title":1},"1932":{"body":80,"breadcrumbs":3,"title":2},"1933":{"body":76,"breadcrumbs":3,"title":2},"1934":{"body":105,"breadcrumbs":3,"title":2},"1935":{"body":0,"breadcrumbs":3,"title":2},"1936":{"body":26,"breadcrumbs":3,"title":2},"1937":{"body":26,"breadcrumbs":3,"title":2},"1938":{"body":23,"breadcrumbs":3,"title":2},"1939":{"body":23,"breadcrumbs":3,"title":2},"194":{"body":0,"breadcrumbs":5,"title":2},"1940":{"body":46,"breadcrumbs":2,"title":1},"1941":{"body":17,"breadcrumbs":4,"title":2},"1942":{"body":23,"breadcrumbs":3,"title":1},"1943":{"body":0,"breadcrumbs":5,"title":3},"1944":{"body":774,"breadcrumbs":4,"title":2},"1945":{"body":879,"breadcrumbs":4,"title":2},"1946":{"body":0,"breadcrumbs":5,"title":3},"1947":{"body":239,"breadcrumbs":5,"title":3},"1948":{"body":108,"breadcrumbs":5,"title":3},"1949":{"body":0,"breadcrumbs":4,"title":2},"195":{"body":68,"breadcrumbs":5,"title":2},"1950":{"body":231,"breadcrumbs":5,"title":3},"1951":{"body":75,"breadcrumbs":5,"title":3},"1952":{"body":0,"breadcrumbs":4,"title":2},"1953":{"body":63,"breadcrumbs":4,"title":2},"1954":{"body":36,"breadcrumbs":4,"title":2},"1955":{"body":0,"breadcrumbs":5,"title":3},"1956":{"body":165,"breadcrumbs":5,"title":3},"1957":{"body":221,"breadcrumbs":5,"title":3},"1958":{"body":6,"breadcrumbs":5,"title":3},"1959":{"body":16,"breadcrumbs":3,"title":1},"196":{"body":63,"breadcrumbs":5,"title":2},"1960":{"body":12,"breadcrumbs":4,"title":2},"1961":{"body":4,"breadcrumbs":4,"title":2},"1962":{"body":44,"breadcrumbs":4,"title":2},"1963":{"body":25,"breadcrumbs":4,"title":2},"1964":{"body":0,"breadcrumbs":5,"title":3},"1965":{"body":16,"breadcrumbs":5,"title":3},"1966":{"body":26,"breadcrumbs":5,"title":3},"1967":{"body":22,"breadcrumbs":5,"title":3},"1968":{"body":22,"breadcrumbs":4,"title":2},"1969":{"body":0,"breadcrumbs":5,"title":3},"197":{"body":38,"breadcrumbs":5,"title":2},"1970":{"body":13,"breadcrumbs":4,"title":2},"1971":{"body":7,"breadcrumbs":4,"title":2},"1972":{"body":3,"breadcrumbs":3,"title":1},"1973":{"body":17,"breadcrumbs":4,"title":2},"1974":{"body":19,"breadcrumbs":5,"title":3},"1975":{"body":25,"breadcrumbs":4,"title":2},"1976":{"body":7,"breadcrumbs":5,"title":3},"1977":{"body":10,"breadcrumbs":3,"title":1},"1978":{"body":0,"breadcrumbs":4,"title":2},"1979":{"body":18,"breadcrumbs":4,"title":2},"198":{"body":0,"breadcrumbs":5,"title":2},"1980":{"body":18,"breadcrumbs":4,"title":2},"1981":{"body":20,"breadcrumbs":5,"title":3},"1982":{"body":18,"breadcrumbs":4,"title":2},"1983":{"body":18,"breadcrumbs":4,"title":2},"1984":{"body":0,"breadcrumbs":4,"title":2},"1985":{"body":24,"breadcrumbs":4,"title":2},"1986":{"body":23,"breadcrumbs":4,"title":2},"1987":{"body":30,"breadcrumbs":4,"title":2},"1988":{"body":27,"breadcrumbs":4,"title":2},"1989":{"body":6,"breadcrumbs":4,"title":2},"199":{"body":98,"breadcrumbs":5,"title":2},"1990":{"body":15,"breadcrumbs":4,"title":2},"1991":{"body":15,"breadcrumbs":5,"title":3},"1992":{"body":28,"breadcrumbs":3,"title":1},"1993":{"body":34,"breadcrumbs":5,"title":3},"1994":{"body":14,"breadcrumbs":5,"title":3},"1995":{"body":0,"breadcrumbs":5,"title":3},"1996":{"body":110,"breadcrumbs":4,"title":2},"1997":{"body":80,"breadcrumbs":4,"title":2},"1998":{"body":0,"breadcrumbs":5,"title":3},"1999":{"body":79,"breadcrumbs":4,"title":2},"2":{"body":31,"breadcrumbs":3,"title":2},"20":{"body":16,"breadcrumbs":2,"title":1},"200":{"body":99,"breadcrumbs":5,"title":2},"2000":{"body":72,"breadcrumbs":5,"title":3},"2001":{"body":22,"breadcrumbs":4,"title":2},"2002":{"body":0,"breadcrumbs":5,"title":3},"2003":{"body":68,"breadcrumbs":4,"title":2},"2004":{"body":37,"breadcrumbs":4,"title":2},"2005":{"body":0,"breadcrumbs":5,"title":3},"2006":{"body":59,"breadcrumbs":4,"title":2},"2007":{"body":61,"breadcrumbs":5,"title":3},"2008":{"body":0,"breadcrumbs":4,"title":2},"2009":{"body":49,"breadcrumbs":4,"title":2},"201":{"body":33,"breadcrumbs":5,"title":2},"2010":{"body":0,"breadcrumbs":5,"title":3},"2011":{"body":56,"breadcrumbs":4,"title":2},"2012":{"body":0,"breadcrumbs":5,"title":3},"2013":{"body":66,"breadcrumbs":4,"title":2},"2014":{"body":0,"breadcrumbs":5,"title":3},"2015":{"body":44,"breadcrumbs":5,"title":3},"2016":{"body":0,"breadcrumbs":4,"title":2},"2017":{"body":49,"breadcrumbs":4,"title":2},"2018":{"body":59,"breadcrumbs":4,"title":2},"2019":{"body":0,"breadcrumbs":4,"title":2},"202":{"body":0,"breadcrumbs":5,"title":2},"2020":{"body":48,"breadcrumbs":5,"title":3},"2021":{"body":28,"breadcrumbs":4,"title":2},"2022":{"body":0,"breadcrumbs":4,"title":2},"2023":{"body":26,"breadcrumbs":4,"title":2},"2024":{"body":24,"breadcrumbs":3,"title":1},"2025":{"body":0,"breadcrumbs":4,"title":2},"2026":{"body":20,"breadcrumbs":5,"title":3},"2027":{"body":39,"breadcrumbs":4,"title":2},"2028":{"body":12,"breadcrumbs":7,"title":4},"2029":{"body":17,"breadcrumbs":5,"title":2},"203":{"body":22,"breadcrumbs":5,"title":2},"2030":{"body":29,"breadcrumbs":4,"title":1},"2031":{"body":0,"breadcrumbs":5,"title":2},"2032":{"body":204,"breadcrumbs":6,"title":3},"2033":{"body":121,"breadcrumbs":5,"title":2},"2034":{"body":0,"breadcrumbs":6,"title":3},"2035":{"body":484,"breadcrumbs":7,"title":4},"2036":{"body":308,"breadcrumbs":6,"title":3},"2037":{"body":0,"breadcrumbs":6,"title":3},"2038":{"body":324,"breadcrumbs":7,"title":4},"2039":{"body":0,"breadcrumbs":6,"title":3},"204":{"body":26,"breadcrumbs":5,"title":2},"2040":{"body":328,"breadcrumbs":7,"title":4},"2041":{"body":0,"breadcrumbs":5,"title":2},"2042":{"body":430,"breadcrumbs":6,"title":3},"2043":{"body":0,"breadcrumbs":6,"title":3},"2044":{"body":11,"breadcrumbs":8,"title":5},"2045":{"body":9,"breadcrumbs":7,"title":4},"2046":{"body":9,"breadcrumbs":7,"title":4},"2047":{"body":110,"breadcrumbs":5,"title":2},"2048":{"body":16,"breadcrumbs":7,"title":4},"2049":{"body":16,"breadcrumbs":4,"title":1},"205":{"body":13,"breadcrumbs":5,"title":2},"2050":{"body":42,"breadcrumbs":6,"title":3},"2051":{"body":71,"breadcrumbs":5,"title":2},"2052":{"body":0,"breadcrumbs":6,"title":3},"2053":{"body":50,"breadcrumbs":9,"title":6},"2054":{"body":113,"breadcrumbs":8,"title":5},"2055":{"body":49,"breadcrumbs":8,"title":5},"2056":{"body":0,"breadcrumbs":6,"title":3},"2057":{"body":83,"breadcrumbs":7,"title":4},"2058":{"body":0,"breadcrumbs":5,"title":2},"2059":{"body":45,"breadcrumbs":7,"title":4},"206":{"body":0,"breadcrumbs":5,"title":2},"2060":{"body":71,"breadcrumbs":6,"title":3},"2061":{"body":83,"breadcrumbs":6,"title":3},"2062":{"body":0,"breadcrumbs":6,"title":3},"2063":{"body":26,"breadcrumbs":6,"title":3},"2064":{"body":33,"breadcrumbs":7,"title":4},"2065":{"body":0,"breadcrumbs":5,"title":2},"2066":{"body":6,"breadcrumbs":6,"title":3},"2067":{"body":25,"breadcrumbs":5,"title":2},"2068":{"body":43,"breadcrumbs":6,"title":3},"2069":{"body":24,"breadcrumbs":5,"title":2},"207":{"body":83,"breadcrumbs":5,"title":2},"2070":{"body":0,"breadcrumbs":5,"title":2},"2071":{"body":22,"breadcrumbs":8,"title":5},"2072":{"body":42,"breadcrumbs":7,"title":4},"2073":{"body":55,"breadcrumbs":7,"title":4},"2074":{"body":42,"breadcrumbs":9,"title":6},"2075":{"body":0,"breadcrumbs":5,"title":2},"2076":{"body":15,"breadcrumbs":7,"title":4},"2077":{"body":28,"breadcrumbs":8,"title":5},"2078":{"body":55,"breadcrumbs":7,"title":4},"2079":{"body":19,"breadcrumbs":6,"title":3},"208":{"body":43,"breadcrumbs":5,"title":2},"2080":{"body":27,"breadcrumbs":6,"title":3},"2081":{"body":0,"breadcrumbs":4,"title":1},"2082":{"body":21,"breadcrumbs":6,"title":3},"2083":{"body":29,"breadcrumbs":8,"title":5},"2084":{"body":10,"breadcrumbs":7,"title":4},"2085":{"body":16,"breadcrumbs":7,"title":4},"2086":{"body":0,"breadcrumbs":5,"title":2},"2087":{"body":41,"breadcrumbs":5,"title":2},"2088":{"body":59,"breadcrumbs":5,"title":2},"2089":{"body":29,"breadcrumbs":5,"title":2},"209":{"body":0,"breadcrumbs":5,"title":2},"2090":{"body":22,"breadcrumbs":5,"title":2},"2091":{"body":49,"breadcrumbs":4,"title":1},"2092":{"body":13,"breadcrumbs":4,"title":3},"2093":{"body":20,"breadcrumbs":3,"title":2},"2094":{"body":63,"breadcrumbs":2,"title":1},"2095":{"body":0,"breadcrumbs":3,"title":2},"2096":{"body":78,"breadcrumbs":4,"title":3},"2097":{"body":53,"breadcrumbs":3,"title":2},"2098":{"body":36,"breadcrumbs":3,"title":2},"2099":{"body":0,"breadcrumbs":4,"title":3},"21":{"body":12,"breadcrumbs":2,"title":1},"210":{"body":19,"breadcrumbs":6,"title":3},"2100":{"body":43,"breadcrumbs":3,"title":2},"2101":{"body":75,"breadcrumbs":3,"title":2},"2102":{"body":46,"breadcrumbs":4,"title":3},"2103":{"body":70,"breadcrumbs":4,"title":3},"2104":{"body":0,"breadcrumbs":3,"title":2},"2105":{"body":174,"breadcrumbs":4,"title":3},"2106":{"body":93,"breadcrumbs":4,"title":3},"2107":{"body":45,"breadcrumbs":4,"title":3},"2108":{"body":0,"breadcrumbs":3,"title":2},"2109":{"body":34,"breadcrumbs":4,"title":3},"211":{"body":39,"breadcrumbs":7,"title":4},"2110":{"body":134,"breadcrumbs":3,"title":2},"2111":{"body":71,"breadcrumbs":3,"title":2},"2112":{"body":20,"breadcrumbs":3,"title":2},"2113":{"body":31,"breadcrumbs":3,"title":2},"2114":{"body":0,"breadcrumbs":3,"title":2},"2115":{"body":17,"breadcrumbs":3,"title":2},"2116":{"body":145,"breadcrumbs":3,"title":2},"2117":{"body":62,"breadcrumbs":3,"title":2},"2118":{"body":48,"breadcrumbs":3,"title":2},"2119":{"body":0,"breadcrumbs":3,"title":2},"212":{"body":43,"breadcrumbs":5,"title":2},"2120":{"body":32,"breadcrumbs":4,"title":3},"2121":{"body":37,"breadcrumbs":4,"title":3},"2122":{"body":29,"breadcrumbs":3,"title":2},"2123":{"body":34,"breadcrumbs":4,"title":3},"2124":{"body":0,"breadcrumbs":3,"title":2},"2125":{"body":64,"breadcrumbs":3,"title":2},"2126":{"body":52,"breadcrumbs":4,"title":3},"2127":{"body":54,"breadcrumbs":3,"title":2},"2128":{"body":28,"breadcrumbs":2,"title":1},"2129":{"body":0,"breadcrumbs":3,"title":2},"213":{"body":33,"breadcrumbs":6,"title":3},"2130":{"body":36,"breadcrumbs":4,"title":3},"2131":{"body":40,"breadcrumbs":3,"title":2},"2132":{"body":0,"breadcrumbs":3,"title":2},"2133":{"body":15,"breadcrumbs":3,"title":2},"2134":{"body":51,"breadcrumbs":3,"title":2},"2135":{"body":32,"breadcrumbs":3,"title":2},"2136":{"body":76,"breadcrumbs":4,"title":3},"2137":{"body":18,"breadcrumbs":3,"title":2},"2138":{"body":22,"breadcrumbs":3,"title":2},"2139":{"body":87,"breadcrumbs":2,"title":1},"214":{"body":29,"breadcrumbs":7,"title":4},"2140":{"body":0,"breadcrumbs":4,"title":3},"2141":{"body":78,"breadcrumbs":5,"title":4},"2142":{"body":81,"breadcrumbs":4,"title":3},"2143":{"body":60,"breadcrumbs":3,"title":2},"2144":{"body":55,"breadcrumbs":3,"title":2},"2145":{"body":0,"breadcrumbs":4,"title":3},"2146":{"body":45,"breadcrumbs":4,"title":3},"2147":{"body":69,"breadcrumbs":4,"title":3},"2148":{"body":65,"breadcrumbs":3,"title":2},"2149":{"body":86,"breadcrumbs":4,"title":3},"215":{"body":40,"breadcrumbs":7,"title":4},"2150":{"body":0,"breadcrumbs":4,"title":3},"2151":{"body":27,"breadcrumbs":4,"title":3},"2152":{"body":181,"breadcrumbs":3,"title":2},"2153":{"body":80,"breadcrumbs":4,"title":3},"2154":{"body":0,"breadcrumbs":3,"title":2},"2155":{"body":25,"breadcrumbs":3,"title":2},"2156":{"body":218,"breadcrumbs":3,"title":2},"2157":{"body":45,"breadcrumbs":3,"title":2},"2158":{"body":132,"breadcrumbs":3,"title":2},"2159":{"body":0,"breadcrumbs":3,"title":2},"216":{"body":77,"breadcrumbs":5,"title":2},"2160":{"body":38,"breadcrumbs":4,"title":3},"2161":{"body":112,"breadcrumbs":3,"title":2},"2162":{"body":72,"breadcrumbs":3,"title":2},"2163":{"body":80,"breadcrumbs":4,"title":3},"2164":{"body":0,"breadcrumbs":4,"title":3},"2165":{"body":134,"breadcrumbs":3,"title":2},"2166":{"body":111,"breadcrumbs":4,"title":3},"2167":{"body":0,"breadcrumbs":3,"title":2},"2168":{"body":52,"breadcrumbs":3,"title":2},"2169":{"body":179,"breadcrumbs":3,"title":2},"217":{"body":0,"breadcrumbs":5,"title":2},"2170":{"body":0,"breadcrumbs":4,"title":3},"2171":{"body":196,"breadcrumbs":4,"title":3},"2172":{"body":128,"breadcrumbs":3,"title":2},"2173":{"body":19,"breadcrumbs":5,"title":3},"2174":{"body":15,"breadcrumbs":4,"title":2},"2175":{"body":47,"breadcrumbs":3,"title":1},"2176":{"body":39,"breadcrumbs":4,"title":2},"2177":{"body":0,"breadcrumbs":4,"title":2},"2178":{"body":40,"breadcrumbs":4,"title":2},"2179":{"body":633,"breadcrumbs":4,"title":2},"218":{"body":97,"breadcrumbs":6,"title":3},"2180":{"body":0,"breadcrumbs":4,"title":2},"2181":{"body":352,"breadcrumbs":5,"title":3},"2182":{"body":212,"breadcrumbs":4,"title":2},"2183":{"body":65,"breadcrumbs":4,"title":2},"2184":{"body":75,"breadcrumbs":4,"title":2},"2185":{"body":0,"breadcrumbs":5,"title":3},"2186":{"body":37,"breadcrumbs":4,"title":2},"2187":{"body":67,"breadcrumbs":5,"title":3},"2188":{"body":55,"breadcrumbs":5,"title":3},"2189":{"body":0,"breadcrumbs":4,"title":2},"219":{"body":58,"breadcrumbs":6,"title":3},"2190":{"body":40,"breadcrumbs":4,"title":2},"2191":{"body":45,"breadcrumbs":4,"title":2},"2192":{"body":32,"breadcrumbs":4,"title":2},"2193":{"body":0,"breadcrumbs":3,"title":1},"2194":{"body":117,"breadcrumbs":5,"title":3},"2195":{"body":59,"breadcrumbs":5,"title":3},"2196":{"body":36,"breadcrumbs":4,"title":2},"2197":{"body":36,"breadcrumbs":4,"title":2},"2198":{"body":0,"breadcrumbs":4,"title":2},"2199":{"body":50,"breadcrumbs":4,"title":2},"22":{"body":12,"breadcrumbs":2,"title":1},"220":{"body":35,"breadcrumbs":5,"title":2},"2200":{"body":36,"breadcrumbs":4,"title":2},"2201":{"body":38,"breadcrumbs":5,"title":3},"2202":{"body":18,"breadcrumbs":5,"title":3},"2203":{"body":19,"breadcrumbs":4,"title":2},"2204":{"body":62,"breadcrumbs":3,"title":1},"2205":{"body":0,"breadcrumbs":4,"title":2},"2206":{"body":57,"breadcrumbs":4,"title":2},"2207":{"body":47,"breadcrumbs":4,"title":2},"2208":{"body":37,"breadcrumbs":4,"title":2},"2209":{"body":0,"breadcrumbs":4,"title":2},"221":{"body":38,"breadcrumbs":6,"title":3},"2210":{"body":30,"breadcrumbs":4,"title":2},"2211":{"body":256,"breadcrumbs":6,"title":4},"2212":{"body":35,"breadcrumbs":4,"title":2},"2213":{"body":0,"breadcrumbs":4,"title":2},"2214":{"body":79,"breadcrumbs":4,"title":2},"2215":{"body":76,"breadcrumbs":4,"title":2},"2216":{"body":90,"breadcrumbs":3,"title":1},"2217":{"body":0,"breadcrumbs":5,"title":3},"2218":{"body":60,"breadcrumbs":4,"title":2},"2219":{"body":71,"breadcrumbs":5,"title":3},"222":{"body":0,"breadcrumbs":6,"title":3},"2220":{"body":61,"breadcrumbs":4,"title":2},"2221":{"body":0,"breadcrumbs":4,"title":2},"2222":{"body":43,"breadcrumbs":4,"title":2},"2223":{"body":56,"breadcrumbs":4,"title":2},"2224":{"body":44,"breadcrumbs":4,"title":2},"2225":{"body":0,"breadcrumbs":4,"title":2},"2226":{"body":46,"breadcrumbs":4,"title":2},"2227":{"body":49,"breadcrumbs":4,"title":2},"2228":{"body":48,"breadcrumbs":4,"title":2},"2229":{"body":0,"breadcrumbs":4,"title":2},"223":{"body":35,"breadcrumbs":5,"title":2},"2230":{"body":17,"breadcrumbs":4,"title":2},"2231":{"body":78,"breadcrumbs":4,"title":2},"2232":{"body":41,"breadcrumbs":4,"title":2},"2233":{"body":33,"breadcrumbs":4,"title":2},"2234":{"body":0,"breadcrumbs":4,"title":2},"2235":{"body":109,"breadcrumbs":5,"title":3},"2236":{"body":50,"breadcrumbs":5,"title":3},"2237":{"body":26,"breadcrumbs":4,"title":2},"2238":{"body":0,"breadcrumbs":3,"title":1},"2239":{"body":125,"breadcrumbs":4,"title":2},"224":{"body":43,"breadcrumbs":7,"title":4},"2240":{"body":69,"breadcrumbs":4,"title":2},"2241":{"body":59,"breadcrumbs":4,"title":2},"2242":{"body":13,"breadcrumbs":6,"title":4},"2243":{"body":20,"breadcrumbs":3,"title":1},"2244":{"body":0,"breadcrumbs":3,"title":1},"2245":{"body":10,"breadcrumbs":4,"title":2},"2246":{"body":8,"breadcrumbs":4,"title":2},"2247":{"body":13,"breadcrumbs":4,"title":2},"2248":{"body":0,"breadcrumbs":9,"title":7},"2249":{"body":225,"breadcrumbs":6,"title":4},"225":{"body":25,"breadcrumbs":6,"title":3},"2250":{"body":407,"breadcrumbs":6,"title":4},"2251":{"body":318,"breadcrumbs":7,"title":5},"2252":{"body":423,"breadcrumbs":6,"title":4},"2253":{"body":0,"breadcrumbs":10,"title":8},"2254":{"body":106,"breadcrumbs":7,"title":5},"2255":{"body":7,"breadcrumbs":9,"title":7},"2256":{"body":0,"breadcrumbs":9,"title":7},"2257":{"body":52,"breadcrumbs":6,"title":4},"2258":{"body":0,"breadcrumbs":4,"title":2},"2259":{"body":25,"breadcrumbs":5,"title":3},"226":{"body":22,"breadcrumbs":5,"title":2},"2260":{"body":12,"breadcrumbs":5,"title":3},"2261":{"body":16,"breadcrumbs":4,"title":2},"2262":{"body":0,"breadcrumbs":3,"title":1},"2263":{"body":19,"breadcrumbs":6,"title":4},"2264":{"body":19,"breadcrumbs":6,"title":4},"2265":{"body":15,"breadcrumbs":5,"title":3},"2266":{"body":12,"breadcrumbs":6,"title":4},"2267":{"body":8,"breadcrumbs":5,"title":3},"2268":{"body":27,"breadcrumbs":3,"title":1},"2269":{"body":14,"breadcrumbs":3,"title":1},"227":{"body":27,"breadcrumbs":5,"title":2},"2270":{"body":18,"breadcrumbs":5,"title":3},"2271":{"body":16,"breadcrumbs":4,"title":2},"2272":{"body":41,"breadcrumbs":3,"title":1},"2273":{"body":0,"breadcrumbs":6,"title":4},"2274":{"body":57,"breadcrumbs":6,"title":4},"2275":{"body":40,"breadcrumbs":5,"title":3},"2276":{"body":20,"breadcrumbs":5,"title":3},"2277":{"body":0,"breadcrumbs":4,"title":2},"2278":{"body":35,"breadcrumbs":6,"title":4},"2279":{"body":112,"breadcrumbs":6,"title":4},"228":{"body":0,"breadcrumbs":5,"title":2},"2280":{"body":35,"breadcrumbs":5,"title":3},"2281":{"body":20,"breadcrumbs":5,"title":3},"2282":{"body":40,"breadcrumbs":5,"title":3},"2283":{"body":0,"breadcrumbs":4,"title":2},"2284":{"body":38,"breadcrumbs":4,"title":2},"2285":{"body":21,"breadcrumbs":4,"title":2},"2286":{"body":22,"breadcrumbs":4,"title":2},"2287":{"body":0,"breadcrumbs":5,"title":3},"2288":{"body":18,"breadcrumbs":5,"title":3},"2289":{"body":18,"breadcrumbs":4,"title":2},"229":{"body":23,"breadcrumbs":5,"title":2},"2290":{"body":17,"breadcrumbs":5,"title":3},"2291":{"body":16,"breadcrumbs":4,"title":2},"2292":{"body":0,"breadcrumbs":4,"title":2},"2293":{"body":60,"breadcrumbs":4,"title":2},"2294":{"body":53,"breadcrumbs":4,"title":2},"2295":{"body":35,"breadcrumbs":4,"title":2},"2296":{"body":0,"breadcrumbs":4,"title":2},"2297":{"body":32,"breadcrumbs":3,"title":1},"2298":{"body":58,"breadcrumbs":3,"title":1},"2299":{"body":32,"breadcrumbs":4,"title":2},"23":{"body":0,"breadcrumbs":3,"title":2},"230":{"body":23,"breadcrumbs":5,"title":2},"2300":{"body":0,"breadcrumbs":4,"title":2},"2301":{"body":19,"breadcrumbs":4,"title":2},"2302":{"body":24,"breadcrumbs":4,"title":2},"2303":{"body":38,"breadcrumbs":4,"title":2},"2304":{"body":0,"breadcrumbs":9,"title":5},"2305":{"body":12,"breadcrumbs":5,"title":1},"2306":{"body":68,"breadcrumbs":6,"title":2},"2307":{"body":0,"breadcrumbs":6,"title":2},"2308":{"body":18,"breadcrumbs":10,"title":6},"2309":{"body":45,"breadcrumbs":7,"title":3},"231":{"body":18,"breadcrumbs":6,"title":3},"2310":{"body":0,"breadcrumbs":6,"title":2},"2311":{"body":82,"breadcrumbs":9,"title":5},"2312":{"body":34,"breadcrumbs":10,"title":6},"2313":{"body":37,"breadcrumbs":10,"title":6},"2314":{"body":42,"breadcrumbs":10,"title":6},"2315":{"body":26,"breadcrumbs":11,"title":7},"2316":{"body":55,"breadcrumbs":7,"title":3},"2317":{"body":0,"breadcrumbs":7,"title":3},"2318":{"body":19,"breadcrumbs":8,"title":4},"2319":{"body":33,"breadcrumbs":7,"title":3},"232":{"body":8,"breadcrumbs":5,"title":2},"2320":{"body":9,"breadcrumbs":9,"title":5},"2321":{"body":0,"breadcrumbs":6,"title":2},"2322":{"body":24,"breadcrumbs":12,"title":8},"2323":{"body":11,"breadcrumbs":9,"title":5},"2324":{"body":16,"breadcrumbs":10,"title":6},"2325":{"body":20,"breadcrumbs":10,"title":6},"2326":{"body":0,"breadcrumbs":6,"title":2},"2327":{"body":48,"breadcrumbs":8,"title":4},"2328":{"body":30,"breadcrumbs":6,"title":2},"2329":{"body":34,"breadcrumbs":6,"title":2},"233":{"body":0,"breadcrumbs":5,"title":2},"2330":{"body":25,"breadcrumbs":5,"title":1},"2331":{"body":18,"breadcrumbs":6,"title":2},"2332":{"body":28,"breadcrumbs":5,"title":1},"2333":{"body":11,"breadcrumbs":9,"title":6},"2334":{"body":11,"breadcrumbs":5,"title":2},"2335":{"body":50,"breadcrumbs":4,"title":1},"2336":{"body":0,"breadcrumbs":4,"title":1},"2337":{"body":27,"breadcrumbs":5,"title":2},"2338":{"body":23,"breadcrumbs":5,"title":2},"2339":{"body":24,"breadcrumbs":5,"title":2},"234":{"body":25,"breadcrumbs":7,"title":4},"2340":{"body":0,"breadcrumbs":4,"title":1},"2341":{"body":11,"breadcrumbs":4,"title":1},"2342":{"body":48,"breadcrumbs":5,"title":2},"2343":{"body":0,"breadcrumbs":5,"title":2},"2344":{"body":45,"breadcrumbs":5,"title":2},"2345":{"body":72,"breadcrumbs":5,"title":2},"2346":{"body":26,"breadcrumbs":8,"title":5},"2347":{"body":29,"breadcrumbs":7,"title":4},"2348":{"body":0,"breadcrumbs":5,"title":2},"2349":{"body":77,"breadcrumbs":8,"title":5},"235":{"body":11,"breadcrumbs":5,"title":2},"2350":{"body":78,"breadcrumbs":8,"title":5},"2351":{"body":40,"breadcrumbs":7,"title":4},"2352":{"body":0,"breadcrumbs":5,"title":2},"2353":{"body":102,"breadcrumbs":7,"title":4},"2354":{"body":66,"breadcrumbs":6,"title":3},"2355":{"body":42,"breadcrumbs":5,"title":2},"2356":{"body":66,"breadcrumbs":6,"title":3},"2357":{"body":0,"breadcrumbs":4,"title":1},"2358":{"body":29,"breadcrumbs":5,"title":2},"2359":{"body":25,"breadcrumbs":5,"title":2},"236":{"body":12,"breadcrumbs":6,"title":3},"2360":{"body":15,"breadcrumbs":5,"title":2},"2361":{"body":0,"breadcrumbs":4,"title":1},"2362":{"body":11,"breadcrumbs":6,"title":3},"2363":{"body":24,"breadcrumbs":7,"title":4},"2364":{"body":18,"breadcrumbs":7,"title":4},"2365":{"body":23,"breadcrumbs":7,"title":4},"2366":{"body":0,"breadcrumbs":5,"title":2},"2367":{"body":34,"breadcrumbs":5,"title":2},"2368":{"body":23,"breadcrumbs":6,"title":3},"2369":{"body":41,"breadcrumbs":5,"title":2},"237":{"body":0,"breadcrumbs":5,"title":2},"2370":{"body":8,"breadcrumbs":6,"title":4},"2371":{"body":38,"breadcrumbs":3,"title":1},"2372":{"body":0,"breadcrumbs":3,"title":1},"2373":{"body":28,"breadcrumbs":3,"title":1},"2374":{"body":18,"breadcrumbs":3,"title":1},"2375":{"body":18,"breadcrumbs":3,"title":1},"2376":{"body":0,"breadcrumbs":3,"title":1},"2377":{"body":35,"breadcrumbs":5,"title":3},"2378":{"body":32,"breadcrumbs":5,"title":3},"2379":{"body":0,"breadcrumbs":4,"title":2},"238":{"body":38,"breadcrumbs":6,"title":3},"2380":{"body":102,"breadcrumbs":4,"title":2},"2381":{"body":282,"breadcrumbs":4,"title":2},"2382":{"body":0,"breadcrumbs":4,"title":2},"2383":{"body":25,"breadcrumbs":5,"title":3},"2384":{"body":32,"breadcrumbs":4,"title":2},"2385":{"body":0,"breadcrumbs":4,"title":2},"2386":{"body":53,"breadcrumbs":4,"title":2},"2387":{"body":0,"breadcrumbs":4,"title":2},"2388":{"body":35,"breadcrumbs":4,"title":2},"2389":{"body":30,"breadcrumbs":4,"title":2},"239":{"body":14,"breadcrumbs":6,"title":3},"2390":{"body":27,"breadcrumbs":4,"title":2},"2391":{"body":0,"breadcrumbs":4,"title":2},"2392":{"body":49,"breadcrumbs":4,"title":2},"2393":{"body":56,"breadcrumbs":4,"title":2},"2394":{"body":0,"breadcrumbs":3,"title":1},"2395":{"body":17,"breadcrumbs":5,"title":3},"2396":{"body":23,"breadcrumbs":5,"title":3},"2397":{"body":12,"breadcrumbs":4,"title":2},"2398":{"body":11,"breadcrumbs":3,"title":1},"2399":{"body":27,"breadcrumbs":3,"title":1},"24":{"body":15,"breadcrumbs":3,"title":2},"240":{"body":12,"breadcrumbs":5,"title":2},"2400":{"body":76,"breadcrumbs":3,"title":1},"2401":{"body":4,"breadcrumbs":3,"title":1},"2402":{"body":37,"breadcrumbs":4,"title":2},"2403":{"body":56,"breadcrumbs":4,"title":2},"2404":{"body":33,"breadcrumbs":3,"title":1},"2405":{"body":26,"breadcrumbs":4,"title":3},"2406":{"body":0,"breadcrumbs":1,"title":0},"2407":{"body":44,"breadcrumbs":5,"title":4},"2408":{"body":29,"breadcrumbs":2,"title":1},"2409":{"body":41,"breadcrumbs":3,"title":2},"241":{"body":55,"breadcrumbs":7,"title":4},"2410":{"body":35,"breadcrumbs":3,"title":2},"2411":{"body":35,"breadcrumbs":3,"title":2},"2412":{"body":29,"breadcrumbs":2,"title":1},"2413":{"body":0,"breadcrumbs":2,"title":1},"2414":{"body":39,"breadcrumbs":3,"title":2},"2415":{"body":40,"breadcrumbs":3,"title":2},"2416":{"body":0,"breadcrumbs":2,"title":1},"2417":{"body":26,"breadcrumbs":2,"title":1},"2418":{"body":26,"breadcrumbs":2,"title":1},"2419":{"body":40,"breadcrumbs":5,"title":4},"242":{"body":42,"breadcrumbs":7,"title":4},"2420":{"body":40,"breadcrumbs":2,"title":1},"2421":{"body":28,"breadcrumbs":2,"title":1},"2422":{"body":34,"breadcrumbs":3,"title":2},"2423":{"body":30,"breadcrumbs":3,"title":2},"2424":{"body":28,"breadcrumbs":2,"title":1},"2425":{"body":27,"breadcrumbs":3,"title":2},"2426":{"body":0,"breadcrumbs":2,"title":1},"2427":{"body":28,"breadcrumbs":2,"title":1},"2428":{"body":26,"breadcrumbs":2,"title":1},"2429":{"body":30,"breadcrumbs":3,"title":2},"243":{"body":40,"breadcrumbs":5,"title":2},"2430":{"body":0,"breadcrumbs":2,"title":1},"2431":{"body":29,"breadcrumbs":2,"title":1},"2432":{"body":30,"breadcrumbs":2,"title":1},"2433":{"body":0,"breadcrumbs":2,"title":1},"2434":{"body":33,"breadcrumbs":2,"title":1},"2435":{"body":0,"breadcrumbs":2,"title":1},"2436":{"body":35,"breadcrumbs":6,"title":5},"2437":{"body":20,"breadcrumbs":2,"title":1},"2438":{"body":32,"breadcrumbs":2,"title":1},"2439":{"body":0,"breadcrumbs":2,"title":1},"244":{"body":7,"breadcrumbs":9,"title":6},"2440":{"body":26,"breadcrumbs":3,"title":2},"2441":{"body":30,"breadcrumbs":3,"title":2},"2442":{"body":0,"breadcrumbs":1,"title":0},"2443":{"body":34,"breadcrumbs":2,"title":1},"2444":{"body":25,"breadcrumbs":2,"title":1},"2445":{"body":32,"breadcrumbs":3,"title":2},"2446":{"body":0,"breadcrumbs":2,"title":1},"2447":{"body":24,"breadcrumbs":5,"title":4},"2448":{"body":0,"breadcrumbs":2,"title":1},"2449":{"body":30,"breadcrumbs":5,"title":4},"245":{"body":16,"breadcrumbs":9,"title":6},"2450":{"body":28,"breadcrumbs":5,"title":4},"2451":{"body":27,"breadcrumbs":2,"title":1},"2452":{"body":0,"breadcrumbs":2,"title":1},"2453":{"body":22,"breadcrumbs":2,"title":1},"2454":{"body":0,"breadcrumbs":2,"title":1},"2455":{"body":27,"breadcrumbs":5,"title":4},"2456":{"body":37,"breadcrumbs":5,"title":4},"2457":{"body":24,"breadcrumbs":2,"title":1},"2458":{"body":38,"breadcrumbs":2,"title":1},"2459":{"body":0,"breadcrumbs":2,"title":1},"246":{"body":18,"breadcrumbs":9,"title":6},"2460":{"body":26,"breadcrumbs":2,"title":1},"2461":{"body":0,"breadcrumbs":2,"title":1},"2462":{"body":22,"breadcrumbs":5,"title":4},"2463":{"body":21,"breadcrumbs":2,"title":1},"2464":{"body":33,"breadcrumbs":2,"title":1},"2465":{"body":0,"breadcrumbs":2,"title":1},"2466":{"body":22,"breadcrumbs":5,"title":4},"2467":{"body":27,"breadcrumbs":3,"title":2},"2468":{"body":34,"breadcrumbs":2,"title":1},"2469":{"body":40,"breadcrumbs":2,"title":1},"247":{"body":20,"breadcrumbs":9,"title":6},"2470":{"body":0,"breadcrumbs":2,"title":1},"2471":{"body":29,"breadcrumbs":3,"title":2},"2472":{"body":0,"breadcrumbs":2,"title":1},"2473":{"body":28,"breadcrumbs":6,"title":5},"2474":{"body":23,"breadcrumbs":2,"title":1},"2475":{"body":27,"breadcrumbs":3,"title":2},"2476":{"body":26,"breadcrumbs":2,"title":1},"2477":{"body":23,"breadcrumbs":2,"title":1},"2478":{"body":0,"breadcrumbs":2,"title":1},"2479":{"body":31,"breadcrumbs":2,"title":1},"248":{"body":11,"breadcrumbs":10,"title":7},"2480":{"body":22,"breadcrumbs":3,"title":2},"2481":{"body":32,"breadcrumbs":3,"title":2},"2482":{"body":33,"breadcrumbs":2,"title":1},"2483":{"body":24,"breadcrumbs":2,"title":1},"2484":{"body":39,"breadcrumbs":2,"title":1},"2485":{"body":26,"breadcrumbs":4,"title":3},"2486":{"body":37,"breadcrumbs":4,"title":3},"2487":{"body":18,"breadcrumbs":3,"title":2},"2488":{"body":0,"breadcrumbs":2,"title":1},"2489":{"body":18,"breadcrumbs":2,"title":1},"249":{"body":26,"breadcrumbs":10,"title":7},"2490":{"body":38,"breadcrumbs":2,"title":1},"2491":{"body":21,"breadcrumbs":2,"title":1},"2492":{"body":39,"breadcrumbs":3,"title":2},"2493":{"body":29,"breadcrumbs":2,"title":1},"2494":{"body":30,"breadcrumbs":7,"title":6},"2495":{"body":21,"breadcrumbs":2,"title":1},"2496":{"body":0,"breadcrumbs":2,"title":1},"2497":{"body":23,"breadcrumbs":4,"title":3},"2498":{"body":30,"breadcrumbs":2,"title":1},"2499":{"body":0,"breadcrumbs":2,"title":1},"25":{"body":12,"breadcrumbs":3,"title":2},"250":{"body":18,"breadcrumbs":6,"title":3},"2500":{"body":29,"breadcrumbs":2,"title":1},"2501":{"body":28,"breadcrumbs":2,"title":1},"2502":{"body":0,"breadcrumbs":2,"title":1},"2503":{"body":28,"breadcrumbs":2,"title":1},"2504":{"body":38,"breadcrumbs":2,"title":1},"2505":{"body":37,"breadcrumbs":2,"title":1},"2506":{"body":0,"breadcrumbs":3,"title":2},"2507":{"body":21,"breadcrumbs":2,"title":1},"2508":{"body":99,"breadcrumbs":4,"title":3},"2509":{"body":0,"breadcrumbs":4,"title":3},"251":{"body":74,"breadcrumbs":7,"title":4},"2510":{"body":78,"breadcrumbs":3,"title":2},"2511":{"body":26,"breadcrumbs":3,"title":2},"2512":{"body":0,"breadcrumbs":3,"title":2},"2513":{"body":42,"breadcrumbs":3,"title":2},"2514":{"body":28,"breadcrumbs":3,"title":2},"2515":{"body":0,"breadcrumbs":3,"title":2},"2516":{"body":31,"breadcrumbs":4,"title":3},"2517":{"body":19,"breadcrumbs":4,"title":3},"2518":{"body":27,"breadcrumbs":3,"title":2},"2519":{"body":20,"breadcrumbs":7,"title":5},"252":{"body":42,"breadcrumbs":6,"title":3},"2520":{"body":12,"breadcrumbs":3,"title":1},"2521":{"body":56,"breadcrumbs":4,"title":2},"2522":{"body":34,"breadcrumbs":3,"title":1},"2523":{"body":32,"breadcrumbs":4,"title":2},"2524":{"body":49,"breadcrumbs":6,"title":4},"2525":{"body":33,"breadcrumbs":3,"title":1},"2526":{"body":13,"breadcrumbs":3,"title":1},"2527":{"body":27,"breadcrumbs":4,"title":2},"2528":{"body":17,"breadcrumbs":4,"title":2},"2529":{"body":3,"breadcrumbs":4,"title":2},"253":{"body":30,"breadcrumbs":5,"title":2},"2530":{"body":46,"breadcrumbs":8,"title":4},"2531":{"body":45,"breadcrumbs":5,"title":1},"2532":{"body":0,"breadcrumbs":6,"title":2},"2533":{"body":44,"breadcrumbs":10,"title":6},"2534":{"body":9,"breadcrumbs":8,"title":4},"2535":{"body":13,"breadcrumbs":7,"title":3},"2536":{"body":19,"breadcrumbs":9,"title":5},"2537":{"body":0,"breadcrumbs":7,"title":3},"2538":{"body":34,"breadcrumbs":9,"title":5},"2539":{"body":29,"breadcrumbs":9,"title":5},"254":{"body":20,"breadcrumbs":5,"title":2},"2540":{"body":0,"breadcrumbs":6,"title":2},"2541":{"body":62,"breadcrumbs":8,"title":4},"2542":{"body":110,"breadcrumbs":7,"title":3},"2543":{"body":0,"breadcrumbs":7,"title":3},"2544":{"body":79,"breadcrumbs":6,"title":2},"2545":{"body":33,"breadcrumbs":6,"title":2},"2546":{"body":33,"breadcrumbs":7,"title":3},"2547":{"body":85,"breadcrumbs":6,"title":2},"2548":{"body":0,"breadcrumbs":10,"title":6},"2549":{"body":124,"breadcrumbs":6,"title":2},"255":{"body":25,"breadcrumbs":5,"title":2},"2550":{"body":129,"breadcrumbs":7,"title":3},"2551":{"body":153,"breadcrumbs":9,"title":5},"2552":{"body":132,"breadcrumbs":6,"title":2},"2553":{"body":122,"breadcrumbs":6,"title":2},"2554":{"body":0,"breadcrumbs":7,"title":3},"2555":{"body":31,"breadcrumbs":6,"title":2},"2556":{"body":25,"breadcrumbs":6,"title":2},"2557":{"body":16,"breadcrumbs":6,"title":2},"2558":{"body":0,"breadcrumbs":6,"title":2},"2559":{"body":22,"breadcrumbs":6,"title":2},"256":{"body":10,"breadcrumbs":7,"title":4},"2560":{"body":169,"breadcrumbs":7,"title":3},"2561":{"body":0,"breadcrumbs":6,"title":2},"2562":{"body":23,"breadcrumbs":6,"title":2},"2563":{"body":21,"breadcrumbs":7,"title":3},"2564":{"body":0,"breadcrumbs":5,"title":1},"2565":{"body":20,"breadcrumbs":7,"title":3},"2566":{"body":28,"breadcrumbs":6,"title":2},"2567":{"body":21,"breadcrumbs":8,"title":4},"2568":{"body":24,"breadcrumbs":9,"title":5},"2569":{"body":0,"breadcrumbs":6,"title":2},"257":{"body":0,"breadcrumbs":5,"title":2},"2570":{"body":42,"breadcrumbs":6,"title":2},"2571":{"body":59,"breadcrumbs":6,"title":2},"2572":{"body":49,"breadcrumbs":7,"title":3},"2573":{"body":0,"breadcrumbs":6,"title":2},"2574":{"body":9,"breadcrumbs":9,"title":5},"2575":{"body":13,"breadcrumbs":9,"title":5},"2576":{"body":11,"breadcrumbs":8,"title":4},"2577":{"body":17,"breadcrumbs":9,"title":5},"2578":{"body":7,"breadcrumbs":7,"title":3},"2579":{"body":0,"breadcrumbs":6,"title":2},"258":{"body":14,"breadcrumbs":4,"title":1},"2580":{"body":22,"breadcrumbs":6,"title":2},"2581":{"body":59,"breadcrumbs":6,"title":2},"2582":{"body":26,"breadcrumbs":7,"title":3},"2583":{"body":0,"breadcrumbs":6,"title":2},"2584":{"body":14,"breadcrumbs":6,"title":2},"2585":{"body":26,"breadcrumbs":6,"title":2},"2586":{"body":19,"breadcrumbs":6,"title":2},"2587":{"body":17,"breadcrumbs":4,"title":3},"2588":{"body":17,"breadcrumbs":3,"title":2},"2589":{"body":74,"breadcrumbs":2,"title":1},"259":{"body":24,"breadcrumbs":6,"title":3},"2590":{"body":0,"breadcrumbs":3,"title":2},"2591":{"body":67,"breadcrumbs":3,"title":2},"2592":{"body":55,"breadcrumbs":3,"title":2},"2593":{"body":0,"breadcrumbs":3,"title":2},"2594":{"body":48,"breadcrumbs":3,"title":2},"2595":{"body":38,"breadcrumbs":4,"title":3},"2596":{"body":148,"breadcrumbs":3,"title":2},"2597":{"body":569,"breadcrumbs":3,"title":2},"2598":{"body":217,"breadcrumbs":3,"title":2},"2599":{"body":0,"breadcrumbs":4,"title":3},"26":{"body":10,"breadcrumbs":3,"title":2},"260":{"body":7,"breadcrumbs":5,"title":2},"2600":{"body":40,"breadcrumbs":4,"title":3},"2601":{"body":39,"breadcrumbs":5,"title":4},"2602":{"body":100,"breadcrumbs":4,"title":3},"2603":{"body":551,"breadcrumbs":4,"title":3},"2604":{"body":0,"breadcrumbs":3,"title":2},"2605":{"body":47,"breadcrumbs":3,"title":2},"2606":{"body":35,"breadcrumbs":4,"title":3},"2607":{"body":243,"breadcrumbs":3,"title":2},"2608":{"body":0,"breadcrumbs":3,"title":2},"2609":{"body":30,"breadcrumbs":3,"title":2},"261":{"body":22,"breadcrumbs":5,"title":2},"2610":{"body":59,"breadcrumbs":4,"title":3},"2611":{"body":133,"breadcrumbs":3,"title":2},"2612":{"body":0,"breadcrumbs":3,"title":2},"2613":{"body":22,"breadcrumbs":3,"title":2},"2614":{"body":45,"breadcrumbs":3,"title":2},"2615":{"body":42,"breadcrumbs":3,"title":2},"2616":{"body":0,"breadcrumbs":3,"title":2},"2617":{"body":130,"breadcrumbs":3,"title":2},"2618":{"body":105,"breadcrumbs":3,"title":2},"2619":{"body":96,"breadcrumbs":3,"title":2},"262":{"body":29,"breadcrumbs":5,"title":2},"2620":{"body":162,"breadcrumbs":3,"title":2},"2621":{"body":0,"breadcrumbs":2,"title":1},"2622":{"body":89,"breadcrumbs":4,"title":3},"2623":{"body":25,"breadcrumbs":3,"title":2},"2624":{"body":49,"breadcrumbs":3,"title":2},"2625":{"body":13,"breadcrumbs":5,"title":3},"2626":{"body":21,"breadcrumbs":4,"title":2},"2627":{"body":0,"breadcrumbs":4,"title":2},"2628":{"body":29,"breadcrumbs":4,"title":2},"2629":{"body":45,"breadcrumbs":4,"title":2},"263":{"body":0,"breadcrumbs":5,"title":2},"2630":{"body":42,"breadcrumbs":4,"title":2},"2631":{"body":0,"breadcrumbs":5,"title":3},"2632":{"body":19,"breadcrumbs":4,"title":2},"2633":{"body":104,"breadcrumbs":7,"title":5},"2634":{"body":357,"breadcrumbs":7,"title":5},"2635":{"body":40,"breadcrumbs":6,"title":4},"2636":{"body":0,"breadcrumbs":6,"title":4},"2637":{"body":15,"breadcrumbs":5,"title":3},"2638":{"body":108,"breadcrumbs":7,"title":5},"2639":{"body":713,"breadcrumbs":7,"title":5},"264":{"body":26,"breadcrumbs":7,"title":4},"2640":{"body":0,"breadcrumbs":5,"title":3},"2641":{"body":9,"breadcrumbs":4,"title":2},"2642":{"body":172,"breadcrumbs":7,"title":5},"2643":{"body":504,"breadcrumbs":7,"title":5},"2644":{"body":0,"breadcrumbs":4,"title":2},"2645":{"body":33,"breadcrumbs":4,"title":2},"2646":{"body":105,"breadcrumbs":5,"title":3},"2647":{"body":79,"breadcrumbs":4,"title":2},"2648":{"body":0,"breadcrumbs":4,"title":2},"2649":{"body":25,"breadcrumbs":5,"title":3},"265":{"body":20,"breadcrumbs":5,"title":2},"2650":{"body":57,"breadcrumbs":4,"title":2},"2651":{"body":28,"breadcrumbs":4,"title":2},"2652":{"body":0,"breadcrumbs":4,"title":2},"2653":{"body":19,"breadcrumbs":5,"title":3},"2654":{"body":34,"breadcrumbs":5,"title":3},"2655":{"body":40,"breadcrumbs":5,"title":3},"2656":{"body":18,"breadcrumbs":4,"title":2},"2657":{"body":15,"breadcrumbs":4,"title":2},"2658":{"body":40,"breadcrumbs":4,"title":2},"2659":{"body":21,"breadcrumbs":5,"title":3},"266":{"body":26,"breadcrumbs":5,"title":2},"2660":{"body":87,"breadcrumbs":3,"title":1},"2661":{"body":0,"breadcrumbs":3,"title":1},"2662":{"body":51,"breadcrumbs":5,"title":3},"2663":{"body":71,"breadcrumbs":4,"title":2},"2664":{"body":6,"breadcrumbs":3,"title":1},"2665":{"body":0,"breadcrumbs":3,"title":1},"2666":{"body":47,"breadcrumbs":8,"title":6},"2667":{"body":150,"breadcrumbs":6,"title":4},"2668":{"body":64,"breadcrumbs":4,"title":2},"2669":{"body":16,"breadcrumbs":5,"title":3},"267":{"body":26,"breadcrumbs":5,"title":2},"2670":{"body":0,"breadcrumbs":4,"title":2},"2671":{"body":14,"breadcrumbs":4,"title":2},"2672":{"body":62,"breadcrumbs":4,"title":2},"2673":{"body":0,"breadcrumbs":5,"title":3},"2674":{"body":13,"breadcrumbs":4,"title":2},"2675":{"body":10,"breadcrumbs":4,"title":2},"2676":{"body":0,"breadcrumbs":3,"title":1},"2677":{"body":14,"breadcrumbs":3,"title":1},"2678":{"body":23,"breadcrumbs":3,"title":1},"2679":{"body":0,"breadcrumbs":7,"title":5},"268":{"body":22,"breadcrumbs":5,"title":2},"2680":{"body":47,"breadcrumbs":4,"title":2},"2681":{"body":12,"breadcrumbs":4,"title":2},"2682":{"body":14,"breadcrumbs":4,"title":2},"2683":{"body":47,"breadcrumbs":5,"title":3},"2684":{"body":36,"breadcrumbs":4,"title":2},"2685":{"body":21,"breadcrumbs":4,"title":2},"2686":{"body":10,"breadcrumbs":9,"title":6},"2687":{"body":12,"breadcrumbs":4,"title":1},"2688":{"body":0,"breadcrumbs":7,"title":4},"2689":{"body":6,"breadcrumbs":8,"title":5},"269":{"body":0,"breadcrumbs":4,"title":1},"2690":{"body":7,"breadcrumbs":8,"title":5},"2691":{"body":44,"breadcrumbs":8,"title":5},"2692":{"body":127,"breadcrumbs":8,"title":5},"2693":{"body":88,"breadcrumbs":9,"title":6},"2694":{"body":30,"breadcrumbs":7,"title":4},"2695":{"body":17,"breadcrumbs":8,"title":5},"2696":{"body":0,"breadcrumbs":5,"title":2},"2697":{"body":36,"breadcrumbs":6,"title":3},"2698":{"body":31,"breadcrumbs":6,"title":3},"2699":{"body":31,"breadcrumbs":7,"title":4},"27":{"body":13,"breadcrumbs":4,"title":3},"270":{"body":2,"breadcrumbs":6,"title":3},"2700":{"body":0,"breadcrumbs":5,"title":2},"2701":{"body":21,"breadcrumbs":6,"title":3},"2702":{"body":22,"breadcrumbs":5,"title":2},"2703":{"body":34,"breadcrumbs":6,"title":3},"2704":{"body":57,"breadcrumbs":6,"title":3},"2705":{"body":31,"breadcrumbs":5,"title":2},"2706":{"body":0,"breadcrumbs":5,"title":2},"2707":{"body":17,"breadcrumbs":5,"title":2},"2708":{"body":11,"breadcrumbs":6,"title":3},"2709":{"body":16,"breadcrumbs":5,"title":2},"271":{"body":4,"breadcrumbs":5,"title":2},"2710":{"body":28,"breadcrumbs":5,"title":2},"2711":{"body":23,"breadcrumbs":5,"title":2},"2712":{"body":0,"breadcrumbs":7,"title":4},"2713":{"body":46,"breadcrumbs":4,"title":1},"2714":{"body":0,"breadcrumbs":5,"title":2},"2715":{"body":29,"breadcrumbs":7,"title":4},"2716":{"body":34,"breadcrumbs":7,"title":4},"2717":{"body":40,"breadcrumbs":7,"title":4},"2718":{"body":47,"breadcrumbs":6,"title":3},"2719":{"body":28,"breadcrumbs":8,"title":5},"272":{"body":4,"breadcrumbs":5,"title":2},"2720":{"body":0,"breadcrumbs":6,"title":3},"2721":{"body":25,"breadcrumbs":7,"title":4},"2722":{"body":21,"breadcrumbs":6,"title":3},"2723":{"body":26,"breadcrumbs":5,"title":2},"2724":{"body":0,"breadcrumbs":5,"title":2},"2725":{"body":29,"breadcrumbs":5,"title":2},"2726":{"body":24,"breadcrumbs":5,"title":2},"2727":{"body":0,"breadcrumbs":6,"title":3},"2728":{"body":49,"breadcrumbs":7,"title":4},"2729":{"body":7,"breadcrumbs":6,"title":3},"273":{"body":5,"breadcrumbs":6,"title":3},"2730":{"body":14,"breadcrumbs":7,"title":4},"2731":{"body":0,"breadcrumbs":5,"title":2},"2732":{"body":31,"breadcrumbs":5,"title":2},"2733":{"body":28,"breadcrumbs":6,"title":3},"2734":{"body":13,"breadcrumbs":6,"title":3},"2735":{"body":0,"breadcrumbs":4,"title":1},"2736":{"body":50,"breadcrumbs":5,"title":2},"2737":{"body":20,"breadcrumbs":5,"title":2},"2738":{"body":24,"breadcrumbs":5,"title":2},"2739":{"body":23,"breadcrumbs":5,"title":2},"274":{"body":60,"breadcrumbs":4,"title":1},"2740":{"body":18,"breadcrumbs":5,"title":2},"2741":{"body":12,"breadcrumbs":7,"title":4},"2742":{"body":48,"breadcrumbs":8,"title":5},"2743":{"body":15,"breadcrumbs":5,"title":2},"2744":{"body":5,"breadcrumbs":7,"title":4},"2745":{"body":6,"breadcrumbs":8,"title":5},"2746":{"body":7,"breadcrumbs":7,"title":4},"2747":{"body":14,"breadcrumbs":9,"title":6},"2748":{"body":27,"breadcrumbs":9,"title":6},"2749":{"body":8,"breadcrumbs":7,"title":4},"275":{"body":16,"breadcrumbs":5,"title":2},"2750":{"body":13,"breadcrumbs":8,"title":5},"2751":{"body":22,"breadcrumbs":7,"title":4},"2752":{"body":4,"breadcrumbs":8,"title":5},"2753":{"body":18,"breadcrumbs":6,"title":3},"2754":{"body":11,"breadcrumbs":6,"title":3},"2755":{"body":16,"breadcrumbs":6,"title":3},"2756":{"body":23,"breadcrumbs":6,"title":3},"2757":{"body":0,"breadcrumbs":7,"title":4},"2758":{"body":27,"breadcrumbs":5,"title":2},"2759":{"body":55,"breadcrumbs":8,"title":5},"276":{"body":18,"breadcrumbs":5,"title":2},"2760":{"body":46,"breadcrumbs":8,"title":5},"2761":{"body":43,"breadcrumbs":6,"title":3},"2762":{"body":25,"breadcrumbs":4,"title":1},"2763":{"body":0,"breadcrumbs":7,"title":4},"2764":{"body":10,"breadcrumbs":5,"title":2},"2765":{"body":23,"breadcrumbs":6,"title":3},"2766":{"body":500,"breadcrumbs":7,"title":4},"2767":{"body":56,"breadcrumbs":9,"title":6},"2768":{"body":47,"breadcrumbs":5,"title":2},"2769":{"body":11,"breadcrumbs":4,"title":1},"277":{"body":7,"breadcrumbs":5,"title":2},"2770":{"body":0,"breadcrumbs":7,"title":4},"2771":{"body":4,"breadcrumbs":5,"title":2},"2772":{"body":55,"breadcrumbs":5,"title":2},"2773":{"body":15,"breadcrumbs":4,"title":1},"2774":{"body":31,"breadcrumbs":8,"title":5},"2775":{"body":76,"breadcrumbs":6,"title":3},"2776":{"body":10,"breadcrumbs":5,"title":2},"2777":{"body":29,"breadcrumbs":5,"title":2},"2778":{"body":16,"breadcrumbs":6,"title":3},"2779":{"body":19,"breadcrumbs":5,"title":2},"278":{"body":23,"breadcrumbs":7,"title":4},"2780":{"body":32,"breadcrumbs":4,"title":1},"2781":{"body":0,"breadcrumbs":6,"title":3},"2782":{"body":8,"breadcrumbs":4,"title":1},"2783":{"body":31,"breadcrumbs":4,"title":1},"2784":{"body":30,"breadcrumbs":5,"title":2},"2785":{"body":27,"breadcrumbs":6,"title":3},"2786":{"body":47,"breadcrumbs":5,"title":2},"2787":{"body":23,"breadcrumbs":5,"title":2},"2788":{"body":0,"breadcrumbs":6,"title":3},"2789":{"body":8,"breadcrumbs":4,"title":1},"279":{"body":22,"breadcrumbs":5,"title":2},"2790":{"body":33,"breadcrumbs":4,"title":1},"2791":{"body":23,"breadcrumbs":5,"title":2},"2792":{"body":30,"breadcrumbs":6,"title":3},"2793":{"body":52,"breadcrumbs":5,"title":2},"2794":{"body":20,"breadcrumbs":5,"title":2},"2795":{"body":19,"breadcrumbs":6,"title":3},"2796":{"body":66,"breadcrumbs":5,"title":2},"2797":{"body":0,"breadcrumbs":6,"title":3},"2798":{"body":34,"breadcrumbs":5,"title":2},"2799":{"body":31,"breadcrumbs":5,"title":2},"28":{"body":11,"breadcrumbs":3,"title":2},"280":{"body":9,"breadcrumbs":2,"title":1},"2800":{"body":24,"breadcrumbs":5,"title":2},"2801":{"body":0,"breadcrumbs":5,"title":2},"2802":{"body":60,"breadcrumbs":6,"title":3},"2803":{"body":48,"breadcrumbs":6,"title":3},"2804":{"body":0,"breadcrumbs":6,"title":3},"2805":{"body":28,"breadcrumbs":9,"title":6},"2806":{"body":53,"breadcrumbs":9,"title":6},"2807":{"body":51,"breadcrumbs":9,"title":6},"2808":{"body":42,"breadcrumbs":8,"title":5},"2809":{"body":33,"breadcrumbs":9,"title":6},"281":{"body":0,"breadcrumbs":3,"title":2},"2810":{"body":0,"breadcrumbs":5,"title":2},"2811":{"body":32,"breadcrumbs":4,"title":1},"2812":{"body":33,"breadcrumbs":4,"title":1},"2813":{"body":29,"breadcrumbs":4,"title":1},"2814":{"body":32,"breadcrumbs":4,"title":1},"2815":{"body":0,"breadcrumbs":5,"title":2},"2816":{"body":52,"breadcrumbs":6,"title":3},"2817":{"body":31,"breadcrumbs":7,"title":4},"2818":{"body":0,"breadcrumbs":4,"title":1},"2819":{"body":8,"breadcrumbs":5,"title":2},"282":{"body":16,"breadcrumbs":5,"title":4},"2820":{"body":33,"breadcrumbs":5,"title":2},"2821":{"body":0,"breadcrumbs":4,"title":1},"2822":{"body":33,"breadcrumbs":6,"title":3},"2823":{"body":32,"breadcrumbs":6,"title":3},"2824":{"body":43,"breadcrumbs":4,"title":1},"2825":{"body":23,"breadcrumbs":5,"title":2},"2826":{"body":20,"breadcrumbs":6,"title":3},"2827":{"body":0,"breadcrumbs":5,"title":2},"2828":{"body":56,"breadcrumbs":4,"title":1},"2829":{"body":38,"breadcrumbs":5,"title":2},"283":{"body":15,"breadcrumbs":6,"title":5},"2830":{"body":52,"breadcrumbs":5,"title":2},"2831":{"body":46,"breadcrumbs":5,"title":2},"2832":{"body":29,"breadcrumbs":5,"title":2},"2833":{"body":39,"breadcrumbs":4,"title":1},"2834":{"body":24,"breadcrumbs":4,"title":1},"2835":{"body":28,"breadcrumbs":4,"title":1},"2836":{"body":0,"breadcrumbs":5,"title":2},"2837":{"body":68,"breadcrumbs":6,"title":3},"2838":{"body":23,"breadcrumbs":6,"title":3},"2839":{"body":24,"breadcrumbs":6,"title":3},"284":{"body":19,"breadcrumbs":5,"title":4},"2840":{"body":70,"breadcrumbs":8,"title":5},"2841":{"body":0,"breadcrumbs":5,"title":2},"2842":{"body":35,"breadcrumbs":5,"title":2},"2843":{"body":39,"breadcrumbs":5,"title":2},"2844":{"body":46,"breadcrumbs":6,"title":3},"2845":{"body":34,"breadcrumbs":5,"title":2},"2846":{"body":32,"breadcrumbs":6,"title":3},"2847":{"body":0,"breadcrumbs":5,"title":2},"2848":{"body":16,"breadcrumbs":5,"title":2},"2849":{"body":23,"breadcrumbs":6,"title":3},"285":{"body":0,"breadcrumbs":3,"title":2},"2850":{"body":22,"breadcrumbs":6,"title":3},"2851":{"body":0,"breadcrumbs":6,"title":3},"2852":{"body":46,"breadcrumbs":7,"title":4},"2853":{"body":44,"breadcrumbs":6,"title":3},"2854":{"body":26,"breadcrumbs":7,"title":4},"2855":{"body":38,"breadcrumbs":8,"title":5},"2856":{"body":25,"breadcrumbs":7,"title":4},"2857":{"body":19,"breadcrumbs":6,"title":3},"2858":{"body":0,"breadcrumbs":6,"title":3},"2859":{"body":32,"breadcrumbs":4,"title":1},"286":{"body":19,"breadcrumbs":3,"title":2},"2860":{"body":31,"breadcrumbs":4,"title":1},"2861":{"body":30,"breadcrumbs":5,"title":2},"2862":{"body":28,"breadcrumbs":5,"title":2},"2863":{"body":49,"breadcrumbs":5,"title":2},"2864":{"body":50,"breadcrumbs":4,"title":1},"2865":{"body":0,"breadcrumbs":6,"title":3},"2866":{"body":0,"breadcrumbs":5,"title":2},"2867":{"body":5,"breadcrumbs":7,"title":4},"2868":{"body":14,"breadcrumbs":7,"title":4},"2869":{"body":0,"breadcrumbs":6,"title":3},"287":{"body":19,"breadcrumbs":4,"title":3},"2870":{"body":17,"breadcrumbs":6,"title":3},"2871":{"body":21,"breadcrumbs":6,"title":3},"2872":{"body":38,"breadcrumbs":6,"title":3},"2873":{"body":30,"breadcrumbs":6,"title":3},"2874":{"body":0,"breadcrumbs":5,"title":2},"2875":{"body":36,"breadcrumbs":5,"title":2},"2876":{"body":36,"breadcrumbs":5,"title":2},"2877":{"body":37,"breadcrumbs":5,"title":2},"2878":{"body":0,"breadcrumbs":5,"title":2},"2879":{"body":28,"breadcrumbs":4,"title":1},"288":{"body":0,"breadcrumbs":3,"title":2},"2880":{"body":24,"breadcrumbs":4,"title":1},"2881":{"body":22,"breadcrumbs":4,"title":1},"2882":{"body":46,"breadcrumbs":5,"title":2},"2883":{"body":0,"breadcrumbs":4,"title":1},"2884":{"body":16,"breadcrumbs":5,"title":2},"2885":{"body":17,"breadcrumbs":6,"title":3},"2886":{"body":13,"breadcrumbs":6,"title":3},"2887":{"body":38,"breadcrumbs":5,"title":2},"2888":{"body":17,"breadcrumbs":5,"title":2},"2889":{"body":0,"breadcrumbs":5,"title":3},"289":{"body":26,"breadcrumbs":3,"title":2},"2890":{"body":0,"breadcrumbs":6,"title":4},"2891":{"body":1,"breadcrumbs":4,"title":2},"2892":{"body":7,"breadcrumbs":4,"title":2},"2893":{"body":6,"breadcrumbs":5,"title":3},"2894":{"body":7,"breadcrumbs":4,"title":2},"2895":{"body":2,"breadcrumbs":4,"title":2},"2896":{"body":6,"breadcrumbs":4,"title":2},"2897":{"body":6,"breadcrumbs":4,"title":2},"2898":{"body":1,"breadcrumbs":4,"title":2},"2899":{"body":15,"breadcrumbs":5,"title":3},"29":{"body":0,"breadcrumbs":3,"title":2},"290":{"body":30,"breadcrumbs":3,"title":2},"2900":{"body":28,"breadcrumbs":6,"title":3},"2901":{"body":23,"breadcrumbs":5,"title":2},"2902":{"body":0,"breadcrumbs":4,"title":1},"2903":{"body":18,"breadcrumbs":5,"title":2},"2904":{"body":21,"breadcrumbs":7,"title":4},"2905":{"body":35,"breadcrumbs":5,"title":2},"2906":{"body":19,"breadcrumbs":5,"title":2},"2907":{"body":0,"breadcrumbs":5,"title":2},"2908":{"body":19,"breadcrumbs":6,"title":3},"2909":{"body":207,"breadcrumbs":5,"title":2},"291":{"body":7,"breadcrumbs":3,"title":2},"2910":{"body":0,"breadcrumbs":5,"title":2},"2911":{"body":8,"breadcrumbs":6,"title":3},"2912":{"body":18,"breadcrumbs":7,"title":4},"2913":{"body":31,"breadcrumbs":7,"title":4},"2914":{"body":31,"breadcrumbs":6,"title":3},"2915":{"body":77,"breadcrumbs":8,"title":5},"2916":{"body":32,"breadcrumbs":6,"title":3},"2917":{"body":4,"breadcrumbs":6,"title":3},"2918":{"body":21,"breadcrumbs":9,"title":6},"2919":{"body":20,"breadcrumbs":9,"title":6},"292":{"body":9,"breadcrumbs":2,"title":1},"2920":{"body":8,"breadcrumbs":7,"title":4},"2921":{"body":13,"breadcrumbs":9,"title":6},"2922":{"body":71,"breadcrumbs":7,"title":4},"2923":{"body":28,"breadcrumbs":7,"title":4},"2924":{"body":16,"breadcrumbs":8,"title":5},"2925":{"body":15,"breadcrumbs":4,"title":1},"2926":{"body":5,"breadcrumbs":6,"title":3},"2927":{"body":15,"breadcrumbs":4,"title":1},"2928":{"body":25,"breadcrumbs":7,"title":4},"2929":{"body":8,"breadcrumbs":8,"title":5},"293":{"body":9,"breadcrumbs":2,"title":1},"2930":{"body":33,"breadcrumbs":9,"title":6},"2931":{"body":3,"breadcrumbs":7,"title":4},"2932":{"body":22,"breadcrumbs":9,"title":6},"2933":{"body":72,"breadcrumbs":9,"title":6},"2934":{"body":27,"breadcrumbs":9,"title":6},"2935":{"body":27,"breadcrumbs":8,"title":5},"2936":{"body":22,"breadcrumbs":6,"title":3},"2937":{"body":8,"breadcrumbs":6,"title":3},"2938":{"body":19,"breadcrumbs":8,"title":5},"2939":{"body":20,"breadcrumbs":9,"title":6},"294":{"body":16,"breadcrumbs":2,"title":1},"2940":{"body":69,"breadcrumbs":8,"title":5},"2941":{"body":123,"breadcrumbs":8,"title":5},"2942":{"body":53,"breadcrumbs":8,"title":5},"2943":{"body":5,"breadcrumbs":6,"title":3},"2944":{"body":31,"breadcrumbs":4,"title":1},"2945":{"body":111,"breadcrumbs":7,"title":4},"2946":{"body":43,"breadcrumbs":9,"title":6},"2947":{"body":68,"breadcrumbs":8,"title":5},"2948":{"body":28,"breadcrumbs":8,"title":5},"2949":{"body":36,"breadcrumbs":8,"title":5},"295":{"body":9,"breadcrumbs":2,"title":1},"2950":{"body":49,"breadcrumbs":7,"title":4},"2951":{"body":0,"breadcrumbs":5,"title":2},"2952":{"body":114,"breadcrumbs":5,"title":2},"2953":{"body":39,"breadcrumbs":5,"title":2},"2954":{"body":37,"breadcrumbs":5,"title":2},"2955":{"body":40,"breadcrumbs":6,"title":3},"2956":{"body":0,"breadcrumbs":6,"title":3},"2957":{"body":39,"breadcrumbs":6,"title":3},"2958":{"body":56,"breadcrumbs":6,"title":3},"2959":{"body":52,"breadcrumbs":5,"title":2},"296":{"body":9,"breadcrumbs":2,"title":1},"2960":{"body":39,"breadcrumbs":4,"title":1},"2961":{"body":0,"breadcrumbs":4,"title":1},"2962":{"body":35,"breadcrumbs":6,"title":3},"2963":{"body":51,"breadcrumbs":6,"title":3},"2964":{"body":46,"breadcrumbs":6,"title":3},"2965":{"body":41,"breadcrumbs":6,"title":3},"2966":{"body":50,"breadcrumbs":6,"title":3},"2967":{"body":34,"breadcrumbs":5,"title":2},"2968":{"body":37,"breadcrumbs":6,"title":3},"2969":{"body":79,"breadcrumbs":5,"title":2},"297":{"body":0,"breadcrumbs":4,"title":3},"2970":{"body":0,"breadcrumbs":5,"title":2},"2971":{"body":57,"breadcrumbs":6,"title":3},"2972":{"body":59,"breadcrumbs":5,"title":2},"2973":{"body":54,"breadcrumbs":5,"title":2},"2974":{"body":0,"breadcrumbs":5,"title":2},"2975":{"body":14,"breadcrumbs":5,"title":2},"2976":{"body":13,"breadcrumbs":5,"title":2},"2977":{"body":39,"breadcrumbs":6,"title":3},"2978":{"body":7,"breadcrumbs":6,"title":3},"2979":{"body":19,"breadcrumbs":5,"title":2},"298":{"body":44,"breadcrumbs":4,"title":3},"2980":{"body":20,"breadcrumbs":4,"title":1},"2981":{"body":41,"breadcrumbs":5,"title":2},"2982":{"body":56,"breadcrumbs":5,"title":2},"2983":{"body":0,"breadcrumbs":5,"title":2},"2984":{"body":16,"breadcrumbs":5,"title":2},"2985":{"body":49,"breadcrumbs":5,"title":2},"2986":{"body":0,"breadcrumbs":5,"title":2},"2987":{"body":2,"breadcrumbs":5,"title":2},"2988":{"body":86,"breadcrumbs":6,"title":3},"2989":{"body":47,"breadcrumbs":6,"title":3},"299":{"body":94,"breadcrumbs":2,"title":1},"2990":{"body":6,"breadcrumbs":5,"title":2},"2991":{"body":49,"breadcrumbs":5,"title":2},"2992":{"body":44,"breadcrumbs":5,"title":2},"2993":{"body":16,"breadcrumbs":5,"title":2},"2994":{"body":42,"breadcrumbs":5,"title":2},"2995":{"body":41,"breadcrumbs":5,"title":2},"2996":{"body":30,"breadcrumbs":5,"title":2},"2997":{"body":4,"breadcrumbs":5,"title":2},"2998":{"body":45,"breadcrumbs":5,"title":2},"2999":{"body":28,"breadcrumbs":5,"title":2},"3":{"body":52,"breadcrumbs":3,"title":2},"30":{"body":18,"breadcrumbs":5,"title":4},"300":{"body":55,"breadcrumbs":2,"title":1},"3000":{"body":37,"breadcrumbs":5,"title":2},"3001":{"body":17,"breadcrumbs":5,"title":2},"3002":{"body":4,"breadcrumbs":5,"title":2},"3003":{"body":26,"breadcrumbs":5,"title":2},"3004":{"body":22,"breadcrumbs":5,"title":2},"3005":{"body":16,"breadcrumbs":6,"title":3},"3006":{"body":17,"breadcrumbs":5,"title":2},"3007":{"body":20,"breadcrumbs":5,"title":2},"3008":{"body":34,"breadcrumbs":5,"title":2},"3009":{"body":19,"breadcrumbs":5,"title":2},"301":{"body":0,"breadcrumbs":3,"title":2},"3010":{"body":0,"breadcrumbs":5,"title":2},"3011":{"body":31,"breadcrumbs":5,"title":2},"3012":{"body":28,"breadcrumbs":5,"title":2},"3013":{"body":21,"breadcrumbs":6,"title":3},"3014":{"body":21,"breadcrumbs":5,"title":2},"3015":{"body":15,"breadcrumbs":5,"title":2},"3016":{"body":0,"breadcrumbs":5,"title":2},"3017":{"body":47,"breadcrumbs":6,"title":3},"3018":{"body":18,"breadcrumbs":6,"title":3},"3019":{"body":43,"breadcrumbs":5,"title":2},"302":{"body":45,"breadcrumbs":3,"title":2},"3020":{"body":0,"breadcrumbs":5,"title":2},"3021":{"body":15,"breadcrumbs":5,"title":2},"3022":{"body":15,"breadcrumbs":5,"title":2},"3023":{"body":45,"breadcrumbs":5,"title":2},"3024":{"body":27,"breadcrumbs":4,"title":1},"3025":{"body":21,"breadcrumbs":5,"title":2},"3026":{"body":26,"breadcrumbs":5,"title":2},"3027":{"body":0,"breadcrumbs":6,"title":3},"3028":{"body":10,"breadcrumbs":4,"title":1},"3029":{"body":13,"breadcrumbs":5,"title":2},"303":{"body":20,"breadcrumbs":3,"title":2},"3030":{"body":29,"breadcrumbs":5,"title":2},"3031":{"body":12,"breadcrumbs":5,"title":2},"3032":{"body":23,"breadcrumbs":5,"title":2},"3033":{"body":0,"breadcrumbs":4,"title":1},"3034":{"body":32,"breadcrumbs":6,"title":3},"3035":{"body":36,"breadcrumbs":7,"title":4},"3036":{"body":20,"breadcrumbs":5,"title":2},"3037":{"body":14,"breadcrumbs":5,"title":2},"3038":{"body":14,"breadcrumbs":6,"title":3},"3039":{"body":11,"breadcrumbs":5,"title":2},"304":{"body":6,"breadcrumbs":5,"title":4},"3040":{"body":26,"breadcrumbs":5,"title":2},"3041":{"body":11,"breadcrumbs":5,"title":2},"3042":{"body":0,"breadcrumbs":5,"title":2},"3043":{"body":5,"breadcrumbs":6,"title":3},"3044":{"body":13,"breadcrumbs":5,"title":2},"3045":{"body":17,"breadcrumbs":5,"title":2},"3046":{"body":7,"breadcrumbs":5,"title":2},"3047":{"body":7,"breadcrumbs":5,"title":2},"3048":{"body":2,"breadcrumbs":5,"title":2},"3049":{"body":56,"breadcrumbs":7,"title":4},"305":{"body":13,"breadcrumbs":2,"title":1},"3050":{"body":88,"breadcrumbs":7,"title":4},"3051":{"body":33,"breadcrumbs":5,"title":2},"3052":{"body":40,"breadcrumbs":5,"title":2},"3053":{"body":42,"breadcrumbs":5,"title":2},"3054":{"body":12,"breadcrumbs":6,"title":3},"3055":{"body":28,"breadcrumbs":6,"title":3},"3056":{"body":101,"breadcrumbs":5,"title":2},"3057":{"body":91,"breadcrumbs":4,"title":1},"3058":{"body":48,"breadcrumbs":5,"title":2},"3059":{"body":63,"breadcrumbs":5,"title":2},"306":{"body":10,"breadcrumbs":2,"title":1},"3060":{"body":13,"breadcrumbs":5,"title":2},"3061":{"body":29,"breadcrumbs":5,"title":2},"3062":{"body":25,"breadcrumbs":7,"title":4},"3063":{"body":26,"breadcrumbs":4,"title":1},"3064":{"body":20,"breadcrumbs":4,"title":1},"3065":{"body":0,"breadcrumbs":4,"title":1},"3066":{"body":41,"breadcrumbs":5,"title":2},"3067":{"body":21,"breadcrumbs":5,"title":2},"3068":{"body":22,"breadcrumbs":4,"title":1},"3069":{"body":75,"breadcrumbs":6,"title":3},"307":{"body":5,"breadcrumbs":3,"title":2},"3070":{"body":0,"breadcrumbs":5,"title":2},"3071":{"body":161,"breadcrumbs":7,"title":4},"3072":{"body":60,"breadcrumbs":6,"title":3},"3073":{"body":21,"breadcrumbs":6,"title":3},"3074":{"body":0,"breadcrumbs":6,"title":3},"3075":{"body":391,"breadcrumbs":7,"title":4},"3076":{"body":19,"breadcrumbs":7,"title":4},"3077":{"body":0,"breadcrumbs":5,"title":2},"3078":{"body":174,"breadcrumbs":7,"title":4},"3079":{"body":44,"breadcrumbs":6,"title":3},"308":{"body":7,"breadcrumbs":3,"title":1},"3080":{"body":20,"breadcrumbs":6,"title":3},"3081":{"body":0,"breadcrumbs":5,"title":2},"3082":{"body":24,"breadcrumbs":6,"title":3},"3083":{"body":23,"breadcrumbs":8,"title":5},"3084":{"body":89,"breadcrumbs":8,"title":5},"3085":{"body":25,"breadcrumbs":8,"title":5},"3086":{"body":0,"breadcrumbs":6,"title":3},"3087":{"body":58,"breadcrumbs":8,"title":5},"3088":{"body":42,"breadcrumbs":7,"title":4},"3089":{"body":0,"breadcrumbs":7,"title":4},"309":{"body":19,"breadcrumbs":3,"title":1},"3090":{"body":17,"breadcrumbs":6,"title":3},"3091":{"body":28,"breadcrumbs":6,"title":3},"3092":{"body":18,"breadcrumbs":6,"title":3},"3093":{"body":0,"breadcrumbs":6,"title":3},"3094":{"body":74,"breadcrumbs":6,"title":3},"3095":{"body":0,"breadcrumbs":5,"title":2},"3096":{"body":30,"breadcrumbs":7,"title":4},"3097":{"body":31,"breadcrumbs":8,"title":5},"3098":{"body":16,"breadcrumbs":9,"title":6},"3099":{"body":0,"breadcrumbs":6,"title":3},"31":{"body":17,"breadcrumbs":4,"title":3},"310":{"body":17,"breadcrumbs":6,"title":4},"3100":{"body":40,"breadcrumbs":7,"title":4},"3101":{"body":8,"breadcrumbs":7,"title":4},"3102":{"body":0,"breadcrumbs":5,"title":2},"3103":{"body":97,"breadcrumbs":5,"title":2},"3104":{"body":55,"breadcrumbs":6,"title":3},"3105":{"body":58,"breadcrumbs":6,"title":3},"3106":{"body":0,"breadcrumbs":5,"title":2},"3107":{"body":88,"breadcrumbs":7,"title":4},"3108":{"body":22,"breadcrumbs":4,"title":1},"3109":{"body":10,"breadcrumbs":5,"title":3},"311":{"body":7,"breadcrumbs":7,"title":5},"3110":{"body":15,"breadcrumbs":4,"title":2},"3111":{"body":48,"breadcrumbs":3,"title":1},"3112":{"body":42,"breadcrumbs":4,"title":2},"3113":{"body":0,"breadcrumbs":3,"title":1},"3114":{"body":12,"breadcrumbs":3,"title":1},"3115":{"body":22,"breadcrumbs":5,"title":3},"3116":{"body":10,"breadcrumbs":4,"title":2},"3117":{"body":0,"breadcrumbs":3,"title":1},"3118":{"body":51,"breadcrumbs":5,"title":3},"3119":{"body":69,"breadcrumbs":4,"title":2},"312":{"body":15,"breadcrumbs":6,"title":4},"3120":{"body":0,"breadcrumbs":4,"title":2},"3121":{"body":53,"breadcrumbs":4,"title":2},"3122":{"body":24,"breadcrumbs":4,"title":2},"3123":{"body":0,"breadcrumbs":4,"title":2},"3124":{"body":11,"breadcrumbs":4,"title":2},"3125":{"body":16,"breadcrumbs":4,"title":2},"3126":{"body":26,"breadcrumbs":5,"title":3},"3127":{"body":26,"breadcrumbs":4,"title":2},"3128":{"body":0,"breadcrumbs":4,"title":2},"3129":{"body":82,"breadcrumbs":4,"title":2},"313":{"body":12,"breadcrumbs":5,"title":3},"3130":{"body":27,"breadcrumbs":4,"title":2},"3131":{"body":21,"breadcrumbs":4,"title":2},"3132":{"body":62,"breadcrumbs":4,"title":2},"3133":{"body":0,"breadcrumbs":4,"title":2},"3134":{"body":10,"breadcrumbs":3,"title":1},"3135":{"body":14,"breadcrumbs":5,"title":3},"3136":{"body":37,"breadcrumbs":5,"title":3},"3137":{"body":22,"breadcrumbs":5,"title":3},"3138":{"body":30,"breadcrumbs":4,"title":2},"3139":{"body":8,"breadcrumbs":4,"title":2},"314":{"body":33,"breadcrumbs":7,"title":5},"3140":{"body":0,"breadcrumbs":3,"title":1},"3141":{"body":26,"breadcrumbs":5,"title":3},"3142":{"body":37,"breadcrumbs":4,"title":2},"3143":{"body":15,"breadcrumbs":5,"title":3},"3144":{"body":15,"breadcrumbs":4,"title":2},"3145":{"body":0,"breadcrumbs":4,"title":2},"3146":{"body":41,"breadcrumbs":4,"title":2},"3147":{"body":25,"breadcrumbs":4,"title":2},"3148":{"body":0,"breadcrumbs":3,"title":1},"3149":{"body":39,"breadcrumbs":4,"title":2},"315":{"body":35,"breadcrumbs":8,"title":6},"3150":{"body":47,"breadcrumbs":5,"title":3},"3151":{"body":37,"breadcrumbs":6,"title":4},"3152":{"body":50,"breadcrumbs":5,"title":3},"3153":{"body":39,"breadcrumbs":5,"title":3},"3154":{"body":0,"breadcrumbs":4,"title":2},"3155":{"body":16,"breadcrumbs":5,"title":3},"3156":{"body":14,"breadcrumbs":4,"title":2},"3157":{"body":11,"breadcrumbs":5,"title":3},"3158":{"body":15,"breadcrumbs":4,"title":2},"3159":{"body":9,"breadcrumbs":5,"title":3},"316":{"body":34,"breadcrumbs":6,"title":4},"3160":{"body":16,"breadcrumbs":5,"title":3},"3161":{"body":0,"breadcrumbs":4,"title":2},"3162":{"body":61,"breadcrumbs":4,"title":2},"3163":{"body":47,"breadcrumbs":4,"title":2},"3164":{"body":40,"breadcrumbs":4,"title":2},"3165":{"body":0,"breadcrumbs":3,"title":1},"3166":{"body":77,"breadcrumbs":5,"title":3},"3167":{"body":49,"breadcrumbs":5,"title":3},"3168":{"body":58,"breadcrumbs":4,"title":2},"3169":{"body":9,"breadcrumbs":3,"title":1},"317":{"body":28,"breadcrumbs":6,"title":4},"3170":{"body":6,"breadcrumbs":4,"title":2},"3171":{"body":13,"breadcrumbs":3,"title":1},"3172":{"body":38,"breadcrumbs":4,"title":2},"3173":{"body":41,"breadcrumbs":4,"title":2},"3174":{"body":117,"breadcrumbs":4,"title":2},"3175":{"body":41,"breadcrumbs":4,"title":2},"3176":{"body":18,"breadcrumbs":3,"title":1},"3177":{"body":75,"breadcrumbs":4,"title":2},"3178":{"body":124,"breadcrumbs":4,"title":2},"3179":{"body":59,"breadcrumbs":3,"title":1},"318":{"body":31,"breadcrumbs":6,"title":4},"3180":{"body":16,"breadcrumbs":4,"title":2},"3181":{"body":26,"breadcrumbs":4,"title":2},"3182":{"body":5,"breadcrumbs":4,"title":2},"3183":{"body":23,"breadcrumbs":4,"title":2},"3184":{"body":19,"breadcrumbs":3,"title":1},"3185":{"body":10,"breadcrumbs":6,"title":3},"3186":{"body":14,"breadcrumbs":5,"title":2},"3187":{"body":34,"breadcrumbs":5,"title":2},"3188":{"body":0,"breadcrumbs":6,"title":3},"3189":{"body":25,"breadcrumbs":6,"title":3},"319":{"body":45,"breadcrumbs":6,"title":4},"3190":{"body":21,"breadcrumbs":6,"title":3},"3191":{"body":16,"breadcrumbs":5,"title":2},"3192":{"body":19,"breadcrumbs":5,"title":2},"3193":{"body":13,"breadcrumbs":5,"title":2},"3194":{"body":16,"breadcrumbs":6,"title":3},"3195":{"body":0,"breadcrumbs":6,"title":3},"3196":{"body":22,"breadcrumbs":5,"title":2},"3197":{"body":16,"breadcrumbs":5,"title":2},"3198":{"body":16,"breadcrumbs":5,"title":2},"3199":{"body":16,"breadcrumbs":6,"title":3},"32":{"body":14,"breadcrumbs":4,"title":3},"320":{"body":25,"breadcrumbs":6,"title":4},"3200":{"body":0,"breadcrumbs":5,"title":2},"3201":{"body":18,"breadcrumbs":8,"title":5},"3202":{"body":20,"breadcrumbs":9,"title":6},"3203":{"body":23,"breadcrumbs":9,"title":6},"3204":{"body":25,"breadcrumbs":8,"title":5},"3205":{"body":0,"breadcrumbs":6,"title":3},"3206":{"body":16,"breadcrumbs":7,"title":4},"3207":{"body":16,"breadcrumbs":6,"title":3},"3208":{"body":13,"breadcrumbs":6,"title":3},"3209":{"body":13,"breadcrumbs":5,"title":2},"321":{"body":0,"breadcrumbs":3,"title":1},"3210":{"body":0,"breadcrumbs":5,"title":2},"3211":{"body":13,"breadcrumbs":8,"title":5},"3212":{"body":23,"breadcrumbs":7,"title":4},"3213":{"body":20,"breadcrumbs":8,"title":5},"3214":{"body":23,"breadcrumbs":6,"title":3},"3215":{"body":45,"breadcrumbs":5,"title":2},"3216":{"body":0,"breadcrumbs":5,"title":2},"3217":{"body":11,"breadcrumbs":7,"title":4},"3218":{"body":10,"breadcrumbs":7,"title":4},"3219":{"body":9,"breadcrumbs":7,"title":4},"322":{"body":13,"breadcrumbs":5,"title":3},"3220":{"body":48,"breadcrumbs":5,"title":2},"3221":{"body":26,"breadcrumbs":5,"title":2},"3222":{"body":33,"breadcrumbs":4,"title":1},"3223":{"body":20,"breadcrumbs":10,"title":6},"3224":{"body":22,"breadcrumbs":6,"title":2},"3225":{"body":35,"breadcrumbs":6,"title":2},"3226":{"body":20,"breadcrumbs":6,"title":2},"3227":{"body":0,"breadcrumbs":7,"title":3},"3228":{"body":46,"breadcrumbs":7,"title":3},"3229":{"body":26,"breadcrumbs":6,"title":2},"323":{"body":17,"breadcrumbs":4,"title":2},"3230":{"body":0,"breadcrumbs":5,"title":1},"3231":{"body":37,"breadcrumbs":9,"title":5},"3232":{"body":15,"breadcrumbs":6,"title":2},"3233":{"body":0,"breadcrumbs":6,"title":2},"3234":{"body":27,"breadcrumbs":5,"title":1},"3235":{"body":27,"breadcrumbs":5,"title":1},"3236":{"body":26,"breadcrumbs":6,"title":2},"3237":{"body":0,"breadcrumbs":7,"title":3},"3238":{"body":38,"breadcrumbs":9,"title":5},"3239":{"body":37,"breadcrumbs":10,"title":6},"324":{"body":14,"breadcrumbs":5,"title":3},"3240":{"body":34,"breadcrumbs":10,"title":6},"3241":{"body":30,"breadcrumbs":8,"title":4},"3242":{"body":33,"breadcrumbs":8,"title":4},"3243":{"body":0,"breadcrumbs":6,"title":2},"3244":{"body":273,"breadcrumbs":6,"title":2},"3245":{"body":85,"breadcrumbs":8,"title":4},"3246":{"body":0,"breadcrumbs":5,"title":1},"3247":{"body":128,"breadcrumbs":9,"title":5},"3248":{"body":121,"breadcrumbs":8,"title":4},"3249":{"body":138,"breadcrumbs":9,"title":5},"325":{"body":6,"breadcrumbs":4,"title":2},"3250":{"body":0,"breadcrumbs":6,"title":2},"3251":{"body":48,"breadcrumbs":5,"title":1},"3252":{"body":13,"breadcrumbs":5,"title":1},"3253":{"body":38,"breadcrumbs":6,"title":2},"3254":{"body":0,"breadcrumbs":7,"title":3},"3255":{"body":30,"breadcrumbs":7,"title":3},"3256":{"body":23,"breadcrumbs":6,"title":2},"3257":{"body":110,"breadcrumbs":5,"title":1},"3258":{"body":0,"breadcrumbs":5,"title":1},"3259":{"body":20,"breadcrumbs":9,"title":5},"326":{"body":7,"breadcrumbs":4,"title":2},"3260":{"body":9,"breadcrumbs":10,"title":6},"3261":{"body":18,"breadcrumbs":8,"title":4},"3262":{"body":6,"breadcrumbs":8,"title":4},"3263":{"body":19,"breadcrumbs":9,"title":5},"3264":{"body":12,"breadcrumbs":8,"title":4},"3265":{"body":15,"breadcrumbs":8,"title":4},"3266":{"body":16,"breadcrumbs":8,"title":4},"3267":{"body":16,"breadcrumbs":8,"title":4},"3268":{"body":0,"breadcrumbs":6,"title":2},"3269":{"body":26,"breadcrumbs":6,"title":2},"327":{"body":9,"breadcrumbs":4,"title":2},"3270":{"body":15,"breadcrumbs":6,"title":2},"3271":{"body":14,"breadcrumbs":6,"title":2},"3272":{"body":50,"breadcrumbs":6,"title":2},"3273":{"body":19,"breadcrumbs":8,"title":4},"3274":{"body":20,"breadcrumbs":6,"title":2},"3275":{"body":18,"breadcrumbs":5,"title":1},"3276":{"body":33,"breadcrumbs":5,"title":1},"3277":{"body":0,"breadcrumbs":7,"title":3},"3278":{"body":18,"breadcrumbs":6,"title":2},"3279":{"body":25,"breadcrumbs":5,"title":1},"328":{"body":19,"breadcrumbs":3,"title":1},"3280":{"body":16,"breadcrumbs":5,"title":1},"3281":{"body":0,"breadcrumbs":7,"title":3},"3282":{"body":71,"breadcrumbs":6,"title":2},"3283":{"body":0,"breadcrumbs":6,"title":2},"3284":{"body":52,"breadcrumbs":10,"title":6},"3285":{"body":45,"breadcrumbs":8,"title":4},"3286":{"body":29,"breadcrumbs":8,"title":4},"3287":{"body":24,"breadcrumbs":8,"title":4},"3288":{"body":30,"breadcrumbs":8,"title":4},"3289":{"body":25,"breadcrumbs":8,"title":4},"329":{"body":21,"breadcrumbs":6,"title":4},"3290":{"body":0,"breadcrumbs":7,"title":3},"3291":{"body":48,"breadcrumbs":8,"title":4},"3292":{"body":34,"breadcrumbs":8,"title":4},"3293":{"body":16,"breadcrumbs":8,"title":4},"3294":{"body":26,"breadcrumbs":8,"title":4},"3295":{"body":22,"breadcrumbs":8,"title":4},"3296":{"body":29,"breadcrumbs":8,"title":4},"3297":{"body":0,"breadcrumbs":6,"title":2},"3298":{"body":112,"breadcrumbs":6,"title":2},"3299":{"body":54,"breadcrumbs":6,"title":2},"33":{"body":14,"breadcrumbs":4,"title":3},"330":{"body":42,"breadcrumbs":6,"title":4},"3300":{"body":20,"breadcrumbs":6,"title":2},"3301":{"body":0,"breadcrumbs":5,"title":1},"3302":{"body":62,"breadcrumbs":7,"title":3},"3303":{"body":23,"breadcrumbs":6,"title":2},"3304":{"body":0,"breadcrumbs":6,"title":2},"3305":{"body":33,"breadcrumbs":7,"title":3},"3306":{"body":28,"breadcrumbs":7,"title":3},"3307":{"body":31,"breadcrumbs":7,"title":3},"3308":{"body":0,"breadcrumbs":5,"title":1},"3309":{"body":34,"breadcrumbs":6,"title":2},"331":{"body":37,"breadcrumbs":8,"title":6},"3310":{"body":34,"breadcrumbs":6,"title":2},"3311":{"body":46,"breadcrumbs":6,"title":2},"3312":{"body":0,"breadcrumbs":6,"title":2},"3313":{"body":19,"breadcrumbs":9,"title":5},"3314":{"body":26,"breadcrumbs":7,"title":3},"3315":{"body":25,"breadcrumbs":7,"title":3},"3316":{"body":26,"breadcrumbs":8,"title":4},"3317":{"body":31,"breadcrumbs":8,"title":4},"3318":{"body":44,"breadcrumbs":7,"title":3},"3319":{"body":20,"breadcrumbs":8,"title":4},"332":{"body":30,"breadcrumbs":7,"title":5},"3320":{"body":0,"breadcrumbs":6,"title":2},"3321":{"body":63,"breadcrumbs":7,"title":3},"3322":{"body":36,"breadcrumbs":6,"title":2},"3323":{"body":25,"breadcrumbs":5,"title":1},"3324":{"body":21,"breadcrumbs":11,"title":7},"3325":{"body":29,"breadcrumbs":6,"title":2},"3326":{"body":0,"breadcrumbs":5,"title":1},"3327":{"body":25,"breadcrumbs":5,"title":1},"3328":{"body":33,"breadcrumbs":6,"title":2},"3329":{"body":32,"breadcrumbs":7,"title":3},"333":{"body":26,"breadcrumbs":6,"title":4},"3330":{"body":0,"breadcrumbs":6,"title":2},"3331":{"body":23,"breadcrumbs":7,"title":3},"3332":{"body":14,"breadcrumbs":6,"title":2},"3333":{"body":45,"breadcrumbs":6,"title":2},"3334":{"body":0,"breadcrumbs":7,"title":3},"3335":{"body":39,"breadcrumbs":9,"title":5},"3336":{"body":49,"breadcrumbs":9,"title":5},"3337":{"body":11,"breadcrumbs":9,"title":5},"3338":{"body":45,"breadcrumbs":8,"title":4},"3339":{"body":0,"breadcrumbs":8,"title":4},"334":{"body":33,"breadcrumbs":8,"title":6},"3340":{"body":33,"breadcrumbs":7,"title":3},"3341":{"body":284,"breadcrumbs":8,"title":4},"3342":{"body":0,"breadcrumbs":8,"title":4},"3343":{"body":41,"breadcrumbs":7,"title":3},"3344":{"body":299,"breadcrumbs":8,"title":4},"3345":{"body":0,"breadcrumbs":9,"title":5},"3346":{"body":51,"breadcrumbs":8,"title":4},"3347":{"body":39,"breadcrumbs":9,"title":5},"3348":{"body":43,"breadcrumbs":6,"title":2},"3349":{"body":54,"breadcrumbs":7,"title":3},"335":{"body":34,"breadcrumbs":7,"title":5},"3350":{"body":0,"breadcrumbs":7,"title":3},"3351":{"body":93,"breadcrumbs":7,"title":3},"3352":{"body":64,"breadcrumbs":7,"title":3},"3353":{"body":66,"breadcrumbs":9,"title":5},"3354":{"body":0,"breadcrumbs":6,"title":2},"3355":{"body":69,"breadcrumbs":10,"title":6},"3356":{"body":77,"breadcrumbs":10,"title":6},"3357":{"body":124,"breadcrumbs":9,"title":5},"3358":{"body":51,"breadcrumbs":9,"title":5},"3359":{"body":0,"breadcrumbs":5,"title":1},"336":{"body":34,"breadcrumbs":6,"title":4},"3360":{"body":83,"breadcrumbs":10,"title":6},"3361":{"body":53,"breadcrumbs":8,"title":4},"3362":{"body":87,"breadcrumbs":10,"title":6},"3363":{"body":55,"breadcrumbs":9,"title":5},"3364":{"body":72,"breadcrumbs":9,"title":5},"3365":{"body":0,"breadcrumbs":6,"title":2},"3366":{"body":143,"breadcrumbs":6,"title":2},"3367":{"body":164,"breadcrumbs":6,"title":2},"3368":{"body":114,"breadcrumbs":6,"title":2},"3369":{"body":0,"breadcrumbs":6,"title":2},"337":{"body":0,"breadcrumbs":5,"title":3},"3370":{"body":48,"breadcrumbs":7,"title":3},"3371":{"body":175,"breadcrumbs":6,"title":2},"3372":{"body":75,"breadcrumbs":7,"title":3},"3373":{"body":0,"breadcrumbs":7,"title":3},"3374":{"body":24,"breadcrumbs":7,"title":3},"3375":{"body":71,"breadcrumbs":6,"title":2},"3376":{"body":41,"breadcrumbs":6,"title":2},"3377":{"body":0,"breadcrumbs":6,"title":2},"3378":{"body":39,"breadcrumbs":6,"title":2},"3379":{"body":39,"breadcrumbs":6,"title":2},"338":{"body":36,"breadcrumbs":6,"title":4},"3380":{"body":36,"breadcrumbs":6,"title":2},"3381":{"body":0,"breadcrumbs":6,"title":2},"3382":{"body":12,"breadcrumbs":5,"title":1},"3383":{"body":11,"breadcrumbs":6,"title":2},"3384":{"body":20,"breadcrumbs":6,"title":2},"3385":{"body":22,"breadcrumbs":5,"title":1},"3386":{"body":16,"breadcrumbs":3,"title":2},"3387":{"body":33,"breadcrumbs":2,"title":1},"3388":{"body":96,"breadcrumbs":3,"title":2},"3389":{"body":0,"breadcrumbs":3,"title":2},"339":{"body":14,"breadcrumbs":8,"title":6},"3390":{"body":56,"breadcrumbs":3,"title":2},"3391":{"body":22,"breadcrumbs":3,"title":2},"3392":{"body":0,"breadcrumbs":3,"title":2},"3393":{"body":11,"breadcrumbs":3,"title":2},"3394":{"body":18,"breadcrumbs":3,"title":2},"3395":{"body":25,"breadcrumbs":4,"title":3},"3396":{"body":9,"breadcrumbs":4,"title":3},"3397":{"body":28,"breadcrumbs":4,"title":3},"3398":{"body":42,"breadcrumbs":4,"title":3},"3399":{"body":28,"breadcrumbs":3,"title":2},"34":{"body":13,"breadcrumbs":5,"title":4},"340":{"body":10,"breadcrumbs":6,"title":4},"3400":{"body":32,"breadcrumbs":3,"title":2},"3401":{"body":12,"breadcrumbs":3,"title":2},"3402":{"body":0,"breadcrumbs":6,"title":4},"3403":{"body":15,"breadcrumbs":8,"title":6},"3404":{"body":41,"breadcrumbs":4,"title":2},"3405":{"body":27,"breadcrumbs":4,"title":2},"3406":{"body":5,"breadcrumbs":4,"title":2},"3407":{"body":20,"breadcrumbs":4,"title":2},"3408":{"body":38,"breadcrumbs":4,"title":2},"3409":{"body":24,"breadcrumbs":4,"title":2},"341":{"body":88,"breadcrumbs":4,"title":2},"3410":{"body":46,"breadcrumbs":4,"title":2},"3411":{"body":26,"breadcrumbs":5,"title":3},"3412":{"body":15,"breadcrumbs":7,"title":5},"3413":{"body":0,"breadcrumbs":4,"title":2},"3414":{"body":28,"breadcrumbs":5,"title":3},"3415":{"body":27,"breadcrumbs":4,"title":2},"3416":{"body":24,"breadcrumbs":4,"title":2},"3417":{"body":31,"breadcrumbs":4,"title":2},"3418":{"body":25,"breadcrumbs":4,"title":2},"3419":{"body":0,"breadcrumbs":4,"title":2},"342":{"body":0,"breadcrumbs":3,"title":1},"3420":{"body":6,"breadcrumbs":3,"title":1},"3421":{"body":30,"breadcrumbs":3,"title":1},"3422":{"body":5,"breadcrumbs":4,"title":2},"3423":{"body":27,"breadcrumbs":5,"title":3},"3424":{"body":0,"breadcrumbs":4,"title":2},"3425":{"body":14,"breadcrumbs":6,"title":4},"3426":{"body":16,"breadcrumbs":5,"title":3},"3427":{"body":12,"breadcrumbs":4,"title":2},"3428":{"body":0,"breadcrumbs":4,"title":2},"3429":{"body":25,"breadcrumbs":4,"title":2},"343":{"body":17,"breadcrumbs":5,"title":3},"3430":{"body":23,"breadcrumbs":4,"title":2},"3431":{"body":0,"breadcrumbs":4,"title":2},"3432":{"body":21,"breadcrumbs":4,"title":2},"3433":{"body":11,"breadcrumbs":4,"title":2},"3434":{"body":12,"breadcrumbs":3,"title":1},"3435":{"body":12,"breadcrumbs":4,"title":2},"3436":{"body":0,"breadcrumbs":3,"title":1},"3437":{"body":47,"breadcrumbs":4,"title":2},"3438":{"body":29,"breadcrumbs":5,"title":3},"3439":{"body":0,"breadcrumbs":3,"title":1},"344":{"body":17,"breadcrumbs":6,"title":4},"3440":{"body":30,"breadcrumbs":3,"title":1},"3441":{"body":28,"breadcrumbs":3,"title":1},"3442":{"body":7,"breadcrumbs":4,"title":2},"3443":{"body":19,"breadcrumbs":4,"title":3},"3444":{"body":43,"breadcrumbs":2,"title":1},"3445":{"body":8,"breadcrumbs":2,"title":1},"3446":{"body":0,"breadcrumbs":2,"title":1},"3447":{"body":35,"breadcrumbs":4,"title":3},"3448":{"body":35,"breadcrumbs":4,"title":3},"3449":{"body":18,"breadcrumbs":3,"title":2},"345":{"body":24,"breadcrumbs":5,"title":3},"3450":{"body":0,"breadcrumbs":3,"title":2},"3451":{"body":12,"breadcrumbs":3,"title":2},"3452":{"body":15,"breadcrumbs":3,"title":2},"3453":{"body":10,"breadcrumbs":3,"title":2},"3454":{"body":11,"breadcrumbs":2,"title":1},"3455":{"body":0,"breadcrumbs":3,"title":2},"3456":{"body":20,"breadcrumbs":4,"title":3},"3457":{"body":19,"breadcrumbs":5,"title":4},"3458":{"body":19,"breadcrumbs":4,"title":3},"3459":{"body":19,"breadcrumbs":4,"title":3},"346":{"body":10,"breadcrumbs":4,"title":2},"3460":{"body":55,"breadcrumbs":3,"title":2},"3461":{"body":0,"breadcrumbs":3,"title":2},"3462":{"body":13,"breadcrumbs":3,"title":2},"3463":{"body":13,"breadcrumbs":3,"title":2},"3464":{"body":12,"breadcrumbs":4,"title":3},"3465":{"body":7,"breadcrumbs":3,"title":2},"3466":{"body":0,"breadcrumbs":6,"title":4},"3467":{"body":16,"breadcrumbs":6,"title":4},"3468":{"body":0,"breadcrumbs":4,"title":2},"3469":{"body":53,"breadcrumbs":6,"title":4},"347":{"body":7,"breadcrumbs":4,"title":2},"3470":{"body":69,"breadcrumbs":5,"title":3},"3471":{"body":35,"breadcrumbs":5,"title":3},"3472":{"body":39,"breadcrumbs":4,"title":2},"3473":{"body":0,"breadcrumbs":4,"title":2},"3474":{"body":41,"breadcrumbs":4,"title":2},"3475":{"body":33,"breadcrumbs":5,"title":3},"3476":{"body":42,"breadcrumbs":4,"title":2},"3477":{"body":0,"breadcrumbs":4,"title":2},"3478":{"body":27,"breadcrumbs":4,"title":2},"3479":{"body":22,"breadcrumbs":4,"title":2},"348":{"body":8,"breadcrumbs":2,"title":1},"3480":{"body":72,"breadcrumbs":4,"title":2},"3481":{"body":0,"breadcrumbs":4,"title":2},"3482":{"body":24,"breadcrumbs":4,"title":2},"3483":{"body":17,"breadcrumbs":4,"title":2},"3484":{"body":13,"breadcrumbs":4,"title":2},"3485":{"body":33,"breadcrumbs":4,"title":2},"3486":{"body":15,"breadcrumbs":3,"title":1},"3487":{"body":26,"breadcrumbs":4,"title":2},"3488":{"body":21,"breadcrumbs":4,"title":2},"3489":{"body":16,"breadcrumbs":5,"title":3},"349":{"body":14,"breadcrumbs":2,"title":1},"3490":{"body":62,"breadcrumbs":3,"title":1},"3491":{"body":20,"breadcrumbs":3,"title":1},"3492":{"body":6,"breadcrumbs":3,"title":1},"3493":{"body":30,"breadcrumbs":3,"title":1},"3494":{"body":0,"breadcrumbs":3,"title":1},"3495":{"body":24,"breadcrumbs":4,"title":2},"3496":{"body":32,"breadcrumbs":3,"title":1},"3497":{"body":0,"breadcrumbs":4,"title":2},"3498":{"body":9,"breadcrumbs":3,"title":1},"3499":{"body":15,"breadcrumbs":3,"title":1},"35":{"body":13,"breadcrumbs":4,"title":3},"350":{"body":27,"breadcrumbs":5,"title":4},"3500":{"body":14,"breadcrumbs":3,"title":1},"3501":{"body":14,"breadcrumbs":3,"title":1},"3502":{"body":10,"breadcrumbs":3,"title":1},"3503":{"body":11,"breadcrumbs":3,"title":1},"3504":{"body":0,"breadcrumbs":4,"title":2},"3505":{"body":9,"breadcrumbs":4,"title":2},"3506":{"body":9,"breadcrumbs":4,"title":2},"3507":{"body":8,"breadcrumbs":4,"title":2},"3508":{"body":5,"breadcrumbs":4,"title":2},"3509":{"body":45,"breadcrumbs":5,"title":3},"351":{"body":47,"breadcrumbs":5,"title":4},"3510":{"body":0,"breadcrumbs":4,"title":2},"3511":{"body":41,"breadcrumbs":4,"title":2},"3512":{"body":9,"breadcrumbs":4,"title":2},"3513":{"body":10,"breadcrumbs":5,"title":3},"3514":{"body":22,"breadcrumbs":4,"title":2},"3515":{"body":0,"breadcrumbs":4,"title":2},"3516":{"body":43,"breadcrumbs":4,"title":2},"3517":{"body":12,"breadcrumbs":4,"title":2},"3518":{"body":0,"breadcrumbs":4,"title":2},"3519":{"body":73,"breadcrumbs":5,"title":3},"352":{"body":52,"breadcrumbs":6,"title":5},"3520":{"body":227,"breadcrumbs":5,"title":3},"3521":{"body":0,"breadcrumbs":5,"title":3},"3522":{"body":32,"breadcrumbs":5,"title":3},"3523":{"body":71,"breadcrumbs":5,"title":3},"3524":{"body":165,"breadcrumbs":5,"title":3},"3525":{"body":46,"breadcrumbs":4,"title":2},"3526":{"body":0,"breadcrumbs":4,"title":2},"3527":{"body":60,"breadcrumbs":4,"title":2},"3528":{"body":132,"breadcrumbs":4,"title":2},"3529":{"body":0,"breadcrumbs":4,"title":2},"353":{"body":45,"breadcrumbs":6,"title":5},"3530":{"body":79,"breadcrumbs":4,"title":2},"3531":{"body":32,"breadcrumbs":4,"title":2},"3532":{"body":0,"breadcrumbs":4,"title":2},"3533":{"body":44,"breadcrumbs":4,"title":2},"3534":{"body":31,"breadcrumbs":4,"title":2},"3535":{"body":0,"breadcrumbs":4,"title":2},"3536":{"body":31,"breadcrumbs":4,"title":2},"3537":{"body":38,"breadcrumbs":4,"title":2},"3538":{"body":24,"breadcrumbs":4,"title":2},"3539":{"body":0,"breadcrumbs":4,"title":2},"354":{"body":3,"breadcrumbs":7,"title":6},"3540":{"body":35,"breadcrumbs":4,"title":2},"3541":{"body":25,"breadcrumbs":4,"title":2},"3542":{"body":0,"breadcrumbs":4,"title":2},"3543":{"body":31,"breadcrumbs":4,"title":2},"3544":{"body":40,"breadcrumbs":5,"title":3},"3545":{"body":0,"breadcrumbs":4,"title":2},"3546":{"body":30,"breadcrumbs":4,"title":2},"3547":{"body":37,"breadcrumbs":4,"title":2},"3548":{"body":0,"breadcrumbs":5,"title":3},"3549":{"body":54,"breadcrumbs":5,"title":3},"355":{"body":11,"breadcrumbs":2,"title":1},"3550":{"body":47,"breadcrumbs":5,"title":3},"3551":{"body":57,"breadcrumbs":4,"title":2},"3552":{"body":0,"breadcrumbs":4,"title":2},"3553":{"body":111,"breadcrumbs":4,"title":2},"3554":{"body":25,"breadcrumbs":4,"title":2},"3555":{"body":13,"breadcrumbs":4,"title":2},"3556":{"body":36,"breadcrumbs":4,"title":2},"3557":{"body":145,"breadcrumbs":5,"title":3},"3558":{"body":34,"breadcrumbs":5,"title":3},"3559":{"body":46,"breadcrumbs":5,"title":3},"356":{"body":19,"breadcrumbs":3,"title":2},"3560":{"body":59,"breadcrumbs":5,"title":3},"3561":{"body":52,"breadcrumbs":4,"title":2},"3562":{"body":26,"breadcrumbs":3,"title":1},"3563":{"body":97,"breadcrumbs":4,"title":2},"3564":{"body":18,"breadcrumbs":3,"title":1},"3565":{"body":0,"breadcrumbs":4,"title":2},"3566":{"body":20,"breadcrumbs":5,"title":3},"3567":{"body":36,"breadcrumbs":5,"title":3},"3568":{"body":24,"breadcrumbs":4,"title":2},"3569":{"body":27,"breadcrumbs":5,"title":3},"357":{"body":12,"breadcrumbs":3,"title":2},"3570":{"body":37,"breadcrumbs":4,"title":2},"3571":{"body":0,"breadcrumbs":7,"title":4},"3572":{"body":34,"breadcrumbs":4,"title":1},"3573":{"body":0,"breadcrumbs":5,"title":2},"3574":{"body":23,"breadcrumbs":7,"title":4},"3575":{"body":22,"breadcrumbs":7,"title":4},"3576":{"body":29,"breadcrumbs":7,"title":4},"3577":{"body":0,"breadcrumbs":5,"title":2},"3578":{"body":82,"breadcrumbs":4,"title":1},"3579":{"body":68,"breadcrumbs":4,"title":1},"358":{"body":30,"breadcrumbs":6,"title":5},"3580":{"body":88,"breadcrumbs":5,"title":2},"3581":{"body":4,"breadcrumbs":7,"title":4},"3582":{"body":33,"breadcrumbs":6,"title":3},"3583":{"body":85,"breadcrumbs":6,"title":3},"3584":{"body":30,"breadcrumbs":5,"title":2},"3585":{"body":0,"breadcrumbs":5,"title":2},"3586":{"body":18,"breadcrumbs":6,"title":3},"3587":{"body":18,"breadcrumbs":5,"title":2},"3588":{"body":8,"breadcrumbs":5,"title":2},"3589":{"body":0,"breadcrumbs":5,"title":2},"359":{"body":21,"breadcrumbs":5,"title":4},"3590":{"body":39,"breadcrumbs":8,"title":5},"3591":{"body":20,"breadcrumbs":8,"title":5},"3592":{"body":29,"breadcrumbs":7,"title":4},"3593":{"body":0,"breadcrumbs":5,"title":2},"3594":{"body":30,"breadcrumbs":6,"title":3},"3595":{"body":27,"breadcrumbs":6,"title":3},"3596":{"body":0,"breadcrumbs":4,"title":1},"3597":{"body":14,"breadcrumbs":6,"title":3},"3598":{"body":24,"breadcrumbs":5,"title":2},"3599":{"body":20,"breadcrumbs":7,"title":4},"36":{"body":36,"breadcrumbs":3,"title":2},"360":{"body":0,"breadcrumbs":4,"title":3},"3600":{"body":0,"breadcrumbs":5,"title":2},"3601":{"body":13,"breadcrumbs":6,"title":3},"3602":{"body":7,"breadcrumbs":6,"title":3},"3603":{"body":11,"breadcrumbs":6,"title":3},"3604":{"body":46,"breadcrumbs":5,"title":2},"3605":{"body":22,"breadcrumbs":5,"title":2},"3606":{"body":19,"breadcrumbs":5,"title":2},"3607":{"body":0,"breadcrumbs":5,"title":2},"3608":{"body":24,"breadcrumbs":6,"title":3},"3609":{"body":46,"breadcrumbs":5,"title":2},"361":{"body":18,"breadcrumbs":5,"title":4},"3610":{"body":24,"breadcrumbs":5,"title":2},"3611":{"body":40,"breadcrumbs":5,"title":2},"3612":{"body":27,"breadcrumbs":6,"title":3},"3613":{"body":24,"breadcrumbs":5,"title":2},"3614":{"body":24,"breadcrumbs":6,"title":3},"3615":{"body":40,"breadcrumbs":5,"title":2},"3616":{"body":37,"breadcrumbs":4,"title":1},"3617":{"body":10,"breadcrumbs":5,"title":2},"3618":{"body":21,"breadcrumbs":6,"title":3},"3619":{"body":21,"breadcrumbs":5,"title":2},"362":{"body":28,"breadcrumbs":3,"title":2},"3620":{"body":54,"breadcrumbs":5,"title":2},"3621":{"body":14,"breadcrumbs":5,"title":2},"3622":{"body":0,"breadcrumbs":10,"title":7},"3623":{"body":30,"breadcrumbs":10,"title":7},"3624":{"body":50,"breadcrumbs":5,"title":2},"3625":{"body":66,"breadcrumbs":6,"title":3},"3626":{"body":64,"breadcrumbs":6,"title":3},"3627":{"body":26,"breadcrumbs":8,"title":5},"3628":{"body":38,"breadcrumbs":5,"title":2},"3629":{"body":15,"breadcrumbs":10,"title":5},"363":{"body":35,"breadcrumbs":3,"title":2},"3630":{"body":29,"breadcrumbs":7,"title":2},"3631":{"body":35,"breadcrumbs":6,"title":1},"3632":{"body":33,"breadcrumbs":11,"title":6},"3633":{"body":486,"breadcrumbs":7,"title":2},"3634":{"body":36,"breadcrumbs":7,"title":2},"3635":{"body":28,"breadcrumbs":12,"title":7},"3636":{"body":442,"breadcrumbs":7,"title":2},"3637":{"body":71,"breadcrumbs":7,"title":2},"3638":{"body":24,"breadcrumbs":11,"title":6},"3639":{"body":266,"breadcrumbs":7,"title":2},"364":{"body":0,"breadcrumbs":4,"title":3},"3640":{"body":23,"breadcrumbs":11,"title":6},"3641":{"body":140,"breadcrumbs":7,"title":2},"3642":{"body":0,"breadcrumbs":7,"title":2},"3643":{"body":32,"breadcrumbs":8,"title":3},"3644":{"body":29,"breadcrumbs":7,"title":2},"3645":{"body":23,"breadcrumbs":8,"title":3},"3646":{"body":0,"breadcrumbs":6,"title":1},"3647":{"body":27,"breadcrumbs":9,"title":4},"3648":{"body":23,"breadcrumbs":8,"title":3},"3649":{"body":21,"breadcrumbs":9,"title":4},"365":{"body":15,"breadcrumbs":4,"title":3},"3650":{"body":28,"breadcrumbs":6,"title":1},"3651":{"body":0,"breadcrumbs":8,"title":6},"3652":{"body":14,"breadcrumbs":8,"title":6},"3653":{"body":54,"breadcrumbs":4,"title":2},"3654":{"body":0,"breadcrumbs":5,"title":3},"3655":{"body":30,"breadcrumbs":3,"title":1},"3656":{"body":29,"breadcrumbs":3,"title":1},"3657":{"body":30,"breadcrumbs":3,"title":1},"3658":{"body":21,"breadcrumbs":3,"title":1},"3659":{"body":30,"breadcrumbs":3,"title":1},"366":{"body":19,"breadcrumbs":3,"title":2},"3660":{"body":36,"breadcrumbs":3,"title":1},"3661":{"body":13,"breadcrumbs":3,"title":1},"3662":{"body":26,"breadcrumbs":4,"title":2},"3663":{"body":57,"breadcrumbs":6,"title":4},"3664":{"body":81,"breadcrumbs":5,"title":3},"3665":{"body":0,"breadcrumbs":5,"title":3},"3666":{"body":33,"breadcrumbs":7,"title":5},"3667":{"body":26,"breadcrumbs":4,"title":2},"3668":{"body":20,"breadcrumbs":4,"title":2},"3669":{"body":10,"breadcrumbs":4,"title":2},"367":{"body":15,"breadcrumbs":4,"title":3},"3670":{"body":29,"breadcrumbs":6,"title":4},"3671":{"body":35,"breadcrumbs":5,"title":3},"3672":{"body":12,"breadcrumbs":4,"title":2},"3673":{"body":17,"breadcrumbs":4,"title":2},"3674":{"body":12,"breadcrumbs":4,"title":2},"3675":{"body":50,"breadcrumbs":4,"title":2},"3676":{"body":30,"breadcrumbs":4,"title":2},"3677":{"body":0,"breadcrumbs":4,"title":2},"3678":{"body":43,"breadcrumbs":6,"title":4},"3679":{"body":36,"breadcrumbs":6,"title":4},"368":{"body":22,"breadcrumbs":4,"title":3},"3680":{"body":38,"breadcrumbs":5,"title":3},"3681":{"body":0,"breadcrumbs":5,"title":3},"3682":{"body":90,"breadcrumbs":6,"title":4},"3683":{"body":64,"breadcrumbs":6,"title":4},"3684":{"body":57,"breadcrumbs":6,"title":4},"3685":{"body":71,"breadcrumbs":6,"title":4},"3686":{"body":54,"breadcrumbs":6,"title":4},"3687":{"body":0,"breadcrumbs":5,"title":3},"3688":{"body":80,"breadcrumbs":6,"title":4},"3689":{"body":59,"breadcrumbs":6,"title":4},"369":{"body":0,"breadcrumbs":3,"title":2},"3690":{"body":57,"breadcrumbs":6,"title":4},"3691":{"body":62,"breadcrumbs":6,"title":4},"3692":{"body":63,"breadcrumbs":7,"title":5},"3693":{"body":0,"breadcrumbs":5,"title":3},"3694":{"body":60,"breadcrumbs":6,"title":4},"3695":{"body":48,"breadcrumbs":6,"title":4},"3696":{"body":44,"breadcrumbs":6,"title":4},"3697":{"body":64,"breadcrumbs":6,"title":4},"3698":{"body":0,"breadcrumbs":4,"title":2},"3699":{"body":77,"breadcrumbs":5,"title":3},"37":{"body":0,"breadcrumbs":2,"title":1},"370":{"body":30,"breadcrumbs":4,"title":3},"3700":{"body":72,"breadcrumbs":5,"title":3},"3701":{"body":52,"breadcrumbs":5,"title":3},"3702":{"body":70,"breadcrumbs":5,"title":3},"3703":{"body":0,"breadcrumbs":4,"title":2},"3704":{"body":45,"breadcrumbs":5,"title":3},"3705":{"body":45,"breadcrumbs":5,"title":3},"3706":{"body":0,"breadcrumbs":4,"title":2},"3707":{"body":41,"breadcrumbs":5,"title":3},"3708":{"body":53,"breadcrumbs":5,"title":3},"3709":{"body":51,"breadcrumbs":5,"title":3},"371":{"body":14,"breadcrumbs":3,"title":2},"3710":{"body":0,"breadcrumbs":4,"title":2},"3711":{"body":60,"breadcrumbs":5,"title":3},"3712":{"body":31,"breadcrumbs":5,"title":3},"3713":{"body":0,"breadcrumbs":4,"title":2},"3714":{"body":23,"breadcrumbs":4,"title":2},"3715":{"body":24,"breadcrumbs":4,"title":2},"3716":{"body":63,"breadcrumbs":4,"title":2},"3717":{"body":41,"breadcrumbs":5,"title":3},"3718":{"body":0,"breadcrumbs":5,"title":3},"3719":{"body":36,"breadcrumbs":4,"title":2},"372":{"body":0,"breadcrumbs":3,"title":2},"3720":{"body":65,"breadcrumbs":4,"title":2},"3721":{"body":0,"breadcrumbs":4,"title":2},"3722":{"body":31,"breadcrumbs":4,"title":2},"3723":{"body":40,"breadcrumbs":4,"title":2},"3724":{"body":59,"breadcrumbs":4,"title":2},"3725":{"body":19,"breadcrumbs":6,"title":3},"3726":{"body":9,"breadcrumbs":5,"title":2},"3727":{"body":59,"breadcrumbs":5,"title":2},"3728":{"body":42,"breadcrumbs":5,"title":2},"3729":{"body":25,"breadcrumbs":6,"title":3},"373":{"body":16,"breadcrumbs":2,"title":1},"3730":{"body":36,"breadcrumbs":6,"title":3},"3731":{"body":33,"breadcrumbs":6,"title":3},"3732":{"body":20,"breadcrumbs":4,"title":1},"3733":{"body":0,"breadcrumbs":4,"title":1},"3734":{"body":3,"breadcrumbs":5,"title":2},"3735":{"body":5,"breadcrumbs":6,"title":3},"3736":{"body":4,"breadcrumbs":5,"title":2},"3737":{"body":7,"breadcrumbs":6,"title":3},"3738":{"body":18,"breadcrumbs":5,"title":2},"3739":{"body":9,"breadcrumbs":4,"title":1},"374":{"body":17,"breadcrumbs":3,"title":2},"3740":{"body":6,"breadcrumbs":7,"title":4},"3741":{"body":26,"breadcrumbs":5,"title":2},"3742":{"body":42,"breadcrumbs":5,"title":2},"3743":{"body":0,"breadcrumbs":5,"title":2},"3744":{"body":17,"breadcrumbs":5,"title":2},"3745":{"body":37,"breadcrumbs":6,"title":3},"3746":{"body":34,"breadcrumbs":5,"title":2},"3747":{"body":33,"breadcrumbs":5,"title":2},"3748":{"body":0,"breadcrumbs":5,"title":2},"3749":{"body":3,"breadcrumbs":6,"title":3},"375":{"body":46,"breadcrumbs":3,"title":2},"3750":{"body":11,"breadcrumbs":6,"title":3},"3751":{"body":24,"breadcrumbs":5,"title":2},"3752":{"body":25,"breadcrumbs":5,"title":2},"3753":{"body":13,"breadcrumbs":6,"title":3},"3754":{"body":13,"breadcrumbs":5,"title":2},"3755":{"body":18,"breadcrumbs":5,"title":2},"3756":{"body":0,"breadcrumbs":6,"title":3},"3757":{"body":41,"breadcrumbs":5,"title":2},"3758":{"body":0,"breadcrumbs":6,"title":3},"3759":{"body":42,"breadcrumbs":6,"title":3},"376":{"body":21,"breadcrumbs":3,"title":2},"3760":{"body":55,"breadcrumbs":7,"title":4},"3761":{"body":38,"breadcrumbs":6,"title":3},"3762":{"body":78,"breadcrumbs":6,"title":3},"3763":{"body":0,"breadcrumbs":5,"title":2},"3764":{"body":19,"breadcrumbs":5,"title":2},"3765":{"body":6,"breadcrumbs":6,"title":3},"3766":{"body":40,"breadcrumbs":6,"title":3},"3767":{"body":0,"breadcrumbs":4,"title":1},"3768":{"body":18,"breadcrumbs":6,"title":3},"3769":{"body":22,"breadcrumbs":8,"title":5},"377":{"body":18,"breadcrumbs":3,"title":2},"3770":{"body":22,"breadcrumbs":7,"title":4},"3771":{"body":37,"breadcrumbs":8,"title":5},"3772":{"body":21,"breadcrumbs":7,"title":4},"3773":{"body":20,"breadcrumbs":5,"title":2},"3774":{"body":0,"breadcrumbs":5,"title":2},"3775":{"body":19,"breadcrumbs":7,"title":4},"3776":{"body":5,"breadcrumbs":7,"title":4},"3777":{"body":7,"breadcrumbs":7,"title":4},"3778":{"body":4,"breadcrumbs":7,"title":4},"3779":{"body":12,"breadcrumbs":8,"title":5},"378":{"body":15,"breadcrumbs":6,"title":3},"3780":{"body":11,"breadcrumbs":9,"title":6},"3781":{"body":0,"breadcrumbs":5,"title":2},"3782":{"body":10,"breadcrumbs":5,"title":2},"3783":{"body":11,"breadcrumbs":6,"title":3},"3784":{"body":17,"breadcrumbs":5,"title":2},"3785":{"body":23,"breadcrumbs":5,"title":2},"3786":{"body":19,"breadcrumbs":5,"title":2},"3787":{"body":13,"breadcrumbs":6,"title":3},"3788":{"body":41,"breadcrumbs":4,"title":1},"3789":{"body":0,"breadcrumbs":5,"title":2},"379":{"body":27,"breadcrumbs":5,"title":2},"3790":{"body":14,"breadcrumbs":5,"title":2},"3791":{"body":35,"breadcrumbs":6,"title":3},"3792":{"body":0,"breadcrumbs":6,"title":3},"3793":{"body":106,"breadcrumbs":5,"title":2},"3794":{"body":20,"breadcrumbs":4,"title":1},"3795":{"body":11,"breadcrumbs":5,"title":2},"3796":{"body":0,"breadcrumbs":5,"title":2},"3797":{"body":32,"breadcrumbs":6,"title":3},"3798":{"body":48,"breadcrumbs":6,"title":3},"3799":{"body":28,"breadcrumbs":7,"title":4},"38":{"body":26,"breadcrumbs":3,"title":2},"380":{"body":22,"breadcrumbs":4,"title":1},"3800":{"body":0,"breadcrumbs":6,"title":3},"3801":{"body":55,"breadcrumbs":6,"title":3},"3802":{"body":58,"breadcrumbs":6,"title":3},"3803":{"body":16,"breadcrumbs":7,"title":4},"3804":{"body":0,"breadcrumbs":5,"title":2},"3805":{"body":55,"breadcrumbs":5,"title":2},"3806":{"body":41,"breadcrumbs":4,"title":1},"3807":{"body":20,"breadcrumbs":5,"title":2},"3808":{"body":0,"breadcrumbs":5,"title":2},"3809":{"body":111,"breadcrumbs":5,"title":2},"381":{"body":57,"breadcrumbs":6,"title":3},"3810":{"body":0,"breadcrumbs":5,"title":2},"3811":{"body":25,"breadcrumbs":5,"title":2},"3812":{"body":43,"breadcrumbs":5,"title":2},"3813":{"body":39,"breadcrumbs":5,"title":2},"3814":{"body":0,"breadcrumbs":4,"title":1},"3815":{"body":16,"breadcrumbs":6,"title":3},"3816":{"body":24,"breadcrumbs":6,"title":3},"3817":{"body":16,"breadcrumbs":5,"title":2},"3818":{"body":8,"breadcrumbs":5,"title":2},"3819":{"body":54,"breadcrumbs":5,"title":2},"382":{"body":47,"breadcrumbs":5,"title":2},"3820":{"body":11,"breadcrumbs":4,"title":1},"3821":{"body":0,"breadcrumbs":5,"title":2},"3822":{"body":2,"breadcrumbs":5,"title":2},"3823":{"body":16,"breadcrumbs":5,"title":2},"3824":{"body":46,"breadcrumbs":5,"title":2},"3825":{"body":20,"breadcrumbs":4,"title":1},"3826":{"body":26,"breadcrumbs":5,"title":2},"3827":{"body":15,"breadcrumbs":5,"title":2},"3828":{"body":13,"breadcrumbs":5,"title":2},"3829":{"body":46,"breadcrumbs":5,"title":2},"383":{"body":32,"breadcrumbs":8,"title":5},"3830":{"body":36,"breadcrumbs":4,"title":1},"3831":{"body":65,"breadcrumbs":5,"title":2},"3832":{"body":39,"breadcrumbs":5,"title":2},"3833":{"body":64,"breadcrumbs":6,"title":3},"3834":{"body":14,"breadcrumbs":5,"title":2},"3835":{"body":34,"breadcrumbs":5,"title":2},"3836":{"body":27,"breadcrumbs":5,"title":2},"3837":{"body":14,"breadcrumbs":3,"title":2},"3838":{"body":23,"breadcrumbs":3,"title":2},"3839":{"body":0,"breadcrumbs":3,"title":2},"384":{"body":0,"breadcrumbs":8,"title":5},"3840":{"body":34,"breadcrumbs":3,"title":2},"3841":{"body":45,"breadcrumbs":4,"title":3},"3842":{"body":0,"breadcrumbs":4,"title":3},"3843":{"body":9,"breadcrumbs":4,"title":3},"3844":{"body":45,"breadcrumbs":3,"title":2},"3845":{"body":32,"breadcrumbs":3,"title":2},"3846":{"body":15,"breadcrumbs":3,"title":2},"3847":{"body":32,"breadcrumbs":3,"title":2},"3848":{"body":16,"breadcrumbs":4,"title":3},"3849":{"body":8,"breadcrumbs":3,"title":2},"385":{"body":97,"breadcrumbs":8,"title":5},"3850":{"body":56,"breadcrumbs":4,"title":3},"3851":{"body":22,"breadcrumbs":3,"title":2},"3852":{"body":31,"breadcrumbs":3,"title":2},"3853":{"body":0,"breadcrumbs":4,"title":3},"3854":{"body":21,"breadcrumbs":3,"title":2},"3855":{"body":90,"breadcrumbs":4,"title":3},"3856":{"body":23,"breadcrumbs":3,"title":2},"3857":{"body":0,"breadcrumbs":4,"title":3},"3858":{"body":13,"breadcrumbs":4,"title":3},"3859":{"body":70,"breadcrumbs":4,"title":3},"386":{"body":57,"breadcrumbs":9,"title":6},"3860":{"body":0,"breadcrumbs":4,"title":3},"3861":{"body":55,"breadcrumbs":5,"title":4},"3862":{"body":46,"breadcrumbs":5,"title":4},"3863":{"body":0,"breadcrumbs":3,"title":2},"3864":{"body":28,"breadcrumbs":3,"title":2},"3865":{"body":34,"breadcrumbs":4,"title":3},"3866":{"body":0,"breadcrumbs":3,"title":2},"3867":{"body":113,"breadcrumbs":4,"title":3},"3868":{"body":31,"breadcrumbs":3,"title":2},"3869":{"body":20,"breadcrumbs":3,"title":2},"387":{"body":64,"breadcrumbs":8,"title":5},"3870":{"body":0,"breadcrumbs":4,"title":3},"3871":{"body":16,"breadcrumbs":4,"title":3},"3872":{"body":23,"breadcrumbs":3,"title":2},"3873":{"body":33,"breadcrumbs":4,"title":3},"3874":{"body":24,"breadcrumbs":3,"title":2},"3875":{"body":0,"breadcrumbs":5,"title":4},"3876":{"body":22,"breadcrumbs":4,"title":3},"3877":{"body":28,"breadcrumbs":3,"title":2},"3878":{"body":14,"breadcrumbs":3,"title":2},"3879":{"body":25,"breadcrumbs":3,"title":2},"388":{"body":58,"breadcrumbs":8,"title":5},"3880":{"body":31,"breadcrumbs":3,"title":2},"3881":{"body":0,"breadcrumbs":3,"title":2},"3882":{"body":16,"breadcrumbs":4,"title":3},"3883":{"body":22,"breadcrumbs":4,"title":3},"3884":{"body":36,"breadcrumbs":3,"title":2},"3885":{"body":13,"breadcrumbs":5,"title":3},"3886":{"body":0,"breadcrumbs":4,"title":2},"3887":{"body":49,"breadcrumbs":7,"title":5},"3888":{"body":56,"breadcrumbs":7,"title":5},"3889":{"body":40,"breadcrumbs":6,"title":4},"389":{"body":40,"breadcrumbs":8,"title":5},"3890":{"body":46,"breadcrumbs":6,"title":4},"3891":{"body":26,"breadcrumbs":5,"title":3},"3892":{"body":0,"breadcrumbs":6,"title":4},"3893":{"body":32,"breadcrumbs":7,"title":5},"3894":{"body":20,"breadcrumbs":5,"title":3},"3895":{"body":15,"breadcrumbs":5,"title":3},"3896":{"body":28,"breadcrumbs":6,"title":4},"3897":{"body":34,"breadcrumbs":5,"title":3},"3898":{"body":0,"breadcrumbs":5,"title":3},"3899":{"body":3,"breadcrumbs":4,"title":2},"39":{"body":17,"breadcrumbs":3,"title":2},"390":{"body":4,"breadcrumbs":8,"title":5},"3900":{"body":4,"breadcrumbs":4,"title":2},"3901":{"body":3,"breadcrumbs":5,"title":3},"3902":{"body":21,"breadcrumbs":4,"title":2},"3903":{"body":0,"breadcrumbs":3,"title":1},"3904":{"body":18,"breadcrumbs":5,"title":3},"3905":{"body":19,"breadcrumbs":4,"title":2},"3906":{"body":27,"breadcrumbs":4,"title":2},"3907":{"body":26,"breadcrumbs":4,"title":2},"3908":{"body":6,"breadcrumbs":4,"title":2},"3909":{"body":29,"breadcrumbs":5,"title":3},"391":{"body":14,"breadcrumbs":6,"title":3},"3910":{"body":24,"breadcrumbs":4,"title":2},"3911":{"body":21,"breadcrumbs":5,"title":3},"3912":{"body":9,"breadcrumbs":6,"title":3},"3913":{"body":19,"breadcrumbs":4,"title":1},"3914":{"body":0,"breadcrumbs":5,"title":2},"3915":{"body":1044,"breadcrumbs":6,"title":3},"3916":{"body":0,"breadcrumbs":7,"title":4},"3917":{"body":22,"breadcrumbs":9,"title":6},"3918":{"body":51,"breadcrumbs":5,"title":2},"3919":{"body":65,"breadcrumbs":6,"title":3},"392":{"body":42,"breadcrumbs":6,"title":3},"3920":{"body":62,"breadcrumbs":6,"title":3},"3921":{"body":76,"breadcrumbs":5,"title":2},"3922":{"body":60,"breadcrumbs":6,"title":3},"3923":{"body":48,"breadcrumbs":4,"title":1},"3924":{"body":8,"breadcrumbs":6,"title":3},"3925":{"body":19,"breadcrumbs":4,"title":1},"3926":{"body":17,"breadcrumbs":6,"title":3},"3927":{"body":26,"breadcrumbs":5,"title":2},"3928":{"body":53,"breadcrumbs":5,"title":2},"3929":{"body":4,"breadcrumbs":5,"title":2},"393":{"body":22,"breadcrumbs":6,"title":3},"3930":{"body":28,"breadcrumbs":5,"title":2},"3931":{"body":31,"breadcrumbs":5,"title":2},"3932":{"body":0,"breadcrumbs":5,"title":2},"3933":{"body":16,"breadcrumbs":4,"title":1},"3934":{"body":33,"breadcrumbs":4,"title":1},"3935":{"body":9,"breadcrumbs":5,"title":2},"3936":{"body":15,"breadcrumbs":4,"title":1},"3937":{"body":19,"breadcrumbs":4,"title":1},"3938":{"body":0,"breadcrumbs":6,"title":3},"3939":{"body":16,"breadcrumbs":7,"title":4},"394":{"body":24,"breadcrumbs":8,"title":5},"3940":{"body":6,"breadcrumbs":7,"title":4},"3941":{"body":8,"breadcrumbs":7,"title":4},"3942":{"body":8,"breadcrumbs":7,"title":4},"3943":{"body":12,"breadcrumbs":7,"title":4},"3944":{"body":7,"breadcrumbs":7,"title":4},"3945":{"body":0,"breadcrumbs":6,"title":3},"3946":{"body":9,"breadcrumbs":6,"title":3},"3947":{"body":17,"breadcrumbs":5,"title":2},"3948":{"body":23,"breadcrumbs":5,"title":2},"3949":{"body":0,"breadcrumbs":6,"title":3},"395":{"body":0,"breadcrumbs":5,"title":2},"3950":{"body":10,"breadcrumbs":5,"title":2},"3951":{"body":2,"breadcrumbs":5,"title":2},"3952":{"body":3,"breadcrumbs":5,"title":2},"3953":{"body":2,"breadcrumbs":5,"title":2},"3954":{"body":0,"breadcrumbs":5,"title":2},"3955":{"body":12,"breadcrumbs":5,"title":2},"3956":{"body":31,"breadcrumbs":7,"title":4},"3957":{"body":0,"breadcrumbs":5,"title":2},"3958":{"body":35,"breadcrumbs":7,"title":4},"3959":{"body":19,"breadcrumbs":6,"title":3},"396":{"body":42,"breadcrumbs":7,"title":4},"3960":{"body":15,"breadcrumbs":6,"title":3},"3961":{"body":40,"breadcrumbs":4,"title":1},"3962":{"body":0,"breadcrumbs":5,"title":2},"3963":{"body":21,"breadcrumbs":5,"title":2},"3964":{"body":17,"breadcrumbs":5,"title":2},"3965":{"body":0,"breadcrumbs":4,"title":1},"3966":{"body":17,"breadcrumbs":6,"title":3},"3967":{"body":20,"breadcrumbs":6,"title":3},"3968":{"body":16,"breadcrumbs":5,"title":2},"3969":{"body":36,"breadcrumbs":5,"title":2},"397":{"body":59,"breadcrumbs":7,"title":4},"3970":{"body":32,"breadcrumbs":4,"title":1},"3971":{"body":13,"breadcrumbs":5,"title":2},"3972":{"body":0,"breadcrumbs":7,"title":4},"3973":{"body":14,"breadcrumbs":4,"title":1},"3974":{"body":41,"breadcrumbs":5,"title":2},"3975":{"body":0,"breadcrumbs":4,"title":1},"3976":{"body":62,"breadcrumbs":6,"title":3},"3977":{"body":51,"breadcrumbs":6,"title":3},"3978":{"body":59,"breadcrumbs":6,"title":3},"3979":{"body":83,"breadcrumbs":6,"title":3},"398":{"body":45,"breadcrumbs":7,"title":4},"3980":{"body":40,"breadcrumbs":6,"title":3},"3981":{"body":53,"breadcrumbs":6,"title":3},"3982":{"body":25,"breadcrumbs":5,"title":2},"3983":{"body":28,"breadcrumbs":6,"title":3},"3984":{"body":32,"breadcrumbs":5,"title":2},"3985":{"body":0,"breadcrumbs":4,"title":1},"3986":{"body":75,"breadcrumbs":5,"title":2},"3987":{"body":49,"breadcrumbs":6,"title":3},"3988":{"body":40,"breadcrumbs":5,"title":2},"3989":{"body":41,"breadcrumbs":5,"title":2},"399":{"body":62,"breadcrumbs":6,"title":3},"3990":{"body":48,"breadcrumbs":4,"title":1},"3991":{"body":6,"breadcrumbs":4,"title":1},"3992":{"body":10,"breadcrumbs":8,"title":5},"3993":{"body":12,"breadcrumbs":5,"title":2},"3994":{"body":38,"breadcrumbs":4,"title":1},"3995":{"body":33,"breadcrumbs":5,"title":2},"3996":{"body":0,"breadcrumbs":5,"title":2},"3997":{"body":28,"breadcrumbs":6,"title":3},"3998":{"body":30,"breadcrumbs":7,"title":4},"3999":{"body":36,"breadcrumbs":6,"title":3},"4":{"body":28,"breadcrumbs":2,"title":1},"40":{"body":16,"breadcrumbs":2,"title":1},"400":{"body":0,"breadcrumbs":6,"title":3},"4000":{"body":0,"breadcrumbs":5,"title":2},"4001":{"body":40,"breadcrumbs":5,"title":2},"4002":{"body":47,"breadcrumbs":5,"title":2},"4003":{"body":70,"breadcrumbs":6,"title":3},"4004":{"body":0,"breadcrumbs":5,"title":2},"4005":{"body":23,"breadcrumbs":5,"title":2},"4006":{"body":65,"breadcrumbs":5,"title":2},"4007":{"body":44,"breadcrumbs":5,"title":2},"4008":{"body":89,"breadcrumbs":5,"title":2},"4009":{"body":90,"breadcrumbs":5,"title":2},"401":{"body":27,"breadcrumbs":7,"title":4},"4010":{"body":0,"breadcrumbs":5,"title":2},"4011":{"body":49,"breadcrumbs":6,"title":3},"4012":{"body":44,"breadcrumbs":6,"title":3},"4013":{"body":0,"breadcrumbs":4,"title":1},"4014":{"body":25,"breadcrumbs":6,"title":3},"4015":{"body":14,"breadcrumbs":7,"title":4},"4016":{"body":8,"breadcrumbs":7,"title":4},"4017":{"body":18,"breadcrumbs":6,"title":3},"4018":{"body":41,"breadcrumbs":8,"title":5},"4019":{"body":0,"breadcrumbs":5,"title":2},"402":{"body":17,"breadcrumbs":7,"title":4},"4020":{"body":20,"breadcrumbs":8,"title":5},"4021":{"body":25,"breadcrumbs":7,"title":4},"4022":{"body":10,"breadcrumbs":8,"title":5},"4023":{"body":14,"breadcrumbs":7,"title":4},"4024":{"body":16,"breadcrumbs":7,"title":4},"4025":{"body":28,"breadcrumbs":9,"title":6},"4026":{"body":15,"breadcrumbs":7,"title":4},"4027":{"body":50,"breadcrumbs":5,"title":2},"4028":{"body":95,"breadcrumbs":4,"title":1},"4029":{"body":7,"breadcrumbs":7,"title":4},"403":{"body":9,"breadcrumbs":5,"title":2},"4030":{"body":19,"breadcrumbs":4,"title":1},"4031":{"body":0,"breadcrumbs":5,"title":2},"4032":{"body":25,"breadcrumbs":7,"title":4},"4033":{"body":23,"breadcrumbs":5,"title":2},"4034":{"body":0,"breadcrumbs":5,"title":2},"4035":{"body":13,"breadcrumbs":5,"title":2},"4036":{"body":16,"breadcrumbs":4,"title":1},"4037":{"body":44,"breadcrumbs":5,"title":2},"4038":{"body":0,"breadcrumbs":5,"title":2},"4039":{"body":33,"breadcrumbs":8,"title":5},"404":{"body":0,"breadcrumbs":5,"title":2},"4040":{"body":29,"breadcrumbs":8,"title":5},"4041":{"body":34,"breadcrumbs":8,"title":5},"4042":{"body":27,"breadcrumbs":8,"title":5},"4043":{"body":0,"breadcrumbs":5,"title":2},"4044":{"body":42,"breadcrumbs":5,"title":2},"4045":{"body":45,"breadcrumbs":6,"title":3},"4046":{"body":0,"breadcrumbs":4,"title":1},"4047":{"body":11,"breadcrumbs":5,"title":2},"4048":{"body":12,"breadcrumbs":5,"title":2},"4049":{"body":11,"breadcrumbs":5,"title":2},"405":{"body":19,"breadcrumbs":8,"title":5},"4050":{"body":11,"breadcrumbs":5,"title":2},"4051":{"body":13,"breadcrumbs":5,"title":2},"4052":{"body":0,"breadcrumbs":5,"title":2},"4053":{"body":23,"breadcrumbs":5,"title":2},"4054":{"body":0,"breadcrumbs":4,"title":1},"4055":{"body":25,"breadcrumbs":5,"title":2},"4056":{"body":32,"breadcrumbs":5,"title":2},"4057":{"body":0,"breadcrumbs":4,"title":1},"4058":{"body":30,"breadcrumbs":5,"title":2},"4059":{"body":13,"breadcrumbs":5,"title":2},"406":{"body":22,"breadcrumbs":8,"title":5},"4060":{"body":0,"breadcrumbs":5,"title":2},"4061":{"body":18,"breadcrumbs":9,"title":6},"4062":{"body":27,"breadcrumbs":10,"title":7},"4063":{"body":19,"breadcrumbs":8,"title":5},"4064":{"body":15,"breadcrumbs":7,"title":4},"4065":{"body":0,"breadcrumbs":4,"title":1},"4066":{"body":9,"breadcrumbs":7,"title":4},"4067":{"body":17,"breadcrumbs":6,"title":3},"4068":{"body":28,"breadcrumbs":7,"title":4},"4069":{"body":14,"breadcrumbs":7,"title":4},"407":{"body":36,"breadcrumbs":8,"title":5},"4070":{"body":0,"breadcrumbs":6,"title":3},"4071":{"body":15,"breadcrumbs":5,"title":2},"4072":{"body":11,"breadcrumbs":5,"title":2},"4073":{"body":22,"breadcrumbs":5,"title":2},"4074":{"body":37,"breadcrumbs":5,"title":2},"4075":{"body":17,"breadcrumbs":4,"title":1},"4076":{"body":9,"breadcrumbs":7,"title":4},"4077":{"body":23,"breadcrumbs":4,"title":1},"4078":{"body":0,"breadcrumbs":5,"title":2},"4079":{"body":22,"breadcrumbs":5,"title":2},"408":{"body":24,"breadcrumbs":9,"title":6},"4080":{"body":12,"breadcrumbs":5,"title":2},"4081":{"body":26,"breadcrumbs":5,"title":2},"4082":{"body":14,"breadcrumbs":5,"title":2},"4083":{"body":14,"breadcrumbs":7,"title":4},"4084":{"body":0,"breadcrumbs":5,"title":2},"4085":{"body":26,"breadcrumbs":6,"title":3},"4086":{"body":17,"breadcrumbs":8,"title":5},"4087":{"body":11,"breadcrumbs":8,"title":5},"4088":{"body":13,"breadcrumbs":7,"title":4},"4089":{"body":0,"breadcrumbs":5,"title":2},"409":{"body":0,"breadcrumbs":5,"title":2},"4090":{"body":59,"breadcrumbs":5,"title":2},"4091":{"body":37,"breadcrumbs":6,"title":3},"4092":{"body":29,"breadcrumbs":5,"title":2},"4093":{"body":35,"breadcrumbs":5,"title":2},"4094":{"body":0,"breadcrumbs":4,"title":1},"4095":{"body":42,"breadcrumbs":6,"title":3},"4096":{"body":17,"breadcrumbs":6,"title":3},"4097":{"body":0,"breadcrumbs":6,"title":3},"4098":{"body":19,"breadcrumbs":6,"title":3},"4099":{"body":11,"breadcrumbs":6,"title":3},"41":{"body":3,"breadcrumbs":2,"title":1},"410":{"body":18,"breadcrumbs":6,"title":3},"4100":{"body":25,"breadcrumbs":8,"title":5},"4101":{"body":0,"breadcrumbs":5,"title":2},"4102":{"body":27,"breadcrumbs":4,"title":1},"4103":{"body":46,"breadcrumbs":5,"title":2},"4104":{"body":23,"breadcrumbs":5,"title":2},"4105":{"body":29,"breadcrumbs":5,"title":2},"4106":{"body":30,"breadcrumbs":6,"title":3},"4107":{"body":36,"breadcrumbs":6,"title":3},"4108":{"body":21,"breadcrumbs":6,"title":3},"4109":{"body":0,"breadcrumbs":5,"title":2},"411":{"body":17,"breadcrumbs":5,"title":2},"4110":{"body":45,"breadcrumbs":5,"title":2},"4111":{"body":5,"breadcrumbs":5,"title":2},"4112":{"body":0,"breadcrumbs":4,"title":1},"4113":{"body":23,"breadcrumbs":5,"title":2},"4114":{"body":28,"breadcrumbs":6,"title":3},"4115":{"body":33,"breadcrumbs":5,"title":2},"4116":{"body":25,"breadcrumbs":6,"title":3},"4117":{"body":0,"breadcrumbs":4,"title":1},"4118":{"body":57,"breadcrumbs":5,"title":2},"4119":{"body":56,"breadcrumbs":5,"title":2},"412":{"body":34,"breadcrumbs":5,"title":2},"4120":{"body":22,"breadcrumbs":5,"title":2},"4121":{"body":51,"breadcrumbs":5,"title":2},"4122":{"body":0,"breadcrumbs":5,"title":2},"4123":{"body":37,"breadcrumbs":4,"title":1},"4124":{"body":38,"breadcrumbs":4,"title":1},"4125":{"body":36,"breadcrumbs":4,"title":1},"4126":{"body":6,"breadcrumbs":4,"title":1},"4127":{"body":7,"breadcrumbs":5,"title":2},"4128":{"body":54,"breadcrumbs":5,"title":2},"4129":{"body":30,"breadcrumbs":5,"title":2},"413":{"body":28,"breadcrumbs":5,"title":2},"4130":{"body":30,"breadcrumbs":6,"title":3},"4131":{"body":22,"breadcrumbs":4,"title":1},"4132":{"body":51,"breadcrumbs":5,"title":2},"4133":{"body":39,"breadcrumbs":4,"title":1},"4134":{"body":19,"breadcrumbs":5,"title":2},"4135":{"body":34,"breadcrumbs":5,"title":2},"4136":{"body":20,"breadcrumbs":4,"title":1},"4137":{"body":28,"breadcrumbs":5,"title":2},"4138":{"body":0,"breadcrumbs":5,"title":2},"4139":{"body":25,"breadcrumbs":7,"title":4},"414":{"body":41,"breadcrumbs":5,"title":2},"4140":{"body":33,"breadcrumbs":8,"title":5},"4141":{"body":31,"breadcrumbs":8,"title":5},"4142":{"body":42,"breadcrumbs":6,"title":3},"4143":{"body":20,"breadcrumbs":8,"title":5},"4144":{"body":59,"breadcrumbs":7,"title":4},"4145":{"body":43,"breadcrumbs":4,"title":1},"4146":{"body":10,"breadcrumbs":6,"title":3},"4147":{"body":45,"breadcrumbs":4,"title":1},"4148":{"body":15,"breadcrumbs":5,"title":2},"4149":{"body":0,"breadcrumbs":4,"title":1},"415":{"body":43,"breadcrumbs":5,"title":2},"4150":{"body":40,"breadcrumbs":5,"title":2},"4151":{"body":14,"breadcrumbs":5,"title":2},"4152":{"body":0,"breadcrumbs":5,"title":2},"4153":{"body":24,"breadcrumbs":6,"title":3},"4154":{"body":15,"breadcrumbs":7,"title":4},"4155":{"body":22,"breadcrumbs":6,"title":3},"4156":{"body":28,"breadcrumbs":7,"title":4},"4157":{"body":0,"breadcrumbs":5,"title":2},"4158":{"body":29,"breadcrumbs":6,"title":3},"4159":{"body":64,"breadcrumbs":6,"title":3},"416":{"body":20,"breadcrumbs":6,"title":5},"4160":{"body":23,"breadcrumbs":6,"title":3},"4161":{"body":32,"breadcrumbs":6,"title":3},"4162":{"body":15,"breadcrumbs":6,"title":3},"4163":{"body":0,"breadcrumbs":5,"title":2},"4164":{"body":39,"breadcrumbs":6,"title":3},"4165":{"body":46,"breadcrumbs":6,"title":3},"4166":{"body":44,"breadcrumbs":6,"title":3},"4167":{"body":44,"breadcrumbs":7,"title":4},"4168":{"body":0,"breadcrumbs":5,"title":2},"4169":{"body":70,"breadcrumbs":6,"title":3},"417":{"body":56,"breadcrumbs":2,"title":1},"4170":{"body":57,"breadcrumbs":4,"title":1},"4171":{"body":0,"breadcrumbs":6,"title":3},"4172":{"body":46,"breadcrumbs":5,"title":2},"4173":{"body":19,"breadcrumbs":5,"title":2},"4174":{"body":23,"breadcrumbs":6,"title":3},"4175":{"body":0,"breadcrumbs":5,"title":2},"4176":{"body":23,"breadcrumbs":7,"title":4},"4177":{"body":20,"breadcrumbs":8,"title":5},"4178":{"body":55,"breadcrumbs":6,"title":3},"4179":{"body":20,"breadcrumbs":6,"title":3},"418":{"body":0,"breadcrumbs":3,"title":2},"4180":{"body":30,"breadcrumbs":6,"title":3},"4181":{"body":36,"breadcrumbs":6,"title":3},"4182":{"body":30,"breadcrumbs":6,"title":3},"4183":{"body":0,"breadcrumbs":4,"title":1},"4184":{"body":13,"breadcrumbs":5,"title":2},"4185":{"body":22,"breadcrumbs":6,"title":3},"4186":{"body":17,"breadcrumbs":5,"title":2},"4187":{"body":33,"breadcrumbs":5,"title":2},"4188":{"body":29,"breadcrumbs":7,"title":4},"4189":{"body":22,"breadcrumbs":6,"title":3},"419":{"body":16,"breadcrumbs":4,"title":3},"4190":{"body":0,"breadcrumbs":5,"title":2},"4191":{"body":29,"breadcrumbs":5,"title":2},"4192":{"body":38,"breadcrumbs":6,"title":3},"4193":{"body":16,"breadcrumbs":5,"title":2},"4194":{"body":15,"breadcrumbs":4,"title":1},"4195":{"body":0,"breadcrumbs":5,"title":2},"4196":{"body":26,"breadcrumbs":6,"title":3},"4197":{"body":58,"breadcrumbs":5,"title":2},"4198":{"body":18,"breadcrumbs":6,"title":3},"4199":{"body":45,"breadcrumbs":5,"title":2},"42":{"body":76,"breadcrumbs":3,"title":2},"420":{"body":12,"breadcrumbs":4,"title":3},"4200":{"body":34,"breadcrumbs":5,"title":2},"4201":{"body":39,"breadcrumbs":5,"title":2},"4202":{"body":42,"breadcrumbs":4,"title":1},"4203":{"body":30,"breadcrumbs":4,"title":1},"4204":{"body":25,"breadcrumbs":4,"title":1},"4205":{"body":24,"breadcrumbs":6,"title":3},"4206":{"body":63,"breadcrumbs":4,"title":1},"4207":{"body":17,"breadcrumbs":4,"title":1},"4208":{"body":0,"breadcrumbs":6,"title":4},"4209":{"body":13,"breadcrumbs":6,"title":4},"421":{"body":11,"breadcrumbs":3,"title":2},"4210":{"body":0,"breadcrumbs":5,"title":3},"4211":{"body":25,"breadcrumbs":5,"title":3},"4212":{"body":23,"breadcrumbs":5,"title":3},"4213":{"body":22,"breadcrumbs":7,"title":5},"4214":{"body":33,"breadcrumbs":5,"title":3},"4215":{"body":23,"breadcrumbs":7,"title":5},"4216":{"body":26,"breadcrumbs":5,"title":3},"4217":{"body":24,"breadcrumbs":7,"title":5},"4218":{"body":24,"breadcrumbs":5,"title":3},"4219":{"body":22,"breadcrumbs":6,"title":4},"422":{"body":33,"breadcrumbs":3,"title":2},"4220":{"body":10,"breadcrumbs":5,"title":3},"4221":{"body":12,"breadcrumbs":5,"title":3},"4222":{"body":11,"breadcrumbs":4,"title":2},"4223":{"body":22,"breadcrumbs":4,"title":2},"4224":{"body":93,"breadcrumbs":4,"title":2},"4225":{"body":28,"breadcrumbs":3,"title":1},"4226":{"body":17,"breadcrumbs":3,"title":1},"4227":{"body":23,"breadcrumbs":3,"title":1},"4228":{"body":20,"breadcrumbs":4,"title":2},"4229":{"body":9,"breadcrumbs":7,"title":4},"423":{"body":66,"breadcrumbs":3,"title":2},"4230":{"body":26,"breadcrumbs":4,"title":1},"4231":{"body":44,"breadcrumbs":4,"title":1},"4232":{"body":26,"breadcrumbs":5,"title":2},"4233":{"body":0,"breadcrumbs":4,"title":1},"4234":{"body":12,"breadcrumbs":8,"title":5},"4235":{"body":22,"breadcrumbs":7,"title":4},"4236":{"body":16,"breadcrumbs":6,"title":3},"4237":{"body":0,"breadcrumbs":4,"title":1},"4238":{"body":31,"breadcrumbs":6,"title":3},"4239":{"body":27,"breadcrumbs":5,"title":2},"424":{"body":0,"breadcrumbs":3,"title":2},"4240":{"body":23,"breadcrumbs":6,"title":3},"4241":{"body":0,"breadcrumbs":6,"title":3},"4242":{"body":27,"breadcrumbs":5,"title":2},"4243":{"body":19,"breadcrumbs":5,"title":2},"4244":{"body":0,"breadcrumbs":4,"title":1},"4245":{"body":12,"breadcrumbs":6,"title":3},"4246":{"body":29,"breadcrumbs":5,"title":2},"4247":{"body":48,"breadcrumbs":6,"title":3},"4248":{"body":0,"breadcrumbs":5,"title":2},"4249":{"body":32,"breadcrumbs":7,"title":4},"425":{"body":27,"breadcrumbs":4,"title":3},"4250":{"body":48,"breadcrumbs":5,"title":2},"4251":{"body":29,"breadcrumbs":5,"title":2},"4252":{"body":0,"breadcrumbs":5,"title":2},"4253":{"body":53,"breadcrumbs":6,"title":3},"4254":{"body":19,"breadcrumbs":5,"title":2},"4255":{"body":11,"breadcrumbs":7,"title":4},"4256":{"body":0,"breadcrumbs":4,"title":1},"4257":{"body":13,"breadcrumbs":5,"title":2},"4258":{"body":11,"breadcrumbs":5,"title":2},"4259":{"body":0,"breadcrumbs":4,"title":1},"426":{"body":23,"breadcrumbs":5,"title":4},"4260":{"body":62,"breadcrumbs":5,"title":2},"4261":{"body":0,"breadcrumbs":5,"title":2},"4262":{"body":26,"breadcrumbs":5,"title":2},"4263":{"body":25,"breadcrumbs":4,"title":1},"4264":{"body":0,"breadcrumbs":5,"title":2},"4265":{"body":39,"breadcrumbs":5,"title":2},"4266":{"body":36,"breadcrumbs":5,"title":2},"4267":{"body":0,"breadcrumbs":4,"title":1},"4268":{"body":32,"breadcrumbs":5,"title":2},"4269":{"body":23,"breadcrumbs":5,"title":2},"427":{"body":31,"breadcrumbs":5,"title":4},"4270":{"body":17,"breadcrumbs":5,"title":2},"4271":{"body":22,"breadcrumbs":4,"title":1},"4272":{"body":23,"breadcrumbs":7,"title":4},"4273":{"body":0,"breadcrumbs":4,"title":1},"4274":{"body":48,"breadcrumbs":4,"title":1},"4275":{"body":28,"breadcrumbs":5,"title":2},"4276":{"body":0,"breadcrumbs":5,"title":2},"4277":{"body":23,"breadcrumbs":6,"title":3},"4278":{"body":40,"breadcrumbs":7,"title":4},"4279":{"body":71,"breadcrumbs":7,"title":4},"428":{"body":23,"breadcrumbs":3,"title":2},"4280":{"body":0,"breadcrumbs":4,"title":1},"4281":{"body":51,"breadcrumbs":5,"title":2},"4282":{"body":24,"breadcrumbs":5,"title":2},"4283":{"body":0,"breadcrumbs":4,"title":1},"4284":{"body":25,"breadcrumbs":5,"title":2},"4285":{"body":25,"breadcrumbs":5,"title":2},"4286":{"body":32,"breadcrumbs":6,"title":3},"4287":{"body":17,"breadcrumbs":5,"title":2},"4288":{"body":29,"breadcrumbs":5,"title":2},"4289":{"body":0,"breadcrumbs":5,"title":2},"429":{"body":49,"breadcrumbs":3,"title":2},"4290":{"body":29,"breadcrumbs":5,"title":2},"4291":{"body":35,"breadcrumbs":5,"title":2},"4292":{"body":38,"breadcrumbs":5,"title":2},"4293":{"body":28,"breadcrumbs":5,"title":2},"4294":{"body":0,"breadcrumbs":4,"title":1},"4295":{"body":26,"breadcrumbs":5,"title":2},"4296":{"body":29,"breadcrumbs":5,"title":2},"4297":{"body":60,"breadcrumbs":6,"title":3},"4298":{"body":59,"breadcrumbs":5,"title":2},"4299":{"body":31,"breadcrumbs":4,"title":1},"43":{"body":8,"breadcrumbs":4,"title":2},"430":{"body":32,"breadcrumbs":4,"title":3},"4300":{"body":0,"breadcrumbs":6,"title":3},"4301":{"body":19,"breadcrumbs":5,"title":2},"4302":{"body":17,"breadcrumbs":4,"title":1},"4303":{"body":16,"breadcrumbs":5,"title":2},"4304":{"body":18,"breadcrumbs":5,"title":2},"4305":{"body":16,"breadcrumbs":5,"title":2},"4306":{"body":0,"breadcrumbs":5,"title":2},"4307":{"body":30,"breadcrumbs":5,"title":2},"4308":{"body":23,"breadcrumbs":5,"title":2},"4309":{"body":37,"breadcrumbs":5,"title":2},"431":{"body":39,"breadcrumbs":3,"title":2},"4310":{"body":0,"breadcrumbs":5,"title":2},"4311":{"body":9,"breadcrumbs":5,"title":2},"4312":{"body":9,"breadcrumbs":5,"title":2},"4313":{"body":9,"breadcrumbs":5,"title":2},"4314":{"body":0,"breadcrumbs":5,"title":2},"4315":{"body":29,"breadcrumbs":5,"title":2},"4316":{"body":26,"breadcrumbs":5,"title":2},"4317":{"body":0,"breadcrumbs":5,"title":2},"4318":{"body":30,"breadcrumbs":6,"title":3},"4319":{"body":30,"breadcrumbs":7,"title":4},"432":{"body":8,"breadcrumbs":4,"title":3},"4320":{"body":16,"breadcrumbs":6,"title":3},"4321":{"body":19,"breadcrumbs":5,"title":2},"4322":{"body":22,"breadcrumbs":4,"title":1},"4323":{"body":0,"breadcrumbs":10,"title":5},"4324":{"body":0,"breadcrumbs":7,"title":2},"4325":{"body":37,"breadcrumbs":9,"title":4},"4326":{"body":80,"breadcrumbs":8,"title":3},"4327":{"body":0,"breadcrumbs":7,"title":2},"4328":{"body":20,"breadcrumbs":7,"title":2},"4329":{"body":34,"breadcrumbs":8,"title":3},"433":{"body":38,"breadcrumbs":3,"title":2},"4330":{"body":21,"breadcrumbs":7,"title":2},"4331":{"body":0,"breadcrumbs":8,"title":3},"4332":{"body":23,"breadcrumbs":7,"title":2},"4333":{"body":36,"breadcrumbs":7,"title":2},"4334":{"body":28,"breadcrumbs":8,"title":3},"4335":{"body":0,"breadcrumbs":7,"title":2},"4336":{"body":71,"breadcrumbs":8,"title":3},"4337":{"body":19,"breadcrumbs":8,"title":3},"4338":{"body":31,"breadcrumbs":8,"title":3},"4339":{"body":16,"breadcrumbs":7,"title":2},"434":{"body":0,"breadcrumbs":4,"title":3},"4340":{"body":20,"breadcrumbs":8,"title":3},"4341":{"body":55,"breadcrumbs":7,"title":2},"4342":{"body":37,"breadcrumbs":7,"title":2},"4343":{"body":8,"breadcrumbs":7,"title":2},"4344":{"body":18,"breadcrumbs":7,"title":2},"4345":{"body":6,"breadcrumbs":7,"title":2},"4346":{"body":28,"breadcrumbs":7,"title":2},"4347":{"body":0,"breadcrumbs":8,"title":3},"4348":{"body":28,"breadcrumbs":8,"title":3},"4349":{"body":41,"breadcrumbs":8,"title":3},"435":{"body":19,"breadcrumbs":2,"title":1},"4350":{"body":0,"breadcrumbs":6,"title":1},"4351":{"body":28,"breadcrumbs":8,"title":3},"4352":{"body":33,"breadcrumbs":8,"title":3},"4353":{"body":24,"breadcrumbs":7,"title":2},"4354":{"body":0,"breadcrumbs":7,"title":2},"4355":{"body":49,"breadcrumbs":6,"title":1},"4356":{"body":42,"breadcrumbs":7,"title":2},"4357":{"body":0,"breadcrumbs":7,"title":2},"4358":{"body":80,"breadcrumbs":7,"title":2},"4359":{"body":37,"breadcrumbs":6,"title":1},"436":{"body":0,"breadcrumbs":5,"title":4},"4360":{"body":37,"breadcrumbs":7,"title":2},"4361":{"body":109,"breadcrumbs":6,"title":1},"4362":{"body":20,"breadcrumbs":6,"title":1},"4363":{"body":7,"breadcrumbs":6,"title":1},"4364":{"body":14,"breadcrumbs":7,"title":4},"4365":{"body":30,"breadcrumbs":5,"title":2},"4366":{"body":44,"breadcrumbs":4,"title":1},"4367":{"body":37,"breadcrumbs":5,"title":2},"4368":{"body":40,"breadcrumbs":5,"title":2},"4369":{"body":0,"breadcrumbs":5,"title":2},"437":{"body":65,"breadcrumbs":6,"title":5},"4370":{"body":107,"breadcrumbs":5,"title":2},"4371":{"body":51,"breadcrumbs":7,"title":4},"4372":{"body":90,"breadcrumbs":6,"title":3},"4373":{"body":0,"breadcrumbs":4,"title":1},"4374":{"body":19,"breadcrumbs":5,"title":2},"4375":{"body":23,"breadcrumbs":5,"title":2},"4376":{"body":21,"breadcrumbs":5,"title":2},"4377":{"body":0,"breadcrumbs":4,"title":1},"4378":{"body":4,"breadcrumbs":9,"title":6},"4379":{"body":42,"breadcrumbs":7,"title":4},"438":{"body":53,"breadcrumbs":7,"title":6},"4380":{"body":24,"breadcrumbs":8,"title":5},"4381":{"body":33,"breadcrumbs":7,"title":4},"4382":{"body":11,"breadcrumbs":8,"title":5},"4383":{"body":0,"breadcrumbs":7,"title":4},"4384":{"body":61,"breadcrumbs":6,"title":3},"4385":{"body":35,"breadcrumbs":6,"title":3},"4386":{"body":38,"breadcrumbs":6,"title":3},"4387":{"body":26,"breadcrumbs":6,"title":3},"4388":{"body":14,"breadcrumbs":6,"title":3},"4389":{"body":52,"breadcrumbs":5,"title":2},"439":{"body":25,"breadcrumbs":7,"title":6},"4390":{"body":521,"breadcrumbs":5,"title":2},"4391":{"body":20,"breadcrumbs":5,"title":2},"4392":{"body":115,"breadcrumbs":5,"title":2},"4393":{"body":11,"breadcrumbs":6,"title":3},"4394":{"body":43,"breadcrumbs":5,"title":2},"4395":{"body":89,"breadcrumbs":6,"title":3},"4396":{"body":33,"breadcrumbs":5,"title":2},"4397":{"body":488,"breadcrumbs":5,"title":2},"4398":{"body":193,"breadcrumbs":5,"title":2},"4399":{"body":84,"breadcrumbs":5,"title":2},"44":{"body":15,"breadcrumbs":4,"title":2},"440":{"body":46,"breadcrumbs":4,"title":3},"4400":{"body":112,"breadcrumbs":5,"title":2},"4401":{"body":15,"breadcrumbs":6,"title":3},"4402":{"body":24,"breadcrumbs":5,"title":2},"4403":{"body":302,"breadcrumbs":5,"title":2},"4404":{"body":7,"breadcrumbs":5,"title":2},"4405":{"body":65,"breadcrumbs":5,"title":2},"4406":{"body":65,"breadcrumbs":5,"title":2},"4407":{"body":0,"breadcrumbs":5,"title":2},"4408":{"body":103,"breadcrumbs":8,"title":5},"4409":{"body":57,"breadcrumbs":8,"title":5},"441":{"body":0,"breadcrumbs":5,"title":4},"4410":{"body":79,"breadcrumbs":8,"title":5},"4411":{"body":57,"breadcrumbs":8,"title":5},"4412":{"body":86,"breadcrumbs":8,"title":5},"4413":{"body":0,"breadcrumbs":5,"title":2},"4414":{"body":41,"breadcrumbs":7,"title":4},"4415":{"body":79,"breadcrumbs":5,"title":2},"4416":{"body":90,"breadcrumbs":5,"title":2},"4417":{"body":122,"breadcrumbs":6,"title":3},"4418":{"body":0,"breadcrumbs":4,"title":1},"4419":{"body":76,"breadcrumbs":6,"title":3},"442":{"body":18,"breadcrumbs":5,"title":4},"4420":{"body":87,"breadcrumbs":6,"title":3},"4421":{"body":71,"breadcrumbs":5,"title":2},"4422":{"body":0,"breadcrumbs":5,"title":2},"4423":{"body":189,"breadcrumbs":7,"title":4},"4424":{"body":22,"breadcrumbs":5,"title":2},"4425":{"body":0,"breadcrumbs":5,"title":2},"4426":{"body":15,"breadcrumbs":6,"title":3},"4427":{"body":27,"breadcrumbs":6,"title":3},"4428":{"body":40,"breadcrumbs":5,"title":2},"4429":{"body":66,"breadcrumbs":5,"title":2},"443":{"body":19,"breadcrumbs":6,"title":5},"4430":{"body":0,"breadcrumbs":5,"title":2},"4431":{"body":43,"breadcrumbs":5,"title":2},"4432":{"body":126,"breadcrumbs":5,"title":2},"4433":{"body":231,"breadcrumbs":4,"title":1},"4434":{"body":45,"breadcrumbs":5,"title":2},"4435":{"body":6,"breadcrumbs":7,"title":4},"4436":{"body":29,"breadcrumbs":4,"title":1},"4437":{"body":47,"breadcrumbs":5,"title":2},"4438":{"body":0,"breadcrumbs":4,"title":1},"4439":{"body":10,"breadcrumbs":4,"title":1},"444":{"body":20,"breadcrumbs":3,"title":2},"4440":{"body":38,"breadcrumbs":5,"title":2},"4441":{"body":17,"breadcrumbs":5,"title":2},"4442":{"body":15,"breadcrumbs":5,"title":2},"4443":{"body":8,"breadcrumbs":5,"title":2},"4444":{"body":312,"breadcrumbs":4,"title":1},"4445":{"body":14,"breadcrumbs":5,"title":2},"4446":{"body":66,"breadcrumbs":5,"title":2},"4447":{"body":7,"breadcrumbs":5,"title":2},"4448":{"body":31,"breadcrumbs":5,"title":2},"4449":{"body":249,"breadcrumbs":4,"title":1},"445":{"body":44,"breadcrumbs":3,"title":2},"4450":{"body":33,"breadcrumbs":5,"title":2},"4451":{"body":45,"breadcrumbs":5,"title":2},"4452":{"body":7,"breadcrumbs":5,"title":2},"4453":{"body":215,"breadcrumbs":4,"title":1},"4454":{"body":7,"breadcrumbs":5,"title":2},"4455":{"body":27,"breadcrumbs":5,"title":2},"4456":{"body":0,"breadcrumbs":5,"title":2},"4457":{"body":18,"breadcrumbs":5,"title":2},"4458":{"body":29,"breadcrumbs":5,"title":2},"4459":{"body":15,"breadcrumbs":5,"title":2},"446":{"body":34,"breadcrumbs":3,"title":2},"4460":{"body":28,"breadcrumbs":5,"title":2},"4461":{"body":0,"breadcrumbs":4,"title":1},"4462":{"body":64,"breadcrumbs":5,"title":2},"4463":{"body":60,"breadcrumbs":5,"title":2},"4464":{"body":42,"breadcrumbs":5,"title":2},"4465":{"body":0,"breadcrumbs":4,"title":1},"4466":{"body":40,"breadcrumbs":5,"title":2},"4467":{"body":52,"breadcrumbs":5,"title":2},"4468":{"body":0,"breadcrumbs":5,"title":2},"4469":{"body":20,"breadcrumbs":6,"title":3},"447":{"body":0,"breadcrumbs":3,"title":2},"4470":{"body":36,"breadcrumbs":5,"title":2},"4471":{"body":0,"breadcrumbs":6,"title":3},"4472":{"body":26,"breadcrumbs":4,"title":1},"4473":{"body":25,"breadcrumbs":5,"title":2},"4474":{"body":27,"breadcrumbs":4,"title":1},"4475":{"body":84,"breadcrumbs":4,"title":1},"4476":{"body":29,"breadcrumbs":5,"title":2},"4477":{"body":22,"breadcrumbs":12,"title":9},"4478":{"body":14,"breadcrumbs":4,"title":1},"4479":{"body":39,"breadcrumbs":5,"title":2},"448":{"body":16,"breadcrumbs":3,"title":2},"4480":{"body":53,"breadcrumbs":6,"title":3},"4481":{"body":28,"breadcrumbs":5,"title":2},"4482":{"body":17,"breadcrumbs":4,"title":1},"4483":{"body":45,"breadcrumbs":4,"title":1},"4484":{"body":0,"breadcrumbs":7,"title":4},"4485":{"body":32,"breadcrumbs":4,"title":1},"4486":{"body":0,"breadcrumbs":4,"title":1},"4487":{"body":8,"breadcrumbs":4,"title":1},"4488":{"body":17,"breadcrumbs":5,"title":2},"4489":{"body":36,"breadcrumbs":5,"title":2},"449":{"body":19,"breadcrumbs":3,"title":2},"4490":{"body":0,"breadcrumbs":4,"title":1},"4491":{"body":62,"breadcrumbs":5,"title":2},"4492":{"body":85,"breadcrumbs":5,"title":2},"4493":{"body":132,"breadcrumbs":5,"title":2},"4494":{"body":33,"breadcrumbs":5,"title":2},"4495":{"body":12,"breadcrumbs":5,"title":2},"4496":{"body":8,"breadcrumbs":6,"title":3},"4497":{"body":52,"breadcrumbs":5,"title":2},"4498":{"body":40,"breadcrumbs":5,"title":2},"4499":{"body":0,"breadcrumbs":4,"title":1},"45":{"body":0,"breadcrumbs":4,"title":2},"450":{"body":20,"breadcrumbs":4,"title":3},"4500":{"body":16,"breadcrumbs":6,"title":3},"4501":{"body":28,"breadcrumbs":6,"title":3},"4502":{"body":14,"breadcrumbs":6,"title":3},"4503":{"body":37,"breadcrumbs":6,"title":3},"4504":{"body":0,"breadcrumbs":5,"title":2},"4505":{"body":18,"breadcrumbs":6,"title":3},"4506":{"body":19,"breadcrumbs":6,"title":3},"4507":{"body":39,"breadcrumbs":6,"title":3},"4508":{"body":40,"breadcrumbs":6,"title":3},"4509":{"body":28,"breadcrumbs":4,"title":1},"451":{"body":13,"breadcrumbs":2,"title":1},"4510":{"body":26,"breadcrumbs":5,"title":2},"4511":{"body":25,"breadcrumbs":4,"title":1},"4512":{"body":9,"breadcrumbs":8,"title":5},"4513":{"body":34,"breadcrumbs":4,"title":1},"4514":{"body":0,"breadcrumbs":5,"title":2},"4515":{"body":113,"breadcrumbs":7,"title":4},"4516":{"body":104,"breadcrumbs":8,"title":5},"4517":{"body":116,"breadcrumbs":8,"title":5},"4518":{"body":33,"breadcrumbs":7,"title":4},"4519":{"body":35,"breadcrumbs":6,"title":3},"452":{"body":33,"breadcrumbs":3,"title":2},"4520":{"body":25,"breadcrumbs":5,"title":2},"4521":{"body":0,"breadcrumbs":6,"title":3},"4522":{"body":13,"breadcrumbs":7,"title":4},"4523":{"body":21,"breadcrumbs":7,"title":4},"4524":{"body":23,"breadcrumbs":6,"title":3},"4525":{"body":0,"breadcrumbs":5,"title":2},"4526":{"body":16,"breadcrumbs":5,"title":2},"4527":{"body":27,"breadcrumbs":7,"title":4},"4528":{"body":0,"breadcrumbs":5,"title":2},"4529":{"body":41,"breadcrumbs":6,"title":3},"453":{"body":37,"breadcrumbs":7,"title":5},"4530":{"body":34,"breadcrumbs":5,"title":2},"4531":{"body":0,"breadcrumbs":6,"title":3},"4532":{"body":12,"breadcrumbs":7,"title":4},"4533":{"body":37,"breadcrumbs":7,"title":4},"4534":{"body":35,"breadcrumbs":6,"title":3},"4535":{"body":30,"breadcrumbs":6,"title":3},"4536":{"body":0,"breadcrumbs":4,"title":1},"4537":{"body":21,"breadcrumbs":5,"title":2},"4538":{"body":26,"breadcrumbs":5,"title":2},"4539":{"body":26,"breadcrumbs":5,"title":2},"454":{"body":35,"breadcrumbs":4,"title":2},"4540":{"body":75,"breadcrumbs":4,"title":1},"4541":{"body":26,"breadcrumbs":4,"title":1},"4542":{"body":58,"breadcrumbs":6,"title":3},"4543":{"body":12,"breadcrumbs":7,"title":5},"4544":{"body":28,"breadcrumbs":4,"title":2},"4545":{"body":32,"breadcrumbs":3,"title":1},"4546":{"body":0,"breadcrumbs":4,"title":2},"4547":{"body":33,"breadcrumbs":5,"title":3},"4548":{"body":21,"breadcrumbs":5,"title":3},"4549":{"body":0,"breadcrumbs":5,"title":3},"455":{"body":0,"breadcrumbs":4,"title":2},"4550":{"body":13,"breadcrumbs":4,"title":2},"4551":{"body":12,"breadcrumbs":4,"title":2},"4552":{"body":41,"breadcrumbs":5,"title":3},"4553":{"body":73,"breadcrumbs":4,"title":2},"4554":{"body":19,"breadcrumbs":4,"title":2},"4555":{"body":0,"breadcrumbs":3,"title":1},"4556":{"body":30,"breadcrumbs":3,"title":1},"4557":{"body":32,"breadcrumbs":3,"title":1},"4558":{"body":52,"breadcrumbs":5,"title":3},"4559":{"body":7,"breadcrumbs":4,"title":2},"456":{"body":37,"breadcrumbs":5,"title":3},"4560":{"body":17,"breadcrumbs":6,"title":3},"4561":{"body":14,"breadcrumbs":5,"title":2},"4562":{"body":34,"breadcrumbs":4,"title":1},"4563":{"body":14,"breadcrumbs":4,"title":1},"4564":{"body":0,"breadcrumbs":4,"title":1},"4565":{"body":17,"breadcrumbs":4,"title":1},"4566":{"body":137,"breadcrumbs":4,"title":1},"4567":{"body":0,"breadcrumbs":6,"title":3},"4568":{"body":45,"breadcrumbs":6,"title":3},"4569":{"body":18,"breadcrumbs":6,"title":3},"457":{"body":73,"breadcrumbs":5,"title":3},"4570":{"body":25,"breadcrumbs":5,"title":2},"4571":{"body":33,"breadcrumbs":5,"title":2},"4572":{"body":22,"breadcrumbs":5,"title":2},"4573":{"body":9,"breadcrumbs":5,"title":2},"4574":{"body":15,"breadcrumbs":5,"title":2},"4575":{"body":28,"breadcrumbs":5,"title":2},"4576":{"body":23,"breadcrumbs":6,"title":3},"4577":{"body":8,"breadcrumbs":5,"title":2},"4578":{"body":16,"breadcrumbs":7,"title":4},"4579":{"body":22,"breadcrumbs":5,"title":2},"458":{"body":95,"breadcrumbs":5,"title":3},"4580":{"body":7,"breadcrumbs":5,"title":2},"4581":{"body":7,"breadcrumbs":5,"title":2},"4582":{"body":50,"breadcrumbs":5,"title":2},"4583":{"body":26,"breadcrumbs":6,"title":3},"4584":{"body":15,"breadcrumbs":5,"title":2},"4585":{"body":8,"breadcrumbs":5,"title":2},"4586":{"body":31,"breadcrumbs":5,"title":2},"4587":{"body":0,"breadcrumbs":5,"title":2},"4588":{"body":17,"breadcrumbs":5,"title":2},"4589":{"body":23,"breadcrumbs":5,"title":2},"459":{"body":7,"breadcrumbs":4,"title":2},"4590":{"body":21,"breadcrumbs":5,"title":2},"4591":{"body":22,"breadcrumbs":6,"title":3},"4592":{"body":0,"breadcrumbs":5,"title":2},"4593":{"body":26,"breadcrumbs":5,"title":2},"4594":{"body":31,"breadcrumbs":5,"title":2},"4595":{"body":34,"breadcrumbs":5,"title":2},"4596":{"body":30,"breadcrumbs":5,"title":2},"4597":{"body":0,"breadcrumbs":4,"title":1},"4598":{"body":33,"breadcrumbs":5,"title":2},"4599":{"body":36,"breadcrumbs":6,"title":3},"46":{"body":18,"breadcrumbs":5,"title":3},"460":{"body":48,"breadcrumbs":5,"title":3},"4600":{"body":34,"breadcrumbs":5,"title":2},"4601":{"body":37,"breadcrumbs":6,"title":3},"4602":{"body":28,"breadcrumbs":6,"title":3},"4603":{"body":29,"breadcrumbs":6,"title":3},"4604":{"body":0,"breadcrumbs":5,"title":2},"4605":{"body":18,"breadcrumbs":5,"title":2},"4606":{"body":17,"breadcrumbs":5,"title":2},"4607":{"body":17,"breadcrumbs":4,"title":1},"4608":{"body":19,"breadcrumbs":4,"title":1},"4609":{"body":0,"breadcrumbs":5,"title":2},"461":{"body":46,"breadcrumbs":5,"title":3},"4610":{"body":15,"breadcrumbs":6,"title":3},"4611":{"body":18,"breadcrumbs":5,"title":2},"4612":{"body":14,"breadcrumbs":6,"title":3},"4613":{"body":29,"breadcrumbs":4,"title":1},"4614":{"body":0,"breadcrumbs":8,"title":4},"4615":{"body":12,"breadcrumbs":5,"title":1},"4616":{"body":48,"breadcrumbs":6,"title":2},"4617":{"body":366,"breadcrumbs":7,"title":3},"4618":{"body":367,"breadcrumbs":7,"title":3},"4619":{"body":0,"breadcrumbs":6,"title":2},"462":{"body":61,"breadcrumbs":4,"title":2},"4620":{"body":34,"breadcrumbs":10,"title":6},"4621":{"body":26,"breadcrumbs":7,"title":3},"4622":{"body":24,"breadcrumbs":7,"title":3},"4623":{"body":16,"breadcrumbs":10,"title":6},"4624":{"body":19,"breadcrumbs":6,"title":2},"4625":{"body":0,"breadcrumbs":6,"title":2},"4626":{"body":32,"breadcrumbs":6,"title":2},"4627":{"body":30,"breadcrumbs":7,"title":3},"4628":{"body":8,"breadcrumbs":5,"title":1},"4629":{"body":322,"breadcrumbs":10,"title":6},"463":{"body":69,"breadcrumbs":4,"title":2},"4630":{"body":212,"breadcrumbs":9,"title":5},"4631":{"body":124,"breadcrumbs":11,"title":7},"4632":{"body":125,"breadcrumbs":11,"title":7},"4633":{"body":0,"breadcrumbs":8,"title":4},"4634":{"body":19,"breadcrumbs":5,"title":1},"4635":{"body":21,"breadcrumbs":5,"title":1},"4636":{"body":0,"breadcrumbs":6,"title":2},"4637":{"body":19,"breadcrumbs":6,"title":2},"4638":{"body":39,"breadcrumbs":6,"title":2},"4639":{"body":11,"breadcrumbs":6,"title":2},"464":{"body":0,"breadcrumbs":4,"title":2},"4640":{"body":0,"breadcrumbs":6,"title":2},"4641":{"body":118,"breadcrumbs":6,"title":2},"4642":{"body":164,"breadcrumbs":7,"title":3},"4643":{"body":98,"breadcrumbs":6,"title":2},"4644":{"body":81,"breadcrumbs":5,"title":1},"4645":{"body":74,"breadcrumbs":5,"title":1},"4646":{"body":47,"breadcrumbs":6,"title":2},"4647":{"body":61,"breadcrumbs":8,"title":4},"4648":{"body":29,"breadcrumbs":6,"title":2},"4649":{"body":19,"breadcrumbs":6,"title":2},"465":{"body":32,"breadcrumbs":4,"title":2},"4650":{"body":9,"breadcrumbs":7,"title":4},"4651":{"body":13,"breadcrumbs":5,"title":2},"4652":{"body":40,"breadcrumbs":4,"title":1},"4653":{"body":30,"breadcrumbs":5,"title":2},"4654":{"body":0,"breadcrumbs":5,"title":2},"4655":{"body":23,"breadcrumbs":4,"title":1},"4656":{"body":14,"breadcrumbs":9,"title":6},"4657":{"body":20,"breadcrumbs":6,"title":3},"4658":{"body":19,"breadcrumbs":7,"title":4},"4659":{"body":20,"breadcrumbs":8,"title":5},"466":{"body":24,"breadcrumbs":4,"title":2},"4660":{"body":20,"breadcrumbs":6,"title":3},"4661":{"body":0,"breadcrumbs":6,"title":3},"4662":{"body":52,"breadcrumbs":5,"title":2},"4663":{"body":49,"breadcrumbs":5,"title":2},"4664":{"body":46,"breadcrumbs":5,"title":2},"4665":{"body":22,"breadcrumbs":5,"title":2},"4666":{"body":37,"breadcrumbs":6,"title":3},"4667":{"body":45,"breadcrumbs":5,"title":2},"4668":{"body":56,"breadcrumbs":5,"title":2},"4669":{"body":12,"breadcrumbs":5,"title":2},"467":{"body":0,"breadcrumbs":4,"title":2},"4670":{"body":38,"breadcrumbs":5,"title":2},"4671":{"body":31,"breadcrumbs":5,"title":2},"4672":{"body":24,"breadcrumbs":6,"title":3},"4673":{"body":0,"breadcrumbs":5,"title":2},"4674":{"body":55,"breadcrumbs":5,"title":2},"4675":{"body":27,"breadcrumbs":5,"title":2},"4676":{"body":24,"breadcrumbs":5,"title":2},"4677":{"body":18,"breadcrumbs":5,"title":2},"4678":{"body":14,"breadcrumbs":5,"title":2},"4679":{"body":16,"breadcrumbs":5,"title":2},"468":{"body":28,"breadcrumbs":5,"title":3},"4680":{"body":0,"breadcrumbs":5,"title":2},"4681":{"body":26,"breadcrumbs":6,"title":3},"4682":{"body":39,"breadcrumbs":5,"title":2},"4683":{"body":28,"breadcrumbs":6,"title":3},"4684":{"body":16,"breadcrumbs":5,"title":2},"4685":{"body":18,"breadcrumbs":5,"title":2},"4686":{"body":28,"breadcrumbs":5,"title":2},"4687":{"body":0,"breadcrumbs":5,"title":2},"4688":{"body":64,"breadcrumbs":6,"title":3},"4689":{"body":47,"breadcrumbs":6,"title":3},"469":{"body":32,"breadcrumbs":5,"title":3},"4690":{"body":0,"breadcrumbs":4,"title":1},"4691":{"body":26,"breadcrumbs":6,"title":3},"4692":{"body":18,"breadcrumbs":5,"title":2},"4693":{"body":33,"breadcrumbs":6,"title":3},"4694":{"body":20,"breadcrumbs":5,"title":2},"4695":{"body":32,"breadcrumbs":5,"title":2},"4696":{"body":24,"breadcrumbs":6,"title":3},"4697":{"body":0,"breadcrumbs":5,"title":2},"4698":{"body":18,"breadcrumbs":5,"title":2},"4699":{"body":29,"breadcrumbs":5,"title":2},"47":{"body":25,"breadcrumbs":4,"title":2},"470":{"body":39,"breadcrumbs":5,"title":3},"4700":{"body":17,"breadcrumbs":5,"title":2},"4701":{"body":26,"breadcrumbs":4,"title":1},"4702":{"body":33,"breadcrumbs":5,"title":2},"4703":{"body":18,"breadcrumbs":10,"title":7},"4704":{"body":58,"breadcrumbs":4,"title":1},"4705":{"body":0,"breadcrumbs":6,"title":3},"4706":{"body":28,"breadcrumbs":6,"title":3},"4707":{"body":0,"breadcrumbs":6,"title":3},"4708":{"body":15,"breadcrumbs":3,"title":0},"4709":{"body":56,"breadcrumbs":4,"title":1},"471":{"body":51,"breadcrumbs":4,"title":2},"4710":{"body":60,"breadcrumbs":4,"title":1},"4711":{"body":19,"breadcrumbs":4,"title":1},"4712":{"body":0,"breadcrumbs":7,"title":4},"4713":{"body":16,"breadcrumbs":3,"title":0},"4714":{"body":56,"breadcrumbs":4,"title":1},"4715":{"body":28,"breadcrumbs":5,"title":2},"4716":{"body":50,"breadcrumbs":7,"title":4},"4717":{"body":56,"breadcrumbs":5,"title":2},"4718":{"body":0,"breadcrumbs":6,"title":3},"4719":{"body":17,"breadcrumbs":3,"title":0},"472":{"body":19,"breadcrumbs":3,"title":1},"4720":{"body":66,"breadcrumbs":4,"title":1},"4721":{"body":20,"breadcrumbs":5,"title":2},"4722":{"body":53,"breadcrumbs":8,"title":5},"4723":{"body":23,"breadcrumbs":7,"title":4},"4724":{"body":0,"breadcrumbs":8,"title":5},"4725":{"body":13,"breadcrumbs":3,"title":0},"4726":{"body":68,"breadcrumbs":4,"title":1},"4727":{"body":69,"breadcrumbs":6,"title":3},"4728":{"body":0,"breadcrumbs":6,"title":3},"4729":{"body":11,"breadcrumbs":3,"title":0},"473":{"body":0,"breadcrumbs":4,"title":2},"4730":{"body":59,"breadcrumbs":4,"title":1},"4731":{"body":45,"breadcrumbs":7,"title":4},"4732":{"body":0,"breadcrumbs":5,"title":2},"4733":{"body":18,"breadcrumbs":8,"title":5},"4734":{"body":32,"breadcrumbs":9,"title":6},"4735":{"body":27,"breadcrumbs":7,"title":4},"4736":{"body":42,"breadcrumbs":8,"title":5},"4737":{"body":0,"breadcrumbs":5,"title":2},"4738":{"body":52,"breadcrumbs":6,"title":3},"4739":{"body":0,"breadcrumbs":5,"title":2},"474":{"body":14,"breadcrumbs":4,"title":2},"4740":{"body":35,"breadcrumbs":8,"title":5},"4741":{"body":19,"breadcrumbs":7,"title":4},"4742":{"body":43,"breadcrumbs":7,"title":4},"4743":{"body":0,"breadcrumbs":4,"title":1},"4744":{"body":18,"breadcrumbs":7,"title":4},"4745":{"body":26,"breadcrumbs":7,"title":4},"4746":{"body":26,"breadcrumbs":8,"title":5},"4747":{"body":19,"breadcrumbs":5,"title":2},"4748":{"body":30,"breadcrumbs":5,"title":2},"4749":{"body":14,"breadcrumbs":11,"title":7},"475":{"body":15,"breadcrumbs":4,"title":2},"4750":{"body":30,"breadcrumbs":6,"title":2},"4751":{"body":63,"breadcrumbs":6,"title":2},"4752":{"body":0,"breadcrumbs":8,"title":4},"4753":{"body":15,"breadcrumbs":8,"title":4},"4754":{"body":22,"breadcrumbs":9,"title":5},"4755":{"body":7,"breadcrumbs":7,"title":3},"4756":{"body":20,"breadcrumbs":8,"title":4},"4757":{"body":0,"breadcrumbs":7,"title":3},"4758":{"body":186,"breadcrumbs":9,"title":5},"4759":{"body":163,"breadcrumbs":10,"title":6},"476":{"body":14,"breadcrumbs":4,"title":2},"4760":{"body":125,"breadcrumbs":8,"title":4},"4761":{"body":124,"breadcrumbs":9,"title":5},"4762":{"body":97,"breadcrumbs":8,"title":4},"4763":{"body":4,"breadcrumbs":6,"title":2},"4764":{"body":35,"breadcrumbs":9,"title":5},"4765":{"body":23,"breadcrumbs":6,"title":2},"4766":{"body":0,"breadcrumbs":6,"title":2},"4767":{"body":58,"breadcrumbs":6,"title":2},"4768":{"body":27,"breadcrumbs":6,"title":2},"4769":{"body":0,"breadcrumbs":6,"title":2},"477":{"body":0,"breadcrumbs":4,"title":2},"4770":{"body":83,"breadcrumbs":10,"title":6},"4771":{"body":68,"breadcrumbs":10,"title":6},"4772":{"body":68,"breadcrumbs":10,"title":6},"4773":{"body":0,"breadcrumbs":6,"title":2},"4774":{"body":68,"breadcrumbs":9,"title":5},"4775":{"body":49,"breadcrumbs":9,"title":5},"4776":{"body":46,"breadcrumbs":8,"title":4},"4777":{"body":0,"breadcrumbs":5,"title":1},"4778":{"body":15,"breadcrumbs":5,"title":1},"4779":{"body":15,"breadcrumbs":6,"title":2},"478":{"body":19,"breadcrumbs":4,"title":2},"4780":{"body":23,"breadcrumbs":5,"title":1},"4781":{"body":0,"breadcrumbs":6,"title":2},"4782":{"body":71,"breadcrumbs":8,"title":4},"4783":{"body":0,"breadcrumbs":5,"title":1},"4784":{"body":33,"breadcrumbs":9,"title":5},"4785":{"body":28,"breadcrumbs":7,"title":3},"4786":{"body":29,"breadcrumbs":7,"title":3},"4787":{"body":17,"breadcrumbs":6,"title":2},"4788":{"body":24,"breadcrumbs":7,"title":3},"4789":{"body":13,"breadcrumbs":6,"title":3},"479":{"body":19,"breadcrumbs":4,"title":2},"4790":{"body":22,"breadcrumbs":5,"title":2},"4791":{"body":42,"breadcrumbs":4,"title":1},"4792":{"body":0,"breadcrumbs":5,"title":2},"4793":{"body":20,"breadcrumbs":7,"title":4},"4794":{"body":28,"breadcrumbs":6,"title":3},"4795":{"body":37,"breadcrumbs":5,"title":2},"4796":{"body":0,"breadcrumbs":4,"title":1},"4797":{"body":54,"breadcrumbs":5,"title":2},"4798":{"body":22,"breadcrumbs":5,"title":2},"4799":{"body":28,"breadcrumbs":5,"title":2},"48":{"body":13,"breadcrumbs":4,"title":2},"480":{"body":39,"breadcrumbs":4,"title":2},"4800":{"body":0,"breadcrumbs":4,"title":1},"4801":{"body":18,"breadcrumbs":4,"title":1},"4802":{"body":0,"breadcrumbs":4,"title":1},"4803":{"body":8,"breadcrumbs":5,"title":2},"4804":{"body":6,"breadcrumbs":4,"title":1},"4805":{"body":8,"breadcrumbs":5,"title":2},"4806":{"body":9,"breadcrumbs":6,"title":3},"4807":{"body":22,"breadcrumbs":4,"title":1},"4808":{"body":24,"breadcrumbs":4,"title":1},"4809":{"body":0,"breadcrumbs":6,"title":3},"481":{"body":41,"breadcrumbs":7,"title":5},"4810":{"body":30,"breadcrumbs":7,"title":4},"4811":{"body":29,"breadcrumbs":6,"title":3},"4812":{"body":31,"breadcrumbs":6,"title":3},"4813":{"body":0,"breadcrumbs":5,"title":2},"4814":{"body":14,"breadcrumbs":4,"title":1},"4815":{"body":49,"breadcrumbs":5,"title":2},"4816":{"body":0,"breadcrumbs":5,"title":2},"4817":{"body":37,"breadcrumbs":5,"title":2},"4818":{"body":19,"breadcrumbs":5,"title":2},"4819":{"body":38,"breadcrumbs":5,"title":2},"482":{"body":47,"breadcrumbs":4,"title":2},"4820":{"body":0,"breadcrumbs":5,"title":2},"4821":{"body":56,"breadcrumbs":5,"title":2},"4822":{"body":26,"breadcrumbs":5,"title":2},"4823":{"body":12,"breadcrumbs":5,"title":2},"4824":{"body":0,"breadcrumbs":5,"title":2},"4825":{"body":28,"breadcrumbs":5,"title":2},"4826":{"body":2,"breadcrumbs":5,"title":2},"4827":{"body":12,"breadcrumbs":5,"title":2},"4828":{"body":4,"breadcrumbs":4,"title":1},"4829":{"body":0,"breadcrumbs":5,"title":2},"483":{"body":41,"breadcrumbs":5,"title":3},"4830":{"body":23,"breadcrumbs":6,"title":3},"4831":{"body":18,"breadcrumbs":7,"title":4},"4832":{"body":19,"breadcrumbs":6,"title":3},"4833":{"body":19,"breadcrumbs":6,"title":3},"4834":{"body":0,"breadcrumbs":5,"title":2},"4835":{"body":12,"breadcrumbs":5,"title":2},"4836":{"body":13,"breadcrumbs":5,"title":2},"4837":{"body":12,"breadcrumbs":5,"title":2},"4838":{"body":33,"breadcrumbs":5,"title":2},"4839":{"body":0,"breadcrumbs":4,"title":1},"484":{"body":0,"breadcrumbs":4,"title":2},"4840":{"body":20,"breadcrumbs":5,"title":2},"4841":{"body":11,"breadcrumbs":5,"title":2},"4842":{"body":16,"breadcrumbs":6,"title":3},"4843":{"body":24,"breadcrumbs":5,"title":2},"4844":{"body":0,"breadcrumbs":5,"title":2},"4845":{"body":14,"breadcrumbs":6,"title":3},"4846":{"body":12,"breadcrumbs":6,"title":3},"4847":{"body":25,"breadcrumbs":6,"title":3},"4848":{"body":13,"breadcrumbs":6,"title":3},"4849":{"body":0,"breadcrumbs":4,"title":1},"485":{"body":126,"breadcrumbs":5,"title":3},"4850":{"body":15,"breadcrumbs":5,"title":2},"4851":{"body":14,"breadcrumbs":5,"title":2},"4852":{"body":9,"breadcrumbs":5,"title":2},"4853":{"body":14,"breadcrumbs":5,"title":2},"4854":{"body":0,"breadcrumbs":7,"title":4},"4855":{"body":20,"breadcrumbs":10,"title":7},"4856":{"body":64,"breadcrumbs":5,"title":2},"4857":{"body":0,"breadcrumbs":6,"title":3},"4858":{"body":38,"breadcrumbs":7,"title":4},"4859":{"body":36,"breadcrumbs":6,"title":3},"486":{"body":89,"breadcrumbs":5,"title":3},"4860":{"body":47,"breadcrumbs":8,"title":5},"4861":{"body":42,"breadcrumbs":6,"title":3},"4862":{"body":50,"breadcrumbs":6,"title":3},"4863":{"body":23,"breadcrumbs":6,"title":3},"4864":{"body":24,"breadcrumbs":4,"title":1},"4865":{"body":23,"breadcrumbs":4,"title":1},"4866":{"body":20,"breadcrumbs":4,"title":1},"4867":{"body":29,"breadcrumbs":5,"title":2},"4868":{"body":38,"breadcrumbs":6,"title":3},"4869":{"body":16,"breadcrumbs":4,"title":1},"487":{"body":105,"breadcrumbs":5,"title":3},"4870":{"body":28,"breadcrumbs":5,"title":2},"4871":{"body":9,"breadcrumbs":7,"title":4},"4872":{"body":16,"breadcrumbs":4,"title":1},"4873":{"body":0,"breadcrumbs":5,"title":2},"4874":{"body":33,"breadcrumbs":6,"title":3},"4875":{"body":39,"breadcrumbs":6,"title":3},"4876":{"body":33,"breadcrumbs":8,"title":5},"4877":{"body":45,"breadcrumbs":6,"title":3},"4878":{"body":0,"breadcrumbs":6,"title":3},"4879":{"body":57,"breadcrumbs":6,"title":3},"488":{"body":123,"breadcrumbs":5,"title":3},"4880":{"body":9,"breadcrumbs":6,"title":3},"4881":{"body":0,"breadcrumbs":6,"title":3},"4882":{"body":62,"breadcrumbs":7,"title":4},"4883":{"body":37,"breadcrumbs":8,"title":5},"4884":{"body":40,"breadcrumbs":8,"title":5},"4885":{"body":64,"breadcrumbs":7,"title":4},"4886":{"body":0,"breadcrumbs":5,"title":2},"4887":{"body":56,"breadcrumbs":5,"title":2},"4888":{"body":19,"breadcrumbs":5,"title":2},"4889":{"body":61,"breadcrumbs":6,"title":3},"489":{"body":59,"breadcrumbs":4,"title":2},"4890":{"body":81,"breadcrumbs":5,"title":2},"4891":{"body":0,"breadcrumbs":5,"title":2},"4892":{"body":46,"breadcrumbs":6,"title":3},"4893":{"body":44,"breadcrumbs":6,"title":3},"4894":{"body":0,"breadcrumbs":4,"title":1},"4895":{"body":20,"breadcrumbs":5,"title":2},"4896":{"body":24,"breadcrumbs":5,"title":2},"4897":{"body":26,"breadcrumbs":7,"title":4},"4898":{"body":14,"breadcrumbs":5,"title":2},"4899":{"body":0,"breadcrumbs":5,"title":2},"49":{"body":21,"breadcrumbs":3,"title":1},"490":{"body":58,"breadcrumbs":5,"title":3},"4900":{"body":29,"breadcrumbs":6,"title":3},"4901":{"body":15,"breadcrumbs":5,"title":2},"4902":{"body":22,"breadcrumbs":5,"title":2},"4903":{"body":0,"breadcrumbs":5,"title":2},"4904":{"body":18,"breadcrumbs":5,"title":2},"4905":{"body":21,"breadcrumbs":5,"title":2},"4906":{"body":19,"breadcrumbs":4,"title":1},"4907":{"body":10,"breadcrumbs":5,"title":2},"4908":{"body":17,"breadcrumbs":5,"title":2},"4909":{"body":10,"breadcrumbs":4,"title":2},"491":{"body":0,"breadcrumbs":4,"title":2},"4910":{"body":15,"breadcrumbs":4,"title":2},"4911":{"body":0,"breadcrumbs":5,"title":3},"4912":{"body":24,"breadcrumbs":5,"title":3},"4913":{"body":16,"breadcrumbs":5,"title":3},"4914":{"body":17,"breadcrumbs":6,"title":4},"4915":{"body":0,"breadcrumbs":5,"title":3},"4916":{"body":90,"breadcrumbs":5,"title":3},"4917":{"body":41,"breadcrumbs":5,"title":3},"4918":{"body":53,"breadcrumbs":6,"title":4},"4919":{"body":0,"breadcrumbs":4,"title":2},"492":{"body":53,"breadcrumbs":5,"title":3},"4920":{"body":49,"breadcrumbs":5,"title":3},"4921":{"body":78,"breadcrumbs":6,"title":4},"4922":{"body":50,"breadcrumbs":5,"title":3},"4923":{"body":0,"breadcrumbs":5,"title":3},"4924":{"body":119,"breadcrumbs":6,"title":4},"4925":{"body":124,"breadcrumbs":6,"title":4},"4926":{"body":0,"breadcrumbs":5,"title":3},"4927":{"body":135,"breadcrumbs":6,"title":4},"4928":{"body":119,"breadcrumbs":5,"title":3},"4929":{"body":0,"breadcrumbs":5,"title":3},"493":{"body":35,"breadcrumbs":5,"title":3},"4930":{"body":167,"breadcrumbs":6,"title":4},"4931":{"body":0,"breadcrumbs":4,"title":2},"4932":{"body":100,"breadcrumbs":5,"title":3},"4933":{"body":51,"breadcrumbs":6,"title":4},"4934":{"body":0,"breadcrumbs":5,"title":3},"4935":{"body":99,"breadcrumbs":6,"title":4},"4936":{"body":0,"breadcrumbs":5,"title":3},"4937":{"body":78,"breadcrumbs":6,"title":4},"4938":{"body":56,"breadcrumbs":6,"title":4},"4939":{"body":0,"breadcrumbs":5,"title":3},"494":{"body":24,"breadcrumbs":6,"title":4},"4940":{"body":62,"breadcrumbs":6,"title":4},"4941":{"body":0,"breadcrumbs":4,"title":2},"4942":{"body":18,"breadcrumbs":4,"title":2},"4943":{"body":26,"breadcrumbs":4,"title":2},"4944":{"body":24,"breadcrumbs":4,"title":2},"4945":{"body":0,"breadcrumbs":4,"title":2},"4946":{"body":38,"breadcrumbs":4,"title":2},"4947":{"body":37,"breadcrumbs":4,"title":2},"4948":{"body":45,"breadcrumbs":4,"title":2},"4949":{"body":0,"breadcrumbs":5,"title":3},"495":{"body":33,"breadcrumbs":4,"title":2},"4950":{"body":84,"breadcrumbs":5,"title":3},"4951":{"body":47,"breadcrumbs":4,"title":2},"4952":{"body":15,"breadcrumbs":6,"title":5},"4953":{"body":62,"breadcrumbs":3,"title":2},"4954":{"body":48,"breadcrumbs":2,"title":1},"4955":{"body":16,"breadcrumbs":3,"title":2},"4956":{"body":8,"breadcrumbs":5,"title":4},"4957":{"body":12,"breadcrumbs":4,"title":3},"4958":{"body":36,"breadcrumbs":5,"title":4},"4959":{"body":23,"breadcrumbs":4,"title":3},"496":{"body":41,"breadcrumbs":3,"title":1},"4960":{"body":9,"breadcrumbs":4,"title":3},"4961":{"body":10,"breadcrumbs":3,"title":2},"4962":{"body":11,"breadcrumbs":7,"title":6},"4963":{"body":51,"breadcrumbs":3,"title":2},"4964":{"body":58,"breadcrumbs":4,"title":3},"4965":{"body":39,"breadcrumbs":3,"title":2},"4966":{"body":27,"breadcrumbs":4,"title":3},"4967":{"body":42,"breadcrumbs":4,"title":3},"4968":{"body":23,"breadcrumbs":4,"title":3},"4969":{"body":67,"breadcrumbs":6,"title":5},"497":{"body":21,"breadcrumbs":3,"title":1},"4970":{"body":37,"breadcrumbs":4,"title":3},"4971":{"body":0,"breadcrumbs":6,"title":5},"4972":{"body":70,"breadcrumbs":3,"title":2},"4973":{"body":39,"breadcrumbs":4,"title":3},"4974":{"body":0,"breadcrumbs":6,"title":5},"4975":{"body":18,"breadcrumbs":3,"title":2},"4976":{"body":19,"breadcrumbs":5,"title":4},"4977":{"body":6,"breadcrumbs":5,"title":4},"4978":{"body":74,"breadcrumbs":4,"title":3},"4979":{"body":56,"breadcrumbs":4,"title":3},"498":{"body":0,"breadcrumbs":3,"title":1},"4980":{"body":19,"breadcrumbs":3,"title":2},"4981":{"body":97,"breadcrumbs":5,"title":4},"4982":{"body":0,"breadcrumbs":5,"title":4},"4983":{"body":47,"breadcrumbs":4,"title":3},"4984":{"body":39,"breadcrumbs":4,"title":3},"4985":{"body":31,"breadcrumbs":4,"title":3},"4986":{"body":0,"breadcrumbs":6,"title":5},"4987":{"body":35,"breadcrumbs":4,"title":3},"4988":{"body":42,"breadcrumbs":4,"title":3},"4989":{"body":61,"breadcrumbs":5,"title":4},"499":{"body":14,"breadcrumbs":3,"title":1},"4990":{"body":0,"breadcrumbs":5,"title":4},"4991":{"body":35,"breadcrumbs":6,"title":5},"4992":{"body":84,"breadcrumbs":3,"title":2},"4993":{"body":31,"breadcrumbs":4,"title":3},"4994":{"body":8,"breadcrumbs":6,"title":5},"4995":{"body":22,"breadcrumbs":6,"title":5},"4996":{"body":111,"breadcrumbs":3,"title":2},"4997":{"body":24,"breadcrumbs":4,"title":3},"4998":{"body":9,"breadcrumbs":5,"title":4},"4999":{"body":19,"breadcrumbs":6,"title":5},"5":{"body":34,"breadcrumbs":5,"title":4},"50":{"body":17,"breadcrumbs":5,"title":3},"500":{"body":17,"breadcrumbs":3,"title":1},"5000":{"body":59,"breadcrumbs":4,"title":3},"5001":{"body":24,"breadcrumbs":3,"title":2},"5002":{"body":0,"breadcrumbs":5,"title":4},"5003":{"body":37,"breadcrumbs":4,"title":3},"5004":{"body":27,"breadcrumbs":4,"title":3},"5005":{"body":18,"breadcrumbs":3,"title":2},"5006":{"body":0,"breadcrumbs":5,"title":4},"5007":{"body":25,"breadcrumbs":4,"title":3},"5008":{"body":20,"breadcrumbs":5,"title":4},"5009":{"body":27,"breadcrumbs":5,"title":4},"501":{"body":15,"breadcrumbs":4,"title":2},"5010":{"body":30,"breadcrumbs":3,"title":2},"5011":{"body":0,"breadcrumbs":2,"title":1},"5012":{"body":30,"breadcrumbs":4,"title":3},"5013":{"body":43,"breadcrumbs":4,"title":3},"5014":{"body":32,"breadcrumbs":5,"title":4},"5015":{"body":32,"breadcrumbs":4,"title":3},"5016":{"body":24,"breadcrumbs":3,"title":2},"5017":{"body":24,"breadcrumbs":4,"title":3},"5018":{"body":0,"breadcrumbs":3,"title":2},"5019":{"body":58,"breadcrumbs":4,"title":3},"502":{"body":25,"breadcrumbs":4,"title":2},"5020":{"body":24,"breadcrumbs":3,"title":2},"5021":{"body":22,"breadcrumbs":2,"title":1},"5022":{"body":63,"breadcrumbs":2,"title":1},"5023":{"body":13,"breadcrumbs":5,"title":3},"5024":{"body":15,"breadcrumbs":3,"title":1},"5025":{"body":0,"breadcrumbs":4,"title":2},"5026":{"body":17,"breadcrumbs":7,"title":5},"5027":{"body":15,"breadcrumbs":7,"title":5},"5028":{"body":27,"breadcrumbs":8,"title":6},"5029":{"body":0,"breadcrumbs":6,"title":4},"503":{"body":41,"breadcrumbs":4,"title":2},"5030":{"body":45,"breadcrumbs":6,"title":4},"5031":{"body":40,"breadcrumbs":7,"title":5},"5032":{"body":43,"breadcrumbs":6,"title":4},"5033":{"body":8,"breadcrumbs":6,"title":4},"5034":{"body":0,"breadcrumbs":6,"title":4},"5035":{"body":8,"breadcrumbs":6,"title":4},"5036":{"body":25,"breadcrumbs":5,"title":3},"5037":{"body":89,"breadcrumbs":6,"title":4},"5038":{"body":0,"breadcrumbs":7,"title":5},"5039":{"body":157,"breadcrumbs":9,"title":7},"504":{"body":25,"breadcrumbs":6,"title":4},"5040":{"body":205,"breadcrumbs":8,"title":6},"5041":{"body":192,"breadcrumbs":7,"title":5},"5042":{"body":0,"breadcrumbs":7,"title":5},"5043":{"body":45,"breadcrumbs":6,"title":4},"5044":{"body":41,"breadcrumbs":8,"title":6},"5045":{"body":0,"breadcrumbs":7,"title":5},"5046":{"body":44,"breadcrumbs":6,"title":4},"5047":{"body":48,"breadcrumbs":6,"title":4},"5048":{"body":0,"breadcrumbs":6,"title":4},"5049":{"body":50,"breadcrumbs":6,"title":4},"505":{"body":0,"breadcrumbs":4,"title":2},"5050":{"body":10,"breadcrumbs":5,"title":3},"5051":{"body":10,"breadcrumbs":5,"title":3},"5052":{"body":0,"breadcrumbs":7,"title":5},"5053":{"body":45,"breadcrumbs":5,"title":3},"5054":{"body":6,"breadcrumbs":6,"title":4},"5055":{"body":41,"breadcrumbs":6,"title":4},"5056":{"body":10,"breadcrumbs":5,"title":3},"5057":{"body":61,"breadcrumbs":4,"title":2},"5058":{"body":0,"breadcrumbs":5,"title":3},"5059":{"body":16,"breadcrumbs":7,"title":5},"506":{"body":28,"breadcrumbs":4,"title":2},"5060":{"body":26,"breadcrumbs":7,"title":5},"5061":{"body":12,"breadcrumbs":6,"title":4},"5062":{"body":0,"breadcrumbs":4,"title":2},"5063":{"body":27,"breadcrumbs":7,"title":5},"5064":{"body":24,"breadcrumbs":6,"title":4},"5065":{"body":21,"breadcrumbs":6,"title":4},"5066":{"body":50,"breadcrumbs":4,"title":2},"5067":{"body":13,"breadcrumbs":4,"title":2},"5068":{"body":45,"breadcrumbs":4,"title":2},"5069":{"body":15,"breadcrumbs":4,"title":2},"507":{"body":26,"breadcrumbs":4,"title":2},"5070":{"body":15,"breadcrumbs":3,"title":1},"5071":{"body":0,"breadcrumbs":4,"title":2},"5072":{"body":62,"breadcrumbs":4,"title":2},"5073":{"body":140,"breadcrumbs":5,"title":3},"5074":{"body":90,"breadcrumbs":5,"title":3},"5075":{"body":0,"breadcrumbs":4,"title":2},"5076":{"body":102,"breadcrumbs":5,"title":3},"5077":{"body":82,"breadcrumbs":5,"title":3},"5078":{"body":59,"breadcrumbs":4,"title":2},"5079":{"body":44,"breadcrumbs":5,"title":3},"508":{"body":0,"breadcrumbs":4,"title":2},"5080":{"body":0,"breadcrumbs":5,"title":3},"5081":{"body":9,"breadcrumbs":7,"title":5},"5082":{"body":90,"breadcrumbs":7,"title":5},"5083":{"body":37,"breadcrumbs":7,"title":5},"5084":{"body":24,"breadcrumbs":7,"title":5},"5085":{"body":0,"breadcrumbs":5,"title":3},"5086":{"body":36,"breadcrumbs":7,"title":5},"5087":{"body":59,"breadcrumbs":7,"title":5},"5088":{"body":69,"breadcrumbs":7,"title":5},"5089":{"body":0,"breadcrumbs":5,"title":3},"509":{"body":61,"breadcrumbs":4,"title":2},"5090":{"body":62,"breadcrumbs":7,"title":5},"5091":{"body":54,"breadcrumbs":7,"title":5},"5092":{"body":29,"breadcrumbs":6,"title":4},"5093":{"body":26,"breadcrumbs":6,"title":4},"5094":{"body":58,"breadcrumbs":4,"title":2},"5095":{"body":0,"breadcrumbs":4,"title":2},"5096":{"body":101,"breadcrumbs":5,"title":3},"5097":{"body":0,"breadcrumbs":4,"title":2},"5098":{"body":18,"breadcrumbs":6,"title":4},"5099":{"body":21,"breadcrumbs":5,"title":3},"51":{"body":0,"breadcrumbs":4,"title":2},"510":{"body":51,"breadcrumbs":5,"title":3},"5100":{"body":43,"breadcrumbs":4,"title":2},"5101":{"body":30,"breadcrumbs":5,"title":3},"5102":{"body":0,"breadcrumbs":4,"title":2},"5103":{"body":22,"breadcrumbs":5,"title":3},"5104":{"body":17,"breadcrumbs":5,"title":3},"5105":{"body":18,"breadcrumbs":5,"title":3},"5106":{"body":20,"breadcrumbs":4,"title":2},"5107":{"body":70,"breadcrumbs":4,"title":2},"5108":{"body":7,"breadcrumbs":6,"title":4},"5109":{"body":0,"breadcrumbs":4,"title":2},"511":{"body":48,"breadcrumbs":4,"title":2},"5110":{"body":27,"breadcrumbs":8,"title":6},"5111":{"body":17,"breadcrumbs":6,"title":4},"5112":{"body":53,"breadcrumbs":7,"title":5},"5113":{"body":0,"breadcrumbs":4,"title":2},"5114":{"body":44,"breadcrumbs":5,"title":3},"5115":{"body":19,"breadcrumbs":5,"title":3},"5116":{"body":26,"breadcrumbs":5,"title":3},"5117":{"body":0,"breadcrumbs":7,"title":5},"5118":{"body":36,"breadcrumbs":6,"title":4},"5119":{"body":57,"breadcrumbs":5,"title":3},"512":{"body":0,"breadcrumbs":4,"title":2},"5120":{"body":0,"breadcrumbs":5,"title":3},"5121":{"body":41,"breadcrumbs":5,"title":3},"5122":{"body":44,"breadcrumbs":5,"title":3},"5123":{"body":0,"breadcrumbs":4,"title":2},"5124":{"body":7,"breadcrumbs":5,"title":3},"5125":{"body":7,"breadcrumbs":5,"title":3},"5126":{"body":11,"breadcrumbs":5,"title":3},"5127":{"body":16,"breadcrumbs":5,"title":3},"5128":{"body":40,"breadcrumbs":4,"title":2},"5129":{"body":0,"breadcrumbs":4,"title":2},"513":{"body":72,"breadcrumbs":5,"title":3},"5130":{"body":20,"breadcrumbs":5,"title":3},"5131":{"body":10,"breadcrumbs":4,"title":2},"5132":{"body":8,"breadcrumbs":4,"title":2},"5133":{"body":0,"breadcrumbs":5,"title":3},"5134":{"body":12,"breadcrumbs":5,"title":3},"5135":{"body":12,"breadcrumbs":5,"title":3},"5136":{"body":0,"breadcrumbs":5,"title":3},"5137":{"body":12,"breadcrumbs":6,"title":4},"5138":{"body":32,"breadcrumbs":4,"title":2},"5139":{"body":15,"breadcrumbs":4,"title":2},"514":{"body":0,"breadcrumbs":4,"title":2},"5140":{"body":14,"breadcrumbs":4,"title":2},"5141":{"body":61,"breadcrumbs":4,"title":2},"5142":{"body":0,"breadcrumbs":4,"title":2},"5143":{"body":30,"breadcrumbs":4,"title":2},"5144":{"body":23,"breadcrumbs":6,"title":4},"5145":{"body":55,"breadcrumbs":4,"title":2},"5146":{"body":13,"breadcrumbs":8,"title":5},"5147":{"body":25,"breadcrumbs":4,"title":1},"5148":{"body":0,"breadcrumbs":8,"title":5},"5149":{"body":19,"breadcrumbs":8,"title":5},"515":{"body":65,"breadcrumbs":4,"title":2},"5150":{"body":24,"breadcrumbs":7,"title":4},"5151":{"body":51,"breadcrumbs":7,"title":4},"5152":{"body":21,"breadcrumbs":7,"title":4},"5153":{"body":61,"breadcrumbs":7,"title":4},"5154":{"body":0,"breadcrumbs":6,"title":3},"5155":{"body":79,"breadcrumbs":6,"title":3},"5156":{"body":61,"breadcrumbs":6,"title":3},"5157":{"body":66,"breadcrumbs":6,"title":3},"5158":{"body":0,"breadcrumbs":5,"title":2},"5159":{"body":16,"breadcrumbs":6,"title":3},"516":{"body":25,"breadcrumbs":4,"title":2},"5160":{"body":16,"breadcrumbs":6,"title":3},"5161":{"body":0,"breadcrumbs":5,"title":2},"5162":{"body":55,"breadcrumbs":6,"title":3},"5163":{"body":110,"breadcrumbs":6,"title":3},"5164":{"body":0,"breadcrumbs":6,"title":3},"5165":{"body":17,"breadcrumbs":6,"title":3},"5166":{"body":10,"breadcrumbs":5,"title":2},"5167":{"body":12,"breadcrumbs":5,"title":2},"5168":{"body":11,"breadcrumbs":5,"title":2},"5169":{"body":15,"breadcrumbs":5,"title":2},"517":{"body":0,"breadcrumbs":6,"title":4},"5170":{"body":0,"breadcrumbs":6,"title":3},"5171":{"body":26,"breadcrumbs":5,"title":2},"5172":{"body":26,"breadcrumbs":6,"title":3},"5173":{"body":25,"breadcrumbs":5,"title":2},"5174":{"body":30,"breadcrumbs":5,"title":2},"5175":{"body":24,"breadcrumbs":4,"title":1},"5176":{"body":0,"breadcrumbs":7,"title":5},"5177":{"body":12,"breadcrumbs":8,"title":6},"5178":{"body":53,"breadcrumbs":4,"title":2},"5179":{"body":38,"breadcrumbs":4,"title":2},"518":{"body":49,"breadcrumbs":4,"title":2},"5180":{"body":43,"breadcrumbs":4,"title":2},"5181":{"body":34,"breadcrumbs":5,"title":3},"5182":{"body":105,"breadcrumbs":4,"title":2},"5183":{"body":20,"breadcrumbs":5,"title":3},"5184":{"body":29,"breadcrumbs":4,"title":2},"5185":{"body":19,"breadcrumbs":4,"title":2},"5186":{"body":7,"breadcrumbs":8,"title":4},"5187":{"body":23,"breadcrumbs":8,"title":4},"5188":{"body":53,"breadcrumbs":7,"title":3},"5189":{"body":0,"breadcrumbs":7,"title":3},"519":{"body":27,"breadcrumbs":4,"title":2},"5190":{"body":35,"breadcrumbs":7,"title":3},"5191":{"body":20,"breadcrumbs":6,"title":2},"5192":{"body":43,"breadcrumbs":7,"title":3},"5193":{"body":72,"breadcrumbs":8,"title":4},"5194":{"body":0,"breadcrumbs":6,"title":2},"5195":{"body":33,"breadcrumbs":6,"title":2},"5196":{"body":15,"breadcrumbs":5,"title":1},"5197":{"body":18,"breadcrumbs":5,"title":1},"5198":{"body":0,"breadcrumbs":7,"title":3},"5199":{"body":52,"breadcrumbs":7,"title":3},"52":{"body":55,"breadcrumbs":7,"title":5},"520":{"body":40,"breadcrumbs":4,"title":2},"5200":{"body":0,"breadcrumbs":6,"title":2},"5201":{"body":13,"breadcrumbs":6,"title":2},"5202":{"body":21,"breadcrumbs":7,"title":3},"5203":{"body":30,"breadcrumbs":6,"title":2},"5204":{"body":25,"breadcrumbs":6,"title":2},"5205":{"body":16,"breadcrumbs":6,"title":2},"5206":{"body":28,"breadcrumbs":8,"title":4},"5207":{"body":34,"breadcrumbs":6,"title":2},"5208":{"body":24,"breadcrumbs":5,"title":1},"5209":{"body":40,"breadcrumbs":6,"title":2},"521":{"body":0,"breadcrumbs":4,"title":2},"5210":{"body":36,"breadcrumbs":6,"title":2},"5211":{"body":0,"breadcrumbs":6,"title":2},"5212":{"body":67,"breadcrumbs":6,"title":2},"5213":{"body":36,"breadcrumbs":8,"title":4},"5214":{"body":42,"breadcrumbs":7,"title":3},"5215":{"body":23,"breadcrumbs":7,"title":3},"5216":{"body":23,"breadcrumbs":6,"title":2},"5217":{"body":0,"breadcrumbs":7,"title":3},"5218":{"body":78,"breadcrumbs":6,"title":2},"5219":{"body":75,"breadcrumbs":6,"title":2},"522":{"body":28,"breadcrumbs":3,"title":1},"5220":{"body":83,"breadcrumbs":6,"title":2},"5221":{"body":42,"breadcrumbs":6,"title":2},"5222":{"body":0,"breadcrumbs":6,"title":2},"5223":{"body":20,"breadcrumbs":8,"title":4},"5224":{"body":94,"breadcrumbs":7,"title":3},"5225":{"body":122,"breadcrumbs":8,"title":4},"5226":{"body":0,"breadcrumbs":6,"title":2},"5227":{"body":178,"breadcrumbs":9,"title":5},"5228":{"body":288,"breadcrumbs":8,"title":4},"5229":{"body":263,"breadcrumbs":10,"title":6},"523":{"body":20,"breadcrumbs":4,"title":2},"5230":{"body":277,"breadcrumbs":8,"title":4},"5231":{"body":0,"breadcrumbs":6,"title":2},"5232":{"body":390,"breadcrumbs":10,"title":6},"5233":{"body":35,"breadcrumbs":10,"title":6},"5234":{"body":30,"breadcrumbs":9,"title":5},"5235":{"body":0,"breadcrumbs":6,"title":2},"5236":{"body":31,"breadcrumbs":7,"title":3},"5237":{"body":33,"breadcrumbs":7,"title":3},"5238":{"body":27,"breadcrumbs":7,"title":3},"5239":{"body":31,"breadcrumbs":7,"title":3},"524":{"body":42,"breadcrumbs":4,"title":2},"5240":{"body":30,"breadcrumbs":7,"title":3},"5241":{"body":23,"breadcrumbs":7,"title":3},"5242":{"body":31,"breadcrumbs":7,"title":3},"5243":{"body":0,"breadcrumbs":5,"title":1},"5244":{"body":91,"breadcrumbs":9,"title":5},"5245":{"body":49,"breadcrumbs":8,"title":4},"5246":{"body":64,"breadcrumbs":7,"title":3},"5247":{"body":83,"breadcrumbs":8,"title":4},"5248":{"body":48,"breadcrumbs":5,"title":1},"5249":{"body":17,"breadcrumbs":8,"title":4},"525":{"body":0,"breadcrumbs":4,"title":2},"5250":{"body":19,"breadcrumbs":6,"title":2},"5251":{"body":49,"breadcrumbs":5,"title":1},"5252":{"body":28,"breadcrumbs":5,"title":1},"5253":{"body":0,"breadcrumbs":8,"title":4},"5254":{"body":80,"breadcrumbs":6,"title":2},"5255":{"body":65,"breadcrumbs":8,"title":4},"5256":{"body":88,"breadcrumbs":9,"title":5},"5257":{"body":75,"breadcrumbs":9,"title":5},"5258":{"body":0,"breadcrumbs":7,"title":3},"5259":{"body":54,"breadcrumbs":8,"title":4},"526":{"body":70,"breadcrumbs":5,"title":3},"5260":{"body":87,"breadcrumbs":8,"title":4},"5261":{"body":64,"breadcrumbs":8,"title":4},"5262":{"body":0,"breadcrumbs":7,"title":3},"5263":{"body":302,"breadcrumbs":8,"title":4},"5264":{"body":160,"breadcrumbs":8,"title":4},"5265":{"body":0,"breadcrumbs":7,"title":3},"5266":{"body":47,"breadcrumbs":9,"title":5},"5267":{"body":42,"breadcrumbs":7,"title":3},"5268":{"body":59,"breadcrumbs":7,"title":3},"5269":{"body":0,"breadcrumbs":6,"title":2},"527":{"body":32,"breadcrumbs":4,"title":2},"5270":{"body":36,"breadcrumbs":6,"title":2},"5271":{"body":87,"breadcrumbs":7,"title":3},"5272":{"body":35,"breadcrumbs":7,"title":3},"5273":{"body":55,"breadcrumbs":7,"title":3},"5274":{"body":0,"breadcrumbs":6,"title":2},"5275":{"body":345,"breadcrumbs":10,"title":6},"5276":{"body":0,"breadcrumbs":5,"title":1},"5277":{"body":47,"breadcrumbs":8,"title":4},"5278":{"body":39,"breadcrumbs":9,"title":5},"5279":{"body":33,"breadcrumbs":8,"title":4},"528":{"body":0,"breadcrumbs":5,"title":3},"5280":{"body":39,"breadcrumbs":8,"title":4},"5281":{"body":54,"breadcrumbs":5,"title":1},"5282":{"body":21,"breadcrumbs":6,"title":3},"5283":{"body":14,"breadcrumbs":5,"title":2},"5284":{"body":46,"breadcrumbs":4,"title":1},"5285":{"body":31,"breadcrumbs":6,"title":3},"5286":{"body":54,"breadcrumbs":5,"title":2},"5287":{"body":0,"breadcrumbs":4,"title":1},"5288":{"body":61,"breadcrumbs":5,"title":2},"5289":{"body":44,"breadcrumbs":5,"title":2},"529":{"body":39,"breadcrumbs":5,"title":3},"5290":{"body":0,"breadcrumbs":5,"title":2},"5291":{"body":19,"breadcrumbs":4,"title":1},"5292":{"body":24,"breadcrumbs":9,"title":6},"5293":{"body":13,"breadcrumbs":8,"title":5},"5294":{"body":16,"breadcrumbs":7,"title":4},"5295":{"body":16,"breadcrumbs":7,"title":4},"5296":{"body":0,"breadcrumbs":5,"title":2},"5297":{"body":126,"breadcrumbs":6,"title":3},"5298":{"body":20,"breadcrumbs":7,"title":4},"5299":{"body":22,"breadcrumbs":7,"title":4},"53":{"body":59,"breadcrumbs":6,"title":4},"530":{"body":16,"breadcrumbs":5,"title":3},"5300":{"body":22,"breadcrumbs":6,"title":3},"5301":{"body":33,"breadcrumbs":6,"title":3},"5302":{"body":21,"breadcrumbs":6,"title":3},"5303":{"body":20,"breadcrumbs":5,"title":2},"5304":{"body":18,"breadcrumbs":6,"title":3},"5305":{"body":18,"breadcrumbs":8,"title":5},"5306":{"body":15,"breadcrumbs":5,"title":2},"5307":{"body":0,"breadcrumbs":6,"title":3},"5308":{"body":108,"breadcrumbs":5,"title":2},"5309":{"body":59,"breadcrumbs":6,"title":3},"531":{"body":0,"breadcrumbs":4,"title":2},"5310":{"body":23,"breadcrumbs":5,"title":2},"5311":{"body":31,"breadcrumbs":6,"title":3},"5312":{"body":0,"breadcrumbs":5,"title":2},"5313":{"body":66,"breadcrumbs":8,"title":5},"5314":{"body":86,"breadcrumbs":8,"title":5},"5315":{"body":33,"breadcrumbs":8,"title":5},"5316":{"body":0,"breadcrumbs":5,"title":2},"5317":{"body":57,"breadcrumbs":6,"title":3},"5318":{"body":59,"breadcrumbs":6,"title":3},"5319":{"body":57,"breadcrumbs":7,"title":4},"532":{"body":39,"breadcrumbs":4,"title":2},"5320":{"body":33,"breadcrumbs":6,"title":3},"5321":{"body":36,"breadcrumbs":7,"title":4},"5322":{"body":44,"breadcrumbs":6,"title":3},"5323":{"body":0,"breadcrumbs":4,"title":1},"5324":{"body":56,"breadcrumbs":6,"title":3},"5325":{"body":60,"breadcrumbs":6,"title":3},"5326":{"body":46,"breadcrumbs":9,"title":6},"5327":{"body":48,"breadcrumbs":7,"title":4},"5328":{"body":44,"breadcrumbs":4,"title":1},"5329":{"body":24,"breadcrumbs":6,"title":3},"533":{"body":38,"breadcrumbs":4,"title":2},"5330":{"body":14,"breadcrumbs":5,"title":2},"5331":{"body":45,"breadcrumbs":4,"title":1},"5332":{"body":32,"breadcrumbs":6,"title":3},"5333":{"body":27,"breadcrumbs":10,"title":7},"5334":{"body":49,"breadcrumbs":5,"title":2},"5335":{"body":0,"breadcrumbs":4,"title":1},"5336":{"body":67,"breadcrumbs":5,"title":2},"5337":{"body":38,"breadcrumbs":5,"title":2},"5338":{"body":0,"breadcrumbs":5,"title":2},"5339":{"body":23,"breadcrumbs":4,"title":1},"534":{"body":0,"breadcrumbs":4,"title":2},"5340":{"body":25,"breadcrumbs":9,"title":6},"5341":{"body":13,"breadcrumbs":8,"title":5},"5342":{"body":18,"breadcrumbs":9,"title":6},"5343":{"body":21,"breadcrumbs":8,"title":5},"5344":{"body":19,"breadcrumbs":7,"title":4},"5345":{"body":0,"breadcrumbs":5,"title":2},"5346":{"body":108,"breadcrumbs":7,"title":4},"5347":{"body":25,"breadcrumbs":7,"title":4},"5348":{"body":17,"breadcrumbs":6,"title":3},"5349":{"body":20,"breadcrumbs":6,"title":3},"535":{"body":36,"breadcrumbs":4,"title":2},"5350":{"body":32,"breadcrumbs":6,"title":3},"5351":{"body":16,"breadcrumbs":5,"title":2},"5352":{"body":15,"breadcrumbs":5,"title":2},"5353":{"body":0,"breadcrumbs":6,"title":3},"5354":{"body":91,"breadcrumbs":6,"title":3},"5355":{"body":21,"breadcrumbs":5,"title":2},"5356":{"body":38,"breadcrumbs":6,"title":3},"5357":{"body":47,"breadcrumbs":5,"title":2},"5358":{"body":0,"breadcrumbs":5,"title":2},"5359":{"body":59,"breadcrumbs":9,"title":6},"536":{"body":0,"breadcrumbs":4,"title":2},"5360":{"body":82,"breadcrumbs":10,"title":7},"5361":{"body":68,"breadcrumbs":9,"title":6},"5362":{"body":0,"breadcrumbs":5,"title":2},"5363":{"body":62,"breadcrumbs":7,"title":4},"5364":{"body":40,"breadcrumbs":6,"title":3},"5365":{"body":34,"breadcrumbs":6,"title":3},"5366":{"body":44,"breadcrumbs":6,"title":3},"5367":{"body":23,"breadcrumbs":7,"title":4},"5368":{"body":32,"breadcrumbs":6,"title":3},"5369":{"body":0,"breadcrumbs":4,"title":1},"537":{"body":33,"breadcrumbs":4,"title":2},"5370":{"body":61,"breadcrumbs":6,"title":3},"5371":{"body":55,"breadcrumbs":7,"title":4},"5372":{"body":41,"breadcrumbs":8,"title":5},"5373":{"body":45,"breadcrumbs":8,"title":5},"5374":{"body":40,"breadcrumbs":4,"title":1},"5375":{"body":0,"breadcrumbs":10,"title":5},"5376":{"body":0,"breadcrumbs":10,"title":5},"5377":{"body":0,"breadcrumbs":10,"title":5},"5378":{"body":8,"breadcrumbs":6,"title":4},"5379":{"body":16,"breadcrumbs":5,"title":3},"538":{"body":0,"breadcrumbs":4,"title":2},"5380":{"body":31,"breadcrumbs":8,"title":6},"5381":{"body":30,"breadcrumbs":5,"title":3},"5382":{"body":6,"breadcrumbs":6,"title":3},"5383":{"body":31,"breadcrumbs":7,"title":4},"5384":{"body":48,"breadcrumbs":6,"title":3},"5385":{"body":85,"breadcrumbs":7,"title":4},"5386":{"body":53,"breadcrumbs":6,"title":3},"5387":{"body":0,"breadcrumbs":5,"title":2},"5388":{"body":20,"breadcrumbs":6,"title":3},"5389":{"body":55,"breadcrumbs":6,"title":3},"539":{"body":89,"breadcrumbs":4,"title":2},"5390":{"body":0,"breadcrumbs":5,"title":2},"5391":{"body":30,"breadcrumbs":5,"title":2},"5392":{"body":31,"breadcrumbs":5,"title":2},"5393":{"body":24,"breadcrumbs":5,"title":2},"5394":{"body":0,"breadcrumbs":5,"title":2},"5395":{"body":52,"breadcrumbs":5,"title":2},"5396":{"body":42,"breadcrumbs":6,"title":3},"5397":{"body":0,"breadcrumbs":6,"title":3},"5398":{"body":49,"breadcrumbs":6,"title":3},"5399":{"body":0,"breadcrumbs":6,"title":3},"54":{"body":33,"breadcrumbs":6,"title":4},"540":{"body":0,"breadcrumbs":4,"title":2},"5400":{"body":50,"breadcrumbs":7,"title":4},"5401":{"body":34,"breadcrumbs":5,"title":2},"5402":{"body":0,"breadcrumbs":5,"title":2},"5403":{"body":40,"breadcrumbs":5,"title":2},"5404":{"body":37,"breadcrumbs":5,"title":2},"5405":{"body":0,"breadcrumbs":6,"title":3},"5406":{"body":44,"breadcrumbs":6,"title":3},"5407":{"body":39,"breadcrumbs":6,"title":3},"5408":{"body":38,"breadcrumbs":6,"title":3},"5409":{"body":31,"breadcrumbs":5,"title":2},"541":{"body":30,"breadcrumbs":4,"title":2},"5410":{"body":0,"breadcrumbs":5,"title":2},"5411":{"body":56,"breadcrumbs":6,"title":3},"5412":{"body":58,"breadcrumbs":6,"title":3},"5413":{"body":38,"breadcrumbs":5,"title":2},"5414":{"body":64,"breadcrumbs":5,"title":2},"5415":{"body":53,"breadcrumbs":7,"title":4},"5416":{"body":0,"breadcrumbs":6,"title":3},"5417":{"body":32,"breadcrumbs":6,"title":3},"5418":{"body":38,"breadcrumbs":8,"title":5},"5419":{"body":51,"breadcrumbs":6,"title":3},"542":{"body":0,"breadcrumbs":4,"title":2},"5420":{"body":72,"breadcrumbs":5,"title":2},"5421":{"body":41,"breadcrumbs":5,"title":2},"5422":{"body":13,"breadcrumbs":7,"title":5},"5423":{"body":0,"breadcrumbs":2,"title":0},"5424":{"body":34,"breadcrumbs":5,"title":3},"5425":{"body":23,"breadcrumbs":4,"title":2},"5426":{"body":0,"breadcrumbs":4,"title":2},"5427":{"body":8,"breadcrumbs":4,"title":2},"5428":{"body":4,"breadcrumbs":4,"title":2},"5429":{"body":14,"breadcrumbs":4,"title":2},"543":{"body":55,"breadcrumbs":5,"title":3},"5430":{"body":24,"breadcrumbs":4,"title":2},"5431":{"body":0,"breadcrumbs":3,"title":1},"5432":{"body":10,"breadcrumbs":4,"title":2},"5433":{"body":7,"breadcrumbs":3,"title":1},"5434":{"body":8,"breadcrumbs":3,"title":1},"5435":{"body":0,"breadcrumbs":4,"title":2},"5436":{"body":7,"breadcrumbs":5,"title":3},"5437":{"body":8,"breadcrumbs":5,"title":3},"5438":{"body":7,"breadcrumbs":5,"title":3},"5439":{"body":9,"breadcrumbs":5,"title":3},"544":{"body":31,"breadcrumbs":4,"title":2},"5440":{"body":19,"breadcrumbs":3,"title":1},"5441":{"body":31,"breadcrumbs":4,"title":2},"5442":{"body":28,"breadcrumbs":6,"title":4},"5443":{"body":0,"breadcrumbs":4,"title":2},"5444":{"body":11,"breadcrumbs":3,"title":1},"5445":{"body":9,"breadcrumbs":6,"title":4},"5446":{"body":8,"breadcrumbs":7,"title":5},"5447":{"body":0,"breadcrumbs":3,"title":1},"5448":{"body":19,"breadcrumbs":3,"title":1},"5449":{"body":4,"breadcrumbs":6,"title":4},"545":{"body":28,"breadcrumbs":7,"title":5},"5450":{"body":4,"breadcrumbs":5,"title":3},"5451":{"body":0,"breadcrumbs":4,"title":2},"5452":{"body":11,"breadcrumbs":3,"title":1},"5453":{"body":10,"breadcrumbs":5,"title":3},"5454":{"body":4,"breadcrumbs":4,"title":2},"5455":{"body":0,"breadcrumbs":3,"title":1},"5456":{"body":10,"breadcrumbs":4,"title":2},"5457":{"body":12,"breadcrumbs":3,"title":1},"5458":{"body":0,"breadcrumbs":3,"title":1},"5459":{"body":17,"breadcrumbs":3,"title":1},"546":{"body":0,"breadcrumbs":5,"title":3},"5460":{"body":12,"breadcrumbs":3,"title":1},"5461":{"body":15,"breadcrumbs":3,"title":1},"5462":{"body":16,"breadcrumbs":4,"title":2},"5463":{"body":22,"breadcrumbs":4,"title":2},"5464":{"body":16,"breadcrumbs":3,"title":1},"5465":{"body":37,"breadcrumbs":3,"title":1},"5466":{"body":0,"breadcrumbs":6,"title":4},"5467":{"body":74,"breadcrumbs":4,"title":2},"5468":{"body":97,"breadcrumbs":4,"title":2},"5469":{"body":151,"breadcrumbs":4,"title":2},"547":{"body":81,"breadcrumbs":4,"title":2},"5470":{"body":27,"breadcrumbs":4,"title":2},"5471":{"body":0,"breadcrumbs":4,"title":2},"5472":{"body":8,"breadcrumbs":5,"title":3},"5473":{"body":27,"breadcrumbs":5,"title":3},"5474":{"body":14,"breadcrumbs":5,"title":3},"5475":{"body":22,"breadcrumbs":4,"title":2},"5476":{"body":19,"breadcrumbs":4,"title":2},"5477":{"body":51,"breadcrumbs":3,"title":1},"5478":{"body":12,"breadcrumbs":4,"title":2},"5479":{"body":33,"breadcrumbs":3,"title":1},"548":{"body":0,"breadcrumbs":4,"title":2},"5480":{"body":6,"breadcrumbs":6,"title":4},"5481":{"body":18,"breadcrumbs":3,"title":1},"5482":{"body":41,"breadcrumbs":6,"title":4},"5483":{"body":0,"breadcrumbs":4,"title":2},"5484":{"body":25,"breadcrumbs":4,"title":2},"5485":{"body":28,"breadcrumbs":4,"title":2},"5486":{"body":27,"breadcrumbs":4,"title":2},"5487":{"body":30,"breadcrumbs":4,"title":2},"5488":{"body":0,"breadcrumbs":4,"title":2},"5489":{"body":28,"breadcrumbs":5,"title":3},"549":{"body":129,"breadcrumbs":5,"title":3},"5490":{"body":27,"breadcrumbs":4,"title":2},"5491":{"body":60,"breadcrumbs":5,"title":3},"5492":{"body":0,"breadcrumbs":4,"title":2},"5493":{"body":20,"breadcrumbs":5,"title":3},"5494":{"body":19,"breadcrumbs":5,"title":3},"5495":{"body":40,"breadcrumbs":4,"title":2},"5496":{"body":0,"breadcrumbs":3,"title":1},"5497":{"body":11,"breadcrumbs":4,"title":2},"5498":{"body":12,"breadcrumbs":4,"title":2},"5499":{"body":20,"breadcrumbs":4,"title":2},"55":{"body":33,"breadcrumbs":6,"title":4},"550":{"body":31,"breadcrumbs":5,"title":3},"5500":{"body":18,"breadcrumbs":5,"title":3},"5501":{"body":0,"breadcrumbs":4,"title":2},"5502":{"body":13,"breadcrumbs":3,"title":1},"5503":{"body":14,"breadcrumbs":3,"title":1},"5504":{"body":17,"breadcrumbs":3,"title":1},"5505":{"body":0,"breadcrumbs":4,"title":2},"5506":{"body":19,"breadcrumbs":4,"title":2},"5507":{"body":18,"breadcrumbs":4,"title":2},"5508":{"body":9,"breadcrumbs":5,"title":3},"5509":{"body":12,"breadcrumbs":5,"title":3},"551":{"body":0,"breadcrumbs":4,"title":2},"5510":{"body":16,"breadcrumbs":4,"title":2},"5511":{"body":28,"breadcrumbs":4,"title":2},"5512":{"body":23,"breadcrumbs":4,"title":2},"5513":{"body":0,"breadcrumbs":8,"title":5},"5514":{"body":19,"breadcrumbs":5,"title":2},"5515":{"body":0,"breadcrumbs":5,"title":2},"5516":{"body":13,"breadcrumbs":7,"title":4},"5517":{"body":10,"breadcrumbs":7,"title":4},"5518":{"body":11,"breadcrumbs":7,"title":4},"5519":{"body":0,"breadcrumbs":6,"title":3},"552":{"body":52,"breadcrumbs":5,"title":3},"5520":{"body":48,"breadcrumbs":6,"title":3},"5521":{"body":54,"breadcrumbs":9,"title":6},"5522":{"body":26,"breadcrumbs":5,"title":2},"5523":{"body":28,"breadcrumbs":5,"title":2},"5524":{"body":37,"breadcrumbs":4,"title":1},"5525":{"body":0,"breadcrumbs":6,"title":3},"5526":{"body":5,"breadcrumbs":5,"title":2},"5527":{"body":4,"breadcrumbs":5,"title":2},"5528":{"body":5,"breadcrumbs":5,"title":2},"5529":{"body":39,"breadcrumbs":4,"title":1},"553":{"body":35,"breadcrumbs":3,"title":1},"5530":{"body":38,"breadcrumbs":5,"title":2},"5531":{"body":0,"breadcrumbs":5,"title":3},"5532":{"body":13,"breadcrumbs":3,"title":1},"5533":{"body":0,"breadcrumbs":5,"title":3},"5534":{"body":31,"breadcrumbs":6,"title":4},"5535":{"body":29,"breadcrumbs":5,"title":3},"5536":{"body":23,"breadcrumbs":5,"title":3},"5537":{"body":28,"breadcrumbs":5,"title":3},"5538":{"body":27,"breadcrumbs":5,"title":3},"5539":{"body":22,"breadcrumbs":5,"title":3},"554":{"body":28,"breadcrumbs":4,"title":2},"5540":{"body":0,"breadcrumbs":5,"title":3},"5541":{"body":30,"breadcrumbs":4,"title":2},"5542":{"body":41,"breadcrumbs":4,"title":2},"5543":{"body":13,"breadcrumbs":5,"title":3},"5544":{"body":0,"breadcrumbs":4,"title":2},"5545":{"body":45,"breadcrumbs":4,"title":2},"5546":{"body":71,"breadcrumbs":5,"title":3},"5547":{"body":51,"breadcrumbs":6,"title":4},"5548":{"body":58,"breadcrumbs":5,"title":3},"5549":{"body":0,"breadcrumbs":4,"title":2},"555":{"body":0,"breadcrumbs":4,"title":2},"5550":{"body":20,"breadcrumbs":4,"title":2},"5551":{"body":25,"breadcrumbs":5,"title":3},"5552":{"body":25,"breadcrumbs":5,"title":3},"5553":{"body":0,"breadcrumbs":4,"title":2},"5554":{"body":37,"breadcrumbs":5,"title":3},"5555":{"body":23,"breadcrumbs":4,"title":2},"5556":{"body":0,"breadcrumbs":5,"title":3},"5557":{"body":11,"breadcrumbs":6,"title":4},"5558":{"body":12,"breadcrumbs":6,"title":4},"5559":{"body":8,"breadcrumbs":6,"title":4},"556":{"body":38,"breadcrumbs":6,"title":4},"5560":{"body":10,"breadcrumbs":6,"title":4},"5561":{"body":7,"breadcrumbs":6,"title":4},"5562":{"body":20,"breadcrumbs":7,"title":5},"5563":{"body":0,"breadcrumbs":4,"title":2},"5564":{"body":6,"breadcrumbs":4,"title":2},"5565":{"body":18,"breadcrumbs":4,"title":2},"5566":{"body":38,"breadcrumbs":4,"title":2},"5567":{"body":0,"breadcrumbs":4,"title":2},"5568":{"body":10,"breadcrumbs":5,"title":3},"5569":{"body":14,"breadcrumbs":6,"title":4},"557":{"body":35,"breadcrumbs":5,"title":3},"5570":{"body":17,"breadcrumbs":5,"title":3},"5571":{"body":10,"breadcrumbs":5,"title":3},"5572":{"body":8,"breadcrumbs":5,"title":3},"5573":{"body":0,"breadcrumbs":3,"title":1},"5574":{"body":13,"breadcrumbs":5,"title":3},"5575":{"body":20,"breadcrumbs":4,"title":2},"5576":{"body":23,"breadcrumbs":4,"title":2},"5577":{"body":8,"breadcrumbs":4,"title":2},"558":{"body":0,"breadcrumbs":5,"title":3},"559":{"body":49,"breadcrumbs":5,"title":3},"56":{"body":0,"breadcrumbs":5,"title":3},"560":{"body":21,"breadcrumbs":3,"title":1},"561":{"body":27,"breadcrumbs":5,"title":3},"562":{"body":0,"breadcrumbs":4,"title":2},"563":{"body":21,"breadcrumbs":6,"title":4},"564":{"body":0,"breadcrumbs":3,"title":1},"565":{"body":23,"breadcrumbs":4,"title":2},"566":{"body":16,"breadcrumbs":4,"title":2},"567":{"body":0,"breadcrumbs":4,"title":2},"568":{"body":46,"breadcrumbs":4,"title":2},"569":{"body":59,"breadcrumbs":5,"title":3},"57":{"body":77,"breadcrumbs":4,"title":2},"570":{"body":0,"breadcrumbs":4,"title":2},"571":{"body":37,"breadcrumbs":5,"title":3},"572":{"body":23,"breadcrumbs":4,"title":2},"573":{"body":0,"breadcrumbs":5,"title":3},"574":{"body":35,"breadcrumbs":3,"title":1},"575":{"body":32,"breadcrumbs":3,"title":1},"576":{"body":32,"breadcrumbs":3,"title":1},"577":{"body":0,"breadcrumbs":4,"title":2},"578":{"body":52,"breadcrumbs":5,"title":3},"579":{"body":29,"breadcrumbs":4,"title":2},"58":{"body":0,"breadcrumbs":5,"title":3},"580":{"body":0,"breadcrumbs":4,"title":2},"581":{"body":30,"breadcrumbs":4,"title":2},"582":{"body":19,"breadcrumbs":3,"title":1},"583":{"body":31,"breadcrumbs":4,"title":2},"584":{"body":32,"breadcrumbs":6,"title":4},"585":{"body":0,"breadcrumbs":4,"title":2},"586":{"body":62,"breadcrumbs":2,"title":0},"587":{"body":0,"breadcrumbs":4,"title":2},"588":{"body":83,"breadcrumbs":5,"title":3},"589":{"body":0,"breadcrumbs":4,"title":2},"59":{"body":27,"breadcrumbs":4,"title":2},"590":{"body":205,"breadcrumbs":7,"title":5},"591":{"body":159,"breadcrumbs":7,"title":5},"592":{"body":0,"breadcrumbs":4,"title":2},"593":{"body":46,"breadcrumbs":4,"title":2},"594":{"body":35,"breadcrumbs":4,"title":2},"595":{"body":44,"breadcrumbs":4,"title":2},"596":{"body":0,"breadcrumbs":4,"title":2},"597":{"body":22,"breadcrumbs":5,"title":3},"598":{"body":27,"breadcrumbs":5,"title":3},"599":{"body":0,"breadcrumbs":4,"title":2},"6":{"body":26,"breadcrumbs":3,"title":2},"60":{"body":28,"breadcrumbs":4,"title":2},"600":{"body":22,"breadcrumbs":4,"title":2},"601":{"body":21,"breadcrumbs":4,"title":2},"602":{"body":0,"breadcrumbs":4,"title":2},"603":{"body":41,"breadcrumbs":4,"title":2},"604":{"body":56,"breadcrumbs":4,"title":2},"605":{"body":0,"breadcrumbs":3,"title":1},"606":{"body":44,"breadcrumbs":4,"title":2},"607":{"body":26,"breadcrumbs":4,"title":2},"608":{"body":0,"breadcrumbs":4,"title":2},"609":{"body":36,"breadcrumbs":4,"title":2},"61":{"body":12,"breadcrumbs":4,"title":2},"610":{"body":35,"breadcrumbs":3,"title":1},"611":{"body":0,"breadcrumbs":3,"title":1},"612":{"body":19,"breadcrumbs":4,"title":2},"613":{"body":20,"breadcrumbs":5,"title":3},"614":{"body":0,"breadcrumbs":4,"title":2},"615":{"body":11,"breadcrumbs":6,"title":4},"616":{"body":43,"breadcrumbs":4,"title":2},"617":{"body":33,"breadcrumbs":6,"title":4},"618":{"body":0,"breadcrumbs":4,"title":2},"619":{"body":62,"breadcrumbs":5,"title":3},"62":{"body":9,"breadcrumbs":4,"title":2},"620":{"body":91,"breadcrumbs":4,"title":2},"621":{"body":0,"breadcrumbs":5,"title":3},"622":{"body":86,"breadcrumbs":4,"title":2},"623":{"body":53,"breadcrumbs":4,"title":2},"624":{"body":56,"breadcrumbs":4,"title":2},"625":{"body":21,"breadcrumbs":5,"title":3},"626":{"body":0,"breadcrumbs":4,"title":2},"627":{"body":47,"breadcrumbs":4,"title":2},"628":{"body":89,"breadcrumbs":4,"title":2},"629":{"body":0,"breadcrumbs":4,"title":2},"63":{"body":0,"breadcrumbs":4,"title":2},"630":{"body":37,"breadcrumbs":8,"title":6},"631":{"body":36,"breadcrumbs":7,"title":5},"632":{"body":62,"breadcrumbs":8,"title":6},"633":{"body":51,"breadcrumbs":6,"title":4},"634":{"body":44,"breadcrumbs":7,"title":5},"635":{"body":0,"breadcrumbs":5,"title":3},"636":{"body":29,"breadcrumbs":5,"title":3},"637":{"body":25,"breadcrumbs":5,"title":3},"638":{"body":41,"breadcrumbs":5,"title":3},"639":{"body":36,"breadcrumbs":5,"title":3},"64":{"body":18,"breadcrumbs":4,"title":2},"640":{"body":0,"breadcrumbs":4,"title":2},"641":{"body":40,"breadcrumbs":4,"title":2},"642":{"body":0,"breadcrumbs":4,"title":2},"643":{"body":42,"breadcrumbs":4,"title":2},"644":{"body":0,"breadcrumbs":5,"title":3},"645":{"body":104,"breadcrumbs":4,"title":2},"646":{"body":0,"breadcrumbs":5,"title":3},"647":{"body":55,"breadcrumbs":5,"title":3},"648":{"body":0,"breadcrumbs":4,"title":2},"649":{"body":42,"breadcrumbs":5,"title":3},"65":{"body":8,"breadcrumbs":4,"title":2},"650":{"body":24,"breadcrumbs":4,"title":2},"651":{"body":47,"breadcrumbs":4,"title":2},"652":{"body":33,"breadcrumbs":7,"title":4},"653":{"body":0,"breadcrumbs":5,"title":2},"654":{"body":30,"breadcrumbs":3,"title":0},"655":{"body":31,"breadcrumbs":6,"title":3},"656":{"body":0,"breadcrumbs":4,"title":1},"657":{"body":64,"breadcrumbs":5,"title":2},"658":{"body":0,"breadcrumbs":6,"title":3},"659":{"body":35,"breadcrumbs":6,"title":3},"66":{"body":21,"breadcrumbs":5,"title":3},"660":{"body":32,"breadcrumbs":6,"title":3},"661":{"body":54,"breadcrumbs":6,"title":3},"662":{"body":31,"breadcrumbs":6,"title":3},"663":{"body":0,"breadcrumbs":5,"title":2},"664":{"body":63,"breadcrumbs":5,"title":2},"665":{"body":36,"breadcrumbs":5,"title":2},"666":{"body":0,"breadcrumbs":5,"title":2},"667":{"body":52,"breadcrumbs":7,"title":4},"668":{"body":103,"breadcrumbs":8,"title":5},"669":{"body":0,"breadcrumbs":5,"title":2},"67":{"body":0,"breadcrumbs":5,"title":3},"670":{"body":53,"breadcrumbs":6,"title":3},"671":{"body":36,"breadcrumbs":5,"title":2},"672":{"body":0,"breadcrumbs":5,"title":2},"673":{"body":47,"breadcrumbs":6,"title":3},"674":{"body":0,"breadcrumbs":5,"title":2},"675":{"body":26,"breadcrumbs":5,"title":2},"676":{"body":19,"breadcrumbs":5,"title":2},"677":{"body":20,"breadcrumbs":5,"title":2},"678":{"body":0,"breadcrumbs":5,"title":2},"679":{"body":44,"breadcrumbs":6,"title":3},"68":{"body":28,"breadcrumbs":4,"title":2},"680":{"body":37,"breadcrumbs":7,"title":4},"681":{"body":37,"breadcrumbs":5,"title":2},"682":{"body":4,"breadcrumbs":7,"title":5},"683":{"body":14,"breadcrumbs":3,"title":1},"684":{"body":0,"breadcrumbs":4,"title":2},"685":{"body":13,"breadcrumbs":4,"title":2},"686":{"body":17,"breadcrumbs":6,"title":4},"687":{"body":12,"breadcrumbs":5,"title":3},"688":{"body":14,"breadcrumbs":4,"title":2},"689":{"body":15,"breadcrumbs":3,"title":1},"69":{"body":22,"breadcrumbs":4,"title":2},"690":{"body":16,"breadcrumbs":4,"title":2},"691":{"body":28,"breadcrumbs":4,"title":2},"692":{"body":33,"breadcrumbs":8,"title":5},"693":{"body":0,"breadcrumbs":5,"title":2},"694":{"body":44,"breadcrumbs":3,"title":0},"695":{"body":34,"breadcrumbs":6,"title":3},"696":{"body":0,"breadcrumbs":4,"title":1},"697":{"body":32,"breadcrumbs":6,"title":3},"698":{"body":59,"breadcrumbs":5,"title":2},"699":{"body":0,"breadcrumbs":5,"title":2},"7":{"body":29,"breadcrumbs":2,"title":1},"70":{"body":22,"breadcrumbs":3,"title":1},"700":{"body":95,"breadcrumbs":7,"title":4},"701":{"body":49,"breadcrumbs":7,"title":4},"702":{"body":76,"breadcrumbs":8,"title":5},"703":{"body":34,"breadcrumbs":6,"title":3},"704":{"body":44,"breadcrumbs":7,"title":4},"705":{"body":0,"breadcrumbs":5,"title":2},"706":{"body":81,"breadcrumbs":7,"title":4},"707":{"body":42,"breadcrumbs":6,"title":3},"708":{"body":0,"breadcrumbs":4,"title":1},"709":{"body":45,"breadcrumbs":6,"title":3},"71":{"body":21,"breadcrumbs":4,"title":2},"710":{"body":0,"breadcrumbs":6,"title":3},"711":{"body":143,"breadcrumbs":8,"title":5},"712":{"body":37,"breadcrumbs":7,"title":4},"713":{"body":45,"breadcrumbs":7,"title":4},"714":{"body":40,"breadcrumbs":5,"title":2},"715":{"body":38,"breadcrumbs":7,"title":5},"716":{"body":0,"breadcrumbs":4,"title":2},"717":{"body":65,"breadcrumbs":2,"title":0},"718":{"body":0,"breadcrumbs":4,"title":2},"719":{"body":79,"breadcrumbs":6,"title":4},"72":{"body":0,"breadcrumbs":4,"title":2},"720":{"body":88,"breadcrumbs":5,"title":3},"721":{"body":51,"breadcrumbs":4,"title":2},"722":{"body":0,"breadcrumbs":3,"title":1},"723":{"body":40,"breadcrumbs":5,"title":3},"724":{"body":105,"breadcrumbs":4,"title":2},"725":{"body":0,"breadcrumbs":5,"title":3},"726":{"body":34,"breadcrumbs":6,"title":4},"727":{"body":34,"breadcrumbs":6,"title":4},"728":{"body":32,"breadcrumbs":5,"title":3},"729":{"body":30,"breadcrumbs":5,"title":3},"73":{"body":22,"breadcrumbs":4,"title":2},"730":{"body":29,"breadcrumbs":5,"title":3},"731":{"body":0,"breadcrumbs":4,"title":2},"732":{"body":119,"breadcrumbs":7,"title":5},"733":{"body":127,"breadcrumbs":7,"title":5},"734":{"body":0,"breadcrumbs":4,"title":2},"735":{"body":42,"breadcrumbs":5,"title":3},"736":{"body":75,"breadcrumbs":4,"title":2},"737":{"body":59,"breadcrumbs":4,"title":2},"738":{"body":0,"breadcrumbs":3,"title":1},"739":{"body":46,"breadcrumbs":4,"title":2},"74":{"body":18,"breadcrumbs":4,"title":2},"740":{"body":50,"breadcrumbs":6,"title":4},"741":{"body":40,"breadcrumbs":4,"title":2},"742":{"body":0,"breadcrumbs":4,"title":2},"743":{"body":35,"breadcrumbs":4,"title":2},"744":{"body":0,"breadcrumbs":5,"title":3},"745":{"body":98,"breadcrumbs":4,"title":2},"746":{"body":0,"breadcrumbs":4,"title":2},"747":{"body":143,"breadcrumbs":6,"title":4},"748":{"body":89,"breadcrumbs":6,"title":4},"749":{"body":103,"breadcrumbs":6,"title":4},"75":{"body":0,"breadcrumbs":4,"title":2},"750":{"body":136,"breadcrumbs":5,"title":3},"751":{"body":0,"breadcrumbs":5,"title":3},"752":{"body":25,"breadcrumbs":8,"title":6},"753":{"body":27,"breadcrumbs":8,"title":6},"754":{"body":21,"breadcrumbs":8,"title":6},"755":{"body":21,"breadcrumbs":7,"title":5},"756":{"body":22,"breadcrumbs":8,"title":6},"757":{"body":0,"breadcrumbs":5,"title":3},"758":{"body":18,"breadcrumbs":5,"title":3},"759":{"body":23,"breadcrumbs":5,"title":3},"76":{"body":29,"breadcrumbs":5,"title":3},"760":{"body":20,"breadcrumbs":5,"title":3},"761":{"body":0,"breadcrumbs":4,"title":2},"762":{"body":29,"breadcrumbs":4,"title":2},"763":{"body":22,"breadcrumbs":4,"title":2},"764":{"body":15,"breadcrumbs":4,"title":2},"765":{"body":0,"breadcrumbs":4,"title":2},"766":{"body":35,"breadcrumbs":4,"title":2},"767":{"body":35,"breadcrumbs":4,"title":2},"768":{"body":0,"breadcrumbs":4,"title":2},"769":{"body":32,"breadcrumbs":4,"title":2},"77":{"body":17,"breadcrumbs":4,"title":2},"770":{"body":32,"breadcrumbs":4,"title":2},"771":{"body":0,"breadcrumbs":4,"title":2},"772":{"body":26,"breadcrumbs":3,"title":1},"773":{"body":24,"breadcrumbs":3,"title":1},"774":{"body":37,"breadcrumbs":3,"title":1},"775":{"body":11,"breadcrumbs":6,"title":4},"776":{"body":22,"breadcrumbs":4,"title":2},"777":{"body":0,"breadcrumbs":4,"title":2},"778":{"body":21,"breadcrumbs":4,"title":2},"779":{"body":42,"breadcrumbs":4,"title":2},"78":{"body":0,"breadcrumbs":5,"title":3},"780":{"body":32,"breadcrumbs":4,"title":2},"781":{"body":55,"breadcrumbs":4,"title":2},"782":{"body":0,"breadcrumbs":4,"title":2},"783":{"body":121,"breadcrumbs":5,"title":3},"784":{"body":83,"breadcrumbs":5,"title":3},"785":{"body":0,"breadcrumbs":4,"title":2},"786":{"body":543,"breadcrumbs":4,"title":2},"787":{"body":0,"breadcrumbs":4,"title":2},"788":{"body":11,"breadcrumbs":6,"title":4},"789":{"body":69,"breadcrumbs":4,"title":2},"79":{"body":130,"breadcrumbs":5,"title":3},"790":{"body":30,"breadcrumbs":4,"title":2},"791":{"body":157,"breadcrumbs":5,"title":3},"792":{"body":0,"breadcrumbs":4,"title":2},"793":{"body":36,"breadcrumbs":4,"title":2},"794":{"body":53,"breadcrumbs":4,"title":2},"795":{"body":41,"breadcrumbs":4,"title":2},"796":{"body":0,"breadcrumbs":4,"title":2},"797":{"body":113,"breadcrumbs":4,"title":2},"798":{"body":79,"breadcrumbs":4,"title":2},"799":{"body":0,"breadcrumbs":4,"title":2},"8":{"body":7,"breadcrumbs":2,"title":1},"80":{"body":72,"breadcrumbs":4,"title":2},"800":{"body":130,"breadcrumbs":4,"title":2},"801":{"body":32,"breadcrumbs":4,"title":2},"802":{"body":29,"breadcrumbs":5,"title":3},"803":{"body":0,"breadcrumbs":4,"title":2},"804":{"body":229,"breadcrumbs":4,"title":2},"805":{"body":0,"breadcrumbs":4,"title":2},"806":{"body":149,"breadcrumbs":4,"title":2},"807":{"body":0,"breadcrumbs":4,"title":2},"808":{"body":54,"breadcrumbs":4,"title":2},"809":{"body":62,"breadcrumbs":4,"title":2},"81":{"body":25,"breadcrumbs":4,"title":2},"810":{"body":34,"breadcrumbs":4,"title":2},"811":{"body":0,"breadcrumbs":4,"title":2},"812":{"body":59,"breadcrumbs":4,"title":2},"813":{"body":59,"breadcrumbs":5,"title":3},"814":{"body":0,"breadcrumbs":4,"title":2},"815":{"body":17,"breadcrumbs":3,"title":1},"816":{"body":24,"breadcrumbs":3,"title":1},"817":{"body":26,"breadcrumbs":4,"title":2},"818":{"body":0,"breadcrumbs":4,"title":2},"819":{"body":26,"breadcrumbs":3,"title":1},"82":{"body":27,"breadcrumbs":4,"title":2},"820":{"body":0,"breadcrumbs":5,"title":3},"821":{"body":112,"breadcrumbs":8,"title":6},"822":{"body":119,"breadcrumbs":6,"title":4},"823":{"body":68,"breadcrumbs":6,"title":4},"824":{"body":79,"breadcrumbs":6,"title":4},"825":{"body":48,"breadcrumbs":5,"title":3},"826":{"body":0,"breadcrumbs":5,"title":3},"827":{"body":90,"breadcrumbs":6,"title":4},"828":{"body":46,"breadcrumbs":6,"title":4},"829":{"body":72,"breadcrumbs":6,"title":4},"83":{"body":24,"breadcrumbs":7,"title":4},"830":{"body":0,"breadcrumbs":5,"title":3},"831":{"body":69,"breadcrumbs":6,"title":4},"832":{"body":0,"breadcrumbs":5,"title":3},"833":{"body":95,"breadcrumbs":7,"title":5},"834":{"body":59,"breadcrumbs":5,"title":3},"835":{"body":0,"breadcrumbs":5,"title":3},"836":{"body":39,"breadcrumbs":5,"title":3},"837":{"body":39,"breadcrumbs":6,"title":4},"838":{"body":55,"breadcrumbs":6,"title":4},"839":{"body":0,"breadcrumbs":4,"title":2},"84":{"body":8,"breadcrumbs":7,"title":4},"840":{"body":89,"breadcrumbs":6,"title":4},"841":{"body":76,"breadcrumbs":3,"title":1},"842":{"body":0,"breadcrumbs":4,"title":2},"843":{"body":25,"breadcrumbs":3,"title":1},"844":{"body":0,"breadcrumbs":5,"title":3},"845":{"body":109,"breadcrumbs":6,"title":4},"846":{"body":112,"breadcrumbs":6,"title":4},"847":{"body":89,"breadcrumbs":6,"title":4},"848":{"body":92,"breadcrumbs":6,"title":4},"849":{"body":139,"breadcrumbs":6,"title":4},"85":{"body":97,"breadcrumbs":8,"title":5},"850":{"body":93,"breadcrumbs":6,"title":4},"851":{"body":96,"breadcrumbs":6,"title":4},"852":{"body":59,"breadcrumbs":6,"title":4},"853":{"body":0,"breadcrumbs":5,"title":3},"854":{"body":40,"breadcrumbs":5,"title":3},"855":{"body":44,"breadcrumbs":5,"title":3},"856":{"body":0,"breadcrumbs":5,"title":3},"857":{"body":67,"breadcrumbs":5,"title":3},"858":{"body":26,"breadcrumbs":6,"title":4},"859":{"body":0,"breadcrumbs":5,"title":3},"86":{"body":43,"breadcrumbs":8,"title":5},"860":{"body":35,"breadcrumbs":5,"title":3},"861":{"body":14,"breadcrumbs":8,"title":5},"862":{"body":29,"breadcrumbs":5,"title":2},"863":{"body":0,"breadcrumbs":8,"title":5},"864":{"body":49,"breadcrumbs":6,"title":3},"865":{"body":37,"breadcrumbs":6,"title":3},"866":{"body":79,"breadcrumbs":6,"title":3},"867":{"body":0,"breadcrumbs":5,"title":2},"868":{"body":114,"breadcrumbs":9,"title":6},"869":{"body":92,"breadcrumbs":9,"title":6},"87":{"body":31,"breadcrumbs":8,"title":5},"870":{"body":100,"breadcrumbs":7,"title":4},"871":{"body":0,"breadcrumbs":7,"title":4},"872":{"body":58,"breadcrumbs":9,"title":6},"873":{"body":55,"breadcrumbs":6,"title":3},"874":{"body":32,"breadcrumbs":5,"title":2},"875":{"body":0,"breadcrumbs":6,"title":3},"876":{"body":14,"breadcrumbs":10,"title":7},"877":{"body":122,"breadcrumbs":8,"title":5},"878":{"body":106,"breadcrumbs":8,"title":5},"879":{"body":0,"breadcrumbs":4,"title":1},"88":{"body":27,"breadcrumbs":8,"title":5},"880":{"body":94,"breadcrumbs":5,"title":2},"881":{"body":51,"breadcrumbs":5,"title":2},"882":{"body":0,"breadcrumbs":7,"title":4},"883":{"body":38,"breadcrumbs":4,"title":1},"884":{"body":98,"breadcrumbs":5,"title":2},"885":{"body":0,"breadcrumbs":5,"title":2},"886":{"body":41,"breadcrumbs":6,"title":3},"887":{"body":30,"breadcrumbs":6,"title":3},"888":{"body":0,"breadcrumbs":5,"title":2},"889":{"body":14,"breadcrumbs":6,"title":3},"89":{"body":30,"breadcrumbs":8,"title":5},"890":{"body":18,"breadcrumbs":7,"title":4},"891":{"body":15,"breadcrumbs":7,"title":4},"892":{"body":15,"breadcrumbs":9,"title":6},"893":{"body":80,"breadcrumbs":4,"title":1},"894":{"body":9,"breadcrumbs":9,"title":6},"895":{"body":19,"breadcrumbs":4,"title":1},"896":{"body":42,"breadcrumbs":5,"title":2},"897":{"body":0,"breadcrumbs":5,"title":2},"898":{"body":169,"breadcrumbs":7,"title":4},"899":{"body":196,"breadcrumbs":7,"title":4},"9":{"body":16,"breadcrumbs":2,"title":1},"90":{"body":31,"breadcrumbs":5,"title":2},"900":{"body":105,"breadcrumbs":7,"title":4},"901":{"body":0,"breadcrumbs":6,"title":3},"902":{"body":38,"breadcrumbs":5,"title":2},"903":{"body":36,"breadcrumbs":6,"title":3},"904":{"body":0,"breadcrumbs":5,"title":2},"905":{"body":99,"breadcrumbs":5,"title":2},"906":{"body":45,"breadcrumbs":5,"title":2},"907":{"body":33,"breadcrumbs":6,"title":3},"908":{"body":0,"breadcrumbs":6,"title":3},"909":{"body":75,"breadcrumbs":5,"title":2},"91":{"body":11,"breadcrumbs":7,"title":4},"910":{"body":25,"breadcrumbs":5,"title":2},"911":{"body":0,"breadcrumbs":6,"title":3},"912":{"body":27,"breadcrumbs":6,"title":3},"913":{"body":29,"breadcrumbs":7,"title":4},"914":{"body":18,"breadcrumbs":6,"title":3},"915":{"body":30,"breadcrumbs":6,"title":3},"916":{"body":19,"breadcrumbs":7,"title":4},"917":{"body":0,"breadcrumbs":6,"title":3},"918":{"body":36,"breadcrumbs":7,"title":4},"919":{"body":37,"breadcrumbs":7,"title":4},"92":{"body":3,"breadcrumbs":8,"title":5},"920":{"body":0,"breadcrumbs":5,"title":2},"921":{"body":15,"breadcrumbs":8,"title":5},"922":{"body":22,"breadcrumbs":7,"title":4},"923":{"body":61,"breadcrumbs":7,"title":4},"924":{"body":10,"breadcrumbs":7,"title":4},"925":{"body":0,"breadcrumbs":5,"title":2},"926":{"body":13,"breadcrumbs":4,"title":1},"927":{"body":19,"breadcrumbs":4,"title":1},"928":{"body":15,"breadcrumbs":4,"title":1},"929":{"body":16,"breadcrumbs":5,"title":2},"93":{"body":1,"breadcrumbs":8,"title":5},"930":{"body":19,"breadcrumbs":4,"title":1},"931":{"body":16,"breadcrumbs":4,"title":1},"932":{"body":56,"breadcrumbs":5,"title":2},"933":{"body":34,"breadcrumbs":5,"title":2},"934":{"body":11,"breadcrumbs":7,"title":4},"935":{"body":27,"breadcrumbs":5,"title":2},"936":{"body":0,"breadcrumbs":6,"title":3},"937":{"body":31,"breadcrumbs":8,"title":5},"938":{"body":38,"breadcrumbs":9,"title":6},"939":{"body":55,"breadcrumbs":10,"title":7},"94":{"body":105,"breadcrumbs":5,"title":2},"940":{"body":0,"breadcrumbs":7,"title":4},"941":{"body":101,"breadcrumbs":7,"title":4},"942":{"body":126,"breadcrumbs":7,"title":4},"943":{"body":131,"breadcrumbs":7,"title":4},"944":{"body":114,"breadcrumbs":7,"title":4},"945":{"body":126,"breadcrumbs":7,"title":4},"946":{"body":0,"breadcrumbs":6,"title":3},"947":{"body":18,"breadcrumbs":7,"title":4},"948":{"body":137,"breadcrumbs":5,"title":2},"949":{"body":0,"breadcrumbs":6,"title":3},"95":{"body":40,"breadcrumbs":4,"title":1},"950":{"body":20,"breadcrumbs":7,"title":4},"951":{"body":100,"breadcrumbs":5,"title":2},"952":{"body":37,"breadcrumbs":5,"title":2},"953":{"body":0,"breadcrumbs":5,"title":2},"954":{"body":26,"breadcrumbs":6,"title":3},"955":{"body":116,"breadcrumbs":5,"title":2},"956":{"body":23,"breadcrumbs":7,"title":4},"957":{"body":0,"breadcrumbs":5,"title":2},"958":{"body":40,"breadcrumbs":6,"title":3},"959":{"body":41,"breadcrumbs":6,"title":3},"96":{"body":6,"breadcrumbs":7,"title":4},"960":{"body":61,"breadcrumbs":6,"title":3},"961":{"body":0,"breadcrumbs":7,"title":4},"962":{"body":41,"breadcrumbs":6,"title":3},"963":{"body":47,"breadcrumbs":5,"title":2},"964":{"body":0,"breadcrumbs":5,"title":2},"965":{"body":140,"breadcrumbs":6,"title":3},"966":{"body":50,"breadcrumbs":7,"title":4},"967":{"body":0,"breadcrumbs":7,"title":4},"968":{"body":15,"breadcrumbs":5,"title":2},"969":{"body":53,"breadcrumbs":6,"title":3},"97":{"body":44,"breadcrumbs":8,"title":5},"970":{"body":75,"breadcrumbs":5,"title":2},"971":{"body":0,"breadcrumbs":7,"title":4},"972":{"body":93,"breadcrumbs":7,"title":4},"973":{"body":75,"breadcrumbs":5,"title":2},"974":{"body":72,"breadcrumbs":4,"title":1},"975":{"body":25,"breadcrumbs":5,"title":2},"976":{"body":7,"breadcrumbs":6,"title":3},"977":{"body":0,"breadcrumbs":7,"title":4},"978":{"body":14,"breadcrumbs":8,"title":5},"979":{"body":35,"breadcrumbs":5,"title":2},"98":{"body":37,"breadcrumbs":9,"title":6},"980":{"body":69,"breadcrumbs":6,"title":3},"981":{"body":38,"breadcrumbs":6,"title":3},"982":{"body":0,"breadcrumbs":5,"title":2},"983":{"body":16,"breadcrumbs":9,"title":6},"984":{"body":21,"breadcrumbs":6,"title":3},"985":{"body":0,"breadcrumbs":6,"title":3},"986":{"body":37,"breadcrumbs":6,"title":3},"987":{"body":40,"breadcrumbs":5,"title":2},"988":{"body":33,"breadcrumbs":7,"title":4},"989":{"body":15,"breadcrumbs":5,"title":2},"99":{"body":59,"breadcrumbs":9,"title":6},"990":{"body":41,"breadcrumbs":6,"title":3},"991":{"body":6,"breadcrumbs":6,"title":3},"992":{"body":16,"breadcrumbs":5,"title":2},"993":{"body":17,"breadcrumbs":4,"title":1},"994":{"body":12,"breadcrumbs":5,"title":2},"995":{"body":0,"breadcrumbs":7,"title":4},"996":{"body":40,"breadcrumbs":5,"title":2},"997":{"body":11,"breadcrumbs":5,"title":2},"998":{"body":26,"breadcrumbs":5,"title":2},"999":{"body":44,"breadcrumbs":5,"title":2}},"docs":{"0":{"body":"Last Updated : 2025-01-02 (Phase 3.A Cleanup Complete) Status : ✅ Primary documentation source (145 files consolidated) Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell, Nickel, and Rust. Note : Architecture Decision Records (ADRs) and design documentation are in docs/ directory. This location contains user-facing, operational, and product documentation.","breadcrumbs":"Home » Provisioning Platform Documentation","id":"0","title":"Provisioning Platform Documentation"},"1":{"body":"","breadcrumbs":"Home » Quick Navigation","id":"1","title":"Quick Navigation"},"10":{"body":"Document Description Workspace Config Architecture Configuration architecture","breadcrumbs":"Home » 🔐 Configuration","id":"10","title":"🔐 Configuration"},"100":{"body":"The orchestrator service manages workflows and deployments: # Check if orchestrator is running (health check)\\ncurl http://localhost:9090/health\\n# Expected: {\\"status\\": \\"healthy\\"} or similar response # If health check fails, check orchestrator logs\\ntail -f /Users/Akasha/project-provisioning/provisioning/platform/orchestrator/data/orchestrator.log # Alternative: Check if orchestrator process is running\\nps aux | grep orchestrator\\n# Expected: Running orchestrator process visible Expected Output : { \\"status\\": \\"healthy\\", \\"uptime\\": \\"0:05:23\\"\\n} If Orchestrator Failed to Start: Check logs and restart manually: cd /Users/Akasha/project-provisioning/provisioning/platform/orchestrator # Check log file\\ncat data/orchestrator.log # Or start orchestrator manually\\n./scripts/start-orchestrator.nu --background # Verify it\'s running\\ncurl http://localhost:9090/health","breadcrumbs":"Installation Validation Guide » Step 3.4: Verify Orchestrator Service","id":"100","title":"Step 3.4: Verify Orchestrator Service"},"1000":{"body":"","breadcrumbs":"Database and Config Architecture » Summary","id":"1000","title":"Summary"},"1001":{"body":"Type : RocksDB (embedded) Location : {{workspace.path}}/.control-center/data/control-center.db No server required : Embedded in control-center process","breadcrumbs":"Database and Config Architecture » Control-Center Database","id":"1001","title":"Control-Center Database"},"1002":{"body":"Type : Filesystem (default) or SurrealDB (production) Location : {{workspace.path}}/.orchestrator/data/queue.rkvs Optional server : SurrealDB for production","breadcrumbs":"Database and Config Architecture » Orchestrator Database","id":"1002","title":"Orchestrator Database"},"1003":{"body":"System defaults (provisioning/config/) Service defaults (platform/{service}/) Workspace config User config Environment variables Runtime overrides","breadcrumbs":"Database and Config Architecture » Configuration Loading","id":"1003","title":"Configuration Loading"},"1004":{"body":"✅ Use workspace-aware paths ✅ Override via environment variables in Docker ✅ Keep secrets in KMS, not config files ✅ Use RocksDB for single-node deployments ✅ Use SurrealDB for distributed/production deployments Related Documentation : Configuration System KMS Architecture Workspace Switching","breadcrumbs":"Database and Config Architecture » Best Practices","id":"1004","title":"Best Practices"},"1005":{"body":"Date : 2025-11-23 Version : 1.0.0 Status : ✅ Implementation Complete","breadcrumbs":"Ecosystem Integration » Prov-Ecosystem & Provctl Integration","id":"1005","title":"Prov-Ecosystem & Provctl Integration"},"1006":{"body":"This document describes the hybrid selective integration of prov-ecosystem and provctl with provisioning, providing access to four critical functionalities: Runtime Abstraction - Unified Docker/Podman/OrbStack/Colima/nerdctl SSH Advanced - Pooling, circuit breaker, retry strategies, distributed operations Backup System - Multi-backend (Restic, Borg, Tar, Rsync) with retention policies GitOps Events - Event-driven deployments from Git","breadcrumbs":"Ecosystem Integration » Overview","id":"1006","title":"Overview"},"1007":{"body":"","breadcrumbs":"Ecosystem Integration » Architecture","id":"1007","title":"Architecture"},"1008":{"body":"┌─────────────────────────────────────────────┐\\n│ Provisioning CLI (provisioning/core/cli/) │\\n│ ✅ 80+ command shortcuts │\\n│ ✅ Domain-driven architecture │\\n│ ✅ Modular CLI commands │\\n└─────────────────────────────────────────────┘ ↓\\n┌─────────────────────────────────────────────┐\\n│ Nushell Integration Layer │\\n│ (provisioning/core/nulib/integrations/) │\\n│ ✅ 5 modules with full type safety │\\n│ ✅ Follows 17 Nushell guidelines │\\n│ ✅ Early return, atomic operations │\\n└─────────────────────────────────────────────┘ ↓\\n┌─────────────────────────────────────────────┐\\n│ Rust Bridge Crate │\\n│ (provisioning/platform/integrations/ │\\n│ provisioning-bridge/) │\\n│ ✅ Zero unsafe code │\\n│ ✅ Idiomatic error handling (Result) │\\n│ ✅ 5 modules (runtime, ssh, backup, etc) │\\n│ ✅ Comprehensive tests │\\n└─────────────────────────────────────────────┘ ↓\\n┌─────────────────────────────────────────────┐\\n│ Prov-Ecosystem & Provctl Crates │\\n│ (../../prov-ecosystem/ & ../../provctl/) │\\n│ ✅ runtime: Container abstraction │\\n│ ✅ init-servs: Service management │\\n│ ✅ backup: Multi-backend backup │\\n│ ✅ gitops: Event-driven automation │\\n│ ✅ provctl-machines: SSH advanced │\\n└─────────────────────────────────────────────┘","breadcrumbs":"Ecosystem Integration » Three-Layer Integration","id":"1008","title":"Three-Layer Integration"},"1009":{"body":"","breadcrumbs":"Ecosystem Integration » Components","id":"1009","title":"Components"},"101":{"body":"You can install the provisioning CLI globally for easier access: # Option A: System-wide installation (requires sudo)\\ncd /Users/Akasha/project-provisioning\\nsudo ./scripts/install-provisioning.sh # Verify installation\\nprovisioning --version\\nprovisioning help # Option B: Add to PATH temporarily (current session only)\\nexport PATH=\\"$PATH:/Users/Akasha/project-provisioning/provisioning/core/cli\\" # Verify\\nprovisioning --version Expected Output : provisioning version 1.0.0 Usage: provisioning [OPTIONS] COMMAND Commands: server - Server management workspace - Workspace management config - Configuration management help - Show help information","breadcrumbs":"Installation Validation Guide » Step 3.5: Install Provisioning CLI (Optional)","id":"101","title":"Step 3.5: Install Provisioning CLI (Optional)"},"1010":{"body":"Location : provisioning/platform/integrations/provisioning-bridge/src/runtime.rs Nushell : provisioning/core/nulib/integrations/runtime.nu Nickel Schema : provisioning/schemas/integrations/runtime.ncl Purpose : Unified interface for Docker, Podman, OrbStack, Colima, nerdctl Key Types : pub enum ContainerRuntime { Docker, Podman, OrbStack, Colima, Nerdctl,\\n} pub struct RuntimeDetector { ... }\\npub struct ComposeAdapter { ... } Nushell Functions : runtime-detect # Auto-detect available runtime\\nruntime-exec # Execute command in detected runtime\\nruntime-compose # Adapt docker-compose for runtime\\nruntime-info # Get runtime details\\nruntime-list # List all available runtimes Benefits : ✅ Eliminates Docker hardcoding ✅ Platform-aware detection ✅ Automatic runtime selection ✅ Docker Compose adaptation","breadcrumbs":"Ecosystem Integration » 1. Runtime Abstraction","id":"1010","title":"1. Runtime Abstraction"},"1011":{"body":"Location : provisioning/platform/integrations/provisioning-bridge/src/ssh.rs Nushell : provisioning/core/nulib/integrations/ssh_advanced.nu Nickel Schema : provisioning/schemas/integrations/ssh_advanced.ncl Purpose : Advanced SSH operations with pooling, circuit breaker, retry strategies Key Types : pub struct SshConfig { ... }\\npub struct SshPool { ... }\\npub enum DeploymentStrategy { Rolling, BlueGreen, Canary,\\n} Nushell Functions : ssh-pool-connect # Create SSH pool connection\\nssh-pool-exec # Execute on SSH pool\\nssh-pool-status # Check pool status\\nssh-deployment-strategies # List strategies\\nssh-retry-config # Configure retry strategy\\nssh-circuit-breaker-status # Check circuit breaker Features : ✅ Connection pooling (90% faster) ✅ Circuit breaker for fault isolation ✅ Three deployment strategies (rolling, blue-green, canary) ✅ Retry strategies (exponential, linear, fibonacci) ✅ Health check integration","breadcrumbs":"Ecosystem Integration » 2. SSH Advanced","id":"1011","title":"2. SSH Advanced"},"1012":{"body":"Location : provisioning/platform/integrations/provisioning-bridge/src/backup.rs Nushell : provisioning/core/nulib/integrations/backup.nu Nickel Schema : provisioning/schemas/integrations/backup.ncl Purpose : Multi-backend backup with retention policies Key Types : pub enum BackupBackend { Restic, Borg, Tar, Rsync, Cpio,\\n} pub struct BackupJob { ... }\\npub struct RetentionPolicy { ... }\\npub struct BackupManager { ... } Nushell Functions : backup-create # Create backup job\\nbackup-restore # Restore from snapshot\\nbackup-list # List snapshots\\nbackup-schedule # Schedule regular backups\\nbackup-retention # Configure retention policy\\nbackup-status # Check backup status Features : ✅ Multiple backends (Restic, Borg, Tar, Rsync, CPIO) ✅ Flexible repositories (local, S3, SFTP, REST, B2) ✅ Retention policies (daily/weekly/monthly/yearly) ✅ Pre/post backup hooks ✅ Automatic scheduling ✅ Compression support","breadcrumbs":"Ecosystem Integration » 3. Backup System","id":"1012","title":"3. Backup System"},"1013":{"body":"Location : provisioning/platform/integrations/provisioning-bridge/src/gitops.rs Nushell : provisioning/core/nulib/integrations/gitops.nu Nickel Schema : provisioning/schemas/integrations/gitops.ncl Purpose : Event-driven deployments from Git Key Types : pub enum GitProvider { GitHub, GitLab, Gitea,\\n} pub struct GitOpsRule { ... }\\npub struct GitOpsOrchestrator { ... } Nushell Functions : gitops-rules # Load rules from config\\ngitops-watch # Watch for Git events\\ngitops-trigger # Manually trigger deployment\\ngitops-event-types # List supported events\\ngitops-rule-config # Configure GitOps rule\\ngitops-deployments # List active deployments\\ngitops-status # Get GitOps status Features : ✅ Event-driven automation (push, PR, webhook, scheduled) ✅ Multi-provider support (GitHub, GitLab, Gitea) ✅ Three deployment strategies ✅ Manual approval workflow ✅ Health check triggers ✅ Audit logging","breadcrumbs":"Ecosystem Integration » 4. GitOps Events","id":"1013","title":"4. GitOps Events"},"1014":{"body":"Location : provisioning/platform/integrations/provisioning-bridge/src/service.rs Nushell : provisioning/core/nulib/integrations/service.nu Nickel Schema : provisioning/schemas/integrations/service.ncl Purpose : Cross-platform service management (systemd, launchd, runit, OpenRC) Nushell Functions : service-install # Install service\\nservice-start # Start service\\nservice-stop # Stop service\\nservice-restart # Restart service\\nservice-status # Get service status\\nservice-list # List all services\\nservice-restart-policy # Configure restart policy\\nservice-detect-init # Detect init system Features : ✅ Multi-platform support (systemd, launchd, runit, OpenRC) ✅ Service file generation ✅ Restart policies (always, on-failure, no) ✅ Health checks ✅ Logging configuration ✅ Metrics collection","breadcrumbs":"Ecosystem Integration » 5. Service Management","id":"1014","title":"5. Service Management"},"1015":{"body":"All implementations follow project standards:","breadcrumbs":"Ecosystem Integration » Code Quality Standards","id":"1015","title":"Code Quality Standards"},"1016":{"body":"✅ Zero unsafe code - #![forbid(unsafe_code)] ✅ Idiomatic error handling - Result pattern ✅ Comprehensive docs - Full rustdoc with examples ✅ Tests - Unit and integration tests for each module ✅ No unwrap() - Only in tests with comments ✅ No clippy warnings - All warnings suppressed","breadcrumbs":"Ecosystem Integration » Rust (provisioning-bridge)","id":"1016","title":"Rust (provisioning-bridge)"},"1017":{"body":"✅ 17 Nushell rules - See Nushell Development Guide ✅ Explicit types - Colon notation: [param: type]: return_type ✅ Early return - Validate inputs immediately ✅ Single purpose - Each function does one thing ✅ Atomic operations - Succeed or fail completely ✅ Pure functions - No hidden side effects","breadcrumbs":"Ecosystem Integration » Nushell","id":"1017","title":"Nushell"},"1018":{"body":"✅ Schema-first - All configs have schemas ✅ Explicit types - Full type annotations ✅ Direct imports - No re-exports ✅ Immutability-first - Mutable only when needed ✅ Lazy evaluation - Efficient computation ✅ Security defaults - TLS enabled, secrets referenced","breadcrumbs":"Ecosystem Integration » Nickel","id":"1018","title":"Nickel"},"1019":{"body":"provisioning/\\n├── platform/integrations/\\n│ └── provisioning-bridge/ # Rust bridge crate\\n│ ├── Cargo.toml\\n│ └── src/\\n│ ├── lib.rs\\n│ ├── error.rs # Error types\\n│ ├── runtime.rs # Runtime abstraction\\n│ ├── ssh.rs # SSH advanced\\n│ ├── backup.rs # Backup system\\n│ ├── gitops.rs # GitOps events\\n│ └── service.rs # Service management\\n│\\n├── core/nulib/lib_provisioning/\\n│ └── integrations/ # Nushell modules\\n│ ├── mod.nu # Module root\\n│ ├── runtime.nu # Runtime functions\\n│ ├── ssh_advanced.nu # SSH functions\\n│ ├── backup.nu # Backup functions\\n│ ├── gitops.nu # GitOps functions\\n│ └── service.nu # Service functions\\n│\\n└── schemas/integrations/ # Nickel schemas ├── main.ncl # Main integration schema ├── runtime.ncl # Runtime schema ├── ssh_advanced.ncl # SSH schema ├── backup.ncl # Backup schema ├── gitops.ncl # GitOps schema └── service.ncl # Service schema","breadcrumbs":"Ecosystem Integration » File Structure","id":"1019","title":"File Structure"},"102":{"body":"[ ] Workspace directories created (.orchestrator, .kms, .providers, .taskservs, .clusters)\\n[ ] Generated TOML files exist in config/generated/\\n[ ] Nickel type-checking passes (no errors)\\n[ ] Workspace utility validation passes\\n[ ] Orchestrator responding to health check\\n[ ] Orchestrator process running\\n[ ] Provisioning CLI accessible and working","breadcrumbs":"Installation Validation Guide » Installation Validation Checklist","id":"102","title":"Installation Validation Checklist"},"1020":{"body":"","breadcrumbs":"Ecosystem Integration » Usage","id":"1020","title":"Usage"},"1021":{"body":"# Auto-detect available runtime\\nlet runtime = (runtime-detect) # Execute command in detected runtime\\nruntime-exec \\"docker ps\\" --check # Adapt compose file\\nlet compose_cmd = (runtime-compose \\"./docker-compose.yml\\")","breadcrumbs":"Ecosystem Integration » Runtime Abstraction","id":"1021","title":"Runtime Abstraction"},"1022":{"body":"# Connect to SSH pool\\nlet pool = (ssh-pool-connect \\"server01.example.com\\" \\"root\\" --port 22) # Execute distributed command\\nlet results = (ssh-pool-exec $hosts \\"systemctl status provisioning\\" --strategy parallel) # Check circuit breaker\\nssh-circuit-breaker-status","breadcrumbs":"Ecosystem Integration » SSH Advanced","id":"1022","title":"SSH Advanced"},"1023":{"body":"# Schedule regular backups\\nbackup-schedule \\"daily-app-backup\\" \\"0 2 * * *\\" \\\\ --paths [\\"/opt/app\\" \\"/var/lib/app\\"] \\\\ --backend \\"restic\\" # Create one-time backup\\nbackup-create \\"full-backup\\" [\\"/home\\" \\"/opt\\"] \\\\ --backend \\"restic\\" \\\\ --repository \\"/backups\\" # Restore from snapshot\\nbackup-restore \\"snapshot-001\\" --restore_path \\".\\"","breadcrumbs":"Ecosystem Integration » Backup System","id":"1023","title":"Backup System"},"1024":{"body":"# Load GitOps rules\\nlet rules = (gitops-rules \\"./gitops-rules.yaml\\") # Watch for Git events\\ngitops-watch --provider \\"github\\" --webhook-port 8080 # Manually trigger deployment\\ngitops-trigger \\"deploy-app\\" --environment \\"prod\\"","breadcrumbs":"Ecosystem Integration » GitOps Events","id":"1024","title":"GitOps Events"},"1025":{"body":"# Install service\\nservice-install \\"my-app\\" \\"/usr/local/bin/my-app\\" \\\\ --user \\"appuser\\" \\\\ --working-dir \\"/opt/myapp\\" # Start service\\nservice-start \\"my-app\\" # Check status\\nservice-status \\"my-app\\" # Set restart policy\\nservice-restart-policy \\"my-app\\" --policy \\"on-failure\\" --delay-secs 5","breadcrumbs":"Ecosystem Integration » Service Management","id":"1025","title":"Service Management"},"1026":{"body":"","breadcrumbs":"Ecosystem Integration » Integration Points","id":"1026","title":"Integration Points"},"1027":{"body":"Existing provisioning CLI will gain new command tree: provisioning runtime detect|exec|compose|info|list\\nprovisioning ssh pool connect|exec|status|strategies\\nprovisioning backup create|restore|list|schedule|retention|status\\nprovisioning gitops rules|watch|trigger|events|config|deployments|status\\nprovisioning service install|start|stop|restart|status|list|policy|detect-init","breadcrumbs":"Ecosystem Integration » CLI Commands","id":"1027","title":"CLI Commands"},"1028":{"body":"All integrations use Nickel schemas from provisioning/schemas/integrations/: let { IntegrationConfig } = import \\"provisioning/integrations.ncl\\" in\\n{ runtime = { ... }, ssh = { ... }, backup = { ... }, gitops = { ... }, service = { ... },\\n}","breadcrumbs":"Ecosystem Integration » Configuration","id":"1028","title":"Configuration"},"1029":{"body":"Nushell plugins can be created for performance-critical operations: provisioning plugin list\\n# [installed]\\n# nu_plugin_runtime\\n# nu_plugin_ssh_advanced\\n# nu_plugin_backup\\n# nu_plugin_gitops","breadcrumbs":"Ecosystem Integration » Plugins","id":"1029","title":"Plugins"},"103":{"body":"This section covers common issues and solutions.","breadcrumbs":"Installation Validation Guide » Section 4: Troubleshooting","id":"103","title":"Section 4: Troubleshooting"},"1030":{"body":"","breadcrumbs":"Ecosystem Integration » Testing","id":"1030","title":"Testing"},"1031":{"body":"cd provisioning/platform/integrations/provisioning-bridge\\ncargo test --all\\ncargo test -p provisioning-bridge --lib\\ncargo test -p provisioning-bridge --doc","breadcrumbs":"Ecosystem Integration » Rust Tests","id":"1031","title":"Rust Tests"},"1032":{"body":"nu provisioning/core/nulib/integrations/runtime.nu\\nnu provisioning/core/nulib/integrations/ssh_advanced.nu","breadcrumbs":"Ecosystem Integration » Nushell Tests","id":"1032","title":"Nushell Tests"},"1033":{"body":"Operation Performance Runtime detection ~50 ms (cached: ~1 ms) SSH pool init ~100 ms per connection SSH command exec 90% faster with pooling Backup initiation <100 ms GitOps rule load <10 ms","breadcrumbs":"Ecosystem Integration » Performance","id":"1033","title":"Performance"},"1034":{"body":"If you want to fully migrate from provisioning to provctl + prov-ecosystem: Phase 1 : Use integrations for new features (runtime, backup, gitops) Phase 2 : Migrate SSH operations to provctl-machines Phase 3 : Adopt provctl CLI for machine orchestration Phase 4 : Use prov-ecosystem crates directly where beneficial Currently we implement Phase 1 with selective integration.","breadcrumbs":"Ecosystem Integration » Migration Path","id":"1034","title":"Migration Path"},"1035":{"body":"✅ Implement : Integrate bridge into provisioning CLI ⏳ Document : Add to docs/user/ for end users ⏳ Examples : Create example configurations ⏳ Tests : Integration tests with real providers ⏳ Plugins : Nushell plugins for performance","breadcrumbs":"Ecosystem Integration » Next Steps","id":"1035","title":"Next Steps"},"1036":{"body":"Rust Bridge : provisioning/platform/integrations/provisioning-bridge/ Nushell Integration : provisioning/core/nulib/integrations/ Nickel Schemas : provisioning/schemas/integrations/ Prov-Ecosystem : /Users/Akasha/Development/prov-ecosystem/ Provctl : /Users/Akasha/Development/provctl/ Rust Guidelines : See Rust Development Nushell Guidelines : See Nushell Development Nickel Guidelines : See Nickel Module System","breadcrumbs":"Ecosystem Integration » References","id":"1036","title":"References"},"1037":{"body":"This document describes the package-based architecture implemented for the provisioning system, replacing hardcoded extension paths with a flexible module discovery and loading system using Nickel for type-safe configuration.","breadcrumbs":"Package and Loader System » Nickel Package and Module Loader System","id":"1037","title":"Nickel Package and Module Loader System"},"1038":{"body":"The system consists of two main components: Core Nickel Package : Distributable core provisioning schemas with type safety Module Loader System : Dynamic discovery and loading of extensions","breadcrumbs":"Package and Loader System » Architecture Overview","id":"1038","title":"Architecture Overview"},"1039":{"body":"Type-Safe Configuration : Nickel ensures configuration validity at evaluation time Clean Separation : Core package is self-contained and distributable Plug-and-Play Extensions : Taskservs, providers, and clusters can be loaded dynamically Version Management : Core package and extensions can be versioned independently Developer Friendly : Easy workspace setup and module management with lazy evaluation","breadcrumbs":"Package and Loader System » Benefits","id":"1039","title":"Benefits"},"104":{"body":"Symptoms : ./provisioning/bootstrap/install.sh: line X: nu: command not found Solution : Install Nushell (see Step 1.2) Verify installation: nu --version Retry bootstrap script","breadcrumbs":"Installation Validation Guide » Issue: \\"Nushell not found\\"","id":"104","title":"Issue: \\"Nushell not found\\""},"1040":{"body":"","breadcrumbs":"Package and Loader System » Components","id":"1040","title":"Components"},"1041":{"body":"Contains fundamental schemas for provisioning: main.ncl - Primary provisioning configuration server.ncl - Server definitions and schemas defaults.ncl - Default configurations lib.ncl - Common library schemas dependencies.ncl - Dependency management schemas Key Features: No hardcoded extension paths Self-contained and distributable Type-safe package-based imports Lazy evaluation of expensive computations","breadcrumbs":"Package and Loader System » 1. Core Nickel Package (/provisioning/schemas/)","id":"1041","title":"1. Core Nickel Package (/provisioning/schemas/)"},"1042":{"body":"Discovery Commands # Discover available modules\\nmodule-loader discover taskservs # List all taskservs\\nmodule-loader discover providers --format yaml # List providers as YAML\\nmodule-loader discover clusters redis # Search for redis clusters Supported Module Types Taskservs : Infrastructure services (kubernetes, redis, postgres, etc.) Providers : Cloud providers (upcloud, aws, local) Clusters : Complete configurations (buildkit, web, oci-reg)","breadcrumbs":"Package and Loader System » 2. Module Discovery System","id":"1042","title":"2. Module Discovery System"},"1043":{"body":"Loading Commands # Load modules into workspace\\nmodule-loader load taskservs . [kubernetes, cilium, containerd]\\nmodule-loader load providers . [upcloud]\\nmodule-loader load clusters . [buildkit] # Initialize workspace with modules\\nmodule-loader init workspace/infra/production \\\\ --taskservs [kubernetes, cilium] \\\\ --providers [upcloud] Generated Files taskservs.ncl - Auto-generated taskserv imports providers.ncl - Auto-generated provider imports clusters.ncl - Auto-generated cluster imports .manifest/*.yaml - Module loading manifests","breadcrumbs":"Package and Loader System » 3. Module Loading System","id":"1043","title":"3. Module Loading System"},"1044":{"body":"","breadcrumbs":"Package and Loader System » Workspace Structure","id":"1044","title":"Workspace Structure"},"1045":{"body":"workspace/infra/my-project/\\n├── kcl.mod # Package dependencies\\n├── servers.ncl # Main server configuration\\n├── taskservs.ncl # Auto-generated taskserv imports\\n├── providers.ncl # Auto-generated provider imports\\n├── clusters.ncl # Auto-generated cluster imports\\n├── .taskservs/ # Loaded taskserv modules\\n│ ├── kubernetes/\\n│ ├── cilium/\\n│ └── containerd/\\n├── .providers/ # Loaded provider modules\\n│ └── upcloud/\\n├── .clusters/ # Loaded cluster modules\\n│ └── buildkit/\\n├── .manifest/ # Module manifests\\n│ ├── taskservs.yaml\\n│ ├── providers.yaml\\n│ └── clusters.yaml\\n├── data/ # Runtime data\\n├── tmp/ # Temporary files\\n├── resources/ # Resource definitions\\n└── clusters/ # Cluster configurations","breadcrumbs":"Package and Loader System » New Workspace Layout","id":"1045","title":"New Workspace Layout"},"1046":{"body":"Before (Old System) # Hardcoded relative paths\\nimport ../../../kcl/server as server\\nimport ../../../extensions/taskservs/kubernetes/kcl/kubernetes as k8s After (New System) # Package-based imports\\nimport provisioning.server as server # Auto-generated module imports (after loading)\\nimport .taskservs.nclubernetes.kubernetes as k8s","breadcrumbs":"Package and Loader System » Import Patterns","id":"1046","title":"Import Patterns"},"1047":{"body":"","breadcrumbs":"Package and Loader System » Package Distribution","id":"1047","title":"Package Distribution"},"1048":{"body":"# Build distributable package\\n./provisioning/tools/kcl-packager.nu build --version 1.0.0 # Install locally\\n./provisioning/tools/kcl-packager.nu install dist/provisioning-1.0.0.tar.gz # Create release\\n./provisioning/tools/kcl-packager.nu build --format tar.gz --include-docs","breadcrumbs":"Package and Loader System » Building Core Package","id":"1048","title":"Building Core Package"},"1049":{"body":"Method 1: Local Installation (Recommended for development) [dependencies]\\nprovisioning = { path = \\"~/.kcl/packages/provisioning\\", version = \\"0.0.1\\" } Method 2: Git Repository (For distributed teams) [dependencies]\\nprovisioning = { git = \\"https://github.com/your-org/provisioning-kcl\\", version = \\"v0.0.1\\" } Method 3: KCL Registry (When available) [dependencies]\\nprovisioning = { version = \\"0.0.1\\" }","breadcrumbs":"Package and Loader System » Package Installation Methods","id":"1049","title":"Package Installation Methods"},"105":{"body":"Symptoms : ⚙️ Stage 4: Validating Configuration\\nError: Nickel configuration validation failed Solution : Check Nickel syntax: nickel typecheck config/config.ncl Review error message for specific issue Edit config file: vim config/config.ncl Run bootstrap again","breadcrumbs":"Installation Validation Guide » Issue: \\"Nickel configuration validation failed\\"","id":"105","title":"Issue: \\"Nickel configuration validation failed\\""},"1050":{"body":"","breadcrumbs":"Package and Loader System » Developer Workflows","id":"1050","title":"Developer Workflows"},"1051":{"body":"# Create workspace from template\\ncp -r provisioning/templates/workspaces/kubernetes ./my-k8s-cluster\\ncd my-k8s-cluster # Initialize with modules\\nworkspace-init.nu . init # Load required modules\\nmodule-loader load taskservs . [kubernetes, cilium, containerd]\\nmodule-loader load providers . [upcloud] # Validate and deploy\\nkcl run servers.ncl\\nprovisioning server create --infra . --check","breadcrumbs":"Package and Loader System » 1. New Project Setup","id":"1051","title":"1. New Project Setup"},"1052":{"body":"# Create new taskserv\\nmkdir -p extensions/taskservs/my-service/kcl\\ncd extensions/taskservs/my-service/kcl # Initialize KCL module\\nkcl mod init my-service\\necho \'provisioning = { path = \\"~/.kcl/packages/provisioning\\", version = \\"0.0.1\\" }\' >> kcl.mod # Develop and test\\nmodule-loader discover taskservs # Should find your service","breadcrumbs":"Package and Loader System » 2. Extension Development","id":"1052","title":"2. Extension Development"},"1053":{"body":"# Analyze existing workspace\\nworkspace-migrate.nu workspace/infra/old-project dry-run # Perform migration\\nworkspace-migrate.nu workspace/infra/old-project # Verify migration\\nmodule-loader validate workspace/infra/old-project","breadcrumbs":"Package and Loader System » 3. Workspace Migration","id":"1053","title":"3. Workspace Migration"},"1054":{"body":"# Development environment\\ncd workspace/infra/dev\\nmodule-loader load taskservs . [redis, postgres]\\nmodule-loader load providers . [local] # Production environment\\ncd workspace/infra/prod\\nmodule-loader load taskservs . [redis, postgres, kubernetes, monitoring]\\nmodule-loader load providers . [upcloud, aws] # Multi-cloud","breadcrumbs":"Package and Loader System » 4. Multi-Environment Management","id":"1054","title":"4. Multi-Environment Management"},"1055":{"body":"","breadcrumbs":"Package and Loader System » Module Management","id":"1055","title":"Module Management"},"1056":{"body":"# List loaded modules\\nmodule-loader list taskservs .\\nmodule-loader list providers .\\nmodule-loader list clusters . # Validate workspace\\nmodule-loader validate . # Show workspace info\\nworkspace-init.nu . info","breadcrumbs":"Package and Loader System » Listing and Validation","id":"1056","title":"Listing and Validation"},"1057":{"body":"# Remove specific modules\\nmodule-loader unload taskservs . redis\\nmodule-loader unload providers . aws # This regenerates import files automatically","breadcrumbs":"Package and Loader System » Unloading Modules","id":"1057","title":"Unloading Modules"},"1058":{"body":"# Get detailed module info\\nmodule-loader info taskservs kubernetes\\nmodule-loader info providers upcloud\\nmodule-loader info clusters buildkit","breadcrumbs":"Package and Loader System » Module Information","id":"1058","title":"Module Information"},"1059":{"body":"","breadcrumbs":"Package and Loader System » CI/CD Integration","id":"1059","title":"CI/CD Integration"},"106":{"body":"Symptoms : ❌ Docker is required but not installed Solution : Install Docker: Docker installation guide Verify: docker --version Retry bootstrap script","breadcrumbs":"Installation Validation Guide » Issue: \\"Docker not installed\\"","id":"106","title":"Issue: \\"Docker not installed\\""},"1060":{"body":"#!/usr/bin/env nu\\n# deploy-pipeline.nu # Install specific versions\\nkcl-packager.nu install --version $env.PROVISIONING_VERSION # Load production modules\\nmodule-loader init $env.WORKSPACE_PATH \\\\ --taskservs $env.REQUIRED_TASKSERVS \\\\ --providers [$env.CLOUD_PROVIDER] # Validate configuration\\nmodule-loader validate $env.WORKSPACE_PATH # Deploy infrastructure\\nprovisioning server create --infra $env.WORKSPACE_PATH","breadcrumbs":"Package and Loader System » Pipeline Example","id":"1060","title":"Pipeline Example"},"1061":{"body":"","breadcrumbs":"Package and Loader System » Troubleshooting","id":"1061","title":"Troubleshooting"},"1062":{"body":"Module Import Errors Error: module not found Solution : Verify modules are loaded and regenerate imports module-loader list taskservs .\\nmodule-loader load taskservs . [kubernetes, cilium, containerd] Provider Configuration Issues Solution : Check provider-specific configuration in .providers/ directory KCL Compilation Errors Solution : Verify core package installation and kcl.mod configuration kcl-packager.nu install --version latest\\nkcl run --dry-run servers.ncl","breadcrumbs":"Package and Loader System » Common Issues","id":"1062","title":"Common Issues"},"1063":{"body":"# Show workspace structure\\ntree -a workspace/infra/my-project # Check generated imports\\ncat workspace/infra/my-project/taskservs.ncl # Validate KCL files\\nnickel typecheck workspace/infra/my-project/*.ncl # Show module manifests\\ncat workspace/infra/my-project/.manifest/taskservs.yaml","breadcrumbs":"Package and Loader System » Debug Commands","id":"1063","title":"Debug Commands"},"1064":{"body":"","breadcrumbs":"Package and Loader System » Best Practices","id":"1064","title":"Best Practices"},"1065":{"body":"Pin core package versions in production Use semantic versioning for extensions Test compatibility before upgrading","breadcrumbs":"Package and Loader System » 1. Version Management","id":"1065","title":"1. Version Management"},"1066":{"body":"Load only required modules to keep workspaces clean Use meaningful workspace names Document required modules in README","breadcrumbs":"Package and Loader System » 2. Module Organization","id":"1066","title":"2. Module Organization"},"1067":{"body":"Exclude .manifest/ and data/ from version control Use secrets management for sensitive configuration Validate modules before loading in production","breadcrumbs":"Package and Loader System » 3. Security","id":"1067","title":"3. Security"},"1068":{"body":"Load modules at workspace initialization, not runtime Cache discovery results when possible Use parallel loading for multiple modules","breadcrumbs":"Package and Loader System » 4. Performance","id":"1068","title":"4. Performance"},"1069":{"body":"For existing workspaces, follow these steps:","breadcrumbs":"Package and Loader System » Migration Guide","id":"1069","title":"Migration Guide"},"107":{"body":"Symptoms : ⚠️ Configuration export encountered issues (may continue) Solution : Check Nushell library paths: nu -c \\"use provisioning/core/nulib/lib_provisioning/config/export.nu *\\" Verify export library exists: ls provisioning/core/nulib/lib_provisioning/config/export.nu Re-export manually: cd /Users/Akasha/project-provisioning\\nnu -c \\" use provisioning/core/nulib/lib_provisioning/config/export.nu * export-all-configs \'workspaces/workspace_librecloud\'\\n\\"","breadcrumbs":"Installation Validation Guide » Issue: \\"Configuration export failed\\"","id":"107","title":"Issue: \\"Configuration export failed\\""},"1070":{"body":"cp -r workspace/infra/existing workspace/infra/existing-backup","breadcrumbs":"Package and Loader System » 1. Backup Current Workspace","id":"1070","title":"1. Backup Current Workspace"},"1071":{"body":"workspace-migrate.nu workspace/infra/existing dry-run","breadcrumbs":"Package and Loader System » 2. Analyze Migration Requirements","id":"1071","title":"2. Analyze Migration Requirements"},"1072":{"body":"workspace-migrate.nu workspace/infra/existing","breadcrumbs":"Package and Loader System » 3. Perform Migration","id":"1072","title":"3. Perform Migration"},"1073":{"body":"cd workspace/infra/existing\\nmodule-loader load taskservs . [kubernetes, cilium]\\nmodule-loader load providers . [upcloud]","breadcrumbs":"Package and Loader System » 4. Load Required Modules","id":"1073","title":"4. Load Required Modules"},"1074":{"body":"kcl run servers.ncl\\nmodule-loader validate .","breadcrumbs":"Package and Loader System » 5. Test and Validate","id":"1074","title":"5. Test and Validate"},"1075":{"body":"provisioning server create --infra . --check","breadcrumbs":"Package and Loader System » 6. Deploy","id":"1075","title":"6. Deploy"},"1076":{"body":"Registry-based module distribution Module dependency resolution Automatic version updates Module templates and scaffolding Integration with external package managers","breadcrumbs":"Package and Loader System » Future Enhancements","id":"1076","title":"Future Enhancements"},"1077":{"body":"","breadcrumbs":"Config Loading Architecture » Modular Configuration Loading Architecture","id":"1077","title":"Modular Configuration Loading Architecture"},"1078":{"body":"The configuration system has been refactored into modular components to achieve 2-3x performance improvements for regular commands while maintaining full functionality for complex operations.","breadcrumbs":"Config Loading Architecture » Overview","id":"1078","title":"Overview"},"1079":{"body":"","breadcrumbs":"Config Loading Architecture » Architecture Layers","id":"1079","title":"Architecture Layers"},"108":{"body":"Symptoms : 🚀 Stage 6: Initializing Orchestrator Service\\n⚠️ Orchestrator may not have started (check logs) curl http://localhost:9090/health\\n# Connection refused Solution : Check for port conflicts: lsof -i :9090 If port 9090 is in use, either: Stop the conflicting service Change orchestrator port in configuration Check logs: tail -f provisioning/platform/orchestrator/data/orchestrator.log Start manually: cd provisioning/platform/orchestrator && ./scripts/start-orchestrator.nu --background Verify: curl http://localhost:9090/health","breadcrumbs":"Installation Validation Guide » Issue: \\"Orchestrator didn\'t start\\"","id":"108","title":"Issue: \\"Orchestrator didn\'t start\\""},"1080":{"body":"File : loader-minimal.nu (~150 lines) Contains only essential functions needed for: Workspace detection Environment determination Project root discovery Fast path detection Exported Functions : get-active-workspace - Get current workspace detect-current-environment - Determine dev/test/prod get-project-root - Find project directory get-defaults-config-path - Path to default config check-if-sops-encrypted - SOPS file detection find-sops-config-path - Locate SOPS config Used by : Help commands (help infrastructure, help workspace, etc.) Status commands Workspace listing Quick reference operations","breadcrumbs":"Config Loading Architecture » Layer 1: Minimal Loader (0.023s)","id":"1080","title":"Layer 1: Minimal Loader (0.023s)"},"1081":{"body":"File : loader-lazy.nu (~80 lines) Smart loader that decides which configuration to load: Fast path for help/status commands Full path for operations that need config Key Function : command-needs-full-config - Determines if full config required","breadcrumbs":"Config Loading Architecture » Layer 2: Lazy Loader (decision layer)","id":"1081","title":"Layer 2: Lazy Loader (decision layer)"},"1082":{"body":"File : loader.nu (1990 lines) Original comprehensive loader that handles: Hierarchical config loading Variable interpolation Config validation Provider configuration Platform configuration Used by : Server creation Infrastructure operations Deployment commands Anything needing full config","breadcrumbs":"Config Loading Architecture » Layer 3: Full Loader (0.091s)","id":"1082","title":"Layer 3: Full Loader (0.091s)"},"1083":{"body":"","breadcrumbs":"Config Loading Architecture » Performance Characteristics","id":"1083","title":"Performance Characteristics"},"1084":{"body":"Operation Time Notes Workspace detection 0.023s 23ms for minimal load Full config load 0.091s ~4x slower than minimal Help command 0.040s Uses minimal loader only Status command 0.030s Fast path, no full config Server operations 0.150s+ Requires full config load","breadcrumbs":"Config Loading Architecture » Benchmarks","id":"1084","title":"Benchmarks"},"1085":{"body":"Help commands : 30-40% faster (40ms vs 60ms with full config) Workspace operations : 50% faster (uses minimal loader) Status checks : Nearly instant (23ms)","breadcrumbs":"Config Loading Architecture » Performance Gains","id":"1085","title":"Performance Gains"},"1086":{"body":"Help/Status Commands ↓\\nloader-lazy.nu ↓\\nloader-minimal.nu (workspace, environment detection) ↓ (no further deps) Infrastructure/Server Commands ↓\\nloader-lazy.nu ↓\\nloader.nu (full configuration) ├── loader-minimal.nu (for workspace detection) ├── Interpolation functions ├── Validation functions └── Config merging logic","breadcrumbs":"Config Loading Architecture » Module Dependency Graph","id":"1086","title":"Module Dependency Graph"},"1087":{"body":"","breadcrumbs":"Config Loading Architecture » Usage Examples","id":"1087","title":"Usage Examples"},"1088":{"body":"# Uses minimal loader - 23ms\\n./provisioning help infrastructure\\n./provisioning workspace list\\n./provisioning version","breadcrumbs":"Config Loading Architecture » Fast Path (Help Commands)","id":"1088","title":"Fast Path (Help Commands)"},"1089":{"body":"# Uses minimal loader with some full config - ~50ms\\n./provisioning status\\n./provisioning workspace active\\n./provisioning config validate","breadcrumbs":"Config Loading Architecture » Medium Path (Status Operations)","id":"1089","title":"Medium Path (Status Operations)"},"109":{"body":"Symptoms : Stage 3: Creating Directory Structure\\n[sudo] password for user: Solution : This is normal if creating directories in system locations Enter your sudo password when prompted Or: Run bootstrap from home directory instead","breadcrumbs":"Installation Validation Guide » Issue: \\"Sudo password prompt during bootstrap\\"","id":"109","title":"Issue: \\"Sudo password prompt during bootstrap\\""},"1090":{"body":"# Uses full loader - ~150ms\\n./provisioning server create --infra myinfra\\n./provisioning taskserv create kubernetes\\n./provisioning workflow submit batch.yaml","breadcrumbs":"Config Loading Architecture » Full Path (Infrastructure Operations)","id":"1090","title":"Full Path (Infrastructure Operations)"},"1091":{"body":"","breadcrumbs":"Config Loading Architecture » Implementation Details","id":"1091","title":"Implementation Details"},"1092":{"body":"# In loader-lazy.nu\\nlet is_fast_command = ( $command == \\"help\\" or $command == \\"status\\" or $command == \\"version\\"\\n) if $is_fast_command { # Use minimal loader only (0.023s) get-minimal-config\\n} else { # Load full configuration (0.091s) load-provisioning-config\\n}","breadcrumbs":"Config Loading Architecture » Lazy Loading Decision Logic","id":"1092","title":"Lazy Loading Decision Logic"},"1093":{"body":"The minimal loader returns a lightweight config record: { workspace: { name: \\"librecloud\\" path: \\"/path/to/workspace_librecloud\\" } environment: \\"dev\\" debug: false paths: { base: \\"/path/to/workspace_librecloud\\" }\\n} This is sufficient for: Workspace identification Environment determination Path resolution Help text generation","breadcrumbs":"Config Loading Architecture » Minimal Config Structure","id":"1093","title":"Minimal Config Structure"},"1094":{"body":"The full loader returns comprehensive configuration with: Workspace settings Provider configurations Platform settings Interpolated variables Validation results Environment-specific overrides","breadcrumbs":"Config Loading Architecture » Full Config Structure","id":"1094","title":"Full Config Structure"},"1095":{"body":"","breadcrumbs":"Config Loading Architecture » Migration Path","id":"1095","title":"Migration Path"},"1096":{"body":"Commands are already categorized (help, workspace, server, etc.) Help system uses fast path (minimal loader) Infrastructure commands use full path (full loader) No changes needed to command implementations","breadcrumbs":"Config Loading Architecture » For CLI Commands","id":"1096","title":"For CLI Commands"},"1097":{"body":"When creating new modules: Check if full config is needed If not, use loader-minimal.nu functions only If yes, use get-config from main config accessor","breadcrumbs":"Config Loading Architecture » For New Modules","id":"1097","title":"For New Modules"},"1098":{"body":"","breadcrumbs":"Config Loading Architecture » Future Optimizations","id":"1098","title":"Future Optimizations"},"1099":{"body":"Cache full config for 60 seconds Reuse config across related commands Potential: Additional 50% improvement","breadcrumbs":"Config Loading Architecture » Phase 2: Per-Command Config Caching","id":"1099","title":"Phase 2: Per-Command Config Caching"},"11":{"body":"Document Description Quickstart Cheatsheet Command shortcuts OCI Quick Reference OCI operations","breadcrumbs":"Home » 📦 Quick References","id":"11","title":"📦 Quick References"},"110":{"body":"Symptoms : bash: ./provisioning/bootstrap/install.sh: Permission denied Solution : # Make script executable\\nchmod +x /Users/Akasha/project-provisioning/provisioning/bootstrap/install.sh # Retry\\n./provisioning/bootstrap/install.sh","breadcrumbs":"Installation Validation Guide » Issue: \\"Permission denied\\" on binary","id":"110","title":"Issue: \\"Permission denied\\" on binary"},"1100":{"body":"Create thin config profiles for common scenarios Pre-loaded templates for workspace/infra combinations Fast switching between profiles","breadcrumbs":"Config Loading Architecture » Phase 3: Configuration Profiles","id":"1100","title":"Phase 3: Configuration Profiles"},"1101":{"body":"Load workspace and provider configs in parallel Async validation and interpolation Potential: 30% improvement for full config load","breadcrumbs":"Config Loading Architecture » Phase 4: Parallel Config Loading","id":"1101","title":"Phase 4: Parallel Config Loading"},"1102":{"body":"","breadcrumbs":"Config Loading Architecture » Maintenance Notes","id":"1102","title":"Maintenance Notes"},"1103":{"body":"Only add if: Used by help/status commands Doesn\'t require full config Performance-critical path","breadcrumbs":"Config Loading Architecture » Adding New Functions to Minimal Loader","id":"1103","title":"Adding New Functions to Minimal Loader"},"1104":{"body":"Changes are backward compatible Validate against existing config files Update tests in test suite","breadcrumbs":"Config Loading Architecture » Modifying Full Loader","id":"1104","title":"Modifying Full Loader"},"1105":{"body":"# Benchmark minimal loader\\ntime nu -n -c \\"use loader-minimal.nu *; get-active-workspace\\" # Benchmark full loader\\ntime nu -c \\"use config/accessor.nu *; get-config\\" # Benchmark help command\\ntime ./provisioning help infrastructure","breadcrumbs":"Config Loading Architecture » Performance Testing","id":"1105","title":"Performance Testing"},"1106":{"body":"loader.nu - Full configuration loading system loader-minimal.nu - Fast path loader loader-lazy.nu - Smart loader decision logic config/ARCHITECTURE.md - Configuration architecture details","breadcrumbs":"Config Loading Architecture » See Also","id":"1106","title":"See Also"},"1107":{"body":"Status : Practical Developer Guide Last Updated : 2025-12-15 Purpose : Copy-paste ready examples, validatable patterns, runnable test cases","breadcrumbs":"Nickel Executable Examples » Nickel Executable Examples & Test Cases","id":"1107","title":"Nickel Executable Examples & Test Cases"},"1108":{"body":"","breadcrumbs":"Nickel Executable Examples » Setup: Run Examples Locally","id":"1108","title":"Setup: Run Examples Locally"},"1109":{"body":"# Install Nickel\\nbrew install nickel\\n# or from source: https://nickel-lang.org/getting-started/ # Verify installation\\nnickel --version # Should be 1.0+","breadcrumbs":"Nickel Executable Examples » Prerequisites","id":"1109","title":"Prerequisites"},"111":{"body":"After successful installation validation, you can:","breadcrumbs":"Installation Validation Guide » Section 5: Next Steps","id":"111","title":"Section 5: Next Steps"},"1110":{"body":"mkdir -p ~/nickel-examples/{simple,complex,production}\\ncd ~/nickel-examples","breadcrumbs":"Nickel Executable Examples » Directory Structure for Examples","id":"1110","title":"Directory Structure for Examples"},"1111":{"body":"","breadcrumbs":"Nickel Executable Examples » Example 1: Simple Server Configuration (Executable)","id":"1111","title":"Example 1: Simple Server Configuration (Executable)"},"1112":{"body":"cat > simple/server_contracts.ncl << \'EOF\'\\n{ ServerConfig = { name | String, cpu_cores | Number, memory_gb | Number, zone | String, },\\n}\\nEOF","breadcrumbs":"Nickel Executable Examples » Step 1: Create Contract File","id":"1112","title":"Step 1: Create Contract File"},"1113":{"body":"cat > simple/server_defaults.ncl << \'EOF\'\\n{ web_server = { name = \\"web-01\\", cpu_cores = 4, memory_gb = 8, zone = \\"us-nyc1\\", }, database_server = { name = \\"db-01\\", cpu_cores = 8, memory_gb = 16, zone = \\"us-nyc1\\", }, cache_server = { name = \\"cache-01\\", cpu_cores = 2, memory_gb = 4, zone = \\"us-nyc1\\", },\\n}\\nEOF","breadcrumbs":"Nickel Executable Examples » Step 2: Create Defaults File","id":"1113","title":"Step 2: Create Defaults File"},"1114":{"body":"cat > simple/server.ncl << \'EOF\'\\nlet contracts = import \\"./server_contracts.ncl\\" in\\nlet defaults = import \\"./server_defaults.ncl\\" in { defaults = defaults, # Level 1: Maker functions (90% of use cases) make_server | not_exported = fun overrides => let base = defaults.web_server in base & overrides, # Level 2: Pre-built instances (inspection/reference) DefaultWebServer = defaults.web_server, DefaultDatabaseServer = defaults.database_server, DefaultCacheServer = defaults.cache_server, # Level 3: Custom combinations production_web_server = defaults.web_server & { cpu_cores = 8, memory_gb = 16, }, production_database_stack = [ defaults.database_server & { name = \\"db-01\\", zone = \\"us-nyc1\\" }, defaults.database_server & { name = \\"db-02\\", zone = \\"eu-fra1\\" }, ],\\n}\\nEOF","breadcrumbs":"Nickel Executable Examples » Step 3: Create Main Module with Hybrid Interface","id":"1114","title":"Step 3: Create Main Module with Hybrid Interface"},"1115":{"body":"cd simple/ # Export to JSON\\nnickel export server.ncl --format json | jq . # Expected output:\\n# {\\n# \\"defaults\\": { ... },\\n# \\"DefaultWebServer\\": { \\"name\\": \\"web-01\\", \\"cpu_cores\\": 4, ... },\\n# \\"DefaultDatabaseServer\\": { ... },\\n# \\"DefaultCacheServer\\": { ... },\\n# \\"production_web_server\\": { \\"name\\": \\"web-01\\", \\"cpu_cores\\": 8, ... },\\n# \\"production_database_stack\\": [ ... ]\\n# } # Verify specific fields\\nnickel export server.ncl --format json | jq \'.production_web_server.cpu_cores\'\\n# Output: 8","breadcrumbs":"Nickel Executable Examples » Test: Export and Validate JSON","id":"1115","title":"Test: Export and Validate JSON"},"1116":{"body":"cat > simple/consumer.ncl << \'EOF\'\\nlet server = import \\"./server.ncl\\" in { # Use maker function staging_web = server.make_server { name = \\"staging-web\\", zone = \\"eu-fra1\\", }, # Reference defaults default_db = server.DefaultDatabaseServer, # Use pre-built production_stack = server.production_database_stack,\\n}\\nEOF # Export and verify\\nnickel export consumer.ncl --format json | jq \'.staging_web\'","breadcrumbs":"Nickel Executable Examples » Usage in Consumer Module","id":"1116","title":"Usage in Consumer Module"},"1117":{"body":"","breadcrumbs":"Nickel Executable Examples » Example 2: Complex Provider Extension (Production Pattern)","id":"1117","title":"Example 2: Complex Provider Extension (Production Pattern)"},"1118":{"body":"mkdir -p complex/upcloud/{contracts,defaults,main}\\ncd complex/upcloud","breadcrumbs":"Nickel Executable Examples » Create Provider Structure","id":"1118","title":"Create Provider Structure"},"1119":{"body":"cat > upcloud_contracts.ncl << \'EOF\'\\n{ StorageBackup = { backup_id | String, frequency | String, retention_days | Number, }, ServerConfig = { name | String, plan | String, zone | String, backups | Array, }, ProviderConfig = { api_key | String, api_password | String, servers | Array, },\\n}\\nEOF","breadcrumbs":"Nickel Executable Examples » Provider Contracts","id":"1119","title":"Provider Contracts"},"112":{"body":"To deploy infrastructure to UpCloud: # Read workspace deployment guide\\ncat workspaces/workspace_librecloud/docs/deployment-guide.md # Or: From workspace directory\\ncd workspaces/workspace_librecloud\\ncat docs/deployment-guide.md","breadcrumbs":"Installation Validation Guide » Option 1: Deploy workspace_librecloud","id":"112","title":"Option 1: Deploy workspace_librecloud"},"1120":{"body":"cat > upcloud_defaults.ncl << \'EOF\'\\n{ backup = { backup_id = \\"\\", frequency = \\"daily\\", retention_days = 7, }, server = { name = \\"\\", plan = \\"1xCPU-1 GB\\", zone = \\"us-nyc1\\", backups = [], }, provider = { api_key = \\"\\", api_password = \\"\\", servers = [], },\\n}\\nEOF","breadcrumbs":"Nickel Executable Examples » Provider Defaults","id":"1120","title":"Provider Defaults"},"1121":{"body":"cat > upcloud_main.ncl << \'EOF\'\\nlet contracts = import \\"./upcloud_contracts.ncl\\" in\\nlet defaults = import \\"./upcloud_defaults.ncl\\" in { defaults = defaults, # Makers (90% use case) make_backup | not_exported = fun overrides => defaults.backup & overrides, make_server | not_exported = fun overrides => defaults.server & overrides, make_provider | not_exported = fun overrides => defaults.provider & overrides, # Pre-built instances DefaultBackup = defaults.backup, DefaultServer = defaults.server, DefaultProvider = defaults.provider, # Production configs production_high_availability = defaults.provider & { servers = [ defaults.server & { name = \\"web-01\\", plan = \\"2xCPU-4 GB\\", zone = \\"us-nyc1\\", backups = [ defaults.backup & { frequency = \\"hourly\\" }, ], }, defaults.server & { name = \\"web-02\\", plan = \\"2xCPU-4 GB\\", zone = \\"eu-fra1\\", backups = [ defaults.backup & { frequency = \\"hourly\\" }, ], }, defaults.server & { name = \\"db-01\\", plan = \\"4xCPU-16 GB\\", zone = \\"us-nyc1\\", backups = [ defaults.backup & { frequency = \\"every-6h\\", retention_days = 30 }, ], }, ], },\\n}\\nEOF","breadcrumbs":"Nickel Executable Examples » Provider Main Module","id":"1121","title":"Provider Main Module"},"1122":{"body":"# Export provider config\\nnickel export upcloud_main.ncl --format json | jq \'.production_high_availability\' # Export as TOML (for IaC config files)\\nnickel export upcloud_main.ncl --format toml > upcloud.toml\\ncat upcloud.toml # Count servers in production config\\nnickel export upcloud_main.ncl --format json | jq \'.production_high_availability.servers | length\'\\n# Output: 3","breadcrumbs":"Nickel Executable Examples » Test Provider Configuration","id":"1122","title":"Test Provider Configuration"},"1123":{"body":"cat > upcloud_consumer.ncl << \'EOF\'\\nlet upcloud = import \\"./upcloud_main.ncl\\" in { # Simple production setup simple_production = upcloud.make_provider { api_key = \\"prod-key\\", api_password = \\"prod-secret\\", servers = [ upcloud.make_server { name = \\"web-01\\", plan = \\"2xCPU-4 GB\\" }, upcloud.make_server { name = \\"web-02\\", plan = \\"2xCPU-4 GB\\" }, ], }, # Advanced HA setup with custom fields ha_stack = upcloud.production_high_availability & { api_key = \\"prod-key\\", api_password = \\"prod-secret\\", monitoring_enabled = true, alerting_email = \\"ops@company.com\\", custom_vpc_id = \\"vpc-prod-001\\", },\\n}\\nEOF # Validate structure\\nnickel export upcloud_consumer.ncl --format json | jq \'.ha_stack | keys\'","breadcrumbs":"Nickel Executable Examples » Consumer Using Provider","id":"1123","title":"Consumer Using Provider"},"1124":{"body":"","breadcrumbs":"Nickel Executable Examples » Example 3: Real-World Pattern - Taskserv Configuration","id":"1124","title":"Example 3: Real-World Pattern - Taskserv Configuration"},"1125":{"body":"cat > production/taskserv_contracts.ncl << \'EOF\'\\n{ Dependency = { name | String, wait_for_health | Bool, }, TaskServ = { name | String, version | String, dependencies | Array, enabled | Bool, },\\n}\\nEOF","breadcrumbs":"Nickel Executable Examples » Taskserv Contracts (from wuji)","id":"1125","title":"Taskserv Contracts (from wuji)"},"1126":{"body":"cat > production/taskserv_defaults.ncl << \'EOF\'\\n{ kubernetes = { name = \\"kubernetes\\", version = \\"1.28.0\\", enabled = true, dependencies = [ { name = \\"containerd\\", wait_for_health = true }, { name = \\"etcd\\", wait_for_health = true }, ], }, cilium = { name = \\"cilium\\", version = \\"1.14.0\\", enabled = true, dependencies = [ { name = \\"kubernetes\\", wait_for_health = true }, ], }, containerd = { name = \\"containerd\\", version = \\"1.7.0\\", enabled = true, dependencies = [], }, etcd = { name = \\"etcd\\", version = \\"3.5.0\\", enabled = true, dependencies = [], }, postgres = { name = \\"postgres\\", version = \\"15.0\\", enabled = true, dependencies = [], }, redis = { name = \\"redis\\", version = \\"7.0.0\\", enabled = true, dependencies = [], },\\n}\\nEOF","breadcrumbs":"Nickel Executable Examples » Taskserv Defaults","id":"1126","title":"Taskserv Defaults"},"1127":{"body":"cat > production/taskserv.ncl << \'EOF\'\\nlet contracts = import \\"./taskserv_contracts.ncl\\" in\\nlet defaults = import \\"./taskserv_defaults.ncl\\" in { defaults = defaults, make_taskserv | not_exported = fun overrides => defaults.kubernetes & overrides, # Pre-built DefaultKubernetes = defaults.kubernetes, DefaultCilium = defaults.cilium, DefaultContainerd = defaults.containerd, DefaultEtcd = defaults.etcd, DefaultPostgres = defaults.postgres, DefaultRedis = defaults.redis, # Wuji infrastructure (20 taskservs similar to actual) wuji_k8s_stack = { kubernetes = defaults.kubernetes, cilium = defaults.cilium, containerd = defaults.containerd, etcd = defaults.etcd, }, wuji_data_stack = { postgres = defaults.postgres & { version = \\"15.3\\" }, redis = defaults.redis & { version = \\"7.2.0\\" }, }, # Staging with different versions staging_stack = { kubernetes = defaults.kubernetes & { version = \\"1.27.0\\" }, cilium = defaults.cilium & { version = \\"1.13.0\\" }, containerd = defaults.containerd & { version = \\"1.6.0\\" }, etcd = defaults.etcd & { version = \\"3.4.0\\" }, postgres = defaults.postgres & { version = \\"14.0\\" }, },\\n}\\nEOF","breadcrumbs":"Nickel Executable Examples » Taskserv Main","id":"1127","title":"Taskserv Main"},"1128":{"body":"# Export stack\\nnickel export taskserv.ncl --format json | jq \'.wuji_k8s_stack | keys\'\\n# Output: [\\"kubernetes\\", \\"cilium\\", \\"containerd\\", \\"etcd\\"] # Get specific version\\nnickel export taskserv.ncl --format json | \\\\ jq \'.staging_stack.kubernetes.version\'\\n# Output: \\"1.27.0\\" # Count taskservs in stacks\\necho \\"Wuji K8S stack:\\"\\nnickel export taskserv.ncl --format json | jq \'.wuji_k8s_stack | length\' echo \\"Staging stack:\\"\\nnickel export taskserv.ncl --format json | jq \'.staging_stack | length\'","breadcrumbs":"Nickel Executable Examples » Test Taskserv Setup","id":"1128","title":"Test Taskserv Setup"},"1129":{"body":"","breadcrumbs":"Nickel Executable Examples » Example 4: Composition & Extension Pattern","id":"1129","title":"Example 4: Composition & Extension Pattern"},"113":{"body":"To create a new workspace for different infrastructure: provisioning workspace init my_workspace --template minimal","breadcrumbs":"Installation Validation Guide » Option 2: Create a New Workspace","id":"113","title":"Option 2: Create a New Workspace"},"1130":{"body":"cat > production/infrastructure.ncl << \'EOF\'\\nlet servers = import \\"./server.ncl\\" in\\nlet taskservs = import \\"./taskserv.ncl\\" in { # Infrastructure with servers + taskservs development = { servers = { app = servers.make_server { name = \\"dev-app\\", cpu_cores = 2 }, db = servers.make_server { name = \\"dev-db\\", cpu_cores = 4 }, }, taskservs = taskservs.staging_stack, }, production = { servers = [ servers.make_server { name = \\"prod-app-01\\", cpu_cores = 8 }, servers.make_server { name = \\"prod-app-02\\", cpu_cores = 8 }, servers.make_server { name = \\"prod-db-01\\", cpu_cores = 16 }, ], taskservs = taskservs.wuji_k8s_stack & { prometheus = { name = \\"prometheus\\", version = \\"2.45.0\\", enabled = true, dependencies = [], }, }, },\\n}\\nEOF # Validate composition\\nnickel export infrastructure.ncl --format json | jq \'.production.servers | length\'\\n# Output: 3 nickel export infrastructure.ncl --format json | jq \'.production.taskservs | keys | length\'\\n# Output: 5","breadcrumbs":"Nickel Executable Examples » Base Infrastructure","id":"1130","title":"Base Infrastructure"},"1131":{"body":"cat > production/infrastructure_extended.ncl << \'EOF\'\\nlet infra = import \\"./infrastructure.ncl\\" in # Add custom fields without modifying base!\\n{ development = infra.development & { monitoring_enabled = false, cost_optimization = true, auto_shutdown = true, }, production = infra.production & { monitoring_enabled = true, alert_email = \\"ops@company.com\\", backup_enabled = true, backup_frequency = \\"6h\\", disaster_recovery_enabled = true, dr_region = \\"eu-fra1\\", compliance_level = \\"SOC2\\", security_scanning = true, },\\n}\\nEOF # Verify extension works (custom fields are preserved!)\\nnickel export infrastructure_extended.ncl --format json | \\\\ jq \'.production | keys\'\\n# Output includes: monitoring_enabled, alert_email, backup_enabled, etc","breadcrumbs":"Nickel Executable Examples » Extending Infrastructure (Nickel Advantage!)","id":"1131","title":"Extending Infrastructure (Nickel Advantage!)"},"1132":{"body":"","breadcrumbs":"Nickel Executable Examples » Example 5: Validation & Error Handling","id":"1132","title":"Example 5: Validation & Error Handling"},"1133":{"body":"cat > production/validation.ncl << \'EOF\'\\nlet validate_server = fun server => if server.cpu_cores <= 0 then std.record.fail \\"CPU cores must be positive\\" else if server.memory_gb <= 0 then std.record.fail \\"Memory must be positive\\" else server\\nin let validate_taskserv = fun ts => if std.string.length ts.name == 0 then std.record.fail \\"TaskServ name required\\" else if std.string.length ts.version == 0 then std.record.fail \\"TaskServ version required\\" else ts\\nin { validate_server = validate_server, validate_taskserv = validate_taskserv,\\n}\\nEOF","breadcrumbs":"Nickel Executable Examples » Validation Functions","id":"1133","title":"Validation Functions"},"1134":{"body":"cat > production/validated_config.ncl << \'EOF\'\\nlet server = import \\"./server.ncl\\" in\\nlet taskserv = import \\"./taskserv.ncl\\" in\\nlet validation = import \\"./validation.ncl\\" in { # Valid server (passes validation) valid_server = validation.validate_server { name = \\"web-01\\", cpu_cores = 4, memory_gb = 8, zone = \\"us-nyc1\\", }, # Valid taskserv valid_taskserv = validation.validate_taskserv { name = \\"kubernetes\\", version = \\"1.28.0\\", dependencies = [], enabled = true, },\\n}\\nEOF # Test validation\\nnickel export validated_config.ncl --format json\\n# Should succeed without errors # Test invalid (uncomment to see error)\\n# {\\n# invalid_server = validation.validate_server {\\n# name = \\"bad-server\\",\\n# cpu_cores = -1, # Invalid!\\n# memory_gb = 8,\\n# zone = \\"us-nyc1\\",\\n# },\\n# }","breadcrumbs":"Nickel Executable Examples » Using Validations","id":"1134","title":"Using Validations"},"1135":{"body":"","breadcrumbs":"Nickel Executable Examples » Test Suite: Bash Script","id":"1135","title":"Test Suite: Bash Script"},"1136":{"body":"#!/bin/bash\\n# test_all_examples.sh set -e echo \\"=== Testing Nickel Examples ===\\" cd ~/nickel-examples echo \\"1. Simple Server Configuration...\\"\\ncd simple\\nnickel export server.ncl --format json > /dev/null\\necho \\" ✓ Simple server config valid\\" echo \\"2. Complex Provider (UpCloud)...\\"\\ncd ../complex/upcloud\\nnickel export upcloud_main.ncl --format json > /dev/null\\necho \\" ✓ UpCloud provider config valid\\" echo \\"3. Production Taskserv...\\"\\ncd ../../production\\nnickel export taskserv.ncl --format json > /dev/null\\necho \\" ✓ Taskserv config valid\\" echo \\"4. Infrastructure Composition...\\"\\nnickel export infrastructure.ncl --format json > /dev/null\\necho \\" ✓ Infrastructure composition valid\\" echo \\"5. Extended Infrastructure...\\"\\nnickel export infrastructure_extended.ncl --format json > /dev/null\\necho \\" ✓ Extended infrastructure valid\\" echo \\"6. Validated Config...\\"\\nnickel export validated_config.ncl --format json > /dev/null\\necho \\" ✓ Validated config valid\\" echo \\"\\"\\necho \\"=== All Tests Passed ✓ ===\\"","breadcrumbs":"Nickel Executable Examples » Run All Examples","id":"1136","title":"Run All Examples"},"1137":{"body":"","breadcrumbs":"Nickel Executable Examples » Quick Commands Reference","id":"1137","title":"Quick Commands Reference"},"1138":{"body":"# Validate Nickel syntax\\nnickel export config.ncl # Export as JSON (for inspecting)\\nnickel export config.ncl --format json # Export as TOML (for config files)\\nnickel export config.ncl --format toml # Export as YAML\\nnickel export config.ncl --format yaml # Pretty print JSON output\\nnickel export config.ncl --format json | jq . # Extract specific field\\nnickel export config.ncl --format json | jq \'.production_server\' # Count array elements\\nnickel export config.ncl --format json | jq \'.servers | length\' # Check if file has valid syntax only\\nnickel typecheck config.ncl","breadcrumbs":"Nickel Executable Examples » Common Nickel Operations","id":"1138","title":"Common Nickel Operations"},"1139":{"body":"","breadcrumbs":"Nickel Executable Examples » Troubleshooting Examples","id":"1139","title":"Troubleshooting Examples"},"114":{"body":"Discover what\'s available to deploy: # List available task services\\nprovisioning mod discover taskservs # List available providers\\nprovisioning mod discover providers # List available clusters\\nprovisioning mod discover clusters","breadcrumbs":"Installation Validation Guide » Option 3: Explore Available Modules","id":"114","title":"Option 3: Explore Available Modules"},"1140":{"body":"# ❌ WRONG\\nlet A = {x = 1}\\nlet B = {y = 2}\\n{A = A, B = B} # ✅ CORRECT\\nlet A = {x = 1} in\\nlet B = {y = 2} in\\n{A = A, B = B}","breadcrumbs":"Nickel Executable Examples » Problem: \\"unexpected token\\" with multiple let","id":"1140","title":"Problem: \\"unexpected token\\" with multiple let"},"1141":{"body":"# ❌ WRONG - function will fail to serialize\\n{ get_value = fun x => x + 1, result = get_value 5,\\n} # ✅ CORRECT - mark function not_exported\\n{ get_value | not_exported = fun x => x + 1, result = get_value 5,\\n}","breadcrumbs":"Nickel Executable Examples » Problem: Function serialization fails","id":"1141","title":"Problem: Function serialization fails"},"1142":{"body":"# ❌ WRONG\\n{ optional_field = null } # ✅ CORRECT - use empty string/array/object\\n{ optional_field = \\"\\" } # for strings\\n{ optional_field = [] } # for arrays\\n{ optional_field = {} } # for objects","breadcrumbs":"Nickel Executable Examples » Problem: Null values cause export issues","id":"1142","title":"Problem: Null values cause export issues"},"1143":{"body":"These examples are: ✅ Copy-paste ready - Can run directly ✅ Executable - Validated with nickel export ✅ Progressive - Simple → Complex → Production ✅ Real patterns - Based on actual codebase (wuji, upcloud) ✅ Self-contained - Each example works independently ✅ Comparable - Shows KCL vs Nickel equivalence Next : Use these as templates for your own Nickel configurations. Version : 1.0.0 Status : Tested & Verified Last Updated : 2025-12-15","breadcrumbs":"Nickel Executable Examples » Summary","id":"1143","title":"Summary"},"1144":{"body":"Execution Complete Perfect question. Here\'s a clear explanation: The Orchestrator IS USED and IS CRITICAL That code example was misleading. Here\'s the real architecture: How It Actually Works ┌───────────────────────────────────────────────── ─┐ │ User runs: provisioning server create --orchestrated └───────────────────┬───────────────────────────── ─┘ ↓ ┌───────────────────────┐ │ Nushell CLI │ │ (provisioning) │ └───────────┬───────────┘ ↓ HTTP POST ┌───────────────────────────────┐ │ Rust Orchestrator Daemon │ │ (provisioning-orchestrator) │ │ │ │ • Task Queue │ │ • Workflow Engine │ │ • Dependency Resolution │ │ • Parallel Execution │ └───────────┬───────────────────┘ ↓ spawns subprocess ┌───────────────────────────────┐ │ Nushell Business Logic │ │ nu -c \\"use servers/create.nu\\"│ │ │ │ Executes actual provider │ │ API calls, configuration │ └───────────────────────────────┘ The Flow in Detail User Command: provisioning server create wuji --orchestrated 2. Nushell CLI submits to orchestrator: http post http://localhost:9090/workflows/servers/create { infra: \\"wuji\\" params: {...} }","breadcrumbs":"Orchestrator Info » CLI code","id":"1144","title":"CLI code"},"1145":{"body":"Orchestrator receives and queues: // Orchestrator receives HTTP request\\nasync fn create_server_workflow(request) { let task = Task::new(TaskType::ServerCreate, request); task_queue.enqueue(task).await; // Queue for execution return workflow_id; // Return immediately\\n}\\n```text 2. Orchestrator executes via Nushell subprocess: ```rust\\n// Orchestrator spawns Nushell to run business logic\\nasync fn execute_task(task: Task) { let output = Command::new(\\"nu\\") .arg(\\"-c\\") .arg(\\"use /usr/local/lib/provisioning/servers/create.nu; create-server \'wuji\'\\") .output() .await?; // Orchestrator manages: retry, checkpointing, monitoring\\n}\\n```text 3. Nushell executes the actual work: ```nu\\n# servers/create.nu export def create-server [name: string] { # This is the business logic # Calls UpCloud API, creates server, etc. let provider = (load-provider) $provider | create-vm $name\\n}\\nWhy This Architecture? Problem It Solves Without Orchestrator (Old Way): provisioning → template.nu → cluster.nu → taskserv.nu → provider.nu (Deep call stack = crashes!)\\nWith Orchestrator (Current): provisioning → Orchestrator → spawns fresh Nushell subprocess for each task (No deep nesting, parallel execution, recovery)\\nWhat Orchestrator Provides Task Queue - Reliable execution even if system crashes\\nParallel Execution - Run 10 tasks at once (Rust async)\\nWorkflow Engine - Handle complex dependencies\\nCheckpointing - Resume from failure\\nMonitoring - Real-time progress tracking\\nWhat Nushell Provides Business Logic - Provider integrations, config generation\\nFlexibility - Easy to modify without recompiling\\nReadability - Shell-like syntax for infrastructure ops\\nMulti-Repo Impact: NONE on Integration In Monorepo: provisioning/\\n├── core/nulib/ # Nushell code\\n└── platform/orchestrator/ # Rust code\\nIn Multi-Repo: provisioning-core/ # Separate repo, installs to /usr/local/lib/provisioning\\nprovisioning-platform/ # Separate repo, installs to /usr/local/bin/provisioning-orchestrator\\nIntegration is the same: Orchestrator calls: nu -c \\"use /usr/local/lib/provisioning/servers/create.nu\\"\\nNushell calls: http post ...\\nNo code dependency, just runtime coordination! The Orchestrator IS Essential The orchestrator: ✅ IS USED for all complex operations\\n✅ IS CRITICAL for workflow system (v3.0)\\n✅ IS REQUIRED for batch operations (v3.1)\\n✅ SOLVES deep call stack issues\\n✅ PROVIDES performance and reliability\\nThat misleading code example showed how Platform doesn\'t link to Core code, but it absolutely uses the orchestrator for coordination. Does this clear it up? The orchestrator is the performance and reliability layer that makes the whole system work! Cost: $0.1565 USD\\nDuration: 137.69s\\nTurns: 40\\nTotal tokens: 7466(7 in, 7459 out)","breadcrumbs":"Orchestrator Info » Returns: workflow_id = \\"abc-123\\"","id":"1145","title":"Returns: workflow_id = \\"abc-123\\""},"1146":{"body":"Version : 1.0.0 Date : 2025-10-08 Status : Implemented","breadcrumbs":"Orchestrator Auth Integration » Orchestrator Authentication & Authorization Integration","id":"1146","title":"Orchestrator Authentication & Authorization Integration"},"1147":{"body":"Complete authentication and authorization flow integration for the Provisioning Orchestrator, connecting all security components (JWT validation, MFA verification, Cedar authorization, rate limiting, and audit logging) into a cohesive security middleware chain.","breadcrumbs":"Orchestrator Auth Integration » Overview","id":"1147","title":"Overview"},"1148":{"body":"","breadcrumbs":"Orchestrator Auth Integration » Architecture","id":"1148","title":"Architecture"},"1149":{"body":"The middleware chain is applied in this specific order to ensure proper security: ┌─────────────────────────────────────────────────────────────────┐\\n│ Incoming HTTP Request │\\n└────────────────────────┬────────────────────────────────────────┘ │ ▼ ┌────────────────────────────────┐ │ 1. Rate Limiting Middleware │ │ - Per-IP request limits │ │ - Sliding window │ │ - Exempt IPs │ └────────────┬───────────────────┘ │ (429 if exceeded) ▼ ┌────────────────────────────────┐ │ 2. Authentication Middleware │ │ - Extract Bearer token │ │ - Validate JWT signature │ │ - Check expiry, issuer, aud │ │ - Check revocation │ └────────────┬───────────────────┘ │ (401 if invalid) ▼ ┌────────────────────────────────┐ │ 3. MFA Verification │ │ - Check MFA status in token │ │ - Enforce for sensitive ops │ │ - Production deployments │ │ - All DELETE operations │ └────────────┬───────────────────┘ │ (403 if required but missing) ▼ ┌────────────────────────────────┐ │ 4. Authorization Middleware │ │ - Build Cedar request │ │ - Evaluate policies │ │ - Check permissions │ │ - Log decision │ └────────────┬───────────────────┘ │ (403 if denied) ▼ ┌────────────────────────────────┐ │ 5. Audit Logging Middleware │ │ - Log complete request │ │ - User, action, resource │ │ - Authorization decision │ │ - Response status │ └────────────┬───────────────────┘ │ ▼ ┌────────────────────────────────┐ │ Protected Handler │ │ - Access security context │ │ - Execute business logic │ └────────────────────────────────┘","breadcrumbs":"Orchestrator Auth Integration » Security Middleware Chain","id":"1149","title":"Security Middleware Chain"},"115":{"body":"After completing all steps, verify with this final checklist: Prerequisites Verified: [ ] OS is macOS, Linux, or WSL2 [ ] CPU: 2+ cores [ ] RAM: 2+ GB available [ ] Disk: 2+ GB free [ ] Nushell 0.109.0+ installed [ ] Nickel 1.x.x installed [ ] Docker 20.10+ installed [ ] Provisioning binary executable Bootstrap Completed: [ ] All 7 stages completed successfully [ ] No error messages in output [ ] Installation log shows success Installation Validated: [ ] Workspace directories exist [ ] Generated TOML files exist [ ] Nickel type-checking passes [ ] Workspace validation passes [ ] Orchestrator health check passes [ ] Provisioning CLI works (if installed) Ready to Deploy: [ ] No errors in validation steps [ ] All services responding correctly [ ] Configuration properly exported","breadcrumbs":"Installation Validation Guide » Section 6: Verification Checklist","id":"115","title":"Section 6: Verification Checklist"},"1150":{"body":"","breadcrumbs":"Orchestrator Auth Integration » Implementation Details","id":"1150","title":"Implementation Details"},"1151":{"body":"Purpose : Build complete security context from authenticated requests. Key Features : Extracts JWT token claims Determines MFA verification status Extracts IP address (X-Forwarded-For, X-Real-IP) Extracts user agent and session info Provides permission checking methods Lines of Code : 275 Example : pub struct SecurityContext { pub user_id: String, pub token: ValidatedToken, pub mfa_verified: bool, pub ip_address: IpAddr, pub user_agent: Option, pub permissions: Vec, pub workspace: String, pub request_id: String, pub session_id: Option,\\n} impl SecurityContext { pub fn has_permission(&self, permission: &str) -> bool { ... } pub fn has_any_permission(&self, permissions: &[&str]) -> bool { ... } pub fn has_all_permissions(&self, permissions: &[&str]) -> bool { ... }\\n}","breadcrumbs":"Orchestrator Auth Integration » 1. Security Context Builder (middleware/security_context.rs)","id":"1151","title":"1. Security Context Builder (middleware/security_context.rs)"},"1152":{"body":"Purpose : JWT token validation with revocation checking. Key Features : Bearer token extraction JWT signature validation (RS256) Expiry, issuer, audience checks Token revocation status Security context injection Lines of Code : 245 Flow : Extract Authorization: Bearer header Validate JWT with TokenValidator Build SecurityContext Inject into request extensions Continue to next middleware or return 401 Error Responses : 401 Unauthorized: Missing/invalid token, expired, revoked 403 Forbidden: Insufficient permissions","breadcrumbs":"Orchestrator Auth Integration » 2. Enhanced Authentication Middleware (middleware/auth.rs)","id":"1152","title":"2. Enhanced Authentication Middleware (middleware/auth.rs)"},"1153":{"body":"Purpose : Enforce MFA for sensitive operations. Key Features : Path-based MFA requirements Method-based enforcement (all DELETEs) Production environment protection Clear error messages Lines of Code : 290 MFA Required For : Production deployments (/production/, /prod/) All DELETE operations Server operations (POST, PUT, DELETE) Cluster operations (POST, PUT, DELETE) Batch submissions Rollback operations Configuration changes (POST, PUT, DELETE) Secret management User/role management Example : fn requires_mfa(method: &str, path: &str) -> bool { if path.contains(\\"/production/\\") { return true; } if method == \\"DELETE\\" { return true; } if path.contains(\\"/deploy\\") { return true; } // ...\\n}","breadcrumbs":"Orchestrator Auth Integration » 3. MFA Verification Middleware (middleware/mfa.rs)","id":"1153","title":"3. MFA Verification Middleware (middleware/mfa.rs)"},"1154":{"body":"Purpose : Cedar policy evaluation with audit logging. Key Features : Builds Cedar authorization request from HTTP request Maps HTTP methods to Cedar actions (GET→Read, POST→Create, etc.) Extracts resource types from paths Evaluates Cedar policies with context (MFA, IP, time, workspace) Logs all authorization decisions to audit log Non-blocking audit logging (tokio::spawn) Lines of Code : 380 Resource Mapping : /api/v1/servers/srv-123 → Resource::Server(\\"srv-123\\")\\n/api/v1/taskserv/kubernetes → Resource::TaskService(\\"kubernetes\\")\\n/api/v1/cluster/prod → Resource::Cluster(\\"prod\\")\\n/api/v1/config/settings → Resource::Config(\\"settings\\") Action Mapping : GET → Action::Read\\nPOST → Action::Create\\nPUT → Action::Update\\nDELETE → Action::Delete","breadcrumbs":"Orchestrator Auth Integration » 4. Enhanced Authorization Middleware (middleware/authz.rs)","id":"1154","title":"4. Enhanced Authorization Middleware (middleware/authz.rs)"},"1155":{"body":"Purpose : Prevent API abuse with per-IP rate limiting. Key Features : Sliding window rate limiting Per-IP request tracking Configurable limits and windows Exempt IP support Automatic cleanup of old entries Statistics tracking Lines of Code : 420 Configuration : pub struct RateLimitConfig { pub max_requests: u32, // for example, 100 pub window_duration: Duration, // for example, 60 seconds pub exempt_ips: Vec, // for example, internal services pub enabled: bool,\\n} // Default: 100 requests per minute Statistics : pub struct RateLimitStats { pub total_ips: usize, // Number of tracked IPs pub total_requests: u32, // Total requests made pub limited_ips: usize, // IPs that hit the limit pub config: RateLimitConfig,\\n}","breadcrumbs":"Orchestrator Auth Integration » 5. Rate Limiting Middleware (middleware/rate_limit.rs)","id":"1155","title":"5. Rate Limiting Middleware (middleware/rate_limit.rs)"},"1156":{"body":"Purpose : Helper module to integrate all security components. Key Features : SecurityComponents struct grouping all middleware SecurityConfig for configuration initialize() method to set up all components disabled() method for development mode apply_security_middleware() helper for router setup Lines of Code : 265 Usage Example : use provisioning_orchestrator::security_integration::{ SecurityComponents, SecurityConfig\\n}; // Initialize security\\nlet config = SecurityConfig { public_key_path: PathBuf::from(\\"keys/public.pem\\"), jwt_issuer: \\"control-center\\".to_string(), jwt_audience: \\"orchestrator\\".to_string(), cedar_policies_path: PathBuf::from(\\"policies\\"), auth_enabled: true, authz_enabled: true, mfa_enabled: true, rate_limit_config: RateLimitConfig::new(100, 60),\\n}; let security = SecurityComponents::initialize(config, audit_logger).await?; // Apply to router\\nlet app = Router::new() .route(\\"/api/v1/servers\\", post(create_server)) .route(\\"/api/v1/servers/:id\\", delete(delete_server)); let secured_app = apply_security_middleware(app, &security);","breadcrumbs":"Orchestrator Auth Integration » 6. Security Integration Module (security_integration.rs)","id":"1156","title":"6. Security Integration Module (security_integration.rs)"},"1157":{"body":"","breadcrumbs":"Orchestrator Auth Integration » Integration with AppState","id":"1157","title":"Integration with AppState"},"1158":{"body":"pub struct AppState { // Existing fields pub task_storage: Arc, pub batch_coordinator: BatchCoordinator, pub dependency_resolver: DependencyResolver, pub state_manager: Arc, pub monitoring_system: Arc, pub progress_tracker: Arc, pub rollback_system: Arc, pub test_orchestrator: Arc, pub dns_manager: Arc, pub extension_manager: Arc, pub oci_manager: Arc, pub service_orchestrator: Arc, pub audit_logger: Arc, pub args: Args, // NEW: Security components pub security: SecurityComponents,\\n}","breadcrumbs":"Orchestrator Auth Integration » Updated AppState Structure","id":"1158","title":"Updated AppState Structure"},"1159":{"body":"#[tokio::main]\\nasync fn main() -> Result<()> { let args = Args::parse(); // Initialize AppState (creates audit_logger) let state = Arc::new(AppState::new(args).await?); // Initialize security components let security_config = SecurityConfig { public_key_path: PathBuf::from(\\"keys/public.pem\\"), jwt_issuer: env::var(\\"JWT_ISSUER\\").unwrap_or(\\"control-center\\".to_string()), jwt_audience: \\"orchestrator\\".to_string(), cedar_policies_path: PathBuf::from(\\"policies\\"), auth_enabled: env::var(\\"AUTH_ENABLED\\").unwrap_or(\\"true\\".to_string()) == \\"true\\", authz_enabled: env::var(\\"AUTHZ_ENABLED\\").unwrap_or(\\"true\\".to_string()) == \\"true\\", mfa_enabled: env::var(\\"MFA_ENABLED\\").unwrap_or(\\"true\\".to_string()) == \\"true\\", rate_limit_config: RateLimitConfig::new( env::var(\\"RATE_LIMIT_MAX\\").unwrap_or(\\"100\\".to_string()).parse().unwrap(), env::var(\\"RATE_LIMIT_WINDOW\\").unwrap_or(\\"60\\".to_string()).parse().unwrap(), ), }; let security = SecurityComponents::initialize( security_config, state.audit_logger.clone() ).await?; // Public routes (no auth) let public_routes = Router::new() .route(\\"/health\\", get(health_check)); // Protected routes (full security chain) let protected_routes = Router::new() .route(\\"/api/v1/servers\\", post(create_server)) .route(\\"/api/v1/servers/:id\\", delete(delete_server)) .route(\\"/api/v1/taskserv\\", post(create_taskserv)) .route(\\"/api/v1/cluster\\", post(create_cluster)) // ... more routes ; // Apply security middleware to protected routes let secured_routes = apply_security_middleware(protected_routes, &security) .with_state(state.clone()); // Combine routes let app = Router::new() .merge(public_routes) .merge(secured_routes) .layer(CorsLayer::permissive()); // Start server let listener = tokio::net::TcpListener::bind(\\"0.0.0.0:9090\\").await?; axum::serve(listener, app).await?; Ok(())\\n}","breadcrumbs":"Orchestrator Auth Integration » Initialization in main.rs","id":"1159","title":"Initialization in main.rs"},"116":{"body":"If you encounter issues not covered here: Check logs : tail -f provisioning/platform/orchestrator/data/orchestrator.log Enable debug mode : provisioning --debug Review bootstrap output : Scroll up to see detailed error messages Check documentation : provisioning help or provisioning guide Workspace guide : cat workspaces/workspace_librecloud/docs/deployment-guide.md","breadcrumbs":"Installation Validation Guide » Getting Help","id":"116","title":"Getting Help"},"1160":{"body":"","breadcrumbs":"Orchestrator Auth Integration » Protected Endpoints","id":"1160","title":"Protected Endpoints"},"1161":{"body":"Category Example Endpoints Auth Required MFA Required Cedar Policy Health /health ❌ ❌ ❌ Read-Only GET /api/v1/servers ✅ ❌ ✅ Server Mgmt POST /api/v1/servers ✅ ❌ ✅ Server Delete DELETE /api/v1/servers/:id ✅ ✅ ✅ Taskserv Mgmt POST /api/v1/taskserv ✅ ❌ ✅ Cluster Mgmt POST /api/v1/cluster ✅ ✅ ✅ Production POST /api/v1/production/* ✅ ✅ ✅ Batch Ops POST /api/v1/batch/submit ✅ ✅ ✅ Rollback POST /api/v1/rollback ✅ ✅ ✅ Config Write POST /api/v1/config ✅ ✅ ✅ Secrets GET /api/v1/secret/* ✅ ✅ ✅","breadcrumbs":"Orchestrator Auth Integration » Endpoint Categories","id":"1161","title":"Endpoint Categories"},"1162":{"body":"","breadcrumbs":"Orchestrator Auth Integration » Complete Authentication Flow","id":"1162","title":"Complete Authentication Flow"},"1163":{"body":"1. CLIENT REQUEST ├─ Headers: │ ├─ Authorization: Bearer │ ├─ X-Forwarded-For: 192.168.1.100 │ ├─ User-Agent: MyClient/1.0 │ └─ X-MFA-Verified: true └─ Path: DELETE /api/v1/servers/prod-srv-01 2. RATE LIMITING MIDDLEWARE ├─ Extract IP: 192.168.1.100 ├─ Check limit: 45/100 requests in window ├─ Decision: ALLOW (under limit) └─ Continue → 3. AUTHENTICATION MIDDLEWARE ├─ Extract Bearer token ├─ Validate JWT: │ ├─ Signature: ✅ Valid (RS256) │ ├─ Expiry: ✅ Valid until 2025-10-09 10:00:00 │ ├─ Issuer: ✅ control-center │ ├─ Audience: ✅ orchestrator │ └─ Revoked: ✅ Not revoked ├─ Build SecurityContext: │ ├─ user_id: \\"user-456\\" │ ├─ workspace: \\"production\\" │ ├─ permissions: [\\"read\\", \\"write\\", \\"delete\\"] │ ├─ mfa_verified: true │ └─ ip_address: 192.168.1.100 ├─ Decision: ALLOW (valid token) └─ Continue → 4. MFA VERIFICATION MIDDLEWARE ├─ Check endpoint: DELETE /api/v1/servers/prod-srv-01 ├─ Requires MFA: ✅ YES (DELETE operation) ├─ MFA status: ✅ Verified ├─ Decision: ALLOW (MFA verified) └─ Continue → 5. AUTHORIZATION MIDDLEWARE ├─ Build Cedar request: │ ├─ Principal: User(\\"user-456\\") │ ├─ Action: Delete │ ├─ Resource: Server(\\"prod-srv-01\\") │ └─ Context: │ ├─ mfa_verified: true │ ├─ ip_address: \\"192.168.1.100\\" │ ├─ time: 2025-10-08T14:30:00Z │ └─ workspace: \\"production\\" ├─ Evaluate Cedar policies: │ ├─ Policy 1: Allow if user.role == \\"admin\\" ✅ │ ├─ Policy 2: Allow if mfa_verified == true ✅ │ └─ Policy 3: Deny if not business_hours ❌ ├─ Decision: ALLOW (2 allow, 1 deny = allow) ├─ Log to audit: Authorization GRANTED └─ Continue → 6. AUDIT LOGGING MIDDLEWARE ├─ Record: │ ├─ User: user-456 (IP: 192.168.1.100) │ ├─ Action: ServerDelete │ ├─ Resource: prod-srv-01 │ ├─ Authorization: GRANTED │ ├─ MFA: Verified │ └─ Timestamp: 2025-10-08T14:30:00Z └─ Continue → 7. PROTECTED HANDLER ├─ Execute business logic ├─ Delete server prod-srv-01 └─ Return: 200 OK 8. AUDIT LOGGING (Response) ├─ Update event: │ ├─ Status: 200 OK │ ├─ Duration: 1.234s │ └─ Result: SUCCESS └─ Write to audit log 9. CLIENT RESPONSE └─ 200 OK: Server deleted successfully","breadcrumbs":"Orchestrator Auth Integration » Step-by-Step Flow","id":"1163","title":"Step-by-Step Flow"},"1164":{"body":"","breadcrumbs":"Orchestrator Auth Integration » Configuration","id":"1164","title":"Configuration"},"1165":{"body":"# JWT Configuration\\nJWT_ISSUER=control-center\\nJWT_AUDIENCE=orchestrator\\nPUBLIC_KEY_PATH=/path/to/keys/public.pem # Cedar Policies\\nCEDAR_POLICIES_PATH=/path/to/policies # Security Toggles\\nAUTH_ENABLED=true\\nAUTHZ_ENABLED=true\\nMFA_ENABLED=true # Rate Limiting\\nRATE_LIMIT_MAX=100\\nRATE_LIMIT_WINDOW=60\\nRATE_LIMIT_EXEMPT_IPS=10.0.0.1,10.0.0.2 # Audit Logging\\nAUDIT_ENABLED=true\\nAUDIT_RETENTION_DAYS=365","breadcrumbs":"Orchestrator Auth Integration » Environment Variables","id":"1165","title":"Environment Variables"},"1166":{"body":"For development/testing, all security can be disabled: // In main.rs\\nlet security = if env::var(\\"DEVELOPMENT_MODE\\").unwrap_or(\\"false\\".to_string()) == \\"true\\" { SecurityComponents::disabled(audit_logger.clone())\\n} else { SecurityComponents::initialize(security_config, audit_logger.clone()).await?\\n};","breadcrumbs":"Orchestrator Auth Integration » Development Mode","id":"1166","title":"Development Mode"},"1167":{"body":"","breadcrumbs":"Orchestrator Auth Integration » Testing","id":"1167","title":"Testing"},"1168":{"body":"Location: provisioning/platform/orchestrator/tests/security_integration_tests.rs Test Coverage : ✅ Rate limiting enforcement ✅ Rate limit statistics ✅ Exempt IP handling ✅ Authentication missing token ✅ MFA verification for sensitive operations ✅ Cedar policy evaluation ✅ Complete security flow ✅ Security components initialization ✅ Configuration defaults Lines of Code : 340 Run Tests : cd provisioning/platform/orchestrator\\ncargo test security_integration_tests","breadcrumbs":"Orchestrator Auth Integration » Integration Tests","id":"1168","title":"Integration Tests"},"1169":{"body":"File Purpose Lines Tests middleware/security_context.rs Security context builder 275 8 middleware/auth.rs JWT authentication 245 5 middleware/mfa.rs MFA verification 290 15 middleware/authz.rs Cedar authorization 380 4 middleware/rate_limit.rs Rate limiting 420 8 middleware/mod.rs Module exports 25 0 security_integration.rs Integration helpers 265 2 tests/security_integration_tests.rs Integration tests 340 11 Total 2,240 53","breadcrumbs":"Orchestrator Auth Integration » File Summary","id":"1169","title":"File Summary"},"117":{"body":"This guide covers: ✅ Prerequisites verification (Nushell, Nickel, Docker) ✅ Bootstrap installation (7-stage automated process) ✅ Installation validation (directories, configs, services) ✅ Troubleshooting common issues ✅ Next steps for deployment You now have a fully installed and validated provisioning system ready for workspace deployment.","breadcrumbs":"Installation Validation Guide » Summary","id":"117","title":"Summary"},"1170":{"body":"","breadcrumbs":"Orchestrator Auth Integration » Benefits","id":"1170","title":"Benefits"},"1171":{"body":"✅ Complete authentication flow with JWT validation ✅ MFA enforcement for sensitive operations ✅ Fine-grained authorization with Cedar policies ✅ Rate limiting prevents API abuse ✅ Complete audit trail for compliance","breadcrumbs":"Orchestrator Auth Integration » Security","id":"1171","title":"Security"},"1172":{"body":"✅ Modular middleware design ✅ Clear separation of concerns ✅ Reusable security components ✅ Easy to test and maintain ✅ Configuration-driven behavior","breadcrumbs":"Orchestrator Auth Integration » Architecture","id":"1172","title":"Architecture"},"1173":{"body":"✅ Can enable/disable features independently ✅ Development mode for testing ✅ Comprehensive error messages ✅ Real-time statistics and monitoring ✅ Non-blocking audit logging","breadcrumbs":"Orchestrator Auth Integration » Operations","id":"1173","title":"Operations"},"1174":{"body":"Token Refresh : Automatic token refresh before expiry IP Whitelisting : Additional IP-based access control Geolocation : Block requests from specific countries Advanced Rate Limiting : Per-user, per-endpoint limits Session Management : Track active sessions, force logout 2FA Integration : Direct integration with TOTP/SMS providers Policy Hot Reload : Update Cedar policies without restart Metrics Dashboard : Real-time security metrics visualization","breadcrumbs":"Orchestrator Auth Integration » Future Enhancements","id":"1174","title":"Future Enhancements"},"1175":{"body":"Cedar Policy Language JWT Token Management MFA Setup Guide Audit Log Format Rate Limiting Best Practices","breadcrumbs":"Orchestrator Auth Integration » Related Documentation","id":"1175","title":"Related Documentation"},"1176":{"body":"Version Date Changes 1.0.0 2025-10-08 Initial implementation Maintained By : Security Team Review Cycle : Quarterly Last Reviewed : 2025-10-08","breadcrumbs":"Orchestrator Auth Integration » Version History","id":"1176","title":"Version History"},"1177":{"body":"Date: 2025-10-01 Status: Analysis Complete - Implementation Planning Author: Architecture Review","breadcrumbs":"Repo Dist Analysis » Repository and Distribution Architecture Analysis","id":"1177","title":"Repository and Distribution Architecture Analysis"},"1178":{"body":"This document analyzes the current project structure and provides a comprehensive plan for optimizing the repository organization and distribution strategy. The goal is to create a professional-grade infrastructure automation system with clear separation of concerns, efficient development workflow, and user-friendly distribution.","breadcrumbs":"Repo Dist Analysis » Executive Summary","id":"1178","title":"Executive Summary"},"1179":{"body":"","breadcrumbs":"Repo Dist Analysis » Current State Analysis","id":"1179","title":"Current State Analysis"},"118":{"body":"Welcome to Infrastructure Automation. This guide will walk you through your first steps with infrastructure automation, from basic setup to deploying your first infrastructure.","breadcrumbs":"Getting Started » Getting Started Guide","id":"118","title":"Getting Started Guide"},"1180":{"body":"Clean Core Separation provisioning/ contains the core system workspace/ concept for user data Clear extension points (providers, taskservs, clusters) Hybrid Architecture Rust orchestrator for performance-critical operations Nushell for business logic and scripting KCL for type-safe configuration Modular Design Extension system for providers and services Plugin architecture for Nushell Template-based code generation Advanced Features Batch workflow system (v3.1.0) Hybrid orchestrator (v3.0.0) Token-optimized agent architecture","breadcrumbs":"Repo Dist Analysis » Strengths","id":"1180","title":"Strengths"},"1181":{"body":"Confusing Root Structure Multiple workspace variants: _workspace/, backup-workspace/, workspace-librecloud/ Development artifacts at root: wrks/, NO/, target/ Unclear which workspace is active Mixed Concerns Runtime data intermixed with source code Build artifacts not properly isolated Presentations and demos in main repo Distribution Challenges Bash wrapper for CLI entry point (provisioning/core/cli/provisioning) No clear installation mechanism Missing package management system Undefined installation paths Documentation Fragmentation Multiple docs/ locations Scattered README files No unified documentation structure Configuration Complexity TOML-based system is good, but paths are unclear User vs system config separation needs clarification Installation paths not standardized","breadcrumbs":"Repo Dist Analysis » Critical Issues","id":"1181","title":"Critical Issues"},"1182":{"body":"","breadcrumbs":"Repo Dist Analysis » Recommended Architecture","id":"1182","title":"Recommended Architecture"},"1183":{"body":"project-provisioning/\\n│\\n├── provisioning/ # CORE SYSTEM (distribution source)\\n│ ├── core/ # Core engine\\n│ │ ├── cli/ # Main CLI entry\\n│ │ │ └── provisioning # Pure Nushell entry point\\n│ │ ├── nulib/ # Nushell libraries\\n│ │ │ ├── lib_provisioning/ # Core library functions\\n│ │ │ ├── main_provisioning/ # CLI handlers\\n│ │ │ ├── servers/ # Server management\\n│ │ │ ├── taskservs/ # Task service management\\n│ │ │ ├── clusters/ # Cluster management\\n│ │ │ └── workflows/ # Workflow orchestration\\n│ │ ├── plugins/ # System plugins\\n│ │ │ └── nushell-plugins/ # Nushell plugin sources\\n│ │ └── scripts/ # Utility scripts\\n│ │\\n│ ├── extensions/ # Extensible modules\\n│ │ ├── providers/ # Cloud providers (aws, upcloud, local)\\n│ │ ├── taskservs/ # Infrastructure services\\n│ │ │ ├── container-runtime/ # Container runtimes\\n│ │ │ ├── kubernetes/ # Kubernetes\\n│ │ │ ├── networking/ # Network services\\n│ │ │ ├── storage/ # Storage services\\n│ │ │ ├── databases/ # Database services\\n│ │ │ └── development/ # Dev tools\\n│ │ ├── clusters/ # Complete cluster configurations\\n│ │ └── workflows/ # Workflow templates\\n│ │\\n│ ├── platform/ # Platform services (Rust)\\n│ │ ├── orchestrator/ # Rust coordination layer\\n│ │ ├── control-center/ # Web management UI\\n│ │ ├── control-center-ui/ # UI frontend\\n│ │ ├── mcp-server/ # Model Context Protocol server\\n│ │ └── api-gateway/ # REST API gateway\\n│ │\\n│ ├── kcl/ # KCL configuration schemas\\n│ │ ├── main.ncl # Main entry point\\n│ │ ├── settings.ncl # Settings schema\\n│ │ ├── server.ncl # Server definitions\\n│ │ ├── cluster.ncl # Cluster definitions\\n│ │ ├── workflows.ncl # Workflow definitions\\n│ │ └── docs/ # KCL documentation\\n│ │\\n│ ├── templates/ # Jinja2 templates\\n│ │ ├── extensions/ # Extension templates\\n│ │ ├── services/ # Service templates\\n│ │ └── workspace/ # Workspace templates\\n│ │\\n│ ├── config/ # Default system configuration\\n│ │ ├── config.defaults.toml # System defaults\\n│ │ └── config-examples/ # Example configs\\n│ │\\n│ ├── tools/ # Build and packaging tools\\n│ │ ├── build/ # Build scripts\\n│ │ ├── package/ # Packaging tools\\n│ │ ├── distribution/ # Distribution tools\\n│ │ └── release/ # Release automation\\n│ │\\n│ └── resources/ # Static resources (images, assets)\\n│\\n├── workspace/ # RUNTIME DATA (gitignored except templates)\\n│ ├── infra/ # Infrastructure instances (gitignored)\\n│ │ └── .gitkeep\\n│ ├── config/ # User configuration (gitignored)\\n│ │ └── .gitkeep\\n│ ├── extensions/ # User extensions (gitignored)\\n│ │ └── .gitkeep\\n│ ├── runtime/ # Runtime data (gitignored)\\n│ │ ├── logs/\\n│ │ ├── cache/\\n│ │ ├── state/\\n│ │ └── tmp/\\n│ └── templates/ # Workspace templates (tracked)\\n│ ├── minimal/\\n│ ├── kubernetes/\\n│ └── multi-cloud/\\n│\\n├── distribution/ # DISTRIBUTION ARTIFACTS (gitignored)\\n│ ├── packages/ # Built packages\\n│ │ ├── provisioning-core-*.tar.gz\\n│ │ ├── provisioning-platform-*.tar.gz\\n│ │ ├── provisioning-extensions-*.tar.gz\\n│ │ └── checksums.txt\\n│ ├── installers/ # Installation scripts\\n│ │ ├── install.sh # Bash installer\\n│ │ └── install.nu # Nushell installer\\n│ └── registry/ # Package registry metadata\\n│ └── index.json\\n│\\n├── docs/ # UNIFIED DOCUMENTATION\\n│ ├── README.md # Documentation index\\n│ ├── user/ # User guides\\n│ │ ├── installation.md\\n│ │ ├── quick-start.md\\n│ │ ├── configuration.md\\n│ │ └── guides/\\n│ ├── api/ # API reference\\n│ │ ├── rest-api.md\\n│ │ ├── nushell-api.md\\n│ │ └── kcl-schemas.md\\n│ ├── architecture/ # Architecture documentation\\n│ │ ├── overview.md\\n│ │ ├── decisions/ # ADRs\\n│ │ └── repo-dist-analysis.md # This document\\n│ └── development/ # Development guides\\n│ ├── contributing.md\\n│ ├── building.md\\n│ ├── testing.md\\n│ └── releasing.md\\n│\\n├── examples/ # EXAMPLE CONFIGURATIONS\\n│ ├── minimal/ # Minimal setup\\n│ ├── kubernetes-cluster/ # Full K8s cluster\\n│ ├── multi-cloud/ # Multi-provider setup\\n│ └── README.md\\n│\\n├── tests/ # INTEGRATION TESTS\\n│ ├── e2e/ # End-to-end tests\\n│ ├── integration/ # Integration tests\\n│ ├── fixtures/ # Test fixtures\\n│ └── README.md\\n│\\n├── tools/ # DEVELOPMENT TOOLS\\n│ ├── build/ # Build scripts\\n│ ├── dev-env/ # Development environment setup\\n│ └── scripts/ # Utility scripts\\n│\\n├── .github/ # GitHub configuration\\n│ ├── workflows/ # CI/CD workflows\\n│ │ ├── build.yml\\n│ │ ├── test.yml\\n│ │ └── release.yml\\n│ └── ISSUE_TEMPLATE/\\n│\\n├── .coder/ # Coder configuration (tracked)\\n│\\n├── .gitignore # Git ignore rules\\n├── .gitattributes # Git attributes\\n├── Cargo.toml # Rust workspace root\\n├── Justfile # Task runner (unified)\\n├── LICENSE # License file\\n├── README.md # Project README\\n├── CHANGELOG.md # Changelog\\n└── CLAUDE.md # AI assistant instructions","breadcrumbs":"Repo Dist Analysis » 1. Monorepo Structure","id":"1183","title":"1. Monorepo Structure"},"1184":{"body":"Clear Separation : Source code (provisioning/), runtime data (workspace/), build artifacts (distribution/) Single Source of Truth : One location for each type of content Gitignore Strategy : Runtime and build artifacts ignored, templates tracked Standard Paths : Follow Unix conventions for installation","breadcrumbs":"Repo Dist Analysis » Key Principles","id":"1184","title":"Key Principles"},"1185":{"body":"","breadcrumbs":"Repo Dist Analysis » Distribution Strategy","id":"1185","title":"Distribution Strategy"},"1186":{"body":"1. provisioning-core (Required) Contents: Nushell CLI and libraries Core providers (local, upcloud, aws) Essential taskservs (kubernetes, containerd, cilium) KCL schemas Configuration system Templates Size: ~50 MB (compressed) Installation: /usr/local/\\n├── bin/\\n│ └── provisioning\\n├── lib/\\n│ └── provisioning/\\n│ ├── core/\\n│ ├── extensions/\\n│ └── kcl/\\n└── share/ └── provisioning/ ├── templates/ ├── config/ └── docs/ 2. provisioning-platform (Optional) Contents: Rust orchestrator binary Control center web UI MCP server API gateway Size: ~30 MB (compressed) Installation: /usr/local/\\n├── bin/\\n│ ├── provisioning-orchestrator\\n│ └── provisioning-control-center\\n└── share/ └── provisioning/ └── platform/ 3. provisioning-extensions (Optional) Contents: Additional taskservs (radicle, gitea, postgres, etc.) Cluster templates Workflow templates Size: ~20 MB (compressed) Installation: /usr/local/lib/provisioning/extensions/\\n├── taskservs/\\n├── clusters/\\n└── workflows/ 4. provisioning-plugins (Optional) Contents: Pre-built Nushell plugins nu_plugin_kcl nu_plugin_tera Other custom plugins Size: ~15 MB (compressed) Installation: ~/.config/nushell/plugins/","breadcrumbs":"Repo Dist Analysis » Package Types","id":"1186","title":"Package Types"},"1187":{"body":"System Installation (Root) /usr/local/\\n├── bin/\\n│ ├── provisioning # Main CLI\\n│ ├── provisioning-orchestrator # Orchestrator binary\\n│ └── provisioning-control-center # Control center binary\\n├── lib/\\n│ └── provisioning/\\n│ ├── core/ # Core Nushell libraries\\n│ │ ├── nulib/\\n│ │ └── plugins/\\n│ ├── extensions/ # Extensions\\n│ │ ├── providers/\\n│ │ ├── taskservs/\\n│ │ └── clusters/\\n│ └── kcl/ # KCL schemas\\n└── share/ └── provisioning/ ├── templates/ # System templates ├── config/ # Default configs │ └── config.defaults.toml └── docs/ # Documentation User Configuration ~/.provisioning/\\n├── config/\\n│ └── config.user.toml # User overrides\\n├── extensions/ # User extensions\\n│ ├── providers/\\n│ ├── taskservs/\\n│ └── clusters/\\n├── cache/ # Cache directory\\n└── plugins/ # User plugins Project Workspace ./workspace/\\n├── infra/ # Infrastructure definitions\\n│ ├── my-cluster/\\n│ │ ├── config.toml\\n│ │ ├── servers.yaml\\n│ │ └── taskservs.yaml\\n│ └── production/\\n├── config/ # Project configuration\\n│ └── config.toml\\n├── runtime/ # Runtime data\\n│ ├── logs/\\n│ ├── state/\\n│ └── cache/\\n└── extensions/ # Project-specific extensions","breadcrumbs":"Repo Dist Analysis » Installation Paths","id":"1187","title":"Installation Paths"},"1188":{"body":"Priority (highest to lowest):\\n1. CLI flags --debug, --infra=my-cluster\\n2. Runtime overrides PROVISIONING_DEBUG=true\\n3. Project config ./workspace/config/config.toml\\n4. User config ~/.provisioning/config/config.user.toml\\n5. System config /usr/local/share/provisioning/config/config.defaults.toml","breadcrumbs":"Repo Dist Analysis » Configuration Hierarchy","id":"1188","title":"Configuration Hierarchy"},"1189":{"body":"","breadcrumbs":"Repo Dist Analysis » Build System","id":"1189","title":"Build System"},"119":{"body":"Essential concepts and terminology How to configure your first environment Creating and managing infrastructure Basic server and service management Common workflows and best practices","breadcrumbs":"Getting Started » What You\'ll Learn","id":"119","title":"What You\'ll Learn"},"1190":{"body":"provisioning/tools/build/: build/\\n├── build-system.nu # Main build orchestrator\\n├── package-core.nu # Core packaging\\n├── package-platform.nu # Platform packaging\\n├── package-extensions.nu # Extensions packaging\\n├── package-plugins.nu # Plugins packaging\\n├── create-installers.nu # Installer generation\\n├── validate-package.nu # Package validation\\n└── publish-registry.nu # Registry publishing","breadcrumbs":"Repo Dist Analysis » Build Tools Structure","id":"1190","title":"Build Tools Structure"},"1191":{"body":"provisioning/tools/build/build-system.nu: #!/usr/bin/env nu\\n# Build system for provisioning project use ../core/nulib/lib_provisioning/config/accessor.nu * # Build all packages\\nexport def \\"main build-all\\" [ --version: string = \\"dev\\" # Version to build --output: string = \\"distribution/packages\\" # Output directory\\n] { print $\\"Building all packages version: ($version)\\" let results = { core: (build-core $version $output) platform: (build-platform $version $output) extensions: (build-extensions $version $output) plugins: (build-plugins $version $output) } # Generate checksums create-checksums $output print \\"✅ All packages built successfully\\" $results\\n} # Build core package\\nexport def \\"build-core\\" [ version: string output: string\\n] -> record { print \\"📦 Building provisioning-core...\\" nu package-core.nu build --version $version --output $output\\n} # Build platform package (Rust binaries)\\nexport def \\"build-platform\\" [ version: string output: string\\n] -> record { print \\"📦 Building provisioning-platform...\\" nu package-platform.nu build --version $version --output $output\\n} # Build extensions package\\nexport def \\"build-extensions\\" [ version: string output: string\\n] -> record { print \\"📦 Building provisioning-extensions...\\" nu package-extensions.nu build --version $version --output $output\\n} # Build plugins package\\nexport def \\"build-plugins\\" [ version: string output: string\\n] -> record { print \\"📦 Building provisioning-plugins...\\" nu package-plugins.nu build --version $version --output $output\\n} # Create release artifacts\\nexport def \\"main release\\" [ version: string # Release version --upload # Upload to release server\\n] { print $\\"🚀 Creating release ($version)\\" # Build all packages let packages = (build-all --version $version) # Create installers create-installers $version # Generate release notes generate-release-notes $version # Upload if requested if $upload { upload-release $version } print $\\"✅ Release ($version) ready\\"\\n} # Create installers\\ndef create-installers [version: string] { print \\"📝 Creating installers...\\" nu create-installers.nu --version $version\\n} # Generate release notes\\ndef generate-release-notes [version: string] { print \\"📝 Generating release notes...\\" let changelog = (open CHANGELOG.md) let notes = ($changelog | parse-version-section $version) $notes | save $\\"distribution/packages/RELEASE_NOTES_($version).md\\"\\n} # Upload release\\ndef upload-release [version: string] { print \\"⬆️ Uploading release...\\" # Implementation depends on your release infrastructure # Could use: GitHub releases, S3, custom server, etc.\\n} # Create checksums for all packages\\ndef create-checksums [output: string] { print \\"🔐 Creating checksums...\\" ls ($output | path join \\"*.tar.gz\\") | each { |file| let hash = (sha256sum $file.name | split row \' \' | get 0) $\\"($hash) (($file.name | path basename))\\" } | str join \\"\\\\n\\" | save ($output | path join \\"checksums.txt\\")\\n} # Clean build artifacts\\nexport def \\"main clean\\" [ --all # Clean all build artifacts\\n] { print \\"🧹 Cleaning build artifacts...\\" if ($all) { rm -rf distribution/packages rm -rf target/ rm -rf provisioning/platform/target/ } else { rm -rf distribution/packages } print \\"✅ Clean complete\\"\\n} # Validate built packages\\nexport def \\"main validate\\" [ package_path: string # Package to validate\\n] { print $\\"🔍 Validating package: ($package_path)\\" nu validate-package.nu $package_path\\n} # Show build status\\nexport def \\"main status\\" [] { print \\"📊 Build Status\\" print \\"─\\" * 60 let core_exists = (\\"distribution/packages\\" | path join \\"provisioning-core-*.tar.gz\\" | glob | is-not-empty) let platform_exists = (\\"distribution/packages\\" | path join \\"provisioning-platform-*.tar.gz\\" | glob | is-not-empty) print $\\"Core package: (if $core_exists { \'✅ Built\' } else { \'❌ Not built\' })\\" print $\\"Platform package: (if $platform_exists { \'✅ Built\' } else { \'❌ Not built\' })\\" if (\\"distribution/packages\\" | path exists) { let packages = (ls distribution/packages | where name =~ \\".tar.gz\\") print $\\"\\\\nTotal packages: (($packages | length))\\" $packages | select name size }\\n}","breadcrumbs":"Repo Dist Analysis » Build System Implementation","id":"1191","title":"Build System Implementation"},"1192":{"body":"Justfile: # Provisioning Build System\\n# Use \'just --list\' to see all available commands # Default recipe\\ndefault: @just --list # Development tasks\\nalias d := dev-check\\nalias t := test\\nalias b := build # Build all packages\\nbuild VERSION=\\"dev\\": nu provisioning/tools/build/build-system.nu build-all --version {{VERSION}} # Build core package only\\nbuild-core VERSION=\\"dev\\": nu provisioning/tools/build/build-system.nu build-core {{VERSION}} # Build platform binaries\\nbuild-platform VERSION=\\"dev\\": cargo build --release --workspace --manifest-path provisioning/platform/Cargo.toml nu provisioning/tools/build/build-system.nu build-platform {{VERSION}} # Run development checks\\ndev-check: @echo \\"🔍 Running development checks...\\" cargo check --workspace --manifest-path provisioning/platform/Cargo.toml cargo clippy --workspace --manifest-path provisioning/platform/Cargo.toml nu provisioning/tools/build/validate-nushell.nu # Run tests\\ntest: @echo \\"🧪 Running tests...\\" cargo test --workspace --manifest-path provisioning/platform/Cargo.toml nu tests/run-all-tests.nu # Run integration tests\\ntest-e2e: @echo \\"🔬 Running E2E tests...\\" nu tests/e2e/run-e2e.nu # Format code\\nfmt: cargo fmt --all --manifest-path provisioning/platform/Cargo.toml nu provisioning/tools/build/format-nushell.nu # Clean build artifacts\\nclean: nu provisioning/tools/build/build-system.nu clean # Clean all (including Rust target/)\\nclean-all: nu provisioning/tools/build/build-system.nu clean --all cargo clean --manifest-path provisioning/platform/Cargo.toml # Create release\\nrelease VERSION: @echo \\"🚀 Creating release {{VERSION}}...\\" nu provisioning/tools/build/build-system.nu release {{VERSION}} # Install from source\\ninstall: @echo \\"📦 Installing from source...\\" just build sudo nu distribution/installers/install.nu --from-source # Install development version (symlink)\\ninstall-dev: @echo \\"🔗 Installing development version...\\" sudo ln -sf $(pwd)/provisioning/core/cli/provisioning /usr/local/bin/provisioning @echo \\"✅ Development installation complete\\" # Uninstall\\nuninstall: @echo \\"🗑️ Uninstalling...\\" sudo rm -f /usr/local/bin/provisioning sudo rm -rf /usr/local/lib/provisioning sudo rm -rf /usr/local/share/provisioning # Show build status\\nstatus: nu provisioning/tools/build/build-system.nu status # Validate package\\nvalidate PACKAGE: nu provisioning/tools/build/build-system.nu validate {{PACKAGE}} # Start development environment\\ndev-start: @echo \\"🚀 Starting development environment...\\" cd provisioning/platform/orchestrator && cargo run # Watch and rebuild on changes\\nwatch: @echo \\"👀 Watching for changes...\\" cargo watch -x \'check --workspace --manifest-path provisioning/platform/Cargo.toml\' # Update dependencies\\nupdate-deps: cargo update --manifest-path provisioning/platform/Cargo.toml nu provisioning/tools/build/update-nushell-deps.nu # Generate documentation\\ndocs: @echo \\"📚 Generating documentation...\\" cargo doc --workspace --no-deps --manifest-path provisioning/platform/Cargo.toml nu provisioning/tools/build/generate-docs.nu # Benchmark\\nbench: cargo bench --workspace --manifest-path provisioning/platform/Cargo.toml # Check licenses\\ncheck-licenses: cargo deny check licenses --manifest-path provisioning/platform/Cargo.toml # Security audit\\naudit: cargo audit --file provisioning/platform/Cargo.lock","breadcrumbs":"Repo Dist Analysis » Justfile Integration","id":"1192","title":"Justfile Integration"},"1193":{"body":"","breadcrumbs":"Repo Dist Analysis » Installation System","id":"1193","title":"Installation System"},"1194":{"body":"distribution/installers/install.nu: #!/usr/bin/env nu\\n# Provisioning installation script const DEFAULT_PREFIX = \\"/usr/local\\"\\nconst REPO_URL = \\"https://releases.provisioning.io\\" # Main installation command\\ndef main [ --prefix: string = $DEFAULT_PREFIX # Installation prefix --version: string = \\"latest\\" # Version to install --from-source # Install from source (development) --packages: list = [\\"core\\"] # Packages to install\\n] { print \\"📦 Provisioning Installation\\" print \\"─\\" * 60 # Check prerequisites check-prerequisites # Install packages if $from_source { install-from-source $prefix } else { install-from-release $prefix $version $packages } # Post-installation post-install $prefix print \\"\\" print \\"✅ Installation complete!\\" print $\\"Run \'provisioning --help\' to get started\\"\\n} # Check prerequisites\\ndef check-prerequisites [] { print \\"🔍 Checking prerequisites...\\" # Check for Nushell if (which nu | is-empty) { error make { msg: \\"Nushell not found. Please install Nushell first: https://nushell.sh\\" } } let nu_version = (nu --version | parse \\"{name} {version}\\" | get 0.version) print $\\" ✓ Nushell ($nu_version)\\" # Check for required tools if (which tar | is-empty) { error make { msg: \\"tar not found\\" } } if (which curl | is-empty) and (which wget | is-empty) { error make { msg: \\"curl or wget required\\" } } print \\" ✓ All prerequisites met\\"\\n} # Install from source\\ndef install-from-source [prefix: string] { print \\"📦 Installing from source...\\" # Check if we\'re in the source directory if not (\\"provisioning\\" | path exists) { error make { msg: \\"Must run from project root\\" } } # Create installation directories create-install-dirs $prefix # Copy files print \\" Copying core files...\\" cp -r provisioning/core/nulib $\\"($prefix)/lib/provisioning/core/\\" cp -r provisioning/extensions $\\"($prefix)/lib/provisioning/\\" cp -r provisioning/kcl $\\"($prefix)/lib/provisioning/\\" cp -r provisioning/templates $\\"($prefix)/share/provisioning/\\" cp -r provisioning/config $\\"($prefix)/share/provisioning/\\" # Create CLI wrapper create-cli-wrapper $prefix print \\" ✓ Source installation complete\\"\\n} # Install from release\\ndef install-from-release [ prefix: string version: string packages: list\\n] { print $\\"📦 Installing version ($version)...\\" # Download packages for package in $packages { download-package $package $version extract-package $package $version $prefix }\\n} # Download package\\ndef download-package [package: string, version: string] { let filename = $\\"provisioning-($package)-($version).tar.gz\\" let url = $\\"($REPO_URL)/($version)/($filename)\\" print $\\" Downloading ($package)...\\" if (which curl | is-not-empty) { curl -fsSL -o $\\"/tmp/($filename)\\" $url } else { wget -q -O $\\"/tmp/($filename)\\" $url }\\n} # Extract package\\ndef extract-package [package: string, version: string, prefix: string] { let filename = $\\"provisioning-($package)-($version).tar.gz\\" print $\\" Installing ($package)...\\" tar xzf $\\"/tmp/($filename)\\" -C $prefix rm $\\"/tmp/($filename)\\"\\n} # Create installation directories\\ndef create-install-dirs [prefix: string] { mkdir ($prefix | path join \\"bin\\") mkdir ($prefix | path join \\"lib\\" \\"provisioning\\" \\"core\\") mkdir ($prefix | path join \\"lib\\" \\"provisioning\\" \\"extensions\\") mkdir ($prefix | path join \\"share\\" \\"provisioning\\" \\"templates\\") mkdir ($prefix | path join \\"share\\" \\"provisioning\\" \\"config\\") mkdir ($prefix | path join \\"share\\" \\"provisioning\\" \\"docs\\")\\n} # Create CLI wrapper\\ndef create-cli-wrapper [prefix: string] { let wrapper = $\\"#!/usr/bin/env nu\\n# Provisioning CLI wrapper # Load provisioning library\\nconst PROVISIONING_LIB = \\\\\\"($prefix)/lib/provisioning\\\\\\"\\nconst PROVISIONING_SHARE = \\\\\\"($prefix)/share/provisioning\\\\\\" $env.PROVISIONING_ROOT = $PROVISIONING_LIB\\n$env.PROVISIONING_SHARE = $PROVISIONING_SHARE # Add to Nushell path\\n$env.NU_LIB_DIRS = ($env.NU_LIB_DIRS | append $\\\\\\"($PROVISIONING_LIB)/core/nulib\\\\\\") # Load main provisioning module\\nuse ($PROVISIONING_LIB)/core/nulib/main_provisioning/dispatcher.nu * # Main entry point\\ndef main [...args] { dispatch-command $args\\n} main ...$args\\n\\" $wrapper | save ($prefix | path join \\"bin\\" \\"provisioning\\") chmod +x ($prefix | path join \\"bin\\" \\"provisioning\\")\\n} # Post-installation tasks\\ndef post-install [prefix: string] { print \\"🔧 Post-installation setup...\\" # Create user config directory let user_config = ($env.HOME | path join \\".provisioning\\") if not ($user_config | path exists) { mkdir ($user_config | path join \\"config\\") mkdir ($user_config | path join \\"extensions\\") mkdir ($user_config | path join \\"cache\\") # Copy example config let example = ($prefix | path join \\"share\\" \\"provisioning\\" \\"config\\" \\"config-examples\\" \\"config.user.toml\\") if ($example | path exists) { cp $example ($user_config | path join \\"config\\" \\"config.user.toml\\") } print $\\" ✓ Created user config directory: ($user_config)\\" } # Check if prefix is in PATH if not ($env.PATH | any { |p| $p == ($prefix | path join \\"bin\\") }) { print \\"\\" print \\"⚠️ Note: ($prefix)/bin is not in your PATH\\" print \\" Add this to your shell configuration:\\" print $\\" export PATH=\\\\\\"($prefix)/bin:$PATH\\\\\\"\\" }\\n} # Uninstall provisioning\\nexport def \\"main uninstall\\" [ --prefix: string = $DEFAULT_PREFIX # Installation prefix --keep-config # Keep user configuration\\n] { print \\"🗑️ Uninstalling provisioning...\\" # Remove installed files rm -rf ($prefix | path join \\"bin\\" \\"provisioning\\") rm -rf ($prefix | path join \\"lib\\" \\"provisioning\\") rm -rf ($prefix | path join \\"share\\" \\"provisioning\\") # Remove user config if requested if not $keep_config { let user_config = ($env.HOME | path join \\".provisioning\\") if ($user_config | path exists) { rm -rf $user_config print \\" ✓ Removed user configuration\\" } } print \\"✅ Uninstallation complete\\"\\n} # Upgrade provisioning\\nexport def \\"main upgrade\\" [ --version: string = \\"latest\\" # Version to upgrade to --prefix: string = $DEFAULT_PREFIX # Installation prefix\\n] { print $\\"⬆️ Upgrading to version ($version)...\\" # Check current version let current = (^provisioning version | parse \\"{version}\\" | get 0.version) print $\\" Current version: ($current)\\" if $current == $version { print \\" Already at latest version\\" return } # Backup current installation print \\" Backing up current installation...\\" let backup = ($prefix | path join \\"lib\\" \\"provisioning.backup\\") mv ($prefix | path join \\"lib\\" \\"provisioning\\") $backup # Install new version try { install-from-release $prefix $version [\\"core\\"] print $\\" ✅ Upgraded to version ($version)\\" rm -rf $backup } catch { print \\" ❌ Upgrade failed, restoring backup...\\" mv $backup ($prefix | path join \\"lib\\" \\"provisioning\\") error make { msg: \\"Upgrade failed\\" } }\\n}","breadcrumbs":"Repo Dist Analysis » Installer Script","id":"1194","title":"Installer Script"},"1195":{"body":"distribution/installers/install.sh: #!/usr/bin/env bash\\n# Provisioning installation script (Bash version)\\n# This script installs Nushell first, then runs the Nushell installer set -euo pipefail DEFAULT_PREFIX=\\"/usr/local\\"\\nREPO_URL=\\"https://releases.provisioning.io\\" # Colors\\nRED=\'\\\\033[0;31m\'\\nGREEN=\'\\\\033[0;32m\'\\nYELLOW=\'\\\\033[1;33m\'\\nNC=\'\\\\033[0m\' # No Color info() { echo -e \\"${GREEN}✓${NC} $*\\"\\n} warn() { echo -e \\"${YELLOW}⚠${NC} $*\\"\\n} error() { echo -e \\"${RED}✗${NC} $*\\" >&2 exit 1\\n} # Check if Nushell is installed\\ncheck_nushell() { if command -v nu >/dev/null 2>&1; then info \\"Nushell is already installed\\" return 0 else warn \\"Nushell not found\\" return 1 fi\\n} # Install Nushell\\ninstall_nushell() { echo \\"📦 Installing Nushell...\\" # Detect OS and architecture OS=\\"$(uname -s)\\" ARCH=\\"$(uname -m)\\" case \\"$OS\\" in Linux*) if command -v apt-get >/dev/null 2>&1; then sudo apt-get update && sudo apt-get install -y nushell elif command -v dnf >/dev/null 2>&1; then sudo dnf install -y nushell elif command -v brew >/dev/null 2>&1; then brew install nushell else error \\"Cannot automatically install Nushell. Please install manually: https://nushell.sh\\" fi ;; Darwin*) if command -v brew >/dev/null 2>&1; then brew install nushell else error \\"Homebrew not found. Install from: https://brew.sh\\" fi ;; *) error \\"Unsupported operating system: $OS\\" ;; esac info \\"Nushell installed successfully\\"\\n} # Main installation\\nmain() { echo \\"📦 Provisioning Installation\\" echo \\"────────────────────────────────────────────────────────────\\" # Check for Nushell if ! check_nushell; then read -p \\"Install Nushell? (y/N) \\" -n 1 -r echo if [[ $REPLY =~ ^[Yy]$ ]]; then install_nushell else error \\"Nushell is required. Install from: https://nushell.sh\\" fi fi # Download Nushell installer echo \\"📥 Downloading installer...\\" INSTALLER_URL=\\"$REPO_URL/latest/install.nu\\" curl -fsSL \\"$INSTALLER_URL\\" -o /tmp/install.nu # Run Nushell installer echo \\"🚀 Running installer...\\" nu /tmp/install.nu \\"$@\\" # Cleanup rm -f /tmp/install.nu info \\"Installation complete!\\"\\n} # Run main\\nmain \\"$@\\"","breadcrumbs":"Repo Dist Analysis » Bash Installer (For Systems Without Nushell)","id":"1195","title":"Bash Installer (For Systems Without Nushell)"},"1196":{"body":"","breadcrumbs":"Repo Dist Analysis » Implementation Plan","id":"1196","title":"Implementation Plan"},"1197":{"body":"Day 1: Cleanup and Preparation Tasks: Create backup of current state Analyze and document all workspace directories Identify active workspace vs backups Map all file dependencies Commands: # Backup current state\\ncp -r /Users/Akasha/project-provisioning /Users/Akasha/project-provisioning.backup # Analyze workspaces\\nfd workspace -t d > workspace-dirs.txt Deliverables: Complete backup Workspace analysis document Dependency map Day 2: Directory Restructuring Tasks: Consolidate workspace directories Move build artifacts to distribution/ Remove obsolete directories (NO/, wrks/, presentation artifacts) Create proper .gitignore Commands: # Create distribution directory\\nmkdir -p distribution/{packages,installers,registry} # Move build artifacts\\nmv target distribution/\\nmv provisioning/tools/dist distribution/packages/ # Remove obsolete\\nrm -rf NO/ wrks/ presentations/ Deliverables: Clean directory structure Updated .gitignore Migration log Day 3: Update Path References Tasks: Update all hardcoded paths in Nushell scripts Update CLAUDE.md with new paths Update documentation references Test all path changes Files to Update: provisioning/core/nulib/**/*.nu (~65 files) CLAUDE.md docs/**/*.md Deliverables: Updated scripts Updated documentation Test results Day 4: Validation and Documentation Tasks: Run full test suite Verify all commands work Update README.md Create migration guide Deliverables: Passing tests Updated README Migration guide for users","breadcrumbs":"Repo Dist Analysis » Phase 1: Repository Restructuring (3-4 days)","id":"1197","title":"Phase 1: Repository Restructuring (3-4 days)"},"1198":{"body":"Day 5: Build System Core Tasks: Create provisioning/tools/build/ structure Implement build-system.nu Implement package-core.nu Create Justfile Files to Create: provisioning/tools/build/build-system.nu provisioning/tools/build/package-core.nu provisioning/tools/build/validate-package.nu Justfile Deliverables: Working build system Core packaging capability Justfile with basic recipes Day 6: Platform and Extension Packaging Tasks: Implement package-platform.nu Implement package-extensions.nu Implement package-plugins.nu Add checksum generation Deliverables: Platform packaging Extension packaging Plugin packaging Checksum generation Day 7: Package Validation Tasks: Create package validation system Implement integrity checks Create test suite for packages Document package format Deliverables: Package validation Test suite Package format documentation Day 8: Build System Testing Tasks: Test full build pipeline Test all package types Optimize build performance Document build system Deliverables: Tested build system Performance optimizations Build system documentation","breadcrumbs":"Repo Dist Analysis » Phase 2: Build System Implementation (3-4 days)","id":"1198","title":"Phase 2: Build System Implementation (3-4 days)"},"1199":{"body":"Day 9: Nushell Installer Tasks: Create install.nu Implement installation logic Implement upgrade logic Implement uninstallation Files to Create: distribution/installers/install.nu Deliverables: Working Nushell installer Upgrade mechanism Uninstall mechanism Day 10: Bash Installer and CLI Tasks: Create install.sh Replace bash CLI wrapper with pure Nushell Update PATH handling Test installation on clean system Files to Create: distribution/installers/install.sh Updated provisioning/core/cli/provisioning Deliverables: Bash installer Pure Nushell CLI Installation tests Day 11: Installation Testing Tasks: Test installation on multiple OSes Test upgrade scenarios Test uninstallation Create installation documentation Deliverables: Multi-OS installation tests Installation guide Troubleshooting guide","breadcrumbs":"Repo Dist Analysis » Phase 3: Installation System (2-3 days)","id":"1199","title":"Phase 3: Installation System (2-3 days)"},"12":{"body":"provisioning/docs/src/\\n├── README.md (this file) # Documentation hub\\n├── getting-started/ # Getting started guides\\n│ ├── installation-guide.md\\n│ ├── getting-started.md\\n│ └── quickstart-cheatsheet.md\\n├── architecture/ # System architecture\\n│ ├── adr/ # Architecture Decision Records\\n│ ├── design-principles.md\\n│ ├── integration-patterns.md\\n│ ├── system-overview.md\\n│ └── ... (and 10+ more architecture docs)\\n├── infrastructure/ # Infrastructure guides\\n│ ├── cli-reference.md\\n│ ├── workspace-setup.md\\n│ ├── workspace-switching-guide.md\\n│ └── infrastructure-management.md\\n├── api-reference/ # API documentation\\n│ ├── rest-api.md\\n│ ├── websocket.md\\n│ ├── integration-examples.md\\n│ └── sdks.md\\n├── development/ # Developer guides\\n│ ├── README.md\\n│ ├── implementation-guide.md\\n│ ├── quick-provider-guide.md\\n│ ├── taskserv-developer-guide.md\\n│ └── ... (15+ more developer docs)\\n├── guides/ # How-to guides\\n│ ├── from-scratch.md\\n│ ├── update-infrastructure.md\\n│ └── customize-infrastructure.md\\n├── operations/ # Operations guides\\n│ ├── service-management-guide.md\\n│ ├── coredns-guide.md\\n│ └── ... (more operations docs)\\n├── security/ # Security docs\\n├── integration/ # Integration guides\\n├── testing/ # Testing docs\\n├── configuration/ # Configuration docs\\n├── troubleshooting/ # Troubleshooting guides\\n└── quick-reference/ # Quick references","breadcrumbs":"Home » Documentation Structure","id":"12","title":"Documentation Structure"},"120":{"body":"Before starting this guide, ensure you have: ✅ Completed the Installation Guide ✅ Verified your installation with provisioning --version ✅ Basic familiarity with command-line interfaces","breadcrumbs":"Getting Started » Prerequisites","id":"120","title":"Prerequisites"},"1200":{"body":"Day 12: Registry System Tasks: Design registry format Implement registry indexing Create package metadata Implement search functionality Files to Create: provisioning/tools/build/publish-registry.nu distribution/registry/index.json Deliverables: Registry system Package metadata Search functionality Day 13: Registry Commands Tasks: Implement provisioning registry list Implement provisioning registry search Implement provisioning registry install Implement provisioning registry update Deliverables: Registry commands Package installation from registry Update mechanism Day 14: Registry Hosting Tasks: Set up registry hosting (S3, GitHub releases, etc.) Implement upload mechanism Create CI/CD for automatic publishing Document registry system Deliverables: Hosted registry CI/CD pipeline Registry documentation","breadcrumbs":"Repo Dist Analysis » Phase 4: Package Registry (Optional, 2-3 days)","id":"1200","title":"Phase 4: Package Registry (Optional, 2-3 days)"},"1201":{"body":"Day 15: Documentation Tasks: Update all documentation for new structure Create user guides Create development guides Create API documentation Deliverables: Updated documentation User guides Developer guides API docs Day 16: Release Preparation Tasks: Create CHANGELOG.md Build release packages Test installation from packages Create release announcement Deliverables: CHANGELOG Release packages Installation verification Release announcement","breadcrumbs":"Repo Dist Analysis » Phase 5: Documentation and Release (2 days)","id":"1201","title":"Phase 5: Documentation and Release (2 days)"},"1202":{"body":"","breadcrumbs":"Repo Dist Analysis » Migration Strategy","id":"1202","title":"Migration Strategy"},"1203":{"body":"Option 1: Clean Migration # Backup current workspace\\ncp -r workspace workspace.backup # Upgrade to new version\\nprovisioning upgrade --version 3.2.0 # Migrate workspace\\nprovisioning workspace migrate --from workspace.backup --to workspace/ Option 2: In-Place Migration # Run migration script\\nprovisioning migrate --check # Dry run\\nprovisioning migrate # Execute migration","breadcrumbs":"Repo Dist Analysis » For Existing Users","id":"1203","title":"For Existing Users"},"1204":{"body":"# Pull latest changes\\ngit pull origin main # Rebuild\\njust clean-all\\njust build # Reinstall development version\\njust install-dev # Verify\\nprovisioning --version","breadcrumbs":"Repo Dist Analysis » For Developers","id":"1204","title":"For Developers"},"1205":{"body":"","breadcrumbs":"Repo Dist Analysis » Success Criteria","id":"1205","title":"Success Criteria"},"1206":{"body":"✅ Single workspace/ directory for all runtime data ✅ Clear separation: source (provisioning/), runtime (workspace/), artifacts (distribution/) ✅ All build artifacts in distribution/ and gitignored ✅ Clean root directory (no wrks/, NO/, etc.) ✅ Unified documentation in docs/","breadcrumbs":"Repo Dist Analysis » Repository Structure","id":"1206","title":"Repository Structure"},"1207":{"body":"✅ Single command builds all packages: just build ✅ Packages can be built independently ✅ Checksums generated automatically ✅ Validation before packaging ✅ Build time < 5 minutes for full build","breadcrumbs":"Repo Dist Analysis » Build System","id":"1207","title":"Build System"},"1208":{"body":"✅ One-line installation: curl -fsSL https://get.provisioning.io | sh ✅ Works on Linux and macOS ✅ Standard installation paths (/usr/local/) ✅ User configuration in ~/.provisioning/ ✅ Clean uninstallation","breadcrumbs":"Repo Dist Analysis » Installation","id":"1208","title":"Installation"},"1209":{"body":"✅ Packages available at stable URL ✅ Automated releases via CI/CD ✅ Package registry for extensions ✅ Upgrade mechanism works reliably","breadcrumbs":"Repo Dist Analysis » Distribution","id":"1209","title":"Distribution"},"121":{"body":"","breadcrumbs":"Getting Started » Essential Concepts","id":"121","title":"Essential Concepts"},"1210":{"body":"✅ Complete installation guide ✅ Quick start guide ✅ Developer contributing guide ✅ API documentation ✅ Architecture documentation","breadcrumbs":"Repo Dist Analysis » Documentation","id":"1210","title":"Documentation"},"1211":{"body":"","breadcrumbs":"Repo Dist Analysis » Risks and Mitigations","id":"1211","title":"Risks and Mitigations"},"1212":{"body":"Impact: High Probability: High Mitigation: Provide migration script Support both old and new paths during transition (v3.2.x) Clear migration guide Automated backup before migration","breadcrumbs":"Repo Dist Analysis » Risk 1: Breaking Changes for Existing Users","id":"1212","title":"Risk 1: Breaking Changes for Existing Users"},"1213":{"body":"Impact: Medium Probability: Medium Mitigation: Start with simple packaging Iterate and improve Document thoroughly Provide examples","breadcrumbs":"Repo Dist Analysis » Risk 2: Build System Complexity","id":"1213","title":"Risk 2: Build System Complexity"},"1214":{"body":"Impact: Medium Probability: Low Mitigation: Check for existing installations Support custom prefix Clear uninstallation Non-conflicting binary names","breadcrumbs":"Repo Dist Analysis » Risk 3: Installation Path Conflicts","id":"1214","title":"Risk 3: Installation Path Conflicts"},"1215":{"body":"Impact: High Probability: Medium Mitigation: Test on multiple OSes (Linux, macOS) Use portable commands Provide fallbacks Clear error messages","breadcrumbs":"Repo Dist Analysis » Risk 4: Cross-Platform Issues","id":"1215","title":"Risk 4: Cross-Platform Issues"},"1216":{"body":"Impact: Medium Probability: Medium Mitigation: Document all dependencies Check prerequisites during installation Provide installation instructions for dependencies Consider bundling critical dependencies","breadcrumbs":"Repo Dist Analysis » Risk 5: Dependency Management","id":"1216","title":"Risk 5: Dependency Management"},"1217":{"body":"Phase Duration Key Deliverables Phase 1: Restructuring 3-4 days Clean directory structure, updated paths Phase 2: Build System 3-4 days Working build system, all package types Phase 3: Installation 2-3 days Installers, pure Nushell CLI Phase 4: Registry (Optional) 2-3 days Package registry, extension management Phase 5: Documentation 2 days Complete documentation, release Total 12-16 days Production-ready distribution system","breadcrumbs":"Repo Dist Analysis » Timeline Summary","id":"1217","title":"Timeline Summary"},"1218":{"body":"Review and Approval (Day 0) Review this analysis Approve implementation plan Assign resources Kickoff (Day 1) Create implementation branch Set up project tracking Begin Phase 1 Weekly Reviews End of Phase 1: Structure review End of Phase 2: Build system review End of Phase 3: Installation review Final review before release","breadcrumbs":"Repo Dist Analysis » Next Steps","id":"1218","title":"Next Steps"},"1219":{"body":"This comprehensive plan transforms the provisioning system into a professional-grade infrastructure automation platform with: Clean Architecture : Clear separation of concerns Professional Distribution : Standard installation paths and packaging Easy Installation : One-command installation for users Developer Friendly : Simple build system and clear development workflow Extensible : Package registry for community extensions Well Documented : Complete guides for users and developers The implementation will take approximately 2-3 weeks and will result in a production-ready system suitable for both individual developers and enterprise deployments.","breadcrumbs":"Repo Dist Analysis » Conclusion","id":"1219","title":"Conclusion"},"122":{"body":"Provisioning uses declarative configuration to manage infrastructure. Instead of manually creating resources, you define what you want in configuration files, and the system makes it happen. You describe → System creates → Infrastructure exists","breadcrumbs":"Getting Started » Infrastructure as Code (IaC)","id":"122","title":"Infrastructure as Code (IaC)"},"1220":{"body":"Current codebase structure Unix FHS (Filesystem Hierarchy Standard) Rust cargo packaging conventions npm/yarn package management patterns Homebrew formula best practices KCL package management design","breadcrumbs":"Repo Dist Analysis » References","id":"1220","title":"References"},"1221":{"body":"Status : Implementation Guide Last Updated : 2025-12-15 Project : TypeDialog at /Users/Akasha/Development/typedialog Purpose : Type-safe UI generation from Nickel schemas","breadcrumbs":"TypeDialog Nickel Integration » TypeDialog + Nickel Integration Guide","id":"1221","title":"TypeDialog + Nickel Integration Guide"},"1222":{"body":"TypeDialog generates type-safe interactive forms from configuration schemas with bidirectional Nickel integration . Nickel Schema ↓\\nTypeDialog Form (Auto-generated) ↓\\nUser fills form interactively ↓\\nNickel output config (Type-safe)","breadcrumbs":"TypeDialog Nickel Integration » What is TypeDialog","id":"1222","title":"What is TypeDialog"},"1223":{"body":"","breadcrumbs":"TypeDialog Nickel Integration » Architecture","id":"1223","title":"Architecture"},"1224":{"body":"CLI/TUI/Web Layer ↓\\nTypeDialog Form Engine ↓\\nNickel Integration ↓\\nSchema Contracts","breadcrumbs":"TypeDialog Nickel Integration » Three Layers","id":"1224","title":"Three Layers"},"1225":{"body":"Input (Nickel) ↓\\nForm Definition (TOML) ↓\\nForm Rendering (CLI/TUI/Web) ↓\\nUser Input ↓\\nValidation (against Nickel contracts) ↓\\nOutput (JSON/YAML/TOML/Nickel)","breadcrumbs":"TypeDialog Nickel Integration » Data Flow","id":"1225","title":"Data Flow"},"1226":{"body":"","breadcrumbs":"TypeDialog Nickel Integration » Setup","id":"1226","title":"Setup"},"1227":{"body":"# Clone TypeDialog\\ngit clone https://github.com/jesusperezlorenzo/typedialog.git\\ncd typedialog # Build\\ncargo build --release # Install (optional)\\ncargo install --path ./crates/typedialog","breadcrumbs":"TypeDialog Nickel Integration » Installation","id":"1227","title":"Installation"},"1228":{"body":"typedialog --version\\ntypedialog --help","breadcrumbs":"TypeDialog Nickel Integration » Verify Installation","id":"1228","title":"Verify Installation"},"1229":{"body":"","breadcrumbs":"TypeDialog Nickel Integration » Basic Workflow","id":"1229","title":"Basic Workflow"},"123":{"body":"Component Purpose Example Providers Cloud platforms AWS, UpCloud, Local Servers Virtual machines Web servers, databases Task Services Infrastructure software Kubernetes, Docker, databases Clusters Grouped services Web cluster, database cluster","breadcrumbs":"Getting Started » Key Components","id":"123","title":"Key Components"},"1230":{"body":"# server_config.ncl\\nlet contracts = import \\"./contracts.ncl\\" in\\nlet defaults = import \\"./defaults.ncl\\" in { defaults = defaults, make_server | not_exported = fun overrides => defaults.server & overrides, DefaultServer = defaults.server,\\n}","breadcrumbs":"TypeDialog Nickel Integration » Step 1: Define Nickel Schema","id":"1230","title":"Step 1: Define Nickel Schema"},"1231":{"body":"# server_form.toml\\n[form]\\ntitle = \\"Server Configuration\\"\\ndescription = \\"Create a new server configuration\\" [[fields]]\\nname = \\"server_name\\"\\nlabel = \\"Server Name\\"\\ntype = \\"text\\"\\nrequired = true\\nhelp = \\"Unique identifier for the server\\"\\nplaceholder = \\"web-01\\" [[fields]]\\nname = \\"cpu_cores\\"\\nlabel = \\"CPU Cores\\"\\ntype = \\"number\\"\\nrequired = true\\ndefault = 4\\nhelp = \\"Number of CPU cores (1-32)\\" [[fields]]\\nname = \\"memory_gb\\"\\nlabel = \\"Memory (GB)\\"\\ntype = \\"number\\"\\nrequired = true\\ndefault = 8\\nhelp = \\"Memory in GB (1-256)\\" [[fields]]\\nname = \\"zone\\"\\nlabel = \\"Availability Zone\\"\\ntype = \\"select\\"\\nrequired = true\\noptions = [\\"us-nyc1\\", \\"eu-fra1\\", \\"ap-syd1\\"]\\ndefault = \\"us-nyc1\\" [[fields]]\\nname = \\"monitoring\\"\\nlabel = \\"Enable Monitoring\\"\\ntype = \\"confirm\\"\\ndefault = true [[fields]]\\nname = \\"tags\\"\\nlabel = \\"Tags\\"\\ntype = \\"multiselect\\"\\noptions = [\\"production\\", \\"staging\\", \\"testing\\", \\"development\\"]\\nhelp = \\"Select applicable tags\\"","breadcrumbs":"TypeDialog Nickel Integration » Step 2: Define TypeDialog Form (TOML)","id":"1231","title":"Step 2: Define TypeDialog Form (TOML)"},"1232":{"body":"typedialog form --config server_form.toml --backend cli Output : Server Configuration\\nCreate a new server configuration ? Server Name: web-01\\n? CPU Cores: 4\\n? Memory (GB): 8\\n? Availability Zone: (us-nyc1/eu-fra1/ap-syd1) us-nyc1\\n? Enable Monitoring: (y/n) y\\n? Tags: (Select multiple with space) ◉ production ◯ staging ◯ testing ◯ development","breadcrumbs":"TypeDialog Nickel Integration » Step 3: Render Form (CLI)","id":"1232","title":"Step 3: Render Form (CLI)"},"1233":{"body":"# Validation happens automatically\\n# If input matches Nickel contract, proceeds to output","breadcrumbs":"TypeDialog Nickel Integration » Step 4: Validate Against Nickel Schema","id":"1233","title":"Step 4: Validate Against Nickel Schema"},"1234":{"body":"typedialog form \\\\ --config server_form.toml \\\\ --output nickel \\\\ --backend cli Output file (server_config_output.ncl): { server_name = \\"web-01\\", cpu_cores = 4, memory_gb = 8, zone = \\"us-nyc1\\", monitoring = true, tags = [\\"production\\"],\\n}","breadcrumbs":"TypeDialog Nickel Integration » Step 5: Output to Nickel","id":"1234","title":"Step 5: Output to Nickel"},"1235":{"body":"","breadcrumbs":"TypeDialog Nickel Integration » Real-World Example 1: Infrastructure Wizard","id":"1235","title":"Real-World Example 1: Infrastructure Wizard"},"1236":{"body":"You want an interactive CLI wizard for infrastructure provisioning.","breadcrumbs":"TypeDialog Nickel Integration » Scenario","id":"1236","title":"Scenario"},"1237":{"body":"# infrastructure_schema.ncl\\n{ InfrastructureConfig = { workspace_name | String, deployment_mode | [| \'solo, \'multiuser, \'cicd, \'enterprise |], provider | [| \'upcloud, \'aws, \'hetzner |], taskservs | Array, enable_monitoring | Bool, enable_backup | Bool, backup_retention_days | Number, }, defaults = { workspace_name = \\"\\", deployment_mode = \'solo, provider = \'upcloud, taskservs = [], enable_monitoring = true, enable_backup = true, backup_retention_days = 7, }, DefaultInfra = defaults,\\n}","breadcrumbs":"TypeDialog Nickel Integration » Step 1: Define Nickel Schema for Infrastructure","id":"1237","title":"Step 1: Define Nickel Schema for Infrastructure"},"1238":{"body":"# infrastructure_wizard.toml\\n[form]\\ntitle = \\"Infrastructure Provisioning Wizard\\"\\ndescription = \\"Create a complete infrastructure setup\\" [[fields]]\\nname = \\"workspace_name\\"\\nlabel = \\"Workspace Name\\"\\ntype = \\"text\\"\\nrequired = true\\nvalidation_pattern = \\"^[a-z0-9-]{3,32}$\\"\\nhelp = \\"3-32 chars, lowercase alphanumeric and hyphens only\\"\\nplaceholder = \\"my-workspace\\" [[fields]]\\nname = \\"deployment_mode\\"\\nlabel = \\"Deployment Mode\\"\\ntype = \\"select\\"\\nrequired = true\\noptions = [ { value = \\"solo\\", label = \\"Solo (Single user, 2 CPU, 4 GB RAM)\\" }, { value = \\"multiuser\\", label = \\"MultiUser (Team, 4 CPU, 8 GB RAM)\\" }, { value = \\"cicd\\", label = \\"CI/CD (Pipelines, 8 CPU, 16 GB RAM)\\" }, { value = \\"enterprise\\", label = \\"Enterprise (Production, 16 CPU, 32 GB RAM)\\" },\\n]\\ndefault = \\"solo\\" [[fields]]\\nname = \\"provider\\"\\nlabel = \\"Cloud Provider\\"\\ntype = \\"select\\"\\nrequired = true\\noptions = [ { value = \\"upcloud\\", label = \\"UpCloud (EU)\\" }, { value = \\"aws\\", label = \\"AWS (Global)\\" }, { value = \\"hetzner\\", label = \\"Hetzner (EU)\\" },\\n]\\ndefault = \\"upcloud\\" [[fields]]\\nname = \\"taskservs\\"\\nlabel = \\"Task Services\\"\\ntype = \\"multiselect\\"\\nrequired = false\\noptions = [ { value = \\"kubernetes\\", label = \\"Kubernetes (Container orchestration)\\" }, { value = \\"cilium\\", label = \\"Cilium (Network policy)\\" }, { value = \\"postgres\\", label = \\"PostgreSQL (Database)\\" }, { value = \\"redis\\", label = \\"Redis (Cache)\\" }, { value = \\"prometheus\\", label = \\"Prometheus (Monitoring)\\" }, { value = \\"etcd\\", label = \\"etcd (Distributed config)\\" },\\n]\\nhelp = \\"Select task services to deploy\\" [[fields]]\\nname = \\"enable_monitoring\\"\\nlabel = \\"Enable Monitoring\\"\\ntype = \\"confirm\\"\\ndefault = true\\nhelp = \\"Prometheus + Grafana dashboards\\" [[fields]]\\nname = \\"enable_backup\\"\\nlabel = \\"Enable Backup\\"\\ntype = \\"confirm\\"\\ndefault = true [[fields]]\\nname = \\"backup_retention_days\\"\\nlabel = \\"Backup Retention (days)\\"\\ntype = \\"number\\"\\nrequired = false\\ndefault = 7\\nhelp = \\"How long to keep backups (if enabled)\\"\\nvisible_if = \\"enable_backup == true\\" [[fields]]\\nname = \\"email\\"\\nlabel = \\"Admin Email\\"\\ntype = \\"text\\"\\nrequired = true\\nvalidation_pattern = \\"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\\\\\.[a-zA-Z]{2,}$\\"\\nhelp = \\"For alerts and notifications\\"\\nplaceholder = \\"admin@company.com\\"","breadcrumbs":"TypeDialog Nickel Integration » Step 2: Create Comprehensive Form","id":"1238","title":"Step 2: Create Comprehensive Form"},"1239":{"body":"typedialog form \\\\ --config infrastructure_wizard.toml \\\\ --backend tui \\\\ --output nickel Output (infrastructure_config.ncl): { workspace_name = \\"production-eu\\", deployment_mode = \'enterprise, provider = \'upcloud, taskservs = [\\"kubernetes\\", \\"cilium\\", \\"postgres\\", \\"redis\\", \\"prometheus\\"], enable_monitoring = true, enable_backup = true, backup_retention_days = 30, email = \\"ops@company.com\\",\\n}","breadcrumbs":"TypeDialog Nickel Integration » Step 3: Run Interactive Wizard","id":"1239","title":"Step 3: Run Interactive Wizard"},"124":{"body":"Nickel : Primary configuration language for infrastructure definitions (type-safe, validated) TOML : User preferences and system settings YAML : Kubernetes manifests and service definitions","breadcrumbs":"Getting Started » Configuration Languages","id":"124","title":"Configuration Languages"},"1240":{"body":"# main_infrastructure.ncl\\nlet config = import \\"./infrastructure_config.ncl\\" in\\nlet schemas = import \\"../../provisioning/schemas/main.ncl\\" in { # Build infrastructure based on config infrastructure = if config.deployment_mode == \'solo then { servers = [ schemas.lib.make_server { name = config.workspace_name, cpu_cores = 2, memory_gb = 4, }, ], taskservs = config.taskservs, } else if config.deployment_mode == \'enterprise then { servers = [ schemas.lib.make_server { name = \\"app-01\\", cpu_cores = 16, memory_gb = 32 }, schemas.lib.make_server { name = \\"app-02\\", cpu_cores = 16, memory_gb = 32 }, schemas.lib.make_server { name = \\"db-01\\", cpu_cores = 16, memory_gb = 32 }, ], taskservs = config.taskservs, monitoring = { enabled = config.enable_monitoring, email = config.email }, } else # default fallback {},\\n}","breadcrumbs":"TypeDialog Nickel Integration » Step 4: Use Output in Infrastructure","id":"1240","title":"Step 4: Use Output in Infrastructure"},"1241":{"body":"","breadcrumbs":"TypeDialog Nickel Integration » Real-World Example 2: Server Configuration Form","id":"1241","title":"Real-World Example 2: Server Configuration Form"},"1242":{"body":"# server_advanced_form.toml\\n[form]\\ntitle = \\"Server Configuration\\"\\ndescription = \\"Configure server settings with validation\\" # Section 1: Basic Info\\n[[sections]]\\nname = \\"basic\\"\\ntitle = \\"Basic Information\\" [[fields]]\\nname = \\"server_name\\"\\nsection = \\"basic\\"\\nlabel = \\"Server Name\\"\\ntype = \\"text\\"\\nrequired = true\\nvalidation_pattern = \\"^[a-z0-9-]{3,32}$\\" [[fields]]\\nname = \\"description\\"\\nsection = \\"basic\\"\\nlabel = \\"Description\\"\\ntype = \\"textarea\\"\\nrequired = false\\nplaceholder = \\"Server purpose and details\\" # Section 2: Resources\\n[[sections]]\\nname = \\"resources\\"\\ntitle = \\"Resources\\" [[fields]]\\nname = \\"cpu_cores\\"\\nsection = \\"resources\\"\\nlabel = \\"CPU Cores\\"\\ntype = \\"number\\"\\nrequired = true\\ndefault = 4\\nmin = 1\\nmax = 32 [[fields]]\\nname = \\"memory_gb\\"\\nsection = \\"resources\\"\\nlabel = \\"Memory (GB)\\"\\ntype = \\"number\\"\\nrequired = true\\ndefault = 8\\nmin = 1\\nmax = 256 [[fields]]\\nname = \\"disk_gb\\"\\nsection = \\"resources\\"\\nlabel = \\"Disk (GB)\\"\\ntype = \\"number\\"\\nrequired = true\\ndefault = 100\\nmin = 10\\nmax = 2000 # Section 3: Network\\n[[sections]]\\nname = \\"network\\"\\ntitle = \\"Network Configuration\\" [[fields]]\\nname = \\"zone\\"\\nsection = \\"network\\"\\nlabel = \\"Availability Zone\\"\\ntype = \\"select\\"\\nrequired = true\\noptions = [\\"us-nyc1\\", \\"eu-fra1\\", \\"ap-syd1\\"] [[fields]]\\nname = \\"enable_ipv6\\"\\nsection = \\"network\\"\\nlabel = \\"Enable IPv6\\"\\ntype = \\"confirm\\"\\ndefault = false [[fields]]\\nname = \\"allowed_ports\\"\\nsection = \\"network\\"\\nlabel = \\"Allowed Ports\\"\\ntype = \\"multiselect\\"\\noptions = [ { value = \\"22\\", label = \\"SSH (22)\\" }, { value = \\"80\\", label = \\"HTTP (80)\\" }, { value = \\"443\\", label = \\"HTTPS (443)\\" }, { value = \\"3306\\", label = \\"MySQL (3306)\\" }, { value = \\"5432\\", label = \\"PostgreSQL (5432)\\" },\\n] # Section 4: Advanced\\n[[sections]]\\nname = \\"advanced\\"\\ntitle = \\"Advanced Options\\" [[fields]]\\nname = \\"kernel_version\\"\\nsection = \\"advanced\\"\\nlabel = \\"Kernel Version\\"\\ntype = \\"text\\"\\nrequired = false\\nplaceholder = \\"5.15.0 (or leave blank for latest)\\" [[fields]]\\nname = \\"enable_monitoring\\"\\nsection = \\"advanced\\"\\nlabel = \\"Enable Monitoring\\"\\ntype = \\"confirm\\"\\ndefault = true [[fields]]\\nname = \\"monitoring_interval\\"\\nsection = \\"advanced\\"\\nlabel = \\"Monitoring Interval (seconds)\\"\\ntype = \\"number\\"\\nrequired = false\\ndefault = 60\\nvisible_if = \\"enable_monitoring == true\\" [[fields]]\\nname = \\"tags\\"\\nsection = \\"advanced\\"\\nlabel = \\"Tags\\"\\ntype = \\"multiselect\\"\\noptions = [\\"production\\", \\"staging\\", \\"testing\\", \\"development\\"]","breadcrumbs":"TypeDialog Nickel Integration » Form Definition (Advanced)","id":"1242","title":"Form Definition (Advanced)"},"1243":{"body":"{ # Basic server_name = \\"web-prod-01\\", description = \\"Primary web server\\", # Resources cpu_cores = 16, memory_gb = 32, disk_gb = 500, # Network zone = \\"eu-fra1\\", enable_ipv6 = true, allowed_ports = [\\"22\\", \\"80\\", \\"443\\"], # Advanced kernel_version = \\"5.15.0\\", enable_monitoring = true, monitoring_interval = 30, tags = [\\"production\\"],\\n}","breadcrumbs":"TypeDialog Nickel Integration » Output Structure","id":"1243","title":"Output Structure"},"1244":{"body":"","breadcrumbs":"TypeDialog Nickel Integration » API Integration","id":"1244","title":"API Integration"},"1245":{"body":"# Start TypeDialog server\\ntypedialog server --port 8080 # Render form via HTTP\\ncurl -X POST http://localhost:8080/forms \\\\ -H \\"Content-Type: application/json\\" \\\\ -d @server_form.toml","breadcrumbs":"TypeDialog Nickel Integration » TypeDialog REST Endpoints","id":"1245","title":"TypeDialog REST Endpoints"},"1246":{"body":"{ \\"form_id\\": \\"srv_abc123\\", \\"status\\": \\"rendered\\", \\"fields\\": [ { \\"name\\": \\"server_name\\", \\"label\\": \\"Server Name\\", \\"type\\": \\"text\\", \\"required\\": true, \\"placeholder\\": \\"web-01\\" } ]\\n}","breadcrumbs":"TypeDialog Nickel Integration » Response Format","id":"1246","title":"Response Format"},"1247":{"body":"curl -X POST http://localhost:8080/forms/srv_abc123/submit \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"server_name\\": \\"web-01\\", \\"cpu_cores\\": 4, \\"memory_gb\\": 8, \\"zone\\": \\"us-nyc1\\", \\"monitoring\\": true, \\"tags\\": [\\"production\\"] }\'","breadcrumbs":"TypeDialog Nickel Integration » Submit Form","id":"1247","title":"Submit Form"},"1248":{"body":"{ \\"status\\": \\"success\\", \\"validation\\": \\"passed\\", \\"output_format\\": \\"nickel\\", \\"output\\": { \\"server_name\\": \\"web-01\\", \\"cpu_cores\\": 4, \\"memory_gb\\": 8, \\"zone\\": \\"us-nyc1\\", \\"monitoring\\": true, \\"tags\\": [\\"production\\"] }\\n}","breadcrumbs":"TypeDialog Nickel Integration » Response","id":"1248","title":"Response"},"1249":{"body":"","breadcrumbs":"TypeDialog Nickel Integration » Validation","id":"1249","title":"Validation"},"125":{"body":"","breadcrumbs":"Getting Started » First-Time Setup","id":"125","title":"First-Time Setup"},"1250":{"body":"TypeDialog validates user input against Nickel contracts: # Nickel contract\\nServerConfig = { cpu_cores | Number, # Must be number memory_gb | Number, # Must be number zone | [| \'us-nyc1, \'eu-fra1 |], # Enum\\n} # If user enters invalid value\\n# TypeDialog rejects before serializing","breadcrumbs":"TypeDialog Nickel Integration » Contract-Based Validation","id":"1250","title":"Contract-Based Validation"},"1251":{"body":"[[fields]]\\nname = \\"cpu_cores\\"\\ntype = \\"number\\"\\nmin = 1\\nmax = 32\\nhelp = \\"Must be 1-32 cores\\"\\n# TypeDialog enforces before user can submit","breadcrumbs":"TypeDialog Nickel Integration » Validation Rules in Form","id":"1251","title":"Validation Rules in Form"},"1252":{"body":"","breadcrumbs":"TypeDialog Nickel Integration » Integration with Provisioning Platform","id":"1252","title":"Integration with Provisioning Platform"},"1253":{"body":"# 1. User runs initialization\\nprovisioning init --wizard # 2. Behind the scenes:\\n# - Loads infrastructure_wizard.toml\\n# - Starts TypeDialog (CLI or TUI)\\n# - User fills form interactively # 3. Output saved as config\\n# ~/.config/provisioning/infrastructure_config.ncl # 4. Provisioning uses output\\n# provisioning server create --from-config infrastructure_config.ncl","breadcrumbs":"TypeDialog Nickel Integration » Use Case: Infrastructure Initialization","id":"1253","title":"Use Case: Infrastructure Initialization"},"1254":{"body":"# provisioning/core/nulib/provisioning_init.nu def provisioning_init_wizard [] { # Launch TypeDialog form let config = ( typedialog form \\\\ --config \\"provisioning/config/infrastructure_wizard.toml\\" \\\\ --backend tui \\\\ --output nickel ) # Save output $config | save ~/.config/provisioning/workspace_config.ncl # Validate with provisioning schemas let provisioning = (import \\"provisioning/schemas/main.ncl\\") let validated = ( nickel export ~/.config/provisioning/workspace_config.ncl | jq . | to json ) print \\"Infrastructure configuration created!\\" print \\"Use: provisioning deploy --from-config\\"\\n}","breadcrumbs":"TypeDialog Nickel Integration » Implementation in Nushell","id":"1254","title":"Implementation in Nushell"},"1255":{"body":"","breadcrumbs":"TypeDialog Nickel Integration » Advanced Features","id":"1255","title":"Advanced Features"},"1256":{"body":"Show/hide fields based on user selections: [[fields]]\\nname = \\"backup_retention\\"\\nlabel = \\"Backup Retention (days)\\"\\ntype = \\"number\\"\\nvisible_if = \\"enable_backup == true\\" # Only shown if backup enabled","breadcrumbs":"TypeDialog Nickel Integration » Conditional Visibility","id":"1256","title":"Conditional Visibility"},"1257":{"body":"Set defaults based on other fields: [[fields]]\\nname = \\"deployment_mode\\"\\ntype = \\"select\\"\\noptions = [\\"solo\\", \\"enterprise\\"] [[fields]]\\nname = \\"cpu_cores\\"\\ntype = \\"number\\"\\ndefault_from = \\"deployment_mode\\" # Can reference other fields\\n# solo → default 2, enterprise → default 16","breadcrumbs":"TypeDialog Nickel Integration » Dynamic Defaults","id":"1257","title":"Dynamic Defaults"},"1258":{"body":"[[fields]]\\nname = \\"memory_gb\\"\\ntype = \\"number\\"\\nvalidation_rule = \\"memory_gb >= cpu_cores * 2\\"\\nhelp = \\"Memory must be at least 2 GB per CPU core\\"","breadcrumbs":"TypeDialog Nickel Integration » Custom Validation","id":"1258","title":"Custom Validation"},"1259":{"body":"TypeDialog can output to multiple formats: # Output to Nickel (recommended for IaC)\\ntypedialog form --config form.toml --output nickel # Output to JSON (for APIs)\\ntypedialog form --config form.toml --output json # Output to YAML (for K8s)\\ntypedialog form --config form.toml --output yaml # Output to TOML (for application config)\\ntypedialog form --config form.toml --output toml","breadcrumbs":"TypeDialog Nickel Integration » Output Formats","id":"1259","title":"Output Formats"},"126":{"body":"Create your personal configuration: # Initialize user configuration\\nprovisioning init config # This creates ~/.provisioning/config.user.toml","breadcrumbs":"Getting Started » Step 1: Initialize Your Configuration","id":"126","title":"Step 1: Initialize Your Configuration"},"1260":{"body":"TypeDialog supports three rendering backends:","breadcrumbs":"TypeDialog Nickel Integration » Backends","id":"1260","title":"Backends"},"1261":{"body":"typedialog form --config form.toml --backend cli Pros : Lightweight, SSH-friendly, no dependencies Cons : Basic UI","breadcrumbs":"TypeDialog Nickel Integration » 1. CLI (Command-line prompts)","id":"1261","title":"1. CLI (Command-line prompts)"},"1262":{"body":"typedialog form --config form.toml --backend tui Pros : Rich UI, keyboard navigation, sections Cons : Requires terminal support","breadcrumbs":"TypeDialog Nickel Integration » 2. TUI (Terminal User Interface - Ratatui)","id":"1262","title":"2. TUI (Terminal User Interface - Ratatui)"},"1263":{"body":"typedialog form --config form.toml --backend web --port 3000\\n# Opens http://localhost:3000 Pros : Beautiful UI, remote access, multi-user Cons : Requires browser, network","breadcrumbs":"TypeDialog Nickel Integration » 3. Web (HTTP Server - Axum)","id":"1263","title":"3. Web (HTTP Server - Axum)"},"1264":{"body":"","breadcrumbs":"TypeDialog Nickel Integration » Troubleshooting","id":"1264","title":"Troubleshooting"},"1265":{"body":"Cause : Field names or types don\'t match contract Solution : Verify field definitions match Nickel schema: # Form field\\n[[fields]]\\nname = \\"cpu_cores\\" # Must match Nickel field name\\ntype = \\"number\\" # Must match Nickel type","breadcrumbs":"TypeDialog Nickel Integration » Problem: Form doesn\'t match Nickel contract","id":"1265","title":"Problem: Form doesn\'t match Nickel contract"},"1266":{"body":"Cause : User input violates contract constraints Solution : Add help text and validation rules: [[fields]]\\nname = \\"cpu_cores\\"\\nvalidation_pattern = \\"^[1-9][0-9]*$\\"\\nhelp = \\"Must be positive integer\\"","breadcrumbs":"TypeDialog Nickel Integration » Problem: Validation fails","id":"1266","title":"Problem: Validation fails"},"1267":{"body":"Cause : Missing required fields Solution : Ensure all required fields in form: [[fields]]\\nname = \\"required_field\\"\\nrequired = true # User must provide value","breadcrumbs":"TypeDialog Nickel Integration » Problem: Output not valid Nickel","id":"1267","title":"Problem: Output not valid Nickel"},"1268":{"body":"","breadcrumbs":"TypeDialog Nickel Integration » Complete Example: End-to-End Workflow","id":"1268","title":"Complete Example: End-to-End Workflow"},"1269":{"body":"# workspace_schema.ncl\\n{ workspace = { name = \\"\\", mode = \'solo, provider = \'upcloud, monitoring = true, email = \\"\\", },\\n}","breadcrumbs":"TypeDialog Nickel Integration » Step 1: Define Nickel Schema","id":"1269","title":"Step 1: Define Nickel Schema"},"127":{"body":"# Check your environment setup\\nprovisioning env # View comprehensive configuration\\nprovisioning allenv You should see output like: ✅ Configuration loaded successfully\\n✅ All required tools available\\n📁 Base path: /usr/local/provisioning\\n🏠 User config: ~/.provisioning/config.user.toml","breadcrumbs":"Getting Started » Step 2: Verify Your Environment","id":"127","title":"Step 2: Verify Your Environment"},"1270":{"body":"# workspace_form.toml\\n[[fields]]\\nname = \\"name\\"\\ntype = \\"text\\"\\nrequired = true [[fields]]\\nname = \\"mode\\"\\ntype = \\"select\\"\\noptions = [\\"solo\\", \\"enterprise\\"] [[fields]]\\nname = \\"provider\\"\\ntype = \\"select\\"\\noptions = [\\"upcloud\\", \\"aws\\"] [[fields]]\\nname = \\"monitoring\\"\\ntype = \\"confirm\\" [[fields]]\\nname = \\"email\\"\\ntype = \\"text\\"\\nrequired = true","breadcrumbs":"TypeDialog Nickel Integration » Step 2: Define Form","id":"1270","title":"Step 2: Define Form"},"1271":{"body":"$ typedialog form --config workspace_form.toml --backend tui\\n# User fills form interactively","breadcrumbs":"TypeDialog Nickel Integration » Step 3: User Interaction","id":"1271","title":"Step 3: User Interaction"},"1272":{"body":"{ workspace = { name = \\"production\\", mode = \'enterprise, provider = \'upcloud, monitoring = true, email = \\"ops@company.com\\", },\\n}","breadcrumbs":"TypeDialog Nickel Integration » Step 4: Output","id":"1272","title":"Step 4: Output"},"1273":{"body":"# main.ncl\\nlet config = import \\"./workspace.ncl\\" in\\nlet schemas = import \\"provisioning/schemas/main.ncl\\" in { # Build infrastructure infrastructure = schemas.deployment.modes.make_mode { deployment_type = config.workspace.mode, provider = config.workspace.provider, },\\n}","breadcrumbs":"TypeDialog Nickel Integration » Step 5: Use in Provisioning","id":"1273","title":"Step 5: Use in Provisioning"},"1274":{"body":"TypeDialog + Nickel provides: ✅ Type-Safe UIs : Forms validated against Nickel contracts ✅ Auto-Generated : No UI code to maintain ✅ Bidirectional : Nickel → Forms → Nickel ✅ Multiple Outputs : JSON, YAML, TOML, Nickel ✅ Three Backends : CLI, TUI, Web ✅ Production-Ready : Used in real infrastructure Key Benefit : Reduce configuration errors by enforcing schema validation at UI level, not after deployment. Version : 1.0.0 Status : Implementation Guide Last Updated : 2025-12-15","breadcrumbs":"TypeDialog Nickel Integration » Summary","id":"1274","title":"Summary"},"1275":{"body":"","breadcrumbs":"ADR-001: Project Structure » ADR-001: Project Structure Decision","id":"1275","title":"ADR-001: Project Structure Decision"},"1276":{"body":"Accepted","breadcrumbs":"ADR-001: Project Structure » Status","id":"1276","title":"Status"},"1277":{"body":"Provisioning had evolved from a monolithic structure into a complex system with mixed organizational patterns. The original structure had multiple issues: Provider-specific code scattered : Cloud provider implementations were mixed with core logic Task services fragmented : Infrastructure services lacked consistent structure Domain boundaries unclear : No clear separation between core, providers, and services Development artifacts mixed with distribution : User-facing tools mixed with development utilities Deep call stack limitations : Nushell\'s runtime limitations required architectural solutions Configuration complexity : 200+ environment variables across 65+ files needed systematic organization The system needed a clear, maintainable structure that supports: Multi-provider infrastructure provisioning (AWS, UpCloud, local) Modular task services (Kubernetes, container runtimes, storage, networking) Clear separation of concerns Hybrid Rust/Nushell architecture Configuration-driven workflows Clean distribution without development artifacts","breadcrumbs":"ADR-001: Project Structure » Context","id":"1277","title":"Context"},"1278":{"body":"Adopt a domain-driven hybrid structure organized around functional boundaries: src/\\n├── core/ # Core system and CLI entry point\\n├── platform/ # High-performance coordination layer (Rust orchestrator)\\n├── orchestrator/ # Legacy orchestrator location (to be consolidated)\\n├── provisioning/ # Main provisioning with domain modules\\n├── control-center/ # Web UI management interface\\n├── tools/ # Development and utility tools\\n└── extensions/ # Plugin and extension framework","breadcrumbs":"ADR-001: Project Structure » Decision","id":"1278","title":"Decision"},"1279":{"body":"Domain Separation : Each major component has clear boundaries and responsibilities Hybrid Architecture : Rust for performance-critical coordination, Nushell for business logic Provider Abstraction : Standardized interfaces across cloud providers Service Modularity : Reusable task services with consistent structure Clean Distribution : Development tools separated from user-facing components Configuration Hierarchy : Systematic config management with interpolation support","breadcrumbs":"ADR-001: Project Structure » Key Structural Principles","id":"1279","title":"Key Structural Principles"},"128":{"body":"# List available providers\\nprovisioning list providers # List available task services\\nprovisioning list taskservs # List available clusters\\nprovisioning list clusters","breadcrumbs":"Getting Started » Step 3: Explore Available Resources","id":"128","title":"Step 3: Explore Available Resources"},"1280":{"body":"Core : CLI interface, library modules, and common utilities Platform : High-performance Rust orchestrator for workflow coordination Provisioning : Main business logic with providers, task services, and clusters Control Center : Web-based management interface Tools : Development utilities and build systems Extensions : Plugin framework and custom extensions","breadcrumbs":"ADR-001: Project Structure » Domain Organization","id":"1280","title":"Domain Organization"},"1281":{"body":"","breadcrumbs":"ADR-001: Project Structure » Consequences","id":"1281","title":"Consequences"},"1282":{"body":"Clear Boundaries : Each domain has well-defined responsibilities and interfaces Scalable Growth : New providers and services can be added without structural changes Development Efficiency : Developers can focus on specific domains without system-wide knowledge Clean Distribution : Users receive only necessary components without development artifacts Maintenance Clarity : Issues can be isolated to specific domains Hybrid Benefits : Leverage Rust performance where needed while maintaining Nushell productivity Configuration Consistency : Systematic approach to configuration management across all domains","breadcrumbs":"ADR-001: Project Structure » Positive","id":"1282","title":"Positive"},"1283":{"body":"Migration Complexity : Required systematic migration of existing components Learning Curve : New developers need to understand domain boundaries Coordination Overhead : Cross-domain features require careful interface design Path Management : More complex path resolution with domain separation Build Complexity : Multiple domains require coordinated build processes","breadcrumbs":"ADR-001: Project Structure » Negative","id":"1283","title":"Negative"},"1284":{"body":"Development Patterns : Each domain may develop its own patterns within architectural guidelines Testing Strategy : Domain-specific testing strategies while maintaining integration coverage Documentation : Domain-specific documentation with clear cross-references","breadcrumbs":"ADR-001: Project Structure » Neutral","id":"1284","title":"Neutral"},"1285":{"body":"","breadcrumbs":"ADR-001: Project Structure » Alternatives Considered","id":"1285","title":"Alternatives Considered"},"1286":{"body":"Keep all code in a single flat structure with minimal organization. Rejected : Would not solve maintainability or scalability issues. Continued technical debt accumulation.","breadcrumbs":"ADR-001: Project Structure » Alternative 1: Monolithic Structure","id":"1286","title":"Alternative 1: Monolithic Structure"},"1287":{"body":"Split into completely separate services with network communication. Rejected : Overhead too high for single-machine deployment use case. Would complicate installation and configuration.","breadcrumbs":"ADR-001: Project Structure » Alternative 2: Microservice Architecture","id":"1287","title":"Alternative 2: Microservice Architecture"},"1288":{"body":"Organize by implementation language (rust/, nushell/, kcl/). Rejected : Does not align with functional boundaries. Cross-cutting concerns would be scattered.","breadcrumbs":"ADR-001: Project Structure » Alternative 3: Language-Based Organization","id":"1288","title":"Alternative 3: Language-Based Organization"},"1289":{"body":"Organize by user-facing features (servers/, clusters/, networking/). Rejected : Would duplicate cross-cutting infrastructure and provider logic across features.","breadcrumbs":"ADR-001: Project Structure » Alternative 4: Feature-Based Organization","id":"1289","title":"Alternative 4: Feature-Based Organization"},"129":{"body":"Let\'s create a simple local infrastructure to learn the basics.","breadcrumbs":"Getting Started » Your First Infrastructure","id":"129","title":"Your First Infrastructure"},"1290":{"body":"Organize by architectural layers (presentation/, business/, data/). Rejected : Does not align with domain complexity. Infrastructure provisioning has different layering needs.","breadcrumbs":"ADR-001: Project Structure » Alternative 5: Layer-Based Architecture","id":"1290","title":"Alternative 5: Layer-Based Architecture"},"1291":{"body":"Configuration System Migration (ADR-002) Hybrid Architecture Decision (ADR-004) Extension Framework Design (ADR-005) Project Architecture Principles (PAP) Guidelines","breadcrumbs":"ADR-001: Project Structure » References","id":"1291","title":"References"},"1292":{"body":"","breadcrumbs":"ADR-002: Distribution Strategy » ADR-002: Distribution Strategy","id":"1292","title":"ADR-002: Distribution Strategy"},"1293":{"body":"Accepted","breadcrumbs":"ADR-002: Distribution Strategy » Status","id":"1293","title":"Status"},"1294":{"body":"Provisioning needed a clean distribution strategy that separates user-facing tools from development artifacts. Key challenges included: Development Artifacts Mixed with Production : Build tools, test files, and development utilities scattered throughout user directories Complex Installation Process : Users had to navigate through development-specific directories and files Unclear User Experience : No clear distinction between what users need versus what developers need Configuration Complexity : Multiple configuration files with unclear precedence and purpose Workspace Pollution : User workspaces contained development-only files and directories Path Resolution Issues : Complex path resolution logic mixing development and production concerns The system required a distribution strategy that provides: Clean user experience without development artifacts Clear separation between user and development tools Simplified configuration management Consistent installation and deployment patterns Maintainable development workflow","breadcrumbs":"ADR-002: Distribution Strategy » Context","id":"1294","title":"Context"},"1295":{"body":"Implement a layered distribution strategy with clear separation between development and user environments:","breadcrumbs":"ADR-002: Distribution Strategy » Decision","id":"1295","title":"Decision"},"1296":{"body":"Core Distribution Layer : Essential user-facing components Main CLI tools and libraries Configuration templates and defaults Provider implementations Task service definitions Development Layer : Development-specific tools and artifacts Build scripts and development utilities Test suites and validation tools Development configuration templates Code generation tools Workspace Layer : User-specific customization and data User configurations and overrides Local state and cache files Custom extensions and plugins User-specific templates and workflows","breadcrumbs":"ADR-002: Distribution Strategy » Distribution Layers","id":"1296","title":"Distribution Layers"},"1297":{"body":"# User Distribution\\n/usr/local/bin/\\n├── provisioning # Main CLI entry point\\n└── provisioning-* # Supporting utilities /usr/local/share/provisioning/\\n├── core/ # Core libraries and modules\\n├── providers/ # Provider implementations\\n├── taskservs/ # Task service definitions\\n├── templates/ # Configuration templates\\n└── config.defaults.toml # System-wide defaults # User Workspace\\n~/workspace/provisioning/\\n├── config.user.toml # User preferences\\n├── infra/ # User infrastructure definitions\\n├── extensions/ # User extensions\\n└── cache/ # Local cache and state # Development Environment\\n/\\n├── src/ # Source code\\n├── scripts/ # Development tools\\n├── tests/ # Test suites\\n└── tools/ # Build and development utilities","breadcrumbs":"ADR-002: Distribution Strategy » Distribution Structure","id":"1297","title":"Distribution Structure"},"1298":{"body":"Clean Separation : Development artifacts never appear in user installations Hierarchical Configuration : Clear precedence from system defaults to user overrides Self-Contained User Tools : Users can work without accessing development directories Workspace Isolation : User data and customizations isolated from system installation Consistent Paths : Predictable path resolution across different installation types Version Management : Clear versioning and upgrade paths for distributed components","breadcrumbs":"ADR-002: Distribution Strategy » Key Distribution Principles","id":"1298","title":"Key Distribution Principles"},"1299":{"body":"","breadcrumbs":"ADR-002: Distribution Strategy » Consequences","id":"1299","title":"Consequences"},"13":{"body":"","breadcrumbs":"Home » Key Concepts","id":"13","title":"Key Concepts"},"130":{"body":"# Create a new workspace directory\\nmkdir ~/my-first-infrastructure\\ncd ~/my-first-infrastructure # Initialize workspace\\nprovisioning generate infra --new local-demo This creates: local-demo/\\n├── config/\\n│ └── config.ncl # Master Nickel configuration\\n├── infra/\\n│ └── default/\\n│ ├── main.ncl # Infrastructure definition\\n│ └── servers.ncl # Server configurations\\n└── docs/ # Auto-generated guides","breadcrumbs":"Getting Started » Step 1: Create a Workspace","id":"130","title":"Step 1: Create a Workspace"},"1300":{"body":"Clean User Experience : Users interact only with production-ready tools and interfaces Simplified Installation : Clear installation process without development complexity Workspace Isolation : User customizations don\'t interfere with system installation Development Efficiency : Developers can work with full toolset without affecting users Configuration Clarity : Clear hierarchy and precedence for configuration settings Maintainable Updates : System updates don\'t affect user customizations Path Simplicity : Predictable path resolution without development-specific logic Security Isolation : User workspace separated from system components","breadcrumbs":"ADR-002: Distribution Strategy » Positive","id":"1300","title":"Positive"},"1301":{"body":"Distribution Complexity : Multiple distribution targets require coordinated build processes Path Management : More complex path resolution logic to support multiple layers Migration Overhead : Existing users need to migrate to new workspace structure Documentation Burden : Need clear documentation for different user types Testing Complexity : Must validate distribution across different installation scenarios","breadcrumbs":"ADR-002: Distribution Strategy » Negative","id":"1301","title":"Negative"},"1302":{"body":"Development Patterns : Different patterns for development versus production deployment Configuration Strategy : Layer-specific configuration management approaches Tool Integration : Different integration patterns for development versus user tools","breadcrumbs":"ADR-002: Distribution Strategy » Neutral","id":"1302","title":"Neutral"},"1303":{"body":"","breadcrumbs":"ADR-002: Distribution Strategy » Alternatives Considered","id":"1303","title":"Alternatives Considered"},"1304":{"body":"Ship everything (development and production) in single package. Rejected : Creates confusing user experience and bloated installations. Mixes development concerns with user needs.","breadcrumbs":"ADR-002: Distribution Strategy » Alternative 1: Monolithic Distribution","id":"1304","title":"Alternative 1: Monolithic Distribution"},"1305":{"body":"Package entire system as container images only. Rejected : Limits deployment flexibility and complicates local development workflows. Not suitable for all use cases.","breadcrumbs":"ADR-002: Distribution Strategy » Alternative 2: Container-Only Distribution","id":"1305","title":"Alternative 2: Container-Only Distribution"},"1306":{"body":"Require users to build from source with development environment. Rejected : Creates high barrier to entry and mixes user concerns with development complexity.","breadcrumbs":"ADR-002: Distribution Strategy » Alternative 3: Source-Only Distribution","id":"1306","title":"Alternative 3: Source-Only Distribution"},"1307":{"body":"Minimal core with everything else as downloadable plugins. Rejected : Would fragment essential functionality and complicate initial setup. Network dependency for basic functionality.","breadcrumbs":"ADR-002: Distribution Strategy » Alternative 4: Plugin-Based Distribution","id":"1307","title":"Alternative 4: Plugin-Based Distribution"},"1308":{"body":"Use environment variables to control what gets installed. Rejected : Creates complex configuration matrix and potential for inconsistent installations.","breadcrumbs":"ADR-002: Distribution Strategy » Alternative 5: Environment-Based Distribution","id":"1308","title":"Alternative 5: Environment-Based Distribution"},"1309":{"body":"","breadcrumbs":"ADR-002: Distribution Strategy » Implementation Details","id":"1309","title":"Implementation Details"},"131":{"body":"# View the generated configuration\\nprovisioning show settings --infra local-demo","breadcrumbs":"Getting Started » Step 2: Examine the Configuration","id":"131","title":"Step 2: Examine the Configuration"},"1310":{"body":"Core Layer Build : Extract essential user components from source Template Processing : Generate configuration templates with proper defaults Path Resolution : Generate path resolution logic for different installation types Documentation Generation : Create user-specific documentation excluding development details Package Creation : Build distribution packages for different platforms Validation Testing : Test installations in clean environments","breadcrumbs":"ADR-002: Distribution Strategy » Distribution Build Process","id":"1310","title":"Distribution Build Process"},"1311":{"body":"System Defaults (lowest precedence)\\n└── User Configuration └── Project Configuration └── Infrastructure Configuration └── Environment Configuration └── Runtime Configuration (highest precedence)","breadcrumbs":"ADR-002: Distribution Strategy » Configuration Hierarchy","id":"1311","title":"Configuration Hierarchy"},"1312":{"body":"Automatic Creation : User workspace created on first run Template Initialization : Workspace populated with configuration templates Version Tracking : Workspace tracks compatible system versions Migration Support : Automatic migration between workspace versions Backup Integration : Workspace backup and restore capabilities","breadcrumbs":"ADR-002: Distribution Strategy » Workspace Management","id":"1312","title":"Workspace Management"},"1313":{"body":"Project Structure Decision (ADR-001) Workspace Isolation Decision (ADR-003) Configuration System Migration (CLAUDE.md) User Experience Guidelines (Design Principles) Installation and Deployment Procedures","breadcrumbs":"ADR-002: Distribution Strategy » References","id":"1313","title":"References"},"1314":{"body":"","breadcrumbs":"ADR-003: Workspace Isolation » ADR-003: Workspace Isolation","id":"1314","title":"ADR-003: Workspace Isolation"},"1315":{"body":"Accepted","breadcrumbs":"ADR-003: Workspace Isolation » Status","id":"1315","title":"Status"},"1316":{"body":"Provisioning required a clear strategy for managing user-specific data, configurations, and customizations separate from system-wide installations. Key challenges included: Configuration Conflicts : User settings mixed with system defaults, causing unclear precedence State Management : User state (cache, logs, temporary files) scattered across filesystem Customization Isolation : User extensions and customizations affecting system behavior Multi-User Support : Multiple users on same system interfering with each other Development vs Production : Developer needs different from end-user needs Path Resolution Complexity : Complex logic to locate user-specific resources Backup and Migration : Difficulty backing up and migrating user-specific settings Security Boundaries : Need clear separation between system and user-writable areas The system needed workspace isolation that provides: Clear separation of user data from system installation Predictable configuration precedence and inheritance User-specific customization without system impact Multi-user support on shared systems Easy backup and migration of user settings Security isolation between system and user areas","breadcrumbs":"ADR-003: Workspace Isolation » Context","id":"1316","title":"Context"},"1317":{"body":"Implement isolated user workspaces with clear boundaries and hierarchical configuration:","breadcrumbs":"ADR-003: Workspace Isolation » Decision","id":"1317","title":"Decision"},"1318":{"body":"~/workspace/provisioning/ # User workspace root\\n├── config/\\n│ ├── user.toml # User preferences and overrides\\n│ ├── environments/ # Environment-specific configs\\n│ │ ├── dev.toml\\n│ │ ├── test.toml\\n│ │ └── prod.toml\\n│ └── secrets/ # User-specific encrypted secrets\\n├── infra/ # User infrastructure definitions\\n│ ├── personal/ # Personal infrastructure\\n│ ├── work/ # Work-related infrastructure\\n│ └── shared/ # Shared infrastructure definitions\\n├── extensions/ # User-installed extensions\\n│ ├── providers/ # Custom providers\\n│ ├── taskservs/ # Custom task services\\n│ └── plugins/ # User plugins\\n├── templates/ # User-specific templates\\n├── cache/ # Local cache and temporary data\\n│ ├── provider-cache/ # Provider API cache\\n│ ├── version-cache/ # Version information cache\\n│ └── build-cache/ # Build and generation cache\\n├── logs/ # User-specific logs\\n├── state/ # Local state files\\n└── backups/ # Automatic workspace backups","breadcrumbs":"ADR-003: Workspace Isolation » Workspace Structure","id":"1318","title":"Workspace Structure"},"1319":{"body":"Runtime Parameters (command line, environment variables) Environment Configuration (config/environments/{env}.toml) Infrastructure Configuration (infra/{name}/config.toml) Project Configuration (project-specific settings) User Configuration (config/user.toml) System Defaults (system-wide defaults)","breadcrumbs":"ADR-003: Workspace Isolation » Configuration Hierarchy (Precedence Order)","id":"1319","title":"Configuration Hierarchy (Precedence Order)"},"132":{"body":"# Validate syntax and structure\\nprovisioning validate config --infra local-demo # Should show: ✅ Configuration validation passed!","breadcrumbs":"Getting Started » Step 3: Validate the Configuration","id":"132","title":"Step 3: Validate the Configuration"},"1320":{"body":"Complete Isolation : User workspace completely independent of system installation Hierarchical Inheritance : Clear configuration inheritance with user overrides Security Boundaries : User workspace in user-writable area only Multi-User Safe : Multiple users can have independent workspaces Portable : Entire user workspace can be backed up and restored Version Independent : Workspace compatible across system version upgrades Extension Safe : User extensions cannot affect system behavior State Isolation : All user state contained within workspace","breadcrumbs":"ADR-003: Workspace Isolation » Key Isolation Principles","id":"1320","title":"Key Isolation Principles"},"1321":{"body":"","breadcrumbs":"ADR-003: Workspace Isolation » Consequences","id":"1321","title":"Consequences"},"1322":{"body":"User Independence : Users can customize without affecting system or other users Configuration Clarity : Clear hierarchy and precedence for all configuration Security Isolation : User modifications cannot compromise system installation Easy Backup : Complete user environment can be backed up and restored Development Flexibility : Developers can have multiple isolated workspaces System Upgrades : System updates don\'t affect user customizations Multi-User Support : Multiple users can work independently on same system Portable Configurations : User workspace can be moved between systems State Management : All user state in predictable locations","breadcrumbs":"ADR-003: Workspace Isolation » Positive","id":"1322","title":"Positive"},"1323":{"body":"Initial Setup : Users must initialize workspace before first use Path Complexity : More complex path resolution to support workspace isolation Disk Usage : Each user maintains separate cache and state Configuration Duplication : Some configuration may be duplicated across users Migration Overhead : Existing users need workspace migration Documentation Complexity : Need clear documentation for workspace management","breadcrumbs":"ADR-003: Workspace Isolation » Negative","id":"1323","title":"Negative"},"1324":{"body":"Backup Strategy : Users responsible for their own workspace backup Extension Management : User-specific extension installation and management Version Compatibility : Workspace versions must be compatible with system versions Performance Implications : Additional path resolution overhead","breadcrumbs":"ADR-003: Workspace Isolation » Neutral","id":"1324","title":"Neutral"},"1325":{"body":"","breadcrumbs":"ADR-003: Workspace Isolation » Alternatives Considered","id":"1325","title":"Alternatives Considered"},"1326":{"body":"All configuration in system directories with user overrides via environment variables. Rejected : Creates conflicts between users and makes customization difficult. Poor isolation and security.","breadcrumbs":"ADR-003: Workspace Isolation » Alternative 1: System-Wide Configuration Only","id":"1326","title":"Alternative 1: System-Wide Configuration Only"},"1327":{"body":"Use traditional dotfile approach (~/.provisioning/). Rejected : Clutters home directory and provides less structured organization. Harder to backup and migrate.","breadcrumbs":"ADR-003: Workspace Isolation » Alternative 2: Home Directory Dotfiles","id":"1327","title":"Alternative 2: Home Directory Dotfiles"},"1328":{"body":"Follow XDG specification for config/data/cache separation. Rejected : While standards-compliant, would fragment user data across multiple directories making management complex.","breadcrumbs":"ADR-003: Workspace Isolation » Alternative 3: XDG Base Directory Specification","id":"1328","title":"Alternative 3: XDG Base Directory Specification"},"1329":{"body":"Each user gets containerized environment. Rejected : Too heavy for simple configuration isolation. Adds deployment complexity without sufficient benefits.","breadcrumbs":"ADR-003: Workspace Isolation » Alternative 4: Container-Based Isolation","id":"1329","title":"Alternative 4: Container-Based Isolation"},"133":{"body":"# Dry run - see what would be created\\nprovisioning server create --infra local-demo --check # This shows planned changes without making them","breadcrumbs":"Getting Started » Step 4: Deploy Infrastructure (Check Mode)","id":"133","title":"Step 4: Deploy Infrastructure (Check Mode)"},"1330":{"body":"Store all user configuration in database. Rejected : Adds dependency complexity and makes backup/restore more difficult. Over-engineering for configuration needs.","breadcrumbs":"ADR-003: Workspace Isolation » Alternative 5: Database-Based Configuration","id":"1330","title":"Alternative 5: Database-Based Configuration"},"1331":{"body":"","breadcrumbs":"ADR-003: Workspace Isolation » Implementation Details","id":"1331","title":"Implementation Details"},"1332":{"body":"# Automatic workspace creation on first run\\nprovisioning workspace init # Manual workspace creation with template\\nprovisioning workspace init --template=developer # Workspace status and validation\\nprovisioning workspace status\\nprovisioning workspace validate","breadcrumbs":"ADR-003: Workspace Isolation » Workspace Initialization","id":"1332","title":"Workspace Initialization"},"1333":{"body":"Workspace Discovery : Locate user workspace (env var → default location) Configuration Loading : Load configuration hierarchy with proper precedence Path Resolution : Resolve all paths relative to workspace and system installation Variable Interpolation : Process configuration variables and templates Validation : Validate merged configuration for completeness and correctness","breadcrumbs":"ADR-003: Workspace Isolation » Configuration Resolution Process","id":"1333","title":"Configuration Resolution Process"},"1334":{"body":"# Backup entire workspace\\nprovisioning workspace backup --output ~/backup/provisioning-workspace.tar.gz # Restore workspace from backup\\nprovisioning workspace restore --input ~/backup/provisioning-workspace.tar.gz # Migrate workspace to new version\\nprovisioning workspace migrate --from-version 2.0.0 --to-version 3.0.0","breadcrumbs":"ADR-003: Workspace Isolation » Backup and Migration","id":"1334","title":"Backup and Migration"},"1335":{"body":"File Permissions : Workspace created with appropriate user permissions Secret Management : Secrets encrypted and isolated within workspace Extension Sandboxing : User extensions cannot access system directories Path Validation : All paths validated to prevent directory traversal Configuration Validation : User configuration validated against schemas","breadcrumbs":"ADR-003: Workspace Isolation » Security Considerations","id":"1335","title":"Security Considerations"},"1336":{"body":"Distribution Strategy (ADR-002) Configuration System Migration (CLAUDE.md) Security Guidelines (Design Principles) Extension Framework (ADR-005) Multi-User Deployment Patterns","breadcrumbs":"ADR-003: Workspace Isolation » References","id":"1336","title":"References"},"1337":{"body":"","breadcrumbs":"ADR-004: Hybrid Architecture » ADR-004: Hybrid Architecture","id":"1337","title":"ADR-004: Hybrid Architecture"},"1338":{"body":"Accepted","breadcrumbs":"ADR-004: Hybrid Architecture » Status","id":"1338","title":"Status"},"1339":{"body":"Provisioning encountered fundamental limitations with a pure Nushell implementation that required architectural solutions: Deep Call Stack Limitations : Nushell\'s open command fails in deep call contexts (enumerate | each), causing \\"Type not supported\\" errors in template.nu:71 Performance Bottlenecks : Complex workflow orchestration hitting Nushell\'s performance limits Concurrency Constraints : Limited parallel processing capabilities in Nushell for batch operations Integration Complexity : Need for REST API endpoints and external system integration State Management : Complex state tracking and persistence requirements beyond Nushell\'s capabilities Business Logic Preservation : 65+ existing Nushell files with domain expertise that shouldn\'t be rewritten Developer Productivity : Nushell excels for configuration management and domain-specific operations The system needed an architecture that: Solves Nushell\'s technical limitations without losing business logic Leverages each language\'s strengths appropriately Maintains existing investment in Nushell domain knowledge Provides performance for coordination-heavy operations Enables modern integration patterns (REST APIs, async workflows) Preserves configuration-driven, Infrastructure as Code principles","breadcrumbs":"ADR-004: Hybrid Architecture » Context","id":"1339","title":"Context"},"134":{"body":"# Create the actual infrastructure\\nprovisioning server create --infra local-demo # Wait for completion\\nprovisioning server list --infra local-demo","breadcrumbs":"Getting Started » Step 5: Create Your Infrastructure","id":"134","title":"Step 5: Create Your Infrastructure"},"1340":{"body":"Implement a Hybrid Rust/Nushell Architecture with clear separation of concerns:","breadcrumbs":"ADR-004: Hybrid Architecture » Decision","id":"1340","title":"Decision"},"1341":{"body":"1. Coordination Layer (Rust) Orchestrator : High-performance workflow coordination and task scheduling REST API Server : HTTP endpoints for external integration State Management : Persistent state tracking with checkpoint recovery Batch Processing : Parallel execution of complex workflows File-based Persistence : Lightweight task queue using reliable file storage Error Recovery : Sophisticated error handling and rollback capabilities 2. Business Logic Layer (Nushell) Provider Implementations : Cloud provider-specific operations (AWS, UpCloud, local) Task Services : Infrastructure service management (Kubernetes, networking, storage) Configuration Management : KCL-based configuration processing and validation Template Processing : Infrastructure-as-Code template generation CLI Interface : User-facing command-line tools and workflows Domain Operations : All business-specific logic and operations","breadcrumbs":"ADR-004: Hybrid Architecture » Architecture Layers","id":"1341","title":"Architecture Layers"},"1342":{"body":"Rust → Nushell Communication // Rust orchestrator invokes Nushell scripts via process execution\\nlet result = Command::new(\\"nu\\") .arg(\\"-c\\") .arg(\\"use core/nulib/workflows/server_create.nu *; server_create_workflow \'name\' \'\' []\\") .output()?; Nushell → Rust Communication # Nushell submits workflows to Rust orchestrator via HTTP API\\nhttp post \\"http://localhost:9090/workflows/servers/create\\" { name: \\"server-name\\", provider: \\"upcloud\\", config: $server_config\\n} Data Exchange Format Structured JSON : All data exchange via JSON for type safety and interoperability Configuration TOML : Configuration data in TOML format for human readability State Files : Lightweight file-based state exchange between layers","breadcrumbs":"ADR-004: Hybrid Architecture » Integration Patterns","id":"1342","title":"Integration Patterns"},"1343":{"body":"Language Strengths : Use each language for what it does best Business Logic Preservation : All existing domain knowledge stays in Nushell Performance Critical Path : Coordination and orchestration in Rust Clear Boundaries : Well-defined interfaces between layers Configuration Driven : Both layers respect configuration-driven architecture Error Handling : Coordinated error handling across language boundaries State Consistency : Consistent state management across hybrid system","breadcrumbs":"ADR-004: Hybrid Architecture » Key Architectural Principles","id":"1343","title":"Key Architectural Principles"},"1344":{"body":"","breadcrumbs":"ADR-004: Hybrid Architecture » Consequences","id":"1344","title":"Consequences"},"1345":{"body":"Technical Limitations Solved : Eliminates Nushell deep call stack issues Performance Optimized : High-performance coordination while preserving productivity Business Logic Preserved : 65+ Nushell files with domain expertise maintained Modern Integration : REST APIs and async workflows enabled Development Efficiency : Developers can use optimal language for each task Batch Processing : Parallel workflow execution with sophisticated state management Error Recovery : Advanced error handling and rollback capabilities Scalability : Architecture scales to complex multi-provider workflows Maintainability : Clear separation of concerns between layers","breadcrumbs":"ADR-004: Hybrid Architecture » Positive","id":"1345","title":"Positive"},"1346":{"body":"Complexity Increase : Two-language system requires more architectural coordination Integration Overhead : Data serialization/deserialization between languages Development Skills : Team needs expertise in both Rust and Nushell Testing Complexity : Must test integration between language layers Deployment Complexity : Two runtime environments must be coordinated Debugging Challenges : Debugging across language boundaries more complex","breadcrumbs":"ADR-004: Hybrid Architecture » Negative","id":"1346","title":"Negative"},"1347":{"body":"Development Patterns : Different patterns for each layer while maintaining consistency Documentation Strategy : Language-specific documentation with integration guides Tool Chain : Multiple development tool chains must be maintained Performance Characteristics : Different performance characteristics for different operations","breadcrumbs":"ADR-004: Hybrid Architecture » Neutral","id":"1347","title":"Neutral"},"1348":{"body":"","breadcrumbs":"ADR-004: Hybrid Architecture » Alternatives Considered","id":"1348","title":"Alternatives Considered"},"1349":{"body":"Continue with Nushell-only approach and work around limitations. Rejected : Technical limitations are fundamental and cannot be worked around without compromising functionality. Deep call stack issues are architectural.","breadcrumbs":"ADR-004: Hybrid Architecture » Alternative 1: Pure Nushell Implementation","id":"1349","title":"Alternative 1: Pure Nushell Implementation"},"135":{"body":"","breadcrumbs":"Getting Started » Working with Services","id":"135","title":"Working with Services"},"1350":{"body":"Rewrite entire system in Rust for consistency. Rejected : Would lose 65+ files of domain expertise and Nushell\'s productivity advantages for configuration management. Massive development effort.","breadcrumbs":"ADR-004: Hybrid Architecture » Alternative 2: Complete Rust Rewrite","id":"1350","title":"Alternative 2: Complete Rust Rewrite"},"1351":{"body":"Rewrite system in Go for simplicity and performance. Rejected : Same issues as Rust rewrite - loses domain expertise and Nushell\'s configuration strengths. Go doesn\'t provide significant advantages.","breadcrumbs":"ADR-004: Hybrid Architecture » Alternative 3: Pure Go Implementation","id":"1351","title":"Alternative 3: Pure Go Implementation"},"1352":{"body":"Use Python for coordination and shell scripts for operations. Rejected : Loses type safety and configuration-driven advantages of current system. Python adds dependency complexity.","breadcrumbs":"ADR-004: Hybrid Architecture » Alternative 4: Python/Shell Hybrid","id":"1352","title":"Alternative 4: Python/Shell Hybrid"},"1353":{"body":"Run Nushell and coordination layer in separate containers. Rejected : Adds deployment complexity and network communication overhead. Complicates local development significantly.","breadcrumbs":"ADR-004: Hybrid Architecture » Alternative 5: Container-Based Separation","id":"1353","title":"Alternative 5: Container-Based Separation"},"1354":{"body":"","breadcrumbs":"ADR-004: Hybrid Architecture » Implementation Details","id":"1354","title":"Implementation Details"},"1355":{"body":"Task Queue : File-based persistent queue for reliable workflow management HTTP Server : REST API for workflow submission and monitoring State Manager : Checkpoint-based state tracking with recovery Process Manager : Nushell script execution with proper isolation Error Handler : Comprehensive error recovery and rollback logic","breadcrumbs":"ADR-004: Hybrid Architecture » Orchestrator Components","id":"1355","title":"Orchestrator Components"},"1356":{"body":"HTTP REST : Primary API for external integration JSON Data Exchange : Structured data format for all communication File-based State : Lightweight persistence without database dependencies Process Execution : Secure subprocess execution for Nushell operations","breadcrumbs":"ADR-004: Hybrid Architecture » Integration Protocols","id":"1356","title":"Integration Protocols"},"1357":{"body":"Rust Development : Focus on coordination, performance, and integration Nushell Development : Focus on business logic, providers, and task services Integration Testing : Validate communication between layers End-to-End Validation : Complete workflow testing across both layers","breadcrumbs":"ADR-004: Hybrid Architecture » Development Workflow","id":"1357","title":"Development Workflow"},"1358":{"body":"Structured Logging : JSON logs from both Rust and Nushell components Metrics Collection : Performance metrics from coordination layer Health Checks : System health monitoring across both layers Workflow Tracking : Complete audit trail of workflow execution","breadcrumbs":"ADR-004: Hybrid Architecture » Monitoring and Observability","id":"1358","title":"Monitoring and Observability"},"1359":{"body":"","breadcrumbs":"ADR-004: Hybrid Architecture » Migration Strategy","id":"1359","title":"Migration Strategy"},"136":{"body":"Let\'s install a containerized service: # Install Docker/containerd\\nprovisioning taskserv create containerd --infra local-demo # Verify installation\\nprovisioning taskserv list --infra local-demo","breadcrumbs":"Getting Started » Installing Your First Service","id":"136","title":"Installing Your First Service"},"1360":{"body":"✅ Rust orchestrator implementation ✅ REST API endpoints ✅ File-based task queue ✅ Basic Nushell integration","breadcrumbs":"ADR-004: Hybrid Architecture » Phase 1: Core Infrastructure (Completed)","id":"1360","title":"Phase 1: Core Infrastructure (Completed)"},"1361":{"body":"✅ Server creation workflows ✅ Task service workflows ✅ Cluster deployment workflows ✅ State management and recovery","breadcrumbs":"ADR-004: Hybrid Architecture » Phase 2: Workflow Integration (Completed)","id":"1361","title":"Phase 2: Workflow Integration (Completed)"},"1362":{"body":"✅ Batch workflow processing ✅ Dependency resolution ✅ Rollback capabilities ✅ Real-time monitoring","breadcrumbs":"ADR-004: Hybrid Architecture » Phase 3: Advanced Features (Completed)","id":"1362","title":"Phase 3: Advanced Features (Completed)"},"1363":{"body":"Deep Call Stack Limitations (CLAUDE.md - Architectural Lessons Learned) Configuration-Driven Architecture (ADR-002) Batch Workflow System (CLAUDE.md - v3.1.0) Integration Patterns Documentation Performance Benchmarking Results","breadcrumbs":"ADR-004: Hybrid Architecture » References","id":"1363","title":"References"},"1364":{"body":"","breadcrumbs":"ADR-005: Extension Framework » ADR-005: Extension Framework","id":"1364","title":"ADR-005: Extension Framework"},"1365":{"body":"Accepted","breadcrumbs":"ADR-005: Extension Framework » Status","id":"1365","title":"Status"},"1366":{"body":"Provisioning required a flexible extension mechanism to support: Custom Providers : Organizations need to add custom cloud providers beyond AWS, UpCloud, and local Custom Task Services : Users need to integrate proprietary infrastructure services Custom Workflows : Complex organizations require custom orchestration patterns Third-Party Integration : Need to integrate with existing toolchains and systems User Customization : Power users want to extend and modify system behavior Plugin Ecosystem : Enable community contributions and extensions Isolation Requirements : Extensions must not compromise system stability Discovery Mechanism : System must automatically discover and load extensions Version Compatibility : Extensions must work across system version upgrades Configuration Integration : Extensions should integrate with configuration-driven architecture The system needed an extension framework that provides: Clear extension API and interfaces Safe isolation of extension code Automatic discovery and loading Configuration integration Version compatibility management Developer-friendly extension development patterns","breadcrumbs":"ADR-005: Extension Framework » Context","id":"1366","title":"Context"},"1367":{"body":"Implement a registry-based extension framework with structured discovery and isolation:","breadcrumbs":"ADR-005: Extension Framework » Decision","id":"1367","title":"Decision"},"1368":{"body":"Extension Types Provider Extensions : Custom cloud providers and infrastructure backends Task Service Extensions : Custom infrastructure services and components Workflow Extensions : Custom orchestration and deployment patterns CLI Extensions : Additional command-line tools and interfaces Template Extensions : Custom configuration and code generation templates Integration Extensions : External system integrations and connectors","breadcrumbs":"ADR-005: Extension Framework » Extension Architecture","id":"1368","title":"Extension Architecture"},"1369":{"body":"extensions/\\n├── providers/ # Provider extensions\\n│ └── custom-cloud/\\n│ ├── extension.toml # Extension manifest\\n│ ├── kcl/ # KCL configuration schemas\\n│ ├── nulib/ # Nushell implementation\\n│ └── templates/ # Configuration templates\\n├── taskservs/ # Task service extensions\\n│ └── custom-service/\\n│ ├── extension.toml\\n│ ├── kcl/\\n│ ├── nulib/\\n│ └── manifests/ # Kubernetes manifests\\n├── workflows/ # Workflow extensions\\n│ └── custom-workflow/\\n│ ├── extension.toml\\n│ └── nulib/\\n├── cli/ # CLI extensions\\n│ └── custom-commands/\\n│ ├── extension.toml\\n│ └── nulib/\\n└── integrations/ # Integration extensions └── external-tool/ ├── extension.toml └── nulib/","breadcrumbs":"ADR-005: Extension Framework » Extension Structure","id":"1369","title":"Extension Structure"},"137":{"body":"For container orchestration: # Install Kubernetes\\nprovisioning taskserv create kubernetes --infra local-demo # This may take several minutes...","breadcrumbs":"Getting Started » Installing Kubernetes","id":"137","title":"Installing Kubernetes"},"1370":{"body":"[extension]\\nname = \\"custom-provider\\"\\nversion = \\"1.0.0\\"\\ntype = \\"provider\\"\\ndescription = \\"Custom cloud provider integration\\"\\nauthor = \\"Organization Name\\"\\nlicense = \\"MIT\\"\\nhomepage = \\"https://github.com/org/custom-provider\\" [compatibility]\\nprovisioning_version = \\">=3.0.0,<4.0.0\\"\\nnushell_version = \\">=0.107.0\\"\\nkcl_version = \\">=0.11.0\\" [dependencies]\\nhttp_client = \\">=1.0.0\\"\\njson_parser = \\">=2.0.0\\" [entry_points]\\ncli = \\"nulib/cli.nu\\"\\nprovider = \\"nulib/provider.nu\\"\\nconfig_schema = \\"schemas/schema.ncl\\" [configuration]\\nconfig_prefix = \\"custom_provider\\"\\nrequired_env_vars = [\\"CUSTOM_PROVIDER_API_KEY\\"]\\noptional_config = [\\"custom_provider.region\\", \\"custom_provider.timeout\\"]","breadcrumbs":"ADR-005: Extension Framework » Extension Manifest (extension.toml)","id":"1370","title":"Extension Manifest (extension.toml)"},"1371":{"body":"Registry-Based Discovery : Extensions registered in structured directories Manifest-Driven Loading : Extension capabilities declared in manifest files Version Compatibility : Explicit compatibility declarations and validation Configuration Integration : Extensions integrate with system configuration hierarchy Isolation Boundaries : Extensions isolated from core system and each other Standard Interfaces : Consistent interfaces across extension types Development Patterns : Clear patterns for extension development Community Support : Framework designed for community contributions","breadcrumbs":"ADR-005: Extension Framework » Key Framework Principles","id":"1371","title":"Key Framework Principles"},"1372":{"body":"","breadcrumbs":"ADR-005: Extension Framework » Consequences","id":"1372","title":"Consequences"},"1373":{"body":"Extensibility : System can be extended without modifying core code Community Growth : Enable community contributions and ecosystem development Organization Customization : Organizations can add proprietary integrations Innovation Support : New technologies can be integrated via extensions Isolation Safety : Extensions cannot compromise system stability Configuration Consistency : Extensions integrate with configuration-driven architecture Development Efficiency : Clear patterns reduce extension development time Version Management : Compatibility system prevents breaking changes Discovery Automation : Extensions automatically discovered and loaded","breadcrumbs":"ADR-005: Extension Framework » Positive","id":"1373","title":"Positive"},"1374":{"body":"Complexity Increase : Additional layer of abstraction and management Performance Overhead : Extension loading and isolation adds runtime cost Testing Complexity : Must test extension framework and individual extensions Documentation Burden : Need comprehensive extension development documentation Version Coordination : Extension compatibility matrix requires management Support Complexity : Community extensions may require support resources","breadcrumbs":"ADR-005: Extension Framework » Negative","id":"1374","title":"Negative"},"1375":{"body":"Development Patterns : Different patterns for extension vs core development Quality Control : Community extensions may vary in quality and maintenance Security Considerations : Extensions need security review and validation Dependency Management : Extension dependencies must be managed carefully","breadcrumbs":"ADR-005: Extension Framework » Neutral","id":"1375","title":"Neutral"},"1376":{"body":"","breadcrumbs":"ADR-005: Extension Framework » Alternatives Considered","id":"1376","title":"Alternatives Considered"},"1377":{"body":"Simple filesystem scanning for extension discovery. Rejected : No manifest validation or version compatibility checking. Fragile discovery mechanism.","breadcrumbs":"ADR-005: Extension Framework » Alternative 1: Filesystem-Based Extensions","id":"1377","title":"Alternative 1: Filesystem-Based Extensions"},"1378":{"body":"Store extension metadata in database for discovery. Rejected : Adds database dependency complexity. Over-engineering for extension discovery needs.","breadcrumbs":"ADR-005: Extension Framework » Alternative 2: Database-Backed Registry","id":"1378","title":"Alternative 2: Database-Backed Registry"},"1379":{"body":"Use existing package managers (cargo, npm) for extension distribution. Rejected : Complicates installation and creates external dependencies. Not suitable for corporate environments.","breadcrumbs":"ADR-005: Extension Framework » Alternative 3: Package Manager Integration","id":"1379","title":"Alternative 3: Package Manager Integration"},"138":{"body":"# Show all services on your infrastructure\\nprovisioning show servers --infra local-demo # Show specific service details\\nprovisioning show servers web-01 taskserv kubernetes --infra local-demo","breadcrumbs":"Getting Started » Checking Service Status","id":"138","title":"Checking Service Status"},"1380":{"body":"Each extension runs in isolated container. Rejected : Too heavy for simple extensions. Complicates development and deployment significantly.","breadcrumbs":"ADR-005: Extension Framework » Alternative 4: Container-Based Extensions","id":"1380","title":"Alternative 4: Container-Based Extensions"},"1381":{"body":"Traditional plugin architecture with dynamic loading. Rejected : Complex for shell-based system. Security and isolation challenges in Nushell environment.","breadcrumbs":"ADR-005: Extension Framework » Alternative 5: Plugin Architecture","id":"1381","title":"Alternative 5: Plugin Architecture"},"1382":{"body":"","breadcrumbs":"ADR-005: Extension Framework » Implementation Details","id":"1382","title":"Implementation Details"},"1383":{"body":"Directory Scanning : Scan extension directories for manifest files Manifest Validation : Parse and validate extension manifest Compatibility Check : Verify version compatibility requirements Dependency Resolution : Resolve extension dependencies Configuration Integration : Merge extension configuration schemas Entry Point Registration : Register extension entry points with system","breadcrumbs":"ADR-005: Extension Framework » Extension Discovery Process","id":"1383","title":"Extension Discovery Process"},"1384":{"body":"# Extension discovery and validation\\nprovisioning extension discover\\nprovisioning extension validate --extension custom-provider # Extension activation and configuration\\nprovisioning extension enable custom-provider\\nprovisioning extension configure custom-provider # Extension usage\\nprovisioning provider list # Shows custom providers\\nprovisioning server create --provider custom-provider # Extension management\\nprovisioning extension disable custom-provider\\nprovisioning extension update custom-provider","breadcrumbs":"ADR-005: Extension Framework » Extension Loading Lifecycle","id":"1384","title":"Extension Loading Lifecycle"},"1385":{"body":"Extensions integrate with hierarchical configuration system: # System configuration includes extension settings\\n[custom_provider]\\napi_endpoint = \\"https://api.custom-cloud.com\\"\\nregion = \\"us-west-1\\"\\ntimeout = 30 # Extension configuration follows same hierarchy rules\\n# System defaults → User config → Environment config → Runtime","breadcrumbs":"ADR-005: Extension Framework » Configuration Integration","id":"1385","title":"Configuration Integration"},"1386":{"body":"Sandboxed Execution : Extensions run in controlled environment Permission Model : Extensions declare required permissions in manifest Code Review : Community extensions require review process Digital Signatures : Extensions can be digitally signed for authenticity Audit Logging : Extension usage tracked in system audit logs","breadcrumbs":"ADR-005: Extension Framework » Security and Isolation","id":"1386","title":"Security and Isolation"},"1387":{"body":"Extension Templates : Scaffold new extensions from templates Development Tools : Testing and validation tools for extension developers Documentation Generation : Automatic documentation from extension manifests Integration Testing : Framework for testing extensions with core system","breadcrumbs":"ADR-005: Extension Framework » Development Support","id":"1387","title":"Development Support"},"1388":{"body":"","breadcrumbs":"ADR-005: Extension Framework » Extension Development Patterns","id":"1388","title":"Extension Development Patterns"},"1389":{"body":"# extensions/providers/custom-cloud/nulib/provider.nu\\nexport def list-servers [] -> table { http get $\\"($config.custom_provider.api_endpoint)/servers\\" | from json | select name status region\\n} export def create-server [name: string, config: record] -> record { let payload = { name: $name, instance_type: $config.plan, region: $config.zone } http post $\\"($config.custom_provider.api_endpoint)/servers\\" $payload | from json\\n}","breadcrumbs":"ADR-005: Extension Framework » Provider Extension Pattern","id":"1389","title":"Provider Extension Pattern"},"139":{"body":"","breadcrumbs":"Getting Started » Understanding Commands","id":"139","title":"Understanding Commands"},"1390":{"body":"# extensions/taskservs/custom-service/nulib/service.nu\\nexport def install [server: string] -> nothing { let manifest_data = open ./manifests/deployment.yaml | str replace \\"{{server}}\\" $server kubectl apply --server $server --data $manifest_data\\n} export def uninstall [server: string] -> nothing { kubectl delete deployment custom-service --server $server\\n}","breadcrumbs":"ADR-005: Extension Framework » Task Service Extension Pattern","id":"1390","title":"Task Service Extension Pattern"},"1391":{"body":"Workspace Isolation (ADR-003) Configuration System Architecture (ADR-002) Hybrid Architecture Integration (ADR-004) Community Extension Guidelines Extension Security Framework Extension Development Documentation","breadcrumbs":"ADR-005: Extension Framework » References","id":"1391","title":"References"},"1392":{"body":"Status : Implemented ✅ Date : 2025-09-30 Authors : Infrastructure Team Related : ADR-001 (Project Structure), ADR-004 (Hybrid Architecture)","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » ADR-006: Provisioning CLI Refactoring to Modular Architecture","id":"1392","title":"ADR-006: Provisioning CLI Refactoring to Modular Architecture"},"1393":{"body":"The main provisioning CLI script (provisioning/core/nulib/provisioning) had grown to 1,329 lines with a massive 1,100+ line match statement handling all commands. This monolithic structure created multiple critical problems:","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Context","id":"1393","title":"Context"},"1394":{"body":"Maintainability Crisis 54 command branches in one file Code duplication: Flag handling repeated 50+ times Hard to navigate: Finding specific command logic required scrolling through 1,000+ lines Mixed concerns: Routing, validation, and execution all intertwined Development Friction Adding new commands required editing massive file Testing was nearly impossible (monolithic, no isolation) High cognitive load for contributors Code review difficult due to file size Technical Debt 10+ lines of repetitive flag handling per command No separation of concerns Poor code reusability Difficult to test individual command handlers User Experience Issues No bi-directional help system Inconsistent command shortcuts Help system not fully integrated","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Problems Identified","id":"1394","title":"Problems Identified"},"1395":{"body":"We refactored the monolithic CLI into a modular, domain-driven architecture with the following structure: provisioning/core/nulib/\\n├── provisioning (211 lines) ⬅️ 84% reduction\\n├── main_provisioning/\\n│ ├── flags.nu (139 lines) ⭐ Centralized flag handling\\n│ ├── dispatcher.nu (264 lines) ⭐ Command routing\\n│ ├── mod.nu (updated)\\n│ └── commands/ ⭐ Domain-focused handlers\\n│ ├── configuration.nu (316 lines)\\n│ ├── development.nu (72 lines)\\n│ ├── generation.nu (78 lines)\\n│ ├── infrastructure.nu (117 lines)\\n│ ├── orchestration.nu (64 lines)\\n│ ├── utilities.nu (157 lines)\\n│ └── workspace.nu (56 lines)","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Decision","id":"1395","title":"Decision"},"1396":{"body":"1. Centralized Flag Handling (flags.nu) Single source of truth for all flag parsing and argument building: export def parse_common_flags [flags: record]: nothing -> record\\nexport def build_module_args [flags: record, extra: string = \\"\\"]: nothing -> string\\nexport def set_debug_env [flags: record]\\nexport def get_debug_flag [flags: record]: nothing -> string Benefits: Eliminates 50+ instances of duplicate code Single place to add/modify flags Consistent flag handling across all commands Reduced from 10 lines to 3 lines per command handler 2. Command Dispatcher (dispatcher.nu) Central routing with 80+ command mappings: export def get_command_registry []: nothing -> record # 80+ shortcuts\\nexport def dispatch_command [args: list, flags: record] # Main router Features: Command registry with shortcuts (ws → workspace, orch → orchestrator, etc.) Bi-directional help support (provisioning ws help works) Domain-based routing (infrastructure, orchestration, development, etc.) Special command handling (create, delete, price, etc.) 3. Domain Command Handlers (commands/*.nu) Seven focused modules organized by domain: Module Lines Responsibility infrastructure.nu 117 Server, taskserv, cluster, infra orchestration.nu 64 Workflow, batch, orchestrator development.nu 72 Module, layer, version, pack workspace.nu 56 Workspace, template generation.nu 78 Generate commands utilities.nu 157 SSH, SOPS, cache, providers configuration.nu 316 Env, show, init, validate Each handler: Exports handle__command function Uses shared flag handling Provides error messages with usage hints Isolated and testable","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Key Components","id":"1396","title":"Key Components"},"1397":{"body":"","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Architecture Principles","id":"1397","title":"Architecture Principles"},"1398":{"body":"Routing → dispatcher.nu Flag parsing → flags.nu Business logic → commands/*.nu Help system → help_system.nu (existing)","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » 1. Separation of Concerns","id":"1398","title":"1. Separation of Concerns"},"1399":{"body":"Each module has ONE clear purpose: Command handlers execute specific domains Dispatcher routes to correct handler Flags module normalizes all inputs","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » 2. Single Responsibility","id":"1399","title":"2. Single Responsibility"},"14":{"body":"The provisioning platform uses declarative configuration to manage infrastructure. Instead of manually creating resources, you define what you want in Nickel configuration files, and the system makes it happen.","breadcrumbs":"Home » Infrastructure as Code (IaC)","id":"14","title":"Infrastructure as Code (IaC)"},"140":{"body":"All commands follow this pattern: provisioning [global-options] [command-options] [arguments]","breadcrumbs":"Getting Started » Command Structure","id":"140","title":"Command Structure"},"1400":{"body":"Eliminated repetition: Flag handling: 50+ instances → 1 function Command routing: Scattered logic → Command registry Error handling: Consistent across all domains","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » 3. DRY (Don\'t Repeat Yourself)","id":"1400","title":"3. DRY (Don\'t Repeat Yourself)"},"1401":{"body":"Open for extension: Add new handlers easily Closed for modification: Core routing unchanged","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » 4. Open/Closed Principle","id":"1401","title":"4. Open/Closed Principle"},"1402":{"body":"All handlers depend on abstractions (flag records, not concrete flags): # Handler signature\\nexport def handle_infrastructure_command [ command: string ops: string flags: record # ⬅️ Abstraction, not concrete flags\\n]","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » 5. Dependency Inversion","id":"1402","title":"5. Dependency Inversion"},"1403":{"body":"","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Implementation Details","id":"1403","title":"Implementation Details"},"1404":{"body":"Phase 1: Foundation ✅ Created commands/ directory structure ✅ Created flags.nu with common flag handling ✅ Created initial command handlers (infrastructure, utilities, configuration) ✅ Created dispatcher.nu with routing logic ✅ Refactored main file (1,329 → 211 lines) ✅ Tested basic functionality Phase 2: Completion ✅ Fixed bi-directional help (provisioning ws help now works) ✅ Created remaining handlers (orchestration, development, workspace, generation) ✅ Removed duplicate code from dispatcher ✅ Added comprehensive test suite ✅ Verified all shortcuts work","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Migration Path (Completed in 2 Phases)","id":"1404","title":"Migration Path (Completed in 2 Phases)"},"1405":{"body":"Users can now access help in multiple ways: # All these work equivalently:\\nprovisioning help workspace\\nprovisioning workspace help # ⬅️ NEW: Bi-directional\\nprovisioning ws help # ⬅️ NEW: With shortcuts\\nprovisioning help ws # ⬅️ NEW: Shortcut in help Implementation: # Intercept \\"command help\\" → \\"help command\\"\\nlet first_op = if ($ops_list | length) > 0 { ($ops_list | get 0) } else { \\"\\" }\\nif $first_op in [\\"help\\" \\"h\\"] { exec $\\"($env.PROVISIONING_NAME)\\" help $task --notitles\\n}","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Bi-directional Help System","id":"1405","title":"Bi-directional Help System"},"1406":{"body":"Comprehensive shortcut system with 30+ mappings: Infrastructure: s → server t, task → taskserv cl → cluster i → infra Orchestration: wf, flow → workflow bat → batch orch → orchestrator Development: mod → module lyr → layer Workspace: ws → workspace tpl, tmpl → template","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Command Shortcuts","id":"1406","title":"Command Shortcuts"},"1407":{"body":"Comprehensive test suite created (tests/test_provisioning_refactor.nu):","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Testing","id":"1407","title":"Testing"},"1408":{"body":"✅ Main help display ✅ Category help (infrastructure, orchestration, development, workspace) ✅ Bi-directional help routing ✅ All command shortcuts ✅ Category shortcut help ✅ Command routing to correct handlers","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Test Coverage","id":"1408","title":"Test Coverage"},"1409":{"body":"📋 Testing main help... ✅\\n📋 Testing category help... ✅\\n🔄 Testing bi-directional help... ✅\\n⚡ Testing command shortcuts... ✅\\n📚 Testing category shortcut help... ✅\\n🎯 Testing command routing... ✅ 📊 TEST RESULTS: 6 passed, 0 failed","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Test Results","id":"1409","title":"Test Results"},"141":{"body":"Option Short Description --infra -i Specify infrastructure --check -c Dry run mode --debug -x Enable debug output --yes -y Auto-confirm actions","breadcrumbs":"Getting Started » Global Options","id":"141","title":"Global Options"},"1410":{"body":"","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Results","id":"1410","title":"Results"},"1411":{"body":"Metric Before After Improvement Main file size 1,329 lines 211 lines 84% reduction Command handler 1 massive match (1,100+ lines) 7 focused modules Domain separation Flag handling Repeated 50+ times 1 function 98% duplication removal Code per command 10 lines 3 lines 70% reduction Modules count 1 monolith 9 modules Modular architecture Test coverage None 6 test groups Comprehensive testing","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Quantitative Improvements","id":"1411","title":"Quantitative Improvements"},"1412":{"body":"Maintainability ✅ Easy to find specific command logic ✅ Clear separation of concerns ✅ Self-documenting structure ✅ Focused modules (< 320 lines each) Extensibility ✅ Add new commands: Just update appropriate handler ✅ Add new flags: Single function update ✅ Add new shortcuts: Update command registry ✅ No massive file edits required Testability ✅ Isolated command handlers ✅ Mockable dependencies ✅ Test individual domains ✅ Fast test execution Developer Experience ✅ Lower cognitive load ✅ Faster onboarding ✅ Easier code review ✅ Better IDE navigation","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Qualitative Improvements","id":"1412","title":"Qualitative Improvements"},"1413":{"body":"","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Trade-offs","id":"1413","title":"Trade-offs"},"1414":{"body":"Dramatically reduced complexity : 84% smaller main file Better organization : Domain-focused modules Easier testing : Isolated, testable units Improved maintainability : Clear structure, less duplication Enhanced UX : Bi-directional help, shortcuts Future-proof : Easy to extend","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Advantages","id":"1414","title":"Advantages"},"1415":{"body":"More files : 1 file → 9 files (but smaller, focused) Module imports : Need to import multiple modules (automated via mod.nu) Learning curve : New structure requires documentation (this ADR) Decision : Advantages significantly outweigh disadvantages.","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Disadvantages","id":"1415","title":"Disadvantages"},"1416":{"body":"","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Examples","id":"1416","title":"Examples"},"1417":{"body":"\\"server\\" => { let use_check = if $check { \\"--check \\"} else { \\"\\" } let use_yes = if $yes { \\"--yes\\" } else { \\"\\" } let use_wait = if $wait { \\"--wait\\" } else { \\"\\" } let use_keepstorage = if $keepstorage { \\"--keepstorage \\"} else { \\"\\" } let str_infra = if $infra != null { $\\"--infra ($infra) \\"} else { \\"\\" } let str_outfile = if $outfile != null { $\\"--outfile ($outfile) \\"} else { \\"\\" } let str_out = if $out != null { $\\"--out ($out) \\"} else { \\"\\" } let arg_include_notuse = if $include_notuse { $\\"--include_notuse \\"} else { \\"\\" } run_module $\\"($str_ops) ($str_infra) ($use_check)...\\" \\"server\\" --exec\\n}","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Before: Repetitive Flag Handling","id":"1417","title":"Before: Repetitive Flag Handling"},"1418":{"body":"def handle_server [ops: string, flags: record] { let args = build_module_args $flags $ops run_module $args \\"server\\" --exec\\n} Reduction: 10 lines → 3 lines (70% reduction)","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » After: Clean, Reusable","id":"1418","title":"After: Clean, Reusable"},"1419":{"body":"","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Future Considerations","id":"1419","title":"Future Considerations"},"142":{"body":"Command Purpose Example help Show help provisioning help env Show environment provisioning env list List resources provisioning list servers show Show details provisioning show settings validate Validate config provisioning validate config","breadcrumbs":"Getting Started » Essential Commands","id":"142","title":"Essential Commands"},"1420":{"body":"Unit test expansion : Add tests for each command handler Integration tests : End-to-end workflow tests Performance profiling : Measure routing overhead (expected to be negligible) Documentation generation : Auto-generate docs from handlers Plugin architecture : Allow third-party command extensions","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Potential Enhancements","id":"1420","title":"Potential Enhancements"},"1421":{"body":"See docs/development/COMMAND_HANDLER_GUIDE.md for: How to add new commands How to modify existing handlers How to add new shortcuts Testing guidelines","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Migration Guide for Contributors","id":"1421","title":"Migration Guide for Contributors"},"1422":{"body":"Architecture Overview : docs/architecture/system-overview.md Developer Guide : docs/development/COMMAND_HANDLER_GUIDE.md Main Project Docs : CLAUDE.md (updated with new structure) Test Suite : tests/test_provisioning_refactor.nu","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Related Documentation","id":"1422","title":"Related Documentation"},"1423":{"body":"This refactoring transforms the provisioning CLI from a monolithic, hard-to-maintain script into a modular, well-organized system following software engineering best practices. The 84% reduction in main file size, elimination of code duplication, and comprehensive test coverage position the project for sustainable long-term growth. The new architecture enables: Faster development : Add commands in minutes, not hours Better quality : Isolated testing catches bugs early Easier maintenance : Clear structure reduces cognitive load Enhanced UX : Shortcuts and bi-directional help improve usability Status : Successfully implemented and tested. All commands operational. Ready for production use. This ADR documents a major architectural improvement completed on 2025-09-30.","breadcrumbs":"ADR-006: Provisioning CLI Refactoring » Conclusion","id":"1423","title":"Conclusion"},"1424":{"body":"Status : Accepted Date : 2025-10-08 Deciders : Architecture Team Related : ADR-006 (KMS Service Integration)","breadcrumbs":"ADR-007: KMS Simplification » ADR-007: KMS Service Simplification to Age and Cosmian Backends","id":"1424","title":"ADR-007: KMS Service Simplification to Age and Cosmian Backends"},"1425":{"body":"The KMS service initially supported 4 backends: HashiCorp Vault, AWS KMS, Age, and Cosmian KMS. This created unnecessary complexity and unclear guidance about which backend to use for different environments.","breadcrumbs":"ADR-007: KMS Simplification » Context","id":"1425","title":"Context"},"1426":{"body":"Complexity : Supporting 4 different backends increased maintenance burden Dependencies : AWS SDK added significant compile time (~30 s) and binary size Confusion : No clear guidance on which backend to use when Cloud Lock-in : AWS KMS dependency limited infrastructure flexibility Operational Overhead : Vault requires server setup even for simple dev environments Code Duplication : Similar logic implemented 4 different ways","breadcrumbs":"ADR-007: KMS Simplification » Problems with 4-Backend Approach","id":"1426","title":"Problems with 4-Backend Approach"},"1427":{"body":"Most development work doesn\'t need server-based KMS Production deployments need enterprise-grade security features Age provides fast, offline encryption perfect for development Cosmian KMS offers confidential computing and zero-knowledge architecture Supporting Vault AND Cosmian is redundant (both are server-based KMS) AWS KMS locks us into AWS infrastructure","breadcrumbs":"ADR-007: KMS Simplification » Key Insights","id":"1427","title":"Key Insights"},"1428":{"body":"Simplify the KMS service to support only 2 backends: Age : For development and local testing Fast, offline, no server required Simple key generation with age-keygen X25519 encryption (modern, secure) Perfect for dev/test environments Cosmian KMS : For production deployments Enterprise-grade key management Confidential computing support (SGX/SEV) Zero-knowledge architecture Server-side key rotation Audit logging and compliance Multi-tenant support Remove support for: ❌ HashiCorp Vault (redundant with Cosmian) ❌ AWS KMS (cloud lock-in, complexity)","breadcrumbs":"ADR-007: KMS Simplification » Decision","id":"1428","title":"Decision"},"1429":{"body":"","breadcrumbs":"ADR-007: KMS Simplification » Consequences","id":"1429","title":"Consequences"},"143":{"body":"","breadcrumbs":"Getting Started » Working with Multiple Environments","id":"143","title":"Working with Multiple Environments"},"1430":{"body":"Simpler Code : 2 backends instead of 4 reduces complexity by 50% Faster Compilation : Removing AWS SDK saves ~30 seconds compile time Clear Guidance : Age = dev, Cosmian = prod (no confusion) Offline Development : Age works without network connectivity Better Security : Cosmian provides confidential computing (TEE) No Cloud Lock-in : Not dependent on AWS infrastructure Easier Testing : Age backend requires no setup Reduced Dependencies : Fewer external crates to maintain","breadcrumbs":"ADR-007: KMS Simplification » Positive","id":"1430","title":"Positive"},"1431":{"body":"Migration Required : Existing Vault/AWS KMS users must migrate Learning Curve : Teams must learn Age and Cosmian Cosmian Dependency : Production depends on Cosmian availability Cost : Cosmian may have licensing costs (cloud or self-hosted)","breadcrumbs":"ADR-007: KMS Simplification » Negative","id":"1431","title":"Negative"},"1432":{"body":"Feature Parity : Cosmian provides all features Vault/AWS had API Compatibility : Encrypt/decrypt API remains primarily the same Configuration Change : TOML config structure updated but similar","breadcrumbs":"ADR-007: KMS Simplification » Neutral","id":"1432","title":"Neutral"},"1433":{"body":"","breadcrumbs":"ADR-007: KMS Simplification » Implementation","id":"1433","title":"Implementation"},"1434":{"body":"src/age/client.rs (167 lines) - Age encryption client src/age/mod.rs (3 lines) - Age module exports src/cosmian/client.rs (294 lines) - Cosmian KMS client src/cosmian/mod.rs (3 lines) - Cosmian module exports docs/migration/KMS_SIMPLIFICATION.md (500+ lines) - Migration guide","breadcrumbs":"ADR-007: KMS Simplification » Files Created","id":"1434","title":"Files Created"},"1435":{"body":"src/lib.rs - Updated exports (age, cosmian instead of aws, vault) src/types.rs - Updated error types and config enum src/service.rs - Simplified to 2 backends (180 lines, was 213) Cargo.toml - Removed AWS deps, added age = \\"0.10\\" README.md - Complete rewrite for new backends provisioning/config/kms.toml - Simplified configuration","breadcrumbs":"ADR-007: KMS Simplification » Files Modified","id":"1435","title":"Files Modified"},"1436":{"body":"src/aws/client.rs - AWS KMS client src/aws/envelope.rs - Envelope encryption helpers src/aws/mod.rs - AWS module src/vault/client.rs - Vault client src/vault/mod.rs - Vault module","breadcrumbs":"ADR-007: KMS Simplification » Files Deleted","id":"1436","title":"Files Deleted"},"1437":{"body":"Removed : aws-sdk-kms = \\"1\\" aws-config = \\"1\\" aws-credential-types = \\"1\\" aes-gcm = \\"0.10\\" (was only for AWS envelope encryption) Added : age = \\"0.10\\" tempfile = \\"3\\" (dev dependency for tests) Kept : All Axum web framework deps reqwest (for Cosmian HTTP API) base64, serde, tokio, etc.","breadcrumbs":"ADR-007: KMS Simplification » Dependencies Changed","id":"1437","title":"Dependencies Changed"},"1438":{"body":"","breadcrumbs":"ADR-007: KMS Simplification » Migration Path","id":"1438","title":"Migration Path"},"1439":{"body":"# 1. Install Age\\nbrew install age # or apt install age # 2. Generate keys\\nage-keygen -o ~/.config/provisioning/age/private_key.txt\\nage-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisioning/age/public_key.txt # 3. Update config to use Age backend\\n# 4. Re-encrypt development secrets","breadcrumbs":"ADR-007: KMS Simplification » For Development","id":"1439","title":"For Development"},"144":{"body":"The system supports multiple environments: dev - Development and testing test - Integration testing prod - Production deployment","breadcrumbs":"Getting Started » Environment Concepts","id":"144","title":"Environment Concepts"},"1440":{"body":"# 1. Set up Cosmian KMS (cloud or self-hosted)\\n# 2. Create master key in Cosmian\\n# 3. Migrate secrets from Vault/AWS to Cosmian\\n# 4. Update production config\\n# 5. Deploy new KMS service See docs/migration/KMS_SIMPLIFICATION.md for detailed steps.","breadcrumbs":"ADR-007: KMS Simplification » For Production","id":"1440","title":"For Production"},"1441":{"body":"","breadcrumbs":"ADR-007: KMS Simplification » Alternatives Considered","id":"1441","title":"Alternatives Considered"},"1442":{"body":"Pros : No migration required Maximum flexibility Cons : Continued complexity Maintenance burden Unclear guidance Rejected : Complexity outweighs benefits","breadcrumbs":"ADR-007: KMS Simplification » Alternative 1: Keep All 4 Backends","id":"1442","title":"Alternative 1: Keep All 4 Backends"},"1443":{"body":"Pros : Single backend Enterprise-grade everywhere Cons : Requires Cosmian server for development Slower dev iteration Network dependency for local dev Rejected : Development experience matters","breadcrumbs":"ADR-007: KMS Simplification » Alternative 2: Only Cosmian (No Age)","id":"1443","title":"Alternative 2: Only Cosmian (No Age)"},"1444":{"body":"Pros : Simplest solution No server required Cons : Not suitable for production No audit logging No key rotation No multi-tenant support Rejected : Production needs enterprise features","breadcrumbs":"ADR-007: KMS Simplification » Alternative 3: Only Age (No Production Backend)","id":"1444","title":"Alternative 3: Only Age (No Production Backend)"},"1445":{"body":"Pros : Vault is widely known No Cosmian dependency Cons : Vault lacks confidential computing Vault server still required No zero-knowledge architecture Rejected : Cosmian provides better security features","breadcrumbs":"ADR-007: KMS Simplification » Alternative 4: Age + HashiCorp Vault","id":"1445","title":"Alternative 4: Age + HashiCorp Vault"},"1446":{"body":"","breadcrumbs":"ADR-007: KMS Simplification » Metrics","id":"1446","title":"Metrics"},"1447":{"body":"Total Lines Removed : ~800 lines (AWS + Vault implementations) Total Lines Added : ~470 lines (Age + Cosmian + docs) Net Reduction : ~330 lines","breadcrumbs":"ADR-007: KMS Simplification » Code Reduction","id":"1447","title":"Code Reduction"},"1448":{"body":"Crates Removed : 4 (aws-sdk-kms, aws-config, aws-credential-types, aes-gcm) Crates Added : 1 (age) Net Reduction : 3 crates","breadcrumbs":"ADR-007: KMS Simplification » Dependency Reduction","id":"1448","title":"Dependency Reduction"},"1449":{"body":"Before : ~90 seconds (with AWS SDK) After : ~60 seconds (without AWS SDK) Improvement : 33% faster","breadcrumbs":"ADR-007: KMS Simplification » Compilation Time","id":"1449","title":"Compilation Time"},"145":{"body":"# Set environment for this session\\nexport PROVISIONING_ENV=dev\\nprovisioning env # Or specify per command\\nprovisioning --environment dev server create","breadcrumbs":"Getting Started » Switching Environments","id":"145","title":"Switching Environments"},"1450":{"body":"","breadcrumbs":"ADR-007: KMS Simplification » Compliance","id":"1450","title":"Compliance"},"1451":{"body":"Age Security : X25519 (Curve25519) encryption, modern and secure Cosmian Security : Confidential computing, zero-knowledge, enterprise-grade No Regression : Security features maintained or improved Clear Separation : Dev (Age) never used for production secrets","breadcrumbs":"ADR-007: KMS Simplification » Security Considerations","id":"1451","title":"Security Considerations"},"1452":{"body":"Unit Tests : Both backends have comprehensive test coverage Integration Tests : Age tests run without external deps Cosmian Tests : Require test server (marked as #[ignore]) Migration Tests : Verify old configs fail gracefully","breadcrumbs":"ADR-007: KMS Simplification » Testing Requirements","id":"1452","title":"Testing Requirements"},"1453":{"body":"Age Encryption - Modern encryption tool Cosmian KMS - Enterprise KMS with confidential computing ADR-006 - Previous KMS integration Migration Guide - Detailed migration steps","breadcrumbs":"ADR-007: KMS Simplification » References","id":"1453","title":"References"},"1454":{"body":"Age is designed by Filippo Valsorda (Google, Go security team) Cosmian provides FIPS 140-2 Level 3 compliance (when using certified hardware) This decision aligns with project goal of reducing cloud provider dependencies Migration timeline: 6 weeks for full adoption","breadcrumbs":"ADR-007: KMS Simplification » Notes","id":"1454","title":"Notes"},"1455":{"body":"Status : Accepted Date : 2025-10-08 Deciders : Architecture Team Tags : security, authorization, cedar, policy-engine","breadcrumbs":"ADR-008: Cedar Authorization » ADR-008: Cedar Authorization Policy Engine Integration","id":"1455","title":"ADR-008: Cedar Authorization Policy Engine Integration"},"1456":{"body":"The Provisioning platform requires fine-grained authorization controls to manage access to infrastructure resources across multiple environments (development, staging, production). The authorization system must: Support complex authorization rules (MFA, IP restrictions, time windows, approvals) Be auditable and version-controlled Allow hot-reload of policies without restart Integrate with JWT tokens for identity Scale to thousands of authorization decisions per second Be maintainable by security team without code changes Traditional code-based authorization (if/else statements) is difficult to audit, maintain, and scale.","breadcrumbs":"ADR-008: Cedar Authorization » Context and Problem Statement","id":"1456","title":"Context and Problem Statement"},"1457":{"body":"Security : Critical for production infrastructure access Auditability : Compliance requirements demand clear authorization policies Flexibility : Policies change more frequently than code Performance : Low-latency authorization decisions (<10 ms) Maintainability : Security team should update policies without developers Type Safety : Prevent policy errors before deployment","breadcrumbs":"ADR-008: Cedar Authorization » Decision Drivers","id":"1457","title":"Decision Drivers"},"1458":{"body":"","breadcrumbs":"ADR-008: Cedar Authorization » Considered Options","id":"1458","title":"Considered Options"},"1459":{"body":"Implement authorization logic directly in Rust/Nushell code. Pros : Full control and flexibility No external dependencies Simple to understand for small use cases Cons : Hard to audit and maintain Requires code deployment for policy changes No type safety for policies Difficult to test all combinations Not declarative","breadcrumbs":"ADR-008: Cedar Authorization » Option 1: Code-Based Authorization (Current State)","id":"1459","title":"Option 1: Code-Based Authorization (Current State)"},"146":{"body":"Create environment configs: # Development environment\\nprovisioning init config dev # Production environment\\nprovisioning init config prod","breadcrumbs":"Getting Started » Environment-Specific Configuration","id":"146","title":"Environment-Specific Configuration"},"1460":{"body":"Use OPA with Rego policy language. Pros : Industry standard Rich ecosystem Rego is powerful Cons : Rego is complex to learn Requires separate service deployment Performance overhead (HTTP calls) Policies not type-checked","breadcrumbs":"ADR-008: Cedar Authorization » Option 2: OPA (Open Policy Agent)","id":"1460","title":"Option 2: OPA (Open Policy Agent)"},"1461":{"body":"Use AWS Cedar policy language integrated directly into orchestrator. Pros : Type-safe policy language Fast (compiled, no network overhead) Schema-based validation Declarative and auditable Hot-reload support Rust library (no external service) Deny-by-default security model Cons : Recently introduced (2023) Smaller ecosystem than OPA Learning curve for policy authors","breadcrumbs":"ADR-008: Cedar Authorization » Option 3: Cedar Policy Engine (Chosen)","id":"1461","title":"Option 3: Cedar Policy Engine (Chosen)"},"1462":{"body":"Use Casbin authorization library. Pros : Multiple policy models (ACL, RBAC, ABAC) Rust bindings available Cons : Less declarative than Cedar Weaker type safety More imperative style","breadcrumbs":"ADR-008: Cedar Authorization » Option 4: Casbin","id":"1462","title":"Option 4: Casbin"},"1463":{"body":"Chosen Option : Option 3 - Cedar Policy Engine","breadcrumbs":"ADR-008: Cedar Authorization » Decision Outcome","id":"1463","title":"Decision Outcome"},"1464":{"body":"Type Safety : Cedar\'s schema validation prevents policy errors before deployment Performance : Native Rust library, no network overhead, <1 ms authorization decisions Auditability : Declarative policies in version control Hot Reload : Update policies without orchestrator restart AWS Standard : Used in production by AWS for AVP (Amazon Verified Permissions) Deny-by-Default : Secure by design","breadcrumbs":"ADR-008: Cedar Authorization » Rationale","id":"1464","title":"Rationale"},"1465":{"body":"Architecture ┌─────────────────────────────────────────────────────────┐\\n│ Orchestrator │\\n├─────────────────────────────────────────────────────────┤\\n│ │\\n│ HTTP Request │\\n│ ↓ │\\n│ ┌──────────────────┐ │\\n│ │ JWT Validation │ ← Token Validator │\\n│ └────────┬─────────┘ │\\n│ ↓ │\\n│ ┌──────────────────┐ │\\n│ │ Cedar Engine │ ← Policy Loader │\\n│ │ │ (Hot Reload) │\\n│ │ • Check Policies │ │\\n│ │ • Evaluate Rules │ │\\n│ │ • Context Check │ │\\n│ └────────┬─────────┘ │\\n│ ↓ │\\n│ Allow / Deny │\\n│ │\\n└─────────────────────────────────────────────────────────┘ Policy Organization provisioning/config/cedar-policies/\\n├── schema.cedar # Entity and action definitions\\n├── production.cedar # Production environment policies\\n├── development.cedar # Development environment policies\\n├── admin.cedar # Administrative policies\\n└── README.md # Documentation Rust Implementation provisioning/platform/orchestrator/src/security/\\n├── cedar.rs # Cedar engine integration (450 lines)\\n├── policy_loader.rs # Policy loading with hot reload (320 lines)\\n├── authorization.rs # Middleware integration (380 lines)\\n├── mod.rs # Module exports\\n└── tests.rs # Comprehensive tests (450 lines) Key Components CedarEngine : Core authorization engine Load policies from strings Load schema for validation Authorize requests Policy statistics PolicyLoader : File-based policy management Load policies from directory Hot reload on file changes (notify crate) Validate policy syntax Schema validation Authorization Middleware : Axum integration Extract JWT claims Build authorization context (IP, MFA, time) Check authorization Return 403 Forbidden on deny Policy Files : Declarative authorization rules Production: MFA, approvals, IP restrictions, business hours Development: Permissive for developers Admin: Platform admin, SRE, audit team policies Context Variables AuthorizationContext { mfa_verified: bool, // MFA verification status ip_address: String, // Client IP address time: String, // ISO 8601 timestamp approval_id: Option, // Approval ID (optional) reason: Option, // Reason for operation force: bool, // Force flag additional: HashMap, // Additional context\\n} Example Policy // Production deployments require MFA verification\\n@id(\\"prod-deploy-mfa\\")\\n@description(\\"All production deployments must have MFA verification\\")\\npermit ( principal, action == Provisioning::Action::\\"deploy\\", resource in Provisioning::Environment::\\"production\\"\\n) when { context.mfa_verified == true\\n};","breadcrumbs":"ADR-008: Cedar Authorization » Implementation Details","id":"1465","title":"Implementation Details"},"1466":{"body":"JWT Tokens : Extract principal and context from validated JWT Audit System : Log all authorization decisions Control Center : UI for policy management and testing CLI : Policy validation and testing commands","breadcrumbs":"ADR-008: Cedar Authorization » Integration Points","id":"1466","title":"Integration Points"},"1467":{"body":"Deny by Default : Cedar defaults to deny all actions Schema Validation : Type-check policies before loading Version Control : All policies in git for auditability Principle of Least Privilege : Grant minimum necessary permissions Defense in Depth : Combine with JWT validation and rate limiting Separation of Concerns : Security team owns policies, developers own code","breadcrumbs":"ADR-008: Cedar Authorization » Security Best Practices","id":"1467","title":"Security Best Practices"},"1468":{"body":"","breadcrumbs":"ADR-008: Cedar Authorization » Consequences","id":"1468","title":"Consequences"},"1469":{"body":"✅ Auditable : All policies in version control ✅ Type-Safe : Schema validation prevents errors ✅ Fast : <1 ms authorization decisions ✅ Maintainable : Security team can update policies independently ✅ Hot Reload : No downtime for policy updates ✅ Testable : Comprehensive test suite for policies ✅ Declarative : Clear intent, no hidden logic","breadcrumbs":"ADR-008: Cedar Authorization » Positive","id":"1469","title":"Positive"},"147":{"body":"","breadcrumbs":"Getting Started » Common Workflows","id":"147","title":"Common Workflows"},"1470":{"body":"❌ Learning Curve : Team must learn Cedar policy language ❌ New Technology : Cedar is relatively new (2023) ❌ Ecosystem : Smaller community than OPA ❌ Tooling : Limited IDE support compared to Rego","breadcrumbs":"ADR-008: Cedar Authorization » Negative","id":"1470","title":"Negative"},"1471":{"body":"🔶 Migration : Existing authorization logic needs migration to Cedar 🔶 Policy Complexity : Complex rules may be harder to express 🔶 Debugging : Policy debugging requires understanding Cedar evaluation","breadcrumbs":"ADR-008: Cedar Authorization » Neutral","id":"1471","title":"Neutral"},"1472":{"body":"","breadcrumbs":"ADR-008: Cedar Authorization » Compliance","id":"1472","title":"Compliance"},"1473":{"body":"SOC 2 : Auditable access control policies ISO 27001 : Access control management GDPR : Data access authorization and logging NIST 800-53 : AC-3 Access Enforcement","breadcrumbs":"ADR-008: Cedar Authorization » Security Standards","id":"1473","title":"Security Standards"},"1474":{"body":"All authorization decisions include: Principal (user/team) Action performed Resource accessed Context (MFA, IP, time) Decision (allow/deny) Policies evaluated","breadcrumbs":"ADR-008: Cedar Authorization » Audit Requirements","id":"1474","title":"Audit Requirements"},"1475":{"body":"","breadcrumbs":"ADR-008: Cedar Authorization » Migration Path","id":"1475","title":"Migration Path"},"1476":{"body":"✅ Cedar engine integration ✅ Policy loader with hot reload ✅ Authorization middleware ✅ Production, development, and admin policies ✅ Comprehensive tests","breadcrumbs":"ADR-008: Cedar Authorization » Phase 1: Implementation (Completed)","id":"1476","title":"Phase 1: Implementation (Completed)"},"1477":{"body":"🔲 Enable Cedar authorization in orchestrator 🔲 Migrate existing authorization logic to Cedar policies 🔲 Add authorization checks to all API endpoints 🔲 Integrate with audit logging","breadcrumbs":"ADR-008: Cedar Authorization » Phase 2: Rollout (Next)","id":"1477","title":"Phase 2: Rollout (Next)"},"1478":{"body":"🔲 Control Center policy editor UI 🔲 Policy testing UI 🔲 Policy simulation and dry-run mode 🔲 Policy analytics and insights 🔲 Advanced context variables (location, device type)","breadcrumbs":"ADR-008: Cedar Authorization » Phase 3: Enhancement (Future)","id":"1478","title":"Phase 3: Enhancement (Future)"},"1479":{"body":"","breadcrumbs":"ADR-008: Cedar Authorization » Alternatives Considered","id":"1479","title":"Alternatives Considered"},"148":{"body":"# 1. Create development workspace\\nmkdir ~/dev-environment\\ncd ~/dev-environment # 2. Generate infrastructure\\nprovisioning generate infra --new dev-setup # 3. Customize for development\\n# Edit settings.ncl to add development tools # 4. Deploy\\nprovisioning server create --infra dev-setup --check\\nprovisioning server create --infra dev-setup # 5. Install development services\\nprovisioning taskserv create kubernetes --infra dev-setup\\nprovisioning taskserv create containerd --infra dev-setup","breadcrumbs":"Getting Started » Workflow 1: Development Environment","id":"148","title":"Workflow 1: Development Environment"},"1480":{"body":"Keep authorization logic in Rust/Nushell code. Rejected Because : Not auditable Requires code changes for policy updates Difficult to test all combinations Not compliant with security standards","breadcrumbs":"ADR-008: Cedar Authorization » Alternative 1: Continue with Code-Based Authorization","id":"1480","title":"Alternative 1: Continue with Code-Based Authorization"},"1481":{"body":"Use Cedar for high-level policies, code for fine-grained checks. Rejected Because : Complexity of two authorization systems Unclear separation of concerns Harder to audit","breadcrumbs":"ADR-008: Cedar Authorization » Alternative 2: Hybrid Approach","id":"1481","title":"Alternative 2: Hybrid Approach"},"1482":{"body":"Cedar Documentation : https://docs.cedarpolicy.com/ Cedar GitHub : https://github.com/cedar-policy/cedar AWS AVP : https://aws.amazon.com/verified-permissions/ Policy Files : /provisioning/config/cedar-policies/ Implementation : /provisioning/platform/orchestrator/src/security/","breadcrumbs":"ADR-008: Cedar Authorization » References","id":"1482","title":"References"},"1483":{"body":"ADR-003: JWT Token-Based Authentication ADR-004: Audit Logging System ADR-005: KMS Key Management","breadcrumbs":"ADR-008: Cedar Authorization » Related ADRs","id":"1483","title":"Related ADRs"},"1484":{"body":"Cedar policy language is inspired by decades of authorization research (XACML, AWS IAM) and production experience at AWS. It balances expressiveness with safety. Approved By : Architecture Team Implementation Date : 2025-10-08 Review Date : 2026-01-08 (Quarterly)","breadcrumbs":"ADR-008: Cedar Authorization » Notes","id":"1484","title":"Notes"},"1485":{"body":"Status : Implemented Date : 2025-10-08 Decision Makers : Architecture Team","breadcrumbs":"ADR-009: Security System Complete » ADR-009: Complete Security System Implementation","id":"1485","title":"ADR-009: Complete Security System Implementation"},"1486":{"body":"The Provisioning platform required a comprehensive, enterprise-grade security system covering authentication, authorization, secrets management, MFA, compliance, and emergency access. The system needed to be production-ready, scalable, and compliant with GDPR, SOC2, and ISO 27001.","breadcrumbs":"ADR-009: Security System Complete » Context","id":"1486","title":"Context"},"1487":{"body":"Implement a complete security architecture using 12 specialized components organized in 4 implementation groups.","breadcrumbs":"ADR-009: Security System Complete » Decision","id":"1487","title":"Decision"},"1488":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Implementation Summary","id":"1488","title":"Implementation Summary"},"1489":{"body":"39,699 lines of production-ready code 136 files created/modified 350+ tests implemented 83+ REST endpoints available 111+ CLI commands ready","breadcrumbs":"ADR-009: Security System Complete » Total Implementation","id":"1489","title":"Total Implementation"},"149":{"body":"# Check for service updates\\nprovisioning taskserv check-updates # Update specific service\\nprovisioning taskserv update kubernetes --infra dev-setup # Verify update\\nprovisioning taskserv versions kubernetes","breadcrumbs":"Getting Started » Workflow 2: Service Updates","id":"149","title":"Workflow 2: Service Updates"},"1490":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Architecture Components","id":"1490","title":"Architecture Components"},"1491":{"body":"1. JWT Authentication (1,626 lines) Location : provisioning/platform/control-center/src/auth/ Features : RS256 asymmetric signing Access tokens (15 min) + refresh tokens (7 d) Token rotation and revocation Argon2id password hashing 5 user roles (Admin, Developer, Operator, Viewer, Auditor) Thread-safe blacklist API : 6 endpoints CLI : 8 commands Tests : 30+ 2. Cedar Authorization (5,117 lines) Location : provisioning/config/cedar-policies/, provisioning/platform/orchestrator/src/security/ Features : Cedar policy engine integration 4 policy files (schema, production, development, admin) Context-aware authorization (MFA, IP, time windows) Hot reload without restart Policy validation API : 4 endpoints CLI : 6 commands Tests : 30+ 3. Audit Logging (3,434 lines) Location : provisioning/platform/orchestrator/src/audit/ Features : Structured JSON logging 40+ action types GDPR compliance (PII anonymization) 5 export formats (JSON, CSV, Splunk, ECS, JSON Lines) Query API with advanced filtering API : 7 endpoints CLI : 8 commands Tests : 25 4. Config Encryption (3,308 lines) Location : provisioning/core/nulib/lib_provisioning/config/encryption.nu Features : SOPS integration 4 KMS backends (Age, AWS KMS, Vault, Cosmian) Transparent encryption/decryption Memory-only decryption Auto-detection CLI : 10 commands Tests : 7","breadcrumbs":"ADR-009: Security System Complete » Group 1: Foundation (13,485 lines)","id":"1491","title":"Group 1: Foundation (13,485 lines)"},"1492":{"body":"5. KMS Service (2,483 lines) Location : provisioning/platform/kms-service/ Features : HashiCorp Vault (Transit engine) AWS KMS (Direct + envelope encryption) Context-based encryption (AAD) Key rotation support Multi-region support API : 8 endpoints CLI : 15 commands Tests : 20 6. Dynamic Secrets (4,141 lines) Location : provisioning/platform/orchestrator/src/secrets/ Features : AWS STS temporary credentials (15 min-12 h) SSH key pair generation (Ed25519) UpCloud API subaccounts TTL manager with auto-cleanup Vault dynamic secrets integration API : 7 endpoints CLI : 10 commands Tests : 15 7. SSH Temporal Keys (2,707 lines) Location : provisioning/platform/orchestrator/src/ssh/ Features : Ed25519 key generation Vault OTP (one-time passwords) Vault CA (certificate authority signing) Auto-deployment to authorized_keys Background cleanup every 5 min API : 7 endpoints CLI : 10 commands Tests : 31","breadcrumbs":"ADR-009: Security System Complete » Group 2: KMS Integration (9,331 lines)","id":"1492","title":"Group 2: KMS Integration (9,331 lines)"},"1493":{"body":"8. MFA Implementation (3,229 lines) Location : provisioning/platform/control-center/src/mfa/ Features : TOTP (RFC 6238, 6-digit codes, 30 s window) WebAuthn/FIDO2 (YubiKey, Touch ID, Windows Hello) QR code generation 10 backup codes per user Multiple devices per user Rate limiting (5 attempts/5 min) API : 13 endpoints CLI : 15 commands Tests : 85+ 9. Orchestrator Auth Flow (2,540 lines) Location : provisioning/platform/orchestrator/src/middleware/ Features : Complete middleware chain (5 layers) Security context builder Rate limiting (100 req/min per IP) JWT authentication middleware MFA verification middleware Cedar authorization middleware Audit logging middleware Tests : 53 10. Control Center UI (3,179 lines) Location : provisioning/platform/control-center/web/ Features : React/TypeScript UI Login with MFA (2-step flow) MFA setup (TOTP + WebAuthn wizards) Device management Audit log viewer with filtering API token management Security settings dashboard Components : 12 React components API Integration : 17 methods","breadcrumbs":"ADR-009: Security System Complete » Group 3: Security Features (8,948 lines)","id":"1493","title":"Group 3: Security Features (8,948 lines)"},"1494":{"body":"11. Break-Glass Emergency Access (3,840 lines) Location : provisioning/platform/orchestrator/src/break_glass/ Features : Multi-party approval (2+ approvers, different teams) Emergency JWT tokens (4 h max, special claims) Auto-revocation (expiration + inactivity) Enhanced audit (7-year retention) Real-time alerts Background monitoring API : 12 endpoints CLI : 10 commands Tests : 985 lines (unit + integration) 12. Compliance (4,095 lines) Location : provisioning/platform/orchestrator/src/compliance/ Features : GDPR : Data export, deletion, rectification, portability, objection SOC2 : 9 Trust Service Criteria verification ISO 27001 : 14 Annex A control families Incident Response : Complete lifecycle management Data Protection : 4-level classification, encryption controls Access Control : RBAC matrix with role verification API : 35 endpoints CLI : 23 commands Tests : 11","breadcrumbs":"ADR-009: Security System Complete » Group 4: Advanced Features (7,935 lines)","id":"1494","title":"Group 4: Advanced Features (7,935 lines)"},"1495":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Security Architecture Flow","id":"1495","title":"Security Architecture Flow"},"1496":{"body":"1. User Request ↓\\n2. Rate Limiting (100 req/min per IP) ↓\\n3. JWT Authentication (RS256, 15 min tokens) ↓\\n4. MFA Verification (TOTP/WebAuthn for sensitive ops) ↓\\n5. Cedar Authorization (context-aware policies) ↓\\n6. Dynamic Secrets (AWS STS, SSH keys, 1h TTL) ↓\\n7. Operation Execution (encrypted configs, KMS) ↓\\n8. Audit Logging (structured JSON, GDPR-compliant) ↓\\n9. Response","breadcrumbs":"ADR-009: Security System Complete » End-to-End Request Flow","id":"1496","title":"End-to-End Request Flow"},"1497":{"body":"1. Emergency Request (reason + justification) ↓\\n2. Multi-Party Approval (2+ approvers, different teams) ↓\\n3. Session Activation (special JWT, 4h max) ↓\\n4. Enhanced Audit (7-year retention, immutable) ↓\\n5. Auto-Revocation (expiration/inactivity)","breadcrumbs":"ADR-009: Security System Complete » Emergency Access Flow","id":"1497","title":"Emergency Access Flow"},"1498":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Technology Stack","id":"1498","title":"Technology Stack"},"1499":{"body":"axum : HTTP framework jsonwebtoken : JWT handling (RS256) cedar-policy : Authorization engine totp-rs : TOTP implementation webauthn-rs : WebAuthn/FIDO2 aws-sdk-kms : AWS KMS integration argon2 : Password hashing tracing : Structured logging","breadcrumbs":"ADR-009: Security System Complete » Backend (Rust)","id":"1499","title":"Backend (Rust)"},"15":{"body":"The system supports four operational modes: Solo : Single developer local development Multi-user : Team collaboration with shared services CI/CD : Automated pipeline execution Enterprise : Production deployment with strict compliance","breadcrumbs":"Home » Mode-Based Architecture","id":"15","title":"Mode-Based Architecture"},"150":{"body":"# Add servers to existing infrastructure\\n# Edit settings.ncl to add more servers # Apply changes\\nprovisioning server create --infra dev-setup # Install services on new servers\\nprovisioning taskserv create containerd --infra dev-setup","breadcrumbs":"Getting Started » Workflow 3: Infrastructure Scaling","id":"150","title":"Workflow 3: Infrastructure Scaling"},"1500":{"body":"React 18 : UI framework Leptos : Rust WASM framework @simplewebauthn/browser : WebAuthn client qrcode.react : QR code generation","breadcrumbs":"ADR-009: Security System Complete » Frontend (TypeScript/React)","id":"1500","title":"Frontend (TypeScript/React)"},"1501":{"body":"Nushell 0.107 : Shell and scripting nu_plugin_kcl : KCL integration","breadcrumbs":"ADR-009: Security System Complete » CLI (Nushell)","id":"1501","title":"CLI (Nushell)"},"1502":{"body":"HashiCorp Vault : Secrets management, KMS, SSH CA AWS KMS : Key management service PostgreSQL/SurrealDB : Data storage SOPS : Config encryption","breadcrumbs":"ADR-009: Security System Complete » Infrastructure","id":"1502","title":"Infrastructure"},"1503":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Security Guarantees","id":"1503","title":"Security Guarantees"},"1504":{"body":"✅ RS256 asymmetric signing (no shared secrets) ✅ Short-lived access tokens (15 min) ✅ Token revocation support ✅ Argon2id password hashing (memory-hard) ✅ MFA enforced for production operations","breadcrumbs":"ADR-009: Security System Complete » Authentication","id":"1504","title":"Authentication"},"1505":{"body":"✅ Fine-grained permissions (Cedar policies) ✅ Context-aware (MFA, IP, time windows) ✅ Hot reload policies (no downtime) ✅ Deny by default","breadcrumbs":"ADR-009: Security System Complete » Authorization","id":"1505","title":"Authorization"},"1506":{"body":"✅ No static credentials stored ✅ Time-limited secrets (1h default) ✅ Auto-revocation on expiry ✅ Encryption at rest (KMS) ✅ Memory-only decryption","breadcrumbs":"ADR-009: Security System Complete » Secrets Management","id":"1506","title":"Secrets Management"},"1507":{"body":"✅ Immutable audit logs ✅ GDPR-compliant (PII anonymization) ✅ SOC2 controls implemented ✅ ISO 27001 controls verified ✅ 7-year retention for break-glass","breadcrumbs":"ADR-009: Security System Complete » Audit & Compliance","id":"1507","title":"Audit & Compliance"},"1508":{"body":"✅ Multi-party approval required ✅ Time-limited sessions (4h max) ✅ Enhanced audit logging ✅ Auto-revocation ✅ Cannot be disabled","breadcrumbs":"ADR-009: Security System Complete » Emergency Access","id":"1508","title":"Emergency Access"},"1509":{"body":"Component Latency Throughput Memory JWT Auth <5 ms 10,000/s ~10 MB Cedar Authz <10 ms 5,000/s ~50 MB Audit Log <5 ms 20,000/s ~100 MB KMS Encrypt <50 ms 1,000/s ~20 MB Dynamic Secrets <100 ms 500/s ~50 MB MFA Verify <50 ms 2,000/s ~30 MB Total Overhead : ~10-20 ms per request Memory Usage : ~260 MB total for all security components","breadcrumbs":"ADR-009: Security System Complete » Performance Characteristics","id":"1509","title":"Performance Characteristics"},"151":{"body":"","breadcrumbs":"Getting Started » Interactive Mode","id":"151","title":"Interactive Mode"},"1510":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Deployment Options","id":"1510","title":"Deployment Options"},"1511":{"body":"# Start all services\\ncd provisioning/platform/kms-service && cargo run &\\ncd provisioning/platform/orchestrator && cargo run &\\ncd provisioning/platform/control-center && cargo run &","breadcrumbs":"ADR-009: Security System Complete » Development","id":"1511","title":"Development"},"1512":{"body":"# Kubernetes deployment\\nkubectl apply -f k8s/security-stack.yaml # Docker Compose\\ndocker-compose up -d kms orchestrator control-center # Systemd services\\nsystemctl start provisioning-kms\\nsystemctl start provisioning-orchestrator\\nsystemctl start provisioning-control-center","breadcrumbs":"ADR-009: Security System Complete » Production","id":"1512","title":"Production"},"1513":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Configuration","id":"1513","title":"Configuration"},"1514":{"body":"# JWT\\nexport JWT_ISSUER=\\"control-center\\"\\nexport JWT_AUDIENCE=\\"orchestrator,cli\\"\\nexport JWT_PRIVATE_KEY_PATH=\\"/keys/private.pem\\"\\nexport JWT_PUBLIC_KEY_PATH=\\"/keys/public.pem\\" # Cedar\\nexport CEDAR_POLICIES_PATH=\\"/config/cedar-policies\\"\\nexport CEDAR_ENABLE_HOT_RELOAD=true # KMS\\nexport KMS_BACKEND=\\"vault\\"\\nexport VAULT_ADDR=\\"https://vault.example.com\\"\\nexport VAULT_TOKEN=\\"...\\" # MFA\\nexport MFA_TOTP_ISSUER=\\"Provisioning\\"\\nexport MFA_WEBAUTHN_RP_ID=\\"provisioning.example.com\\"","breadcrumbs":"ADR-009: Security System Complete » Environment Variables","id":"1514","title":"Environment Variables"},"1515":{"body":"# provisioning/config/security.toml\\n[jwt]\\nissuer = \\"control-center\\"\\naudience = [\\"orchestrator\\", \\"cli\\"]\\naccess_token_ttl = \\"15m\\"\\nrefresh_token_ttl = \\"7d\\" [cedar]\\npolicies_path = \\"config/cedar-policies\\"\\nhot_reload = true\\nreload_interval = \\"60s\\" [mfa]\\ntotp_issuer = \\"Provisioning\\"\\nwebauthn_rp_id = \\"provisioning.example.com\\"\\nrate_limit = 5\\nrate_limit_window = \\"5m\\" [kms]\\nbackend = \\"vault\\"\\nvault_address = \\"https://vault.example.com\\"\\nvault_mount_point = \\"transit\\" [audit]\\nretention_days = 365\\nretention_break_glass_days = 2555 # 7 years\\nexport_format = \\"json\\"\\npii_anonymization = true","breadcrumbs":"ADR-009: Security System Complete » Config Files","id":"1515","title":"Config Files"},"1516":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Testing","id":"1516","title":"Testing"},"1517":{"body":"# Control Center (JWT, MFA)\\ncd provisioning/platform/control-center\\ncargo test # Orchestrator (Cedar, Audit, Secrets, SSH, Break-Glass, Compliance)\\ncd provisioning/platform/orchestrator\\ncargo test # KMS Service\\ncd provisioning/platform/kms-service\\ncargo test # Config Encryption (Nushell)\\nnu provisioning/core/nulib/lib_provisioning/config/encryption_tests.nu","breadcrumbs":"ADR-009: Security System Complete » Run All Tests","id":"1517","title":"Run All Tests"},"1518":{"body":"# Full security flow\\ncd provisioning/platform/orchestrator\\ncargo test --test security_integration_tests\\ncargo test --test break_glass_integration_tests","breadcrumbs":"ADR-009: Security System Complete » Integration Tests","id":"1518","title":"Integration Tests"},"1519":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Monitoring & Alerts","id":"1519","title":"Monitoring & Alerts"},"152":{"body":"# Start Nushell with provisioning loaded\\nprovisioning nu In the interactive shell, you have access to all provisioning functions: # Inside Nushell session\\nuse lib_provisioning * # Check environment\\nshow_env # List available functions\\nhelp commands | where name =~ \\"provision\\"","breadcrumbs":"Getting Started » Starting Interactive Shell","id":"152","title":"Starting Interactive Shell"},"1520":{"body":"Authentication failures (rate, sources) Authorization denials (policies, resources) MFA failures (attempts, users) Token revocations (rate, reasons) Break-glass activations (frequency, duration) Secrets generation (rate, types) Audit log volume (events/sec)","breadcrumbs":"ADR-009: Security System Complete » Metrics to Monitor","id":"1520","title":"Metrics to Monitor"},"1521":{"body":"Multiple failed auth attempts (5+ in 5 min) Break-glass session created Compliance report non-compliant Incident severity critical/high Token revocation spike KMS errors Audit log export failures","breadcrumbs":"ADR-009: Security System Complete » Alerts to Configure","id":"1521","title":"Alerts to Configure"},"1522":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Maintenance","id":"1522","title":"Maintenance"},"1523":{"body":"Monitor audit logs for anomalies Review failed authentication attempts Check break-glass sessions (should be zero)","breadcrumbs":"ADR-009: Security System Complete » Daily","id":"1523","title":"Daily"},"1524":{"body":"Review compliance reports Check incident response status Verify backup code usage Review MFA device additions/removals","breadcrumbs":"ADR-009: Security System Complete » Weekly","id":"1524","title":"Weekly"},"1525":{"body":"Rotate KMS keys Review and update Cedar policies Generate compliance reports (GDPR, SOC2, ISO) Audit access control matrix","breadcrumbs":"ADR-009: Security System Complete » Monthly","id":"1525","title":"Monthly"},"1526":{"body":"Full security audit Penetration testing Compliance certification review Update security documentation","breadcrumbs":"ADR-009: Security System Complete » Quarterly","id":"1526","title":"Quarterly"},"1527":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Migration Path","id":"1527","title":"Migration Path"},"1528":{"body":"Phase 1 : Deploy security infrastructure KMS service Orchestrator with auth middleware Control Center Phase 2 : Migrate authentication Enable JWT authentication Migrate existing users Disable old auth system Phase 3 : Enable MFA Require MFA enrollment for admins Gradual rollout to all users Phase 4 : Enable Cedar authorization Deploy initial policies (permissive) Monitor authorization decisions Tighten policies incrementally Phase 5 : Enable advanced features Break-glass procedures Compliance reporting Incident response","breadcrumbs":"ADR-009: Security System Complete » From Existing System","id":"1528","title":"From Existing System"},"1529":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Future Enhancements","id":"1529","title":"Future Enhancements"},"153":{"body":"# Show detailed server information\\nfind_servers \\"web-*\\" | table # Get cost estimates\\nservers_walk_by_costs $settings \\"\\" false false \\"stdout\\" # Check task service status\\ntaskservs_list | where status == \\"running\\"","breadcrumbs":"Getting Started » Useful Interactive Commands","id":"153","title":"Useful Interactive Commands"},"1530":{"body":"Hardware Security Module (HSM) integration OAuth2/OIDC federation SAML SSO for enterprise Risk-based authentication (IP reputation, device fingerprinting) Behavioral analytics (anomaly detection) Zero-Trust Network (service mesh integration)","breadcrumbs":"ADR-009: Security System Complete » Planned (Not Implemented)","id":"1530","title":"Planned (Not Implemented)"},"1531":{"body":"Blockchain audit log (immutable append-only log) Quantum-resistant cryptography (post-quantum algorithms) Confidential computing (SGX/SEV enclaves) Distributed break-glass (multi-region approval)","breadcrumbs":"ADR-009: Security System Complete » Under Consideration","id":"1531","title":"Under Consideration"},"1532":{"body":"","breadcrumbs":"ADR-009: Security System Complete » Consequences","id":"1532","title":"Consequences"},"1533":{"body":"✅ Enterprise-grade security meeting GDPR, SOC2, ISO 27001 ✅ Zero static credentials (all dynamic, time-limited) ✅ Complete audit trail (immutable, GDPR-compliant) ✅ MFA-enforced for sensitive operations ✅ Emergency access with enhanced controls ✅ Fine-grained authorization (Cedar policies) ✅ Automated compliance (reports, incident response)","breadcrumbs":"ADR-009: Security System Complete » Positive","id":"1533","title":"Positive"},"1534":{"body":"⚠️ Increased complexity (12 components to manage) ⚠️ Performance overhead (~10-20 ms per request) ⚠️ Memory footprint (~260 MB additional) ⚠️ Learning curve (Cedar policy language, MFA setup) ⚠️ Operational overhead (key rotation, policy updates)","breadcrumbs":"ADR-009: Security System Complete » Negative","id":"1534","title":"Negative"},"1535":{"body":"Comprehensive documentation (ADRs, guides, API docs) CLI commands for all operations Automated monitoring and alerting Gradual rollout with feature flags Training materials for operators","breadcrumbs":"ADR-009: Security System Complete » Mitigations","id":"1535","title":"Mitigations"},"1536":{"body":"JWT Auth : docs/architecture/JWT_AUTH_IMPLEMENTATION.md Cedar Authz : docs/architecture/CEDAR_AUTHORIZATION_IMPLEMENTATION.md Audit Logging : docs/architecture/AUDIT_LOGGING_IMPLEMENTATION.md MFA : docs/architecture/MFA_IMPLEMENTATION_SUMMARY.md Break-Glass : docs/architecture/BREAK_GLASS_IMPLEMENTATION_SUMMARY.md Compliance : docs/architecture/COMPLIANCE_IMPLEMENTATION_SUMMARY.md Config Encryption : docs/user/CONFIG_ENCRYPTION_GUIDE.md Dynamic Secrets : docs/user/DYNAMIC_SECRETS_QUICK_REFERENCE.md SSH Keys : docs/user/SSH_TEMPORAL_KEYS_USER_GUIDE.md","breadcrumbs":"ADR-009: Security System Complete » Related Documentation","id":"1536","title":"Related Documentation"},"1537":{"body":"Architecture Team : Approved Security Team : Approved (pending penetration test) Compliance Team : Approved (pending audit) Engineering Team : Approved Date : 2025-10-08 Version : 1.0.0 Status : Implemented and Production-Ready","breadcrumbs":"ADR-009: Security System Complete » Approval","id":"1537","title":"Approval"},"1538":{"body":"Status : Accepted Date : 2025-12-03 Decision Makers : Architecture Team Implementation : Multi-phase migration (KCL workspace configs + template reorganization)","breadcrumbs":"ADR-010: Configuration Format Strategy » ADR-010: Configuration File Format Strategy","id":"1538","title":"ADR-010: Configuration File Format Strategy"},"1539":{"body":"The provisioning project historically used a single configuration format (YAML/TOML environment variables) for all purposes. As the system evolved, different parts naturally adopted different formats: TOML for modular provider and platform configurations (providers/*.toml, platform/*.toml) KCL for infrastructure-as-code definitions with type safety YAML for workspace metadata However, the workspace configuration remained in YAML (provisioning.yaml), creating inconsistency and leaving type-unsafe configuration handling. Meanwhile, complete KCL schemas for workspace configuration were designed but unused. Problem : Three different formats in the same system without documented rationale or consistent patterns.","breadcrumbs":"ADR-010: Configuration Format Strategy » Context","id":"1539","title":"Context"},"154":{"body":"","breadcrumbs":"Getting Started » Configuration Management","id":"154","title":"Configuration Management"},"1540":{"body":"Adopt a three-format strategy with clear separation of concerns: Format Purpose Use Cases KCL Infrastructure as Code & Schemas Workspace config, infrastructure definitions, type-safe validation TOML Application Configuration & Settings System defaults, provider settings, user preferences, interpolation YAML Metadata & Kubernetes Resources K8s manifests, tool metadata, version tracking, CI/CD resources","breadcrumbs":"ADR-010: Configuration Format Strategy » Decision","id":"1540","title":"Decision"},"1541":{"body":"","breadcrumbs":"ADR-010: Configuration Format Strategy » Implementation Strategy","id":"1541","title":"Implementation Strategy"},"1542":{"body":"Define and document the three-format approach through: ADR-010 (this document) - Rationale and strategy CLAUDE.md updates - Quick reference for developers Configuration hierarchy - Explicit precedence rules","breadcrumbs":"ADR-010: Configuration Format Strategy » Phase 1: Documentation (Complete)","id":"1542","title":"Phase 1: Documentation (Complete)"},"1543":{"body":"Migrate workspace configuration from YAML to KCL : Create comprehensive workspace configuration schema in KCL Implement backward-compatible config loader (KCL first, fallback to YAML) Provide migration script to convert existing workspaces Update workspace initialization to generate KCL configs Expected Outcome : workspace/config/provisioning.ncl (KCL, type-safe, validated) Full schema validation with semantic versioning checks Automatic validation at config load time","breadcrumbs":"ADR-010: Configuration Format Strategy » Phase 2: Workspace Config Migration (In Progress)","id":"1543","title":"Phase 2: Workspace Config Migration (In Progress)"},"1544":{"body":"Move template files to proper directory structure and correct extensions : Previous (KCL): provisioning/kcl/templates/*.k (had Nushell/Jinja2 code, not KCL) Current (Nickel): provisioning/templates/ ├── nushell/*.nu.j2 ├── config/*.toml.j2 ├── nickel/*.ncl.j2 └── README.md Expected Outcome : Templates properly classified and discoverable KCL validation passes (15/16 errors eliminated) Template system clean and maintainable","breadcrumbs":"ADR-010: Configuration Format Strategy » Phase 3: Template File Reorganization (In Progress)","id":"1544","title":"Phase 3: Template File Reorganization (In Progress)"},"1545":{"body":"","breadcrumbs":"ADR-010: Configuration Format Strategy » Rationale for Each Format","id":"1545","title":"Rationale for Each Format"},"1546":{"body":"Why KCL over YAML or TOML? Type Safety : Catch configuration errors at schema validation time, not runtime schema WorkspaceDeclaration: metadata: Metadata check: regex.match(metadata.version, r\\"^\\\\d+\\\\.\\\\d+\\\\.\\\\d+$\\"), \\\\ \\"Version must be semantic versioning\\" Schema-First Development : Schemas are first-class citizens Document expected structure upfront IDE support for auto-completion Enforce required fields and value ranges Immutable by Default : Infrastructure configurations are immutable Prevents accidental mutations Better for reproducible deployments Aligns with PAP principle: \\"configuration-driven, not hardcoded\\" Complex Validation : KCL supports sophisticated validation rules Semantic versioning validation Dependency checking Cross-field validation Range constraints on numeric values Ecosystem Consistency : KCL is already used for infrastructure definitions Server configurations use KCL Cluster definitions use KCL Taskserv definitions use KCL Using KCL for workspace config maintains consistency Existing Schemas : provisioning/kcl/generator/declaration.ncl already defines complete workspace schemas No design work needed Production-ready schemas Well-tested patterns","breadcrumbs":"ADR-010: Configuration Format Strategy » KCL for Workspace Configuration","id":"1546","title":"KCL for Workspace Configuration"},"1547":{"body":"Why TOML for settings? Hierarchical Structure : Native support for nested configurations [http]\\nuse_curl = false\\ntimeout = 30 [debug]\\nenabled = false\\nlog_level = \\"info\\" Interpolation Support : Dynamic variable substitution base_path = \\"/Users/home/provisioning\\"\\ncache_path = \\"{{base_path}}/.cache\\" Industry Standard : Widely used for application configuration (Rust, Python, Go) Human Readable : Clear, explicit, easy to edit Validation Support : Schema files (.schema.toml) for validation Use Cases : System defaults: provisioning/config/config.defaults.toml Provider settings: workspace/config/providers/*.toml Platform services: workspace/config/platform/*.toml User preferences: User config files","breadcrumbs":"ADR-010: Configuration Format Strategy » TOML for Application Configuration","id":"1547","title":"TOML for Application Configuration"},"1548":{"body":"Why YAML for metadata? Kubernetes Compatibility : YAML is K8s standard K8s manifests use YAML Consistent with ecosystem Familiar to DevOps engineers Lightweight : Good for simple data structures workspace: name: \\"librecloud\\" version: \\"1.0.0\\" created: \\"2025-10-06T12:29:43Z\\" Version Control : Human-readable format Diffs are clear and meaningful Git-friendly Comments supported Use Cases : K8s resource definitions Tool metadata (versions, sources, tags) CI/CD configuration files User workspace metadata (during transition)","breadcrumbs":"ADR-010: Configuration Format Strategy » YAML for Metadata and Kubernetes Resources","id":"1548","title":"YAML for Metadata and Kubernetes Resources"},"1549":{"body":"When loading configuration, use this precedence (highest to lowest) : Runtime Arguments (highest priority) CLI flags passed to commands Explicit user input Environment Variables (PROVISIONING_*) Override system settings Deployment-specific overrides Secrets via env vars User Configuration (Centralized) User preferences: ~/.config/provisioning/user_config.yaml User workspace overrides: workspace/config/local-overrides.toml Infrastructure Configuration Workspace KCL config: workspace/config/provisioning.ncl Platform services: workspace/config/platform/*.toml Provider configs: workspace/config/providers/*.toml System Defaults (lowest priority) System config: provisioning/config/config.defaults.toml Schema defaults: defined in KCL schemas","breadcrumbs":"ADR-010: Configuration Format Strategy » Configuration Hierarchy (Priority)","id":"1549","title":"Configuration Hierarchy (Priority)"},"155":{"body":"System Defaults : config.defaults.toml - System-wide defaults User Config : ~/.provisioning/config.user.toml - Your preferences Environment Config : config.{env}.toml - Environment-specific settings Infrastructure Config : settings.ncl - Infrastructure definitions","breadcrumbs":"Getting Started » Understanding Configuration Files","id":"155","title":"Understanding Configuration Files"},"1550":{"body":"","breadcrumbs":"ADR-010: Configuration Format Strategy » Migration Path","id":"1550","title":"Migration Path"},"1551":{"body":"Migration Path : Config loader checks for .ncl first, then falls back to .yaml for legacy systems # Try Nickel first (current)\\nif ($config_nickel | path exists) { let config = (load_nickel_workspace_config $config_nickel)\\n} else if ($config_yaml | path exists) { # Legacy YAML support (from pre-migration) let config = (open $config_yaml)\\n} Automatic Migration : Migration script converts YAML/KCL → Nickel provisioning workspace migrate-config --all Validation : New KCL configs validated against schemas","breadcrumbs":"ADR-010: Configuration Format Strategy » For Existing Workspaces","id":"1551","title":"For Existing Workspaces"},"1552":{"body":"Generate KCL : Workspace initialization creates .k files provisioning workspace create my-workspace\\n# Creates: workspace/my-workspace/config/provisioning.ncl Use Existing Schemas : Leverage provisioning/kcl/generator/declaration.ncl Schema Validation : Automatic validation during config load","breadcrumbs":"ADR-010: Configuration Format Strategy » For New Workspaces","id":"1552","title":"For New Workspaces"},"1553":{"body":"","breadcrumbs":"ADR-010: Configuration Format Strategy » File Format Guidelines for Developers","id":"1553","title":"File Format Guidelines for Developers"},"1554":{"body":"Use KCL for : Infrastructure definitions (servers, clusters, taskservs) Configuration with type requirements Schema definitions Any config that needs validation rules Workspace configuration Use TOML for : Application settings (HTTP client, logging, timeouts) Provider-specific settings Platform service configuration User preferences and overrides System defaults with interpolation Use YAML for : Kubernetes manifests CI/CD configuration (GitHub Actions, GitLab CI) Tool metadata Human-readable documentation files Version control metadata","breadcrumbs":"ADR-010: Configuration Format Strategy » When to Use Each Format","id":"1554","title":"When to Use Each Format"},"1555":{"body":"","breadcrumbs":"ADR-010: Configuration Format Strategy » Consequences","id":"1555","title":"Consequences"},"1556":{"body":"✅ Type Safety : KCL schema validation catches config errors early ✅ Consistency : Infrastructure definitions and configs use same language ✅ Maintainability : Clear separation of concerns (IaC vs settings vs metadata) ✅ Validation : Semantic versioning, required fields, range checks ✅ Tooling : IDE support for KCL auto-completion ✅ Documentation : Self-documenting schemas with descriptions ✅ Ecosystem Alignment : TOML for settings (Rust standard), YAML for K8s","breadcrumbs":"ADR-010: Configuration Format Strategy » Benefits","id":"1556","title":"Benefits"},"1557":{"body":"⚠️ Learning Curve : Developers must understand three formats ⚠️ Migration Effort : Existing YAML configs need conversion ⚠️ Tooling Requirements : KCL compiler needed (already a dependency)","breadcrumbs":"ADR-010: Configuration Format Strategy » Trade-offs","id":"1557","title":"Trade-offs"},"1558":{"body":"Documentation : Clear guidelines in CLAUDE.md Backward Compatibility : YAML support maintained during transition Automation : Migration scripts for existing workspaces Gradual Migration : No hard cutoff, both formats supported for extended period","breadcrumbs":"ADR-010: Configuration Format Strategy » Risk Mitigation","id":"1558","title":"Risk Mitigation"},"1559":{"body":"","breadcrumbs":"ADR-010: Configuration Format Strategy » Template File Reorganization","id":"1559","title":"Template File Reorganization"},"156":{"body":"Infrastructure settings.ncl ↓ (overrides)\\nEnvironment config.{env}.toml ↓ (overrides)\\nUser config.user.toml ↓ (overrides)\\nSystem config.defaults.toml","breadcrumbs":"Getting Started » Configuration Hierarchy","id":"156","title":"Configuration Hierarchy"},"1560":{"body":"Currently, 15/16 files in provisioning/kcl/templates/ have .k extension but contain Nushell/Jinja2 code, not KCL: provisioning/kcl/templates/\\n├── server.ncl # Actually Nushell/Jinja2 template\\n├── taskserv.ncl # Actually Nushell/Jinja2 template\\n└── ... # 15 more template files This causes: KCL validation failures (96.6% of errors) Misclassification (templates in KCL directory) Confusing directory structure","breadcrumbs":"ADR-010: Configuration Format Strategy » Problem","id":"1560","title":"Problem"},"1561":{"body":"Reorganize into type-specific directories: provisioning/templates/\\n├── nushell/ # Nushell code generation (*.nu.j2)\\n│ ├── server.nu.j2\\n│ ├── taskserv.nu.j2\\n│ └── ...\\n├── config/ # Config file generation (*.toml.j2, *.yaml.j2)\\n│ ├── provider.toml.j2\\n│ └── ...\\n├── kcl/ # KCL file generation (*.k.j2)\\n│ ├── workspace.ncl.j2\\n│ └── ...\\n└── README.md","breadcrumbs":"ADR-010: Configuration Format Strategy » Solution","id":"1561","title":"Solution"},"1562":{"body":"✅ Correct file classification ✅ KCL validation passes completely ✅ Clear template organization ✅ Easier to discover and maintain templates","breadcrumbs":"ADR-010: Configuration Format Strategy » Outcome","id":"1562","title":"Outcome"},"1563":{"body":"","breadcrumbs":"ADR-010: Configuration Format Strategy » References","id":"1563","title":"References"},"1564":{"body":"Workspace Declaration : provisioning/kcl/generator/declaration.ncl WorkspaceDeclaration - Complete workspace specification Metadata - Name, version, author, timestamps DeploymentConfig - Deployment modes, servers, HA settings Includes validation rules and semantic versioning Workspace Layer : provisioning/workspace/layers/workspace.layer.ncl WorkspaceLayer - Template paths, priorities, metadata Core Settings : provisioning/kcl/settings.ncl Settings - Main provisioning settings SecretProvider - SOPS/KMS configuration AIProvider - AI provider configuration","breadcrumbs":"ADR-010: Configuration Format Strategy » Existing KCL Schemas","id":"1564","title":"Existing KCL Schemas"},"1565":{"body":"ADR-001 : Project Structure ADR-005 : Extension Framework ADR-006 : Provisioning CLI Refactoring ADR-009 : Security System Complete","breadcrumbs":"ADR-010: Configuration Format Strategy » Related ADRs","id":"1565","title":"Related ADRs"},"1566":{"body":"Status : Accepted Next Steps : ✅ Document strategy (this ADR) ⏳ Create workspace configuration KCL schema ⏳ Implement backward-compatible config loader ⏳ Create migration script for YAML → KCL ⏳ Move template files to proper directories ⏳ Update documentation with examples ⏳ Migrate workspace_librecloud to KCL Last Updated : 2025-12-03","breadcrumbs":"ADR-010: Configuration Format Strategy » Decision Status","id":"1566","title":"Decision Status"},"1567":{"body":"Status : Implemented Date : 2025-12-15 Decision Makers : Architecture Team Implementation : Complete for platform schemas (100%)","breadcrumbs":"ADR-011: Nickel Migration » ADR-011: Migration from KCL to Nickel","id":"1567","title":"ADR-011: Migration from KCL to Nickel"},"1568":{"body":"The provisioning platform historically used KCL (KLang) as the primary infrastructure-as-code language for all configuration schemas. As the system evolved through four migration phases (Foundation, Core, Complex, Highly Complex), KCL\'s limitations became increasingly apparent:","breadcrumbs":"ADR-011: Nickel Migration » Context","id":"1568","title":"Context"},"1569":{"body":"Complex Type System : Heavyweight schema system with extensive boilerplate schema Foo(bar.Baz) inheritance creates rigid hierarchies Union types with null don\'t work well in type annotations Schema modifications propagate breaking changes Limited Flexibility : Schema-first approach is too rigid for configuration evolution Difficult to extend types without modifying base schemas No easy way to add custom fields without validation conflicts Hard to compose configurations dynamically Import System Overhead : Non-standard module imports import provisioning.lib as lib pattern differs from ecosystem standards Re-export patterns create complexity in extension systems Performance Overhead : Compile-time validation adds latency Schema validation happens at compile time Large configuration files slow down evaluation No lazy evaluation built-in Learning Curve : KCL is Python-like but with unique patterns Team must learn KCL-specific semantics Limited ecosystem and tooling support Difficult to hire developers familiar with KCL","breadcrumbs":"ADR-011: Nickel Migration » Problems with KCL","id":"1569","title":"Problems with KCL"},"157":{"body":"# Edit user configuration\\nprovisioning sops ~/.provisioning/config.user.toml # Or using your preferred editor\\nnano ~/.provisioning/config.user.toml Example customizations: [debug]\\nenabled = true # Enable debug mode by default\\nlog_level = \\"debug\\" # Verbose logging [providers]\\ndefault = \\"aws\\" # Use AWS as default provider [output]\\nformat = \\"json\\" # Prefer JSON output","breadcrumbs":"Getting Started » Customizing Your Configuration","id":"157","title":"Customizing Your Configuration"},"1570":{"body":"The provisioning system required: Greater flexibility in composing configurations Better performance for large-scale deployments Extensibility without modifying base schemas Simpler mental model for team learning Clean exports to JSON/TOML/YAML formats","breadcrumbs":"ADR-011: Nickel Migration » Project Needs","id":"1570","title":"Project Needs"},"1571":{"body":"Adopt Nickel as the primary infrastructure-as-code language for all schema definitions, configuration composition, and deployment declarations.","breadcrumbs":"ADR-011: Nickel Migration » Decision","id":"1571","title":"Decision"},"1572":{"body":"Three-File Pattern per Module : {module}_contracts.ncl - Type definitions using Nickel contracts {module}_defaults.ncl - Default values for all fields {module}.ncl - Instances combining both, with hybrid interface Hybrid Interface (4 levels of access): Level 1 : Direct access to defaults (inspection, reference) Level 2 : Maker functions (90% of use cases) Level 3 : Default instances (pre-built, exported) Level 4 : Contracts (optional imports, advanced combinations) Domain-Organized Architecture (8 top-level domains): lib - Core library types config - Settings, defaults, workspace configuration infrastructure - Compute, storage, provisioning schemas operations - Workflows, batch, dependencies, tasks deployment - Kubernetes, execution modes services - Gitea and other platform services generator - Code generation and declarations integrations - Runtime, GitOps, external integrations Two Deployment Modes : Development : Fast iteration with relative imports (Single Source of Truth) Production : Frozen snapshots with immutable, self-contained deployment packages","breadcrumbs":"ADR-011: Nickel Migration » Key Changes","id":"1572","title":"Key Changes"},"1573":{"body":"","breadcrumbs":"ADR-011: Nickel Migration » Implementation Summary","id":"1573","title":"Implementation Summary"},"1574":{"body":"Metric Value KCL files migrated 40 Nickel files created 72 Modules converted 24 core modules Schemas migrated 150+ Maker functions 80+ Default instances 90+ JSON output validation 4,680+ lines","breadcrumbs":"ADR-011: Nickel Migration » Migration Complete","id":"1574","title":"Migration Complete"},"1575":{"body":"422 Nickel files total 8 domains with hierarchical organization Entry point : main.ncl with domain-organized architecture Clean imports : provisioning.lib, provisioning.config.settings, etc.","breadcrumbs":"ADR-011: Nickel Migration » Platform Schemas (provisioning/schemas/)","id":"1575","title":"Platform Schemas (provisioning/schemas/)"},"1576":{"body":"4 providers : hetzner, local, aws, upcloud 1 cluster type : web Consistent structure : Each extension has nickel/ subdirectory with contracts, defaults, main, version Example - UpCloud Provider : # upcloud/nickel/main.ncl (migrated from upcloud/kcl/)\\nlet contracts = import \\"./contracts.ncl\\" in\\nlet defaults = import \\"./defaults.ncl\\" in { defaults = defaults, make_storage | not_exported = fun overrides => defaults.storage & overrides, DefaultStorage = defaults.storage, DefaultStorageBackup = defaults.storage_backup, DefaultProvisionEnv = defaults.provision_env, DefaultProvisionUpcloud = defaults.provision_upcloud, DefaultServerDefaults_upcloud = defaults.server_defaults_upcloud, DefaultServerUpcloud = defaults.server_upcloud,\\n}","breadcrumbs":"ADR-011: Nickel Migration » Extensions (provisioning/extensions/)","id":"1576","title":"Extensions (provisioning/extensions/)"},"1577":{"body":"47 Nickel files in productive use 2 infrastructures : wuji - Kubernetes cluster with 20 taskservs sgoyol - Support servers group Two deployment modes fully implemented and tested Daily production usage validated ✅","breadcrumbs":"ADR-011: Nickel Migration » Active Workspaces (workspace_librecloud/nickel/)","id":"1577","title":"Active Workspaces (workspace_librecloud/nickel/)"},"1578":{"body":"955 KCL files remain in workspaces/ (legacy user configs) 100% backward compatible - old KCL code still works Config loader supports both formats during transition No breaking changes to APIs","breadcrumbs":"ADR-011: Nickel Migration » Backward Compatibility","id":"1578","title":"Backward Compatibility"},"1579":{"body":"Aspect KCL Nickel Winner Mental Model Python-like with schemas JSON with functions Nickel Performance Baseline 60% faster evaluation Nickel Type System Rigid schemas Gradual typing + contracts Nickel Composition Schema inheritance Record merging (&) Nickel Extensibility Requires schema modifications Merging with custom fields Nickel Validation Compile-time (overhead) Runtime contracts (lazy) Nickel Boilerplate High Low (3-file pattern) Nickel Exports JSON/YAML JSON/TOML/YAML Nickel Learning Curve Medium-High Low Nickel Lazy Evaluation No Yes (built-in) Nickel","breadcrumbs":"ADR-011: Nickel Migration » Comparison: KCL vs Nickel","id":"1579","title":"Comparison: KCL vs Nickel"},"158":{"body":"","breadcrumbs":"Getting Started » Monitoring and Observability","id":"158","title":"Monitoring and Observability"},"1580":{"body":"","breadcrumbs":"ADR-011: Nickel Migration » Architecture Patterns","id":"1580","title":"Architecture Patterns"},"1581":{"body":"File 1: Contracts (batch_contracts.ncl): { BatchScheduler = { strategy | String, resource_limits, scheduling_interval | Number, enable_preemption | Bool, },\\n} File 2: Defaults (batch_defaults.ncl): { scheduler = { strategy = \\"dependency_first\\", resource_limits = {\\"max_cpu_cores\\" = 0}, scheduling_interval = 10, enable_preemption = false, },\\n} File 3: Main (batch.ncl): let contracts = import \\"./batch_contracts.ncl\\" in\\nlet defaults = import \\"./batch_defaults.ncl\\" in { defaults = defaults, # Level 1: Inspection make_scheduler | not_exported = fun o => defaults.scheduler & o, # Level 2: Makers DefaultScheduler = defaults.scheduler, # Level 3: Instances\\n}","breadcrumbs":"ADR-011: Nickel Migration » Three-File Pattern","id":"1581","title":"Three-File Pattern"},"1582":{"body":"90% of users : Use makers for simple customization 9% of users : Reference defaults for inspection 1% of users : Access contracts for advanced combinations No validation conflicts : Record merging works without contract constraints","breadcrumbs":"ADR-011: Nickel Migration » Hybrid Pattern Benefits","id":"1582","title":"Hybrid Pattern Benefits"},"1583":{"body":"provisioning/schemas/\\n├── lib/ # Storage, TaskServDef, ClusterDef\\n├── config/ # Settings, defaults, workspace_config\\n├── infrastructure/ # Compute, storage, provisioning\\n├── operations/ # Workflows, batch, dependencies, tasks\\n├── deployment/ # Kubernetes, modes (solo, multiuser, cicd, enterprise)\\n├── services/ # Gitea, etc\\n├── generator/ # Declarations, gap analysis, changes\\n├── integrations/ # Runtime, GitOps, main\\n└── main.ncl # Entry point with namespace organization Import pattern : let provisioning = import \\"./main.ncl\\" in\\nprovisioning.lib # For Storage, TaskServDef\\nprovisioning.config.settings # For Settings, Defaults\\nprovisioning.infrastructure.compute.server\\nprovisioning.operations.workflows","breadcrumbs":"ADR-011: Nickel Migration » Domain-Organized Architecture","id":"1583","title":"Domain-Organized Architecture"},"1584":{"body":"","breadcrumbs":"ADR-011: Nickel Migration » Production Deployment Patterns","id":"1584","title":"Production Deployment Patterns"},"1585":{"body":"1. Development Mode (Single Source of Truth) Relative imports to central provisioning Fast iteration with immediate schema updates No snapshot overhead Usage: Local development, testing, experimentation # workspace_librecloud/nickel/main.ncl\\nimport \\"../../provisioning/schemas/main.ncl\\"\\nimport \\"../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl\\" 2. Production Mode (Hermetic Deployment) Create immutable snapshots for reproducible deployments: provisioning workspace freeze --version \\"2025-12-15-prod-v1\\" --env production Frozen structure (.frozen/{version}/): ├── provisioning/schemas/ # Snapshot of central schemas\\n├── extensions/ # Snapshot of all extensions\\n└── workspace/ # Snapshot of workspace configs All imports rewritten to local paths : import \\"../../provisioning/schemas/main.ncl\\" → import \\"./provisioning/schemas/main.ncl\\" Guarantees immutability and reproducibility No external dependencies Can be deployed to air-gapped environments Deploy from frozen snapshot : provisioning deploy --frozen \\"2025-12-15-prod-v1\\" --infra wuji Benefits : ✅ Development: Fast iteration with central updates ✅ Production: Immutable, reproducible deployments ✅ Audit trail: Each frozen version timestamped ✅ Rollback: Easy rollback to previous versions ✅ Air-gapped: Works in offline environments","breadcrumbs":"ADR-011: Nickel Migration » Two-Mode Strategy","id":"1585","title":"Two-Mode Strategy"},"1586":{"body":"","breadcrumbs":"ADR-011: Nickel Migration » Ecosystem Integration","id":"1586","title":"Ecosystem Integration"},"1587":{"body":"Location : /Users/Akasha/Development/typedialog Purpose : Type-safe prompts, forms, and schemas with Nickel output Key Feature : Nickel schemas → Type-safe UIs → Nickel output # Nickel schema → Interactive form\\ntypedialog form --schema server.ncl --output json # Interactive form → Nickel output\\ntypedialog form --input form.toml --output nickel Value : Amplifies Nickel ecosystem beyond IaC: Schemas auto-generate type-safe UIs Forms output configurations back to Nickel Multiple backends: CLI, TUI, Web Multiple output formats: JSON, YAML, TOML, Nickel","breadcrumbs":"ADR-011: Nickel Migration » TypeDialog (Bidirectional Nickel Integration)","id":"1587","title":"TypeDialog (Bidirectional Nickel Integration)"},"1588":{"body":"","breadcrumbs":"ADR-011: Nickel Migration » Technical Patterns","id":"1588","title":"Technical Patterns"},"1589":{"body":"KCL Nickel Multiple top-level let bindings Single root expression with let...in chaining","breadcrumbs":"ADR-011: Nickel Migration » Expression-Based Structure","id":"1589","title":"Expression-Based Structure"},"159":{"body":"# Overall system health\\nprovisioning env # Infrastructure status\\nprovisioning show servers --infra dev-setup # Service status\\nprovisioning taskserv list --infra dev-setup","breadcrumbs":"Getting Started » Checking System Status","id":"159","title":"Checking System Status"},"1590":{"body":"KCL Nickel schema Server(defaults.ServerDefaults) defaults.ServerDefaults & { overrides }","breadcrumbs":"ADR-011: Nickel Migration » Schema Inheritance → Record Merging","id":"1590","title":"Schema Inheritance → Record Merging"},"1591":{"body":"KCL Nickel field?: type field = null or field = \\"\\"","breadcrumbs":"ADR-011: Nickel Migration » Optional Fields","id":"1591","title":"Optional Fields"},"1592":{"body":"KCL Nickel \\"ubuntu\\" | \\"debian\\" | \\"centos\\" [\\\\\\\\| \'ubuntu, \'debian, \'centos \\\\\\\\|]","breadcrumbs":"ADR-011: Nickel Migration » Union Types","id":"1592","title":"Union Types"},"1593":{"body":"KCL Nickel True / False / None true / false / null","breadcrumbs":"ADR-011: Nickel Migration » Boolean/Null Conversion","id":"1593","title":"Boolean/Null Conversion"},"1594":{"body":"Syntax Validation : 100% (all files compile) JSON Export : 100% success rate (4,680+ lines) Pattern Coverage : All 5 templates tested and proven Backward Compatibility : 100% Performance : 60% faster evaluation than KCL Test Coverage : 422 Nickel files validated in production","breadcrumbs":"ADR-011: Nickel Migration » Quality Metrics","id":"1594","title":"Quality Metrics"},"1595":{"body":"","breadcrumbs":"ADR-011: Nickel Migration » Consequences","id":"1595","title":"Consequences"},"1596":{"body":"60% performance gain in evaluation speed Reduced boilerplate (contracts + defaults separation) Greater flexibility (record merging without validation) Extensibility without conflicts (custom fields allowed) Simplified mental model (\\"JSON with functions\\") Lazy evaluation (better performance for large configs) Clean exports (100% JSON/TOML compatible) Hybrid pattern (4 levels covering all use cases) Domain-organized architecture (8 logical domains, clear imports) Production deployment with frozen snapshots (immutable, reproducible) Ecosystem expansion (TypeDialog integration for UI generation) Real-world validation (47 files in productive use) 20 taskservs deployed in production infrastructure","breadcrumbs":"ADR-011: Nickel Migration » Positive ✅","id":"1596","title":"Positive ✅"},"1597":{"body":"Dual format support during transition (KCL + Nickel) Learning curve for team (new language) Migration effort (40 files migrated manually) Documentation updates (guides, examples, training) 955 KCL files remain (gradual workspace migration) Frozen snapshots workflow (requires understanding workspace freeze) TypeDialog dependency (external Rust project)","breadcrumbs":"ADR-011: Nickel Migration » Challenges ⚠️","id":"1597","title":"Challenges ⚠️"},"1598":{"body":"✅ Complete documentation in docs/development/kcl-module-system.md ✅ 100% backward compatibility maintained ✅ Migration framework established (5 templates, validation checklist) ✅ Validation checklist for each migration step ✅ 100% syntax validation on all files ✅ Real-world usage validated (47 files in production) ✅ Frozen snapshots guarantee reproducibility ✅ Two deployment modes cover development and production ✅ Gradual migration strategy (workspace-level, no hard cutoff)","breadcrumbs":"ADR-011: Nickel Migration » Mitigations","id":"1598","title":"Mitigations"},"1599":{"body":"","breadcrumbs":"ADR-011: Nickel Migration » Migration Status","id":"1599","title":"Migration Status"},"16":{"body":"Extensibility through: Providers : Cloud platform integrations (AWS, UpCloud, Local) Task Services : Infrastructure components (Kubernetes, databases, etc.) Clusters : Complete deployment configurations","breadcrumbs":"Home » Extension System","id":"16","title":"Extension System"},"160":{"body":"# Enable debug mode for troubleshooting\\nprovisioning --debug server create --infra dev-setup --check # View logs for specific operations\\nprovisioning show logs --infra dev-setup","breadcrumbs":"Getting Started » Logging and Debugging","id":"160","title":"Logging and Debugging"},"1600":{"body":"✅ Foundation (8 files) - Basic schemas, validation library ✅ Core Schemas (8 files) - Settings, workspace config, gitea ✅ Complex Features (7 files) - VM lifecycle, system config, services ✅ Very Complex (9+ files) - Modes, commands, orchestrator, main entry point ✅ Platform schemas (422 files total) ✅ Extensions (providers, clusters) ✅ Production workspace (47 files, 20 taskservs)","breadcrumbs":"ADR-011: Nickel Migration » Completed (Phase 1-4)","id":"1600","title":"Completed (Phase 1-4)"},"1601":{"body":"⏳ Workspace migration (323+ files in workspace_librecloud) ⏳ Extension migration (taskservs, clusters, providers) ⏳ Parallel testing against original KCL ⏳ CI/CD integration updates","breadcrumbs":"ADR-011: Nickel Migration » In Progress (Workspace-Level)","id":"1601","title":"In Progress (Workspace-Level)"},"1602":{"body":"User workspace KCL to Nickel (gradual, as needed) Full migration of legacy configurations TypeDialog UI generation for infrastructure","breadcrumbs":"ADR-011: Nickel Migration » Future (Optional)","id":"1602","title":"Future (Optional)"},"1603":{"body":"","breadcrumbs":"ADR-011: Nickel Migration » Related Documentation","id":"1603","title":"Related Documentation"},"1604":{"body":"KCL Module System - Critical syntax differences and patterns Nickel Migration Guide - Three-file pattern specification and examples Configuration Architecture - Composition patterns and best practices","breadcrumbs":"ADR-011: Nickel Migration » Development Guides","id":"1604","title":"Development Guides"},"1605":{"body":"ADR-010 : Configuration Format Strategy (multi-format approach) ADR-006 : CLI Refactoring (domain-driven design) ADR-004 : Hybrid Rust/Nushell Architecture (platform architecture)","breadcrumbs":"ADR-011: Nickel Migration » Related ADRs","id":"1605","title":"Related ADRs"},"1606":{"body":"Entry point : provisioning/schemas/main.ncl Workspace pattern : workspace_librecloud/nickel/main.ncl Example extension : provisioning/extensions/providers/upcloud/nickel/main.ncl Production infrastructure : workspace_librecloud/nickel/wuji/main.ncl (20 taskservs)","breadcrumbs":"ADR-011: Nickel Migration » Referenced Files","id":"1606","title":"Referenced Files"},"1607":{"body":"Status : Implemented and Production-Ready ✅ Architecture Team: Approved ✅ Platform implementation: Complete (422 files) ✅ Production validation: Passed (47 files active) ✅ Backward compatibility: 100% ✅ Real-world usage: Validated in wuji infrastructure Last Updated : 2025-12-15 Version : 1.0.0 Implementation : Complete (Phase 1-4 finished, workspace-level in progress)","breadcrumbs":"ADR-011: Nickel Migration » Approval","id":"1607","title":"Approval"},"1608":{"body":"","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » ADR-014: Nushell Nickel Plugin - CLI Wrapper Architecture","id":"1608","title":"ADR-014: Nushell Nickel Plugin - CLI Wrapper Architecture"},"1609":{"body":"Accepted - 2025-12-15","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Status","id":"1609","title":"Status"},"161":{"body":"# Show cost estimates\\nprovisioning show cost --infra dev-setup # Detailed cost breakdown\\nprovisioning server price --infra dev-setup","breadcrumbs":"Getting Started » Cost Monitoring","id":"161","title":"Cost Monitoring"},"1610":{"body":"The provisioning system integrates with Nickel for configuration management in advanced scenarios. Users need to evaluate Nickel files and work with their output in Nushell scripts. The nu_plugin_nickel plugin provides this integration. The architectural decision was whether the plugin should: Implement Nickel directly using pure Rust (nickel-lang-core crate) Wrap the official Nickel CLI (nickel command)","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Context","id":"1610","title":"Context"},"1611":{"body":"Nickel configurations in provisioning use the module system : # config/database.ncl\\nimport \\"lib/defaults\\" as defaults\\nimport \\"lib/validation\\" as valid { databases: { primary = defaults.database & { name = \\"primary\\" host = \\"localhost\\" } }\\n} Module system includes: Import resolution with search paths Standard library (builtins, stdlib packages) Module caching Complex evaluation context","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » System Requirements","id":"1611","title":"System Requirements"},"1612":{"body":"Implement the nu_plugin_nickel plugin as a CLI wrapper that invokes the external nickel command.","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Decision","id":"1612","title":"Decision"},"1613":{"body":"┌─────────────────────────────┐\\n│ Nushell Script │\\n│ │\\n│ nickel-export json /file │\\n│ nickel-eval /file │\\n│ nickel-format /file │\\n└────────────┬────────────────┘ │ ▼\\n┌─────────────────────────────┐\\n│ nu_plugin_nickel │\\n│ │\\n│ - Command handling │\\n│ - Argument parsing │\\n│ - JSON output parsing │\\n│ - Caching logic │\\n└────────────┬────────────────┘ │ ▼\\n┌─────────────────────────────┐\\n│ std::process::Command │\\n│ │\\n│ \\"nickel export /file ...\\" │\\n└────────────┬────────────────┘ │ ▼\\n┌─────────────────────────────┐\\n│ Nickel Official CLI │\\n│ │\\n│ - Module resolution │\\n│ - Import handling │\\n│ - Standard library access │\\n│ - Output formatting │\\n│ - Error reporting │\\n└────────────┬────────────────┘ │ ▼\\n┌─────────────────────────────┐\\n│ Nushell Records/Lists │\\n│ │\\n│ ✅ Proper types │\\n│ ✅ Cell path access works │\\n│ ✅ Piping works │\\n└─────────────────────────────┘","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Architecture Diagram","id":"1613","title":"Architecture Diagram"},"1614":{"body":"Plugin provides : ✅ Nushell commands: nickel-export, nickel-eval, nickel-format, nickel-validate ✅ JSON/YAML output parsing (serde_json → nu_protocol::Value) ✅ Automatic caching (SHA256-based, ~80-90% hit rate) ✅ Error handling (CLI errors → Nushell errors) ✅ Type-safe output (nu_protocol::Value::Record, not strings) Plugin delegates to Nickel CLI : ✅ Module resolution with search paths ✅ Standard library access and discovery ✅ Evaluation context setup ✅ Module caching ✅ Output formatting","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Implementation Characteristics","id":"1614","title":"Implementation Characteristics"},"1615":{"body":"","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Rationale","id":"1615","title":"Rationale"},"1616":{"body":"Aspect Pure Rust (nickel-lang-core) CLI Wrapper (chosen) Module resolution ❓ Undocumented API ✅ Official, proven Search paths ❓ How to configure? ✅ CLI handles it Standard library ❓ How to access? ✅ Automatic discovery Import system ❌ API unclear ✅ Built-in Evaluation context ❌ Complex setup needed ✅ CLI provides Future versions ⚠️ Maintain parity ✅ Automatic support Maintenance burden 🔴 High 🟢 Low Complexity 🔴 High 🟢 Low Correctness ⚠️ Risk of divergence ✅ Single source of truth","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Why CLI Wrapper Is The Correct Choice","id":"1616","title":"Why CLI Wrapper Is The Correct Choice"},"1617":{"body":"Using nickel-lang-core directly would require the plugin to: Configure import search paths : // Where should Nickel look for modules?\\n// Current directory? Workspace? System paths?\\n// This is complex and configuration-dependent Access standard library : // Where is the Nickel stdlib installed?\\n// How to handle different Nickel versions?\\n// How to provide builtins? Manage module evaluation context : // Set up evaluation environment\\n// Configure cache locations\\n// Initialize type checker\\n// This is essentially re-implementing CLI logic Maintain compatibility : Every Nickel version change requires review Risk of subtle behavioral differences Duplicate bug fixes and features Two implementations to maintain","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » The Module System Problem","id":"1617","title":"The Module System Problem"},"1618":{"body":"The nickel-lang-core crate lacks clear documentation on: ❓ How to configure import search paths ❓ How to access standard library ❓ How to set up evaluation context ❓ What is the public API contract? This makes direct usage risky. The CLI is the documented, proven interface.","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Documentation Gap","id":"1618","title":"Documentation Gap"},"1619":{"body":"Simple use case (direct library usage works): Simple evaluation with built-in functions No external dependencies No modules or imports Nickel reality (CLI wrapper necessary): Complex module system with search paths External dependencies (standard library) Import resolution with multiple fallbacks Evaluation context that mirrors CLI","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Why Nickel Is Different From Simple Use Cases","id":"1619","title":"Why Nickel Is Different From Simple Use Cases"},"162":{"body":"","breadcrumbs":"Getting Started » Best Practices","id":"162","title":"Best Practices"},"1620":{"body":"","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Consequences","id":"1620","title":"Consequences"},"1621":{"body":"Correctness : Module resolution guaranteed by official Nickel CLI Reliability : No risk from reverse-engineering undocumented APIs Simplicity : Plugin code is lean (~300 lines total) Maintainability : Automatic tracking of Nickel changes Compatibility : Works with all Nickel versions User Expectations : Same behavior as CLI users experience Community Alignment : Uses official Nickel distribution","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Positive","id":"1621","title":"Positive"},"1622":{"body":"External Dependency : Requires nickel binary installed in PATH Process Overhead : ~100-200 ms per execution (heavily cached) Subprocess Management : Spawn handling and stderr capture needed Distribution : Provisioning must include Nickel binary","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Negative","id":"1622","title":"Negative"},"1623":{"body":"Dependency Management : Installation scripts handle Nickel setup Docker images pre-install Nickel Clear error messages if nickel not found Documentation covers installation Performance : Aggressive caching (80-90% typical hit rate) Cache hits: ~1-5 ms (not 100-200 ms) Cache directory: ~/.cache/provisioning/config-cache/ Distribution : Provisioning distributions include Nickel Installers set up Nickel automatically CI/CD has Nickel available","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Mitigation Strategies","id":"1623","title":"Mitigation Strategies"},"1624":{"body":"","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Alternatives Considered","id":"1624","title":"Alternatives Considered"},"1625":{"body":"Pros : No external dependency Cons : Undocumented API, high risk, maintenance burden Decision : REJECTED - Too risky","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Alternative 1: Pure Rust with nickel-lang-core","id":"1625","title":"Alternative 1: Pure Rust with nickel-lang-core"},"1626":{"body":"Pros : Flexibility Cons : Adds complexity, dual code paths, confusing behavior Decision : REJECTED - Over-engineering","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Alternative 2: Hybrid (Pure Rust + CLI fallback)","id":"1626","title":"Alternative 2: Hybrid (Pure Rust + CLI fallback)"},"1627":{"body":"Pros : Standalone Cons : WASM support unclear, additional infrastructure Decision : REJECTED - Immature","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Alternative 3: WebAssembly Version","id":"1627","title":"Alternative 3: WebAssembly Version"},"1628":{"body":"Pros : Uses official interface Cons : LSP not designed for evaluation, wrong abstraction Decision : REJECTED - Inappropriate tool","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Alternative 4: Use Nickel LSP","id":"1628","title":"Alternative 4: Use Nickel LSP"},"1629":{"body":"","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Implementation Details","id":"1629","title":"Implementation Details"},"163":{"body":"✅ Use version control for infrastructure definitions ✅ Test changes in development before production ✅ Use --check mode to preview changes ✅ Keep user configuration separate from infrastructure","breadcrumbs":"Getting Started » 1. Configuration Management","id":"163","title":"1. Configuration Management"},"1630":{"body":"nickel-export : Export/evaluate Nickel file nickel-export json /path/to/file.ncl\\nnickel-export yaml /path/to/file.ncl nickel-eval : Evaluate with automatic caching (for config loader) nickel-eval /workspace/config.ncl nickel-format : Format Nickel files nickel-format /path/to/file.ncl nickel-validate : Validate Nickel files/project nickel-validate /path/to/project","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Command Set","id":"1630","title":"Command Set"},"1631":{"body":"The plugin uses the correct Nickel command syntax : // Correct:\\ncmd.arg(\\"export\\").arg(file).arg(\\"--format\\").arg(format);\\n// Results in: \\"nickel export /file --format json\\" // WRONG (previously):\\ncmd.arg(\\"export\\").arg(format).arg(file);\\n// Results in: \\"nickel export json /file\\"\\n// ↑ This triggers auto-import of nonexistent JSON module","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Critical Implementation Detail: Command Syntax","id":"1631","title":"Critical Implementation Detail: Command Syntax"},"1632":{"body":"Cache Key : SHA256(file_content + format) Cache Hit Rate : 80-90% (typical provisioning workflows) Performance : Cache miss: ~100-200 ms (process fork) Cache hit: ~1-5 ms (filesystem read + parse) Speedup: 50-100x for cached runs Storage : ~/.cache/provisioning/config-cache/","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Caching Strategy","id":"1632","title":"Caching Strategy"},"1633":{"body":"Plugin correctly processes JSON output: Invokes: nickel export /file.ncl --format json Receives: JSON string from stdout Parses: serde_json::Value Converts: json_value_to_nu_value() (recursive) Returns: nu_protocol::Value::Record (not string!) This enables Nushell cell path access: nickel-export json /config.ncl | .database.host # ✅ Works","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » JSON Output Processing","id":"1633","title":"JSON Output Processing"},"1634":{"body":"Unit Tests : JSON parsing correctness Value type conversions Cache logic Integration Tests : Real Nickel file execution Module imports verification Search path resolution Manual Verification : # Test module imports\\nnickel-export json /workspace/config.ncl # Test cell path access\\nnickel-export json /workspace/config.ncl | .database # Verify output types\\nnickel-export json /workspace/config.ncl | type\\n# Should show: record, not string","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Testing Strategy","id":"1634","title":"Testing Strategy"},"1635":{"body":"Plugin integrates with provisioning config system: Nickel path auto-detected: which nickel Cache location: platform-specific cache_dir() Errors: consistent with provisioning patterns","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » Configuration Integration","id":"1635","title":"Configuration Integration"},"1636":{"body":"ADR-012: Nushell Plugins (general framework) Nickel Official Documentation nickel-lang-core Rust Crate nu_plugin_nickel Implementation: provisioning/core/plugins/nushell-plugins/nu_plugin_nickel/ Related: ADR-013-NUSHELL-KCL-PLUGIN Status : Accepted and Implemented Last Updated : 2025-12-15 Implementation : Complete Tests : Passing","breadcrumbs":"ADR-012: Nushell Nickel Plugin CLI Wrapper » References","id":"1636","title":"References"},"1637":{"body":"","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » ADR-013: Typdialog Web UI Backend Integration for Interactive Configuration","id":"1637","title":"ADR-013: Typdialog Web UI Backend Integration for Interactive Configuration"},"1638":{"body":"Accepted - 2025-01-08","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Status","id":"1638","title":"Status"},"1639":{"body":"The provisioning system requires interactive user input for configuration workflows, workspace initialization, credential setup, and guided deployment scenarios. The system architecture combines Rust (performance-critical), Nushell (scripting), and Nickel (declarative configuration), creating challenges for interactive form-based input and multi-user collaboration.","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Context","id":"1639","title":"Context"},"164":{"body":"✅ Use SOPS for encrypting sensitive data ✅ Regular key rotation for cloud providers ✅ Principle of least privilege for access ✅ Audit infrastructure changes","breadcrumbs":"Getting Started » 2. Security","id":"164","title":"2. Security"},"1640":{"body":"Current limitations : Nushell CLI : Terminal-only interaction input command: Single-line text prompts only No form validation, no complex multi-field forms Limited to single-user, terminal-bound workflows User experience: Basic and error-prone Nickel : Declarative configuration language Cannot handle interactive prompts (by design) Pure evaluation model (no side effects) Forms must be defined statically, not interactively No runtime user interaction Existing Solutions : Inadequate for modern infrastructure provisioning Shell-based prompts : Error-prone, no validation, single-user Custom web forms : High maintenance, inconsistent UX Separate admin panels : Disconnected from IaC workflow Terminal-only TUI : Limited to SSH sessions, no collaboration","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » The Interactive Configuration Problem","id":"1640","title":"The Interactive Configuration Problem"},"1641":{"body":"Workspace Initialization : # Current: Error-prone prompts\\nlet workspace_name = input \\"Workspace name: \\"\\nlet provider = input \\"Provider (aws/azure/oci): \\"\\n# No validation, no autocomplete, no guidance Credential Setup : # Current: Insecure and basic\\nlet api_key = input \\"API Key: \\" # Shows in terminal history\\nlet region = input \\"Region: \\" # No validation Configuration Wizards : Database connection setup (host, port, credentials, SSL) Network configuration (CIDR blocks, subnets, gateways) Security policies (encryption, access control, audit) Guided Deployments : Multi-step infrastructure provisioning Service selection with dependencies Environment-specific overrides","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Use Cases Requiring Interactive Input","id":"1641","title":"Use Cases Requiring Interactive Input"},"1642":{"body":"✅ Terminal UI widgets : Text input, password, select, multi-select, confirm ✅ Validation : Type checking, regex patterns, custom validators ✅ Security : Password masking, sensitive data handling ✅ User Experience : Arrow key navigation, autocomplete, help text ✅ Composability : Chain multiple prompts into forms ✅ Error Handling : Clear validation errors, retry logic ✅ Rust Integration : Native Rust library (no subprocess overhead) ✅ Cross-Platform : Works on Linux, macOS, Windows","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Requirements for Interactive Input System","id":"1642","title":"Requirements for Interactive Input System"},"1643":{"body":"Integrate typdialog with its Web UI backend as the standard interactive configuration interface for the provisioning platform. The major achievement of typdialog is not the TUI - it is the Web UI backend that enables browser-based forms, multi-user collaboration, and seamless integration with the provisioning orchestrator.","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Decision","id":"1643","title":"Decision"},"1644":{"body":"┌─────────────────────────────────────────┐\\n│ Nushell Script │\\n│ │\\n│ provisioning workspace init │\\n│ provisioning config setup │\\n│ provisioning deploy guided │\\n└────────────┬────────────────────────────┘ │ ▼\\n┌─────────────────────────────────────────┐\\n│ Rust CLI Handler │\\n│ (provisioning/core/cli/) │\\n│ │\\n│ - Parse command │\\n│ - Determine if interactive needed │\\n│ - Invoke TUI dialog module │\\n└────────────┬────────────────────────────┘ │ ▼\\n┌─────────────────────────────────────────┐\\n│ TUI Dialog Module │\\n│ (typdialog wrapper) │\\n│ │\\n│ - Form definition (validation rules) │\\n│ - Widget rendering (text, select) │\\n│ - User input capture │\\n│ - Validation execution │\\n│ - Result serialization (JSON/TOML) │\\n└────────────┬────────────────────────────┘ │ ▼\\n┌─────────────────────────────────────────┐\\n│ typdialog Library │\\n│ │\\n│ - Terminal rendering (crossterm) │\\n│ - Event handling (keyboard, mouse) │\\n│ - Widget state management │\\n│ - Input validation engine │\\n└────────────┬────────────────────────────┘ │ ▼\\n┌─────────────────────────────────────────┐\\n│ Terminal (stdout/stdin) │\\n│ │\\n│ ✅ Rich TUI with validation │\\n│ ✅ Secure password input │\\n│ ✅ Guided multi-step forms │\\n└─────────────────────────────────────────┘","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Architecture Diagram","id":"1644","title":"Architecture Diagram"},"1645":{"body":"CLI Integration Provides : ✅ Native Rust commands with TUI dialogs ✅ Form-based input for complex configurations ✅ Validation rules defined in Rust (type-safe) ✅ Secure input (password masking, no history) ✅ Error handling with retry logic ✅ Serialization to Nickel/TOML/JSON TUI Dialog Library Handles : ✅ Terminal UI rendering and event loop ✅ Widget management (text, select, checkbox, confirm) ✅ Input validation and error display ✅ Navigation (arrow keys, tab, enter) ✅ Cross-platform terminal compatibility","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Implementation Characteristics","id":"1645","title":"Implementation Characteristics"},"1646":{"body":"","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Rationale","id":"1646","title":"Rationale"},"1647":{"body":"Aspect Shell Prompts (current) Web Forms TUI Dialog (chosen) User Experience ❌ Basic text only ✅ Rich UI ✅ Rich TUI Validation ❌ Manual, error-prone ✅ Built-in ✅ Built-in Security ❌ Plain text, history ⚠️ Network risk ✅ Secure terminal Setup Complexity ✅ None ❌ Server required ✅ Minimal Terminal Workflow ✅ Native ❌ Browser switch ✅ Native Offline Support ✅ Always ❌ Requires server ✅ Always Dependencies ✅ None ❌ Web stack ✅ Single crate Error Handling ❌ Manual ⚠️ Complex ✅ Built-in retry","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Why TUI Dialog Integration Is Required","id":"1647","title":"Why TUI Dialog Integration Is Required"},"1648":{"body":"Nushell\'s input command is limited: # Current: No validation, no security\\nlet password = input \\"Password: \\" # ❌ Shows in terminal\\nlet region = input \\"AWS Region: \\" # ❌ No autocomplete/validation # Cannot do:\\n# - Multi-select from options\\n# - Conditional fields (if X then ask Y)\\n# - Password masking\\n# - Real-time validation\\n# - Autocomplete/fuzzy search","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » The Nushell Limitation","id":"1648","title":"The Nushell Limitation"},"1649":{"body":"Nickel is declarative and cannot prompt users: # Nickel defines what the config looks like, NOT how to get it\\n{ database = { host | String, port | Number, credentials | { username: String, password: String }, }\\n} # Nickel cannot:\\n# - Prompt user for values\\n# - Show interactive forms\\n# - Validate input interactively","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » The Nickel Constraint","id":"1649","title":"The Nickel Constraint"},"165":{"body":"✅ Monitor infrastructure costs regularly ✅ Keep services updated ✅ Document custom configurations ✅ Plan for disaster recovery","breadcrumbs":"Getting Started » 3. Operational Excellence","id":"165","title":"3. Operational Excellence"},"1650":{"body":"Rust provides : Native terminal control (crossterm, termion) Type-safe form definitions Validation rules as functions Secure memory handling (password zeroization) Performance (no subprocess overhead) TUI Dialog provides : Widget library (text, select, multi-select, confirm) Event loop and rendering Validation framework Error display and retry logic Integration enables : Nushell calls Rust CLI → Shows TUI dialog → Returns validated config Nickel receives validated config → Type checks → Merges with defaults","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Why Rust + TUI Dialog Is The Solution","id":"1650","title":"Why Rust + TUI Dialog Is The Solution"},"1651":{"body":"","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Consequences","id":"1651","title":"Consequences"},"1652":{"body":"User Experience : Professional TUI with validation and guidance Security : Password masking, sensitive data protection, no terminal history Validation : Type-safe rules enforced before config generation Developer Experience : Reusable form components across CLI commands Error Handling : Clear validation errors with retry options Offline First : No network dependencies for interactive input Terminal Native : Fits CLI workflow, no context switching Maintainability : Single library for all interactive input","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Positive","id":"1652","title":"Positive"},"1653":{"body":"Terminal Dependency : Requires interactive terminal (not scriptable) Learning Curve : Developers must learn TUI dialog patterns Library Lock-in : Tied to specific TUI library API Testing Complexity : Interactive tests require terminal mocking Non-Interactive Fallback : Need alternative for CI/CD and scripts","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Negative","id":"1653","title":"Negative"},"1654":{"body":"Non-Interactive Mode : // Support both interactive and non-interactive\\nif terminal::is_interactive() { // Show TUI dialog let config = show_workspace_form()?;\\n} else { // Use config file or CLI args let config = load_config_from_file(args.config)?;\\n} Testing : // Unit tests: Test form validation logic (no TUI)\\n#[test]\\nfn test_validate_workspace_name() { assert!(validate_name(\\"my-workspace\\").is_ok()); assert!(validate_name(\\"invalid name!\\").is_err());\\n} // Integration tests: Use mock terminal or config files Scriptability : # Batch mode: Provide config via file\\nprovisioning workspace init --config workspace.toml # Interactive mode: Show TUI dialog\\nprovisioning workspace init --interactive Documentation : Form schemas documented in docs/ Config file examples provided Screenshots of TUI forms in guides","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Mitigation Strategies","id":"1654","title":"Mitigation Strategies"},"1655":{"body":"","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Alternatives Considered","id":"1655","title":"Alternatives Considered"},"1656":{"body":"Pros : Simple, no dependencies Cons : No validation, poor UX, security risks Decision : REJECTED - Inadequate for production use","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Alternative 1: Shell-Based Prompts (Current State)","id":"1656","title":"Alternative 1: Shell-Based Prompts (Current State)"},"1657":{"body":"Pros : Rich UI, well-known patterns Cons : Requires server, network dependency, context switch Decision : REJECTED - Too complex for CLI tool","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Alternative 2: Web-Based Forms","id":"1657","title":"Alternative 2: Web-Based Forms"},"1658":{"body":"Pros : Tailored to each need Cons : High maintenance, code duplication, inconsistent UX Decision : REJECTED - Not sustainable","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Alternative 3: Custom TUI Per Use Case","id":"1658","title":"Alternative 3: Custom TUI Per Use Case"},"1659":{"body":"Pros : Mature, cross-platform Cons : Subprocess overhead, limited validation, shell escaping issues Decision : REJECTED - Poor Rust integration","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Alternative 4: External Form Tool (dialog, whiptail)","id":"1659","title":"Alternative 4: External Form Tool (dialog, whiptail)"},"166":{"body":"# 1. Always validate before applying\\nprovisioning validate config --infra my-infra # 2. Use check mode first\\nprovisioning server create --infra my-infra --check # 3. Apply changes incrementally\\nprovisioning server create --infra my-infra # 4. Verify results\\nprovisioning show servers --infra my-infra","breadcrumbs":"Getting Started » 4. Development Workflow","id":"166","title":"4. Development Workflow"},"1660":{"body":"Pros : Fully scriptable, no interactive complexity Cons : Steep learning curve, no guidance for new users Decision : REJECTED - Poor user onboarding experience","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Alternative 5: Text-Based Config Files Only","id":"1660","title":"Alternative 5: Text-Based Config Files Only"},"1661":{"body":"","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Implementation Details","id":"1661","title":"Implementation Details"},"1662":{"body":"use typdialog::Form; pub fn workspace_initialization_form() -> Result { let form = Form::new(\\"Workspace Initialization\\") .add_text_input(\\"name\\", \\"Workspace Name\\") .required() .validator(|s| validate_workspace_name(s)) .add_select(\\"provider\\", \\"Cloud Provider\\") .options(&[\\"aws\\", \\"azure\\", \\"oci\\", \\"local\\"]) .required() .add_text_input(\\"region\\", \\"Region\\") .default(\\"us-west-2\\") .validator(|s| validate_region(s)) .add_password(\\"admin_password\\", \\"Admin Password\\") .required() .min_length(12) .add_confirm(\\"enable_monitoring\\", \\"Enable Monitoring?\\") .default(true); let responses = form.run()?; // Convert to strongly-typed config let config = WorkspaceConfig { name: responses.get_string(\\"name\\")?, provider: responses.get_string(\\"provider\\")?.parse()?, region: responses.get_string(\\"region\\")?, admin_password: responses.get_password(\\"admin_password\\")?, enable_monitoring: responses.get_bool(\\"enable_monitoring\\")?, }; Ok(config)\\n}","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Form Definition Pattern","id":"1662","title":"Form Definition Pattern"},"1663":{"body":"// 1. Get validated input from TUI dialog\\nlet config = workspace_initialization_form()?; // 2. Serialize to TOML/JSON\\nlet config_toml = toml::to_string(&config)?; // 3. Write to workspace config\\nfs::write(\\"workspace/config.toml\\", config_toml)?; // 4. Nickel merges with defaults\\n// nickel export workspace/main.ncl --format json\\n// (uses workspace/config.toml as input)","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Integration with Nickel","id":"1663","title":"Integration with Nickel"},"1664":{"body":"// provisioning/core/cli/src/commands/workspace.rs #[derive(Parser)]\\npub enum WorkspaceCommand { Init { #[arg(long)] interactive: bool, #[arg(long)] config: Option, },\\n} pub fn handle_workspace_init(args: InitArgs) -> Result<()> { if args.interactive || terminal::is_interactive() { // Show TUI dialog let config = workspace_initialization_form()?; config.save(\\"workspace/config.toml\\")?; } else if let Some(config_path) = args.config { // Use provided config let config = WorkspaceConfig::load(config_path)?; config.save(\\"workspace/config.toml\\")?; } else { bail!(\\"Either --interactive or --config required\\"); } // Continue with workspace setup Ok(())\\n}","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » CLI Command Structure","id":"1664","title":"CLI Command Structure"},"1665":{"body":"pub fn validate_workspace_name(name: &str) -> Result<(), String> { // Alphanumeric, hyphens, 3-32 chars let re = Regex::new(r\\"^[a-z0-9-]{3,32}$\\").unwrap(); if !re.is_match(name) { return Err(\\"Name must be 3-32 lowercase alphanumeric chars with hyphens\\".into()); } Ok(())\\n} pub fn validate_region(region: &str) -> Result<(), String> { const VALID_REGIONS: &[&str] = &[\\"us-west-1\\", \\"us-west-2\\", \\"us-east-1\\", \\"eu-west-1\\"]; if !VALID_REGIONS.contains(®ion) { return Err(format!(\\"Invalid region. Must be one of: {}\\", VALID_REGIONS.join(\\", \\"))); } Ok(())\\n}","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Validation Rules","id":"1665","title":"Validation Rules"},"1666":{"body":"use zeroize::Zeroizing; pub fn get_secure_password() -> Result> { let form = Form::new(\\"Secure Input\\") .add_password(\\"password\\", \\"Password\\") .required() .min_length(12) .validator(password_strength_check); let responses = form.run()?; // Password automatically zeroized when dropped let password = Zeroizing::new(responses.get_password(\\"password\\")?); Ok(password)\\n}","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Security: Password Handling","id":"1666","title":"Security: Password Handling"},"1667":{"body":"Unit Tests : #[test]\\nfn test_workspace_name_validation() { assert!(validate_workspace_name(\\"my-workspace\\").is_ok()); assert!(validate_workspace_name(\\"UPPERCASE\\").is_err()); assert!(validate_workspace_name(\\"ab\\").is_err()); // Too short\\n} Integration Tests : // Use non-interactive mode with config files\\n#[test]\\nfn test_workspace_init_non_interactive() { let config = WorkspaceConfig { name: \\"test-workspace\\".into(), provider: Provider::Local, region: \\"us-west-2\\".into(), admin_password: \\"secure-password-123\\".into(), enable_monitoring: true, }; config.save(\\"/tmp/test-config.toml\\").unwrap(); let result = handle_workspace_init(InitArgs { interactive: false, config: Some(\\"/tmp/test-config.toml\\".into()), }); assert!(result.is_ok());\\n} Manual Testing : # Test interactive flow\\ncargo build --release\\n./target/release/provisioning workspace init --interactive # Test validation errors\\n# - Try invalid workspace name\\n# - Try weak password\\n# - Try invalid region","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Testing Strategy","id":"1667","title":"Testing Strategy"},"1668":{"body":"CLI Flag : # provisioning/config/config.defaults.toml\\n[ui]\\ninteractive_mode = \\"auto\\" # \\"auto\\" | \\"always\\" | \\"never\\"\\ndialog_theme = \\"default\\" # \\"default\\" | \\"minimal\\" | \\"colorful\\" Environment Override : # Force non-interactive mode (for CI/CD)\\nexport PROVISIONING_INTERACTIVE=false # Force interactive mode\\nexport PROVISIONING_INTERACTIVE=true","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Configuration Integration","id":"1668","title":"Configuration Integration"},"1669":{"body":"User Guides : docs/user/interactive-configuration.md - How to use TUI dialogs docs/guides/workspace-setup.md - Workspace initialization with screenshots Developer Documentation : docs/development/tui-forms.md - Creating new TUI forms Form definition best practices Validation rule patterns Configuration Schema : # provisioning/schemas/workspace.ncl\\n{ WorkspaceConfig = { name | doc \\"Workspace identifier (3-32 alphanumeric chars with hyphens)\\" | String, provider | doc \\"Cloud provider\\" | [| \'aws, \'azure, \'oci, \'local |], region | doc \\"Deployment region\\" | String, admin_password | doc \\"Admin password (min 12 characters)\\" | String, enable_monitoring | doc \\"Enable monitoring services\\" | Bool, }\\n}","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Documentation Requirements","id":"1669","title":"Documentation Requirements"},"167":{"body":"","breadcrumbs":"Getting Started » Getting Help","id":"167","title":"Getting Help"},"1670":{"body":"Phase 1: Add Library Add typdialog dependency to provisioning/core/cli/Cargo.toml Create TUI dialog wrapper module Implement basic text/select widgets Phase 2: Implement Forms Workspace initialization form Credential setup form Configuration wizard forms Phase 3: CLI Integration Update CLI commands to use TUI dialogs Add --interactive / --config flags Implement non-interactive fallback Phase 4: Documentation User guides with screenshots Developer documentation for form creation Example configs for non-interactive use Phase 5: Testing Unit tests for validation logic Integration tests with config files Manual testing on all platforms","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » Migration Path","id":"1670","title":"Migration Path"},"1671":{"body":"typdialog Crate (or similar: dialoguer, inquire) crossterm - Terminal manipulation zeroize - Secure memory zeroization ADR-004: Hybrid Architecture (Rust/Nushell integration) ADR-011: Nickel Migration (declarative config language) ADR-012: Nushell Plugins (CLI wrapper patterns) Nushell input command limitations: Nushell Book - Input Status : Accepted Last Updated : 2025-01-08 Implementation : Planned Priority : High (User onboarding and security) Estimated Complexity : Moderate","breadcrumbs":"ADR-013: Typdialog Web UI Backend Integration » References","id":"1671","title":"References"},"1672":{"body":"","breadcrumbs":"ADR-014: SecretumVault Integration » ADR-014: SecretumVault Integration for Secrets Management","id":"1672","title":"ADR-014: SecretumVault Integration for Secrets Management"},"1673":{"body":"Accepted - 2025-01-08","breadcrumbs":"ADR-014: SecretumVault Integration » Status","id":"1673","title":"Status"},"1674":{"body":"The provisioning system manages sensitive data across multiple infrastructure layers: cloud provider credentials, database passwords, API keys, SSH keys, encryption keys, and service tokens. The current security architecture (ADR-009) includes SOPS for encrypted config files and Age for key management, but lacks a centralized secrets management solution with dynamic secrets, access control, and audit logging.","breadcrumbs":"ADR-014: SecretumVault Integration » Context","id":"1674","title":"Context"},"1675":{"body":"Existing Approach : SOPS + Age : Static secrets encrypted in config files Good: Version-controlled, gitops-friendly Limited: Static rotation, no audit trail, manual key distribution Nickel Configuration : Declarative secrets references Good: Type-safe configuration Limited: Cannot generate dynamic secrets, no lifecycle management Manual Secret Injection : Environment variables, CLI flags Good: Simple for development Limited: No security guarantees, prone to leakage","breadcrumbs":"ADR-014: SecretumVault Integration » Current Secrets Management Challenges","id":"1675","title":"Current Secrets Management Challenges"},"1676":{"body":"Security Issues : ❌ No centralized audit trail (who accessed which secret when) ❌ No automatic secret rotation policies ❌ No fine-grained access control (Cedar policies not enforced on secrets) ❌ Secrets scattered across: SOPS files, env vars, config files, K8s secrets ❌ No detection of secret sprawl or leaked credentials Operational Issues : ❌ Manual secret rotation (error-prone, often neglected) ❌ No secret versioning (cannot rollback to previous credentials) ❌ Difficult onboarding (manual key distribution) ❌ No dynamic secrets (credentials exist indefinitely) Compliance Issues : ❌ Cannot prove compliance with secret access policies ❌ No audit logs for regulatory requirements ❌ Cannot enforce secret expiration policies ❌ Difficult to demonstrate least-privilege access","breadcrumbs":"ADR-014: SecretumVault Integration » Problems Without Centralized Secrets Management","id":"1676","title":"Problems Without Centralized Secrets Management"},"1677":{"body":"Dynamic Database Credentials : Generate short-lived DB credentials for applications Automatic rotation based on policies Revocation on application termination Cloud Provider API Keys : Centralized storage with access control Audit trail of credential usage Automatic rotation schedules Service-to-Service Authentication : Dynamic tokens for microservices Short-lived certificates for mTLS Automatic renewal before expiration SSH Key Management : Temporal SSH keys (ADR-009 SSH integration) Centralized certificate authority Audit trail of SSH access Encryption Key Management : Master encryption keys for data at rest Key rotation and versioning Integration with KMS systems","breadcrumbs":"ADR-014: SecretumVault Integration » Use Cases Requiring Centralized Secrets Management","id":"1677","title":"Use Cases Requiring Centralized Secrets Management"},"1678":{"body":"✅ Dynamic Secrets : Generate credentials on-demand with TTL ✅ Access Control : Integration with Cedar authorization policies ✅ Audit Logging : Complete trail of secret access and modifications ✅ Secret Rotation : Automatic and manual rotation policies ✅ Versioning : Track secret versions, enable rollback ✅ High Availability : Distributed, fault-tolerant architecture ✅ Encryption at Rest : AES-256-GCM for stored secrets ✅ API-First : RESTful API for integration ✅ Plugin Ecosystem : Extensible backends (AWS, Azure, databases) ✅ Open Source : Self-hosted, no vendor lock-in","breadcrumbs":"ADR-014: SecretumVault Integration » Requirements for Secrets Management System","id":"1678","title":"Requirements for Secrets Management System"},"1679":{"body":"Integrate SecretumVault as the centralized secrets management system for the provisioning platform.","breadcrumbs":"ADR-014: SecretumVault Integration » Decision","id":"1679","title":"Decision"},"168":{"body":"# General help\\nprovisioning help # Command-specific help\\nprovisioning server help\\nprovisioning taskserv help\\nprovisioning cluster help # Show available options\\nprovisioning generate help","breadcrumbs":"Getting Started » Built-in Help System","id":"168","title":"Built-in Help System"},"1680":{"body":"┌─────────────────────────────────────────────────────────────┐\\n│ Provisioning CLI / Orchestrator / Services │\\n│ │\\n│ - Workspace initialization (credentials) │\\n│ - Infrastructure deployment (cloud API keys) │\\n│ - Service configuration (database passwords) │\\n│ - SSH temporal keys (certificate generation) │\\n└────────────┬────────────────────────────────────────────────┘ │ ▼\\n┌─────────────────────────────────────────────────────────────┐\\n│ SecretumVault Client Library (Rust) │\\n│ (provisioning/core/libs/secretum-client/) │\\n│ │\\n│ - Authentication (token, mTLS) │\\n│ - Secret CRUD operations │\\n│ - Dynamic secret generation │\\n│ - Lease renewal and revocation │\\n│ - Policy enforcement │\\n└────────────┬────────────────────────────────────────────────┘ │ HTTPS + mTLS ▼\\n┌─────────────────────────────────────────────────────────────┐\\n│ SecretumVault Server │\\n│ (Rust-based Vault implementation) │\\n│ │\\n│ ┌───────────────────────────────────────────────────┐ │\\n│ │ API Layer (REST + gRPC) │ │\\n│ ├───────────────────────────────────────────────────┤ │\\n│ │ Authentication & Authorization │ │\\n│ │ - Token auth, mTLS, OIDC integration │ │\\n│ │ - Cedar policy enforcement │ │\\n│ ├───────────────────────────────────────────────────┤ │\\n│ │ Secret Engines │ │\\n│ │ - KV (key-value v2 with versioning) │ │\\n│ │ - Database (dynamic credentials) │ │\\n│ │ - SSH (certificate authority) │ │\\n│ │ - PKI (X.509 certificates) │ │\\n│ │ - Cloud Providers (AWS/Azure/OCI) │ │\\n│ ├───────────────────────────────────────────────────┤ │\\n│ │ Storage Backend │ │\\n│ │ - Encrypted storage (AES-256-GCM) │ │\\n│ │ - PostgreSQL / Raft cluster │ │\\n│ ├───────────────────────────────────────────────────┤ │\\n│ │ Audit Backend │ │\\n│ │ - Structured logging (JSON) │ │\\n│ │ - Syslog, file, database sinks │ │\\n│ └───────────────────────────────────────────────────┘ │\\n└─────────────────────────────────────────────────────────────┘ │ ▼\\n┌─────────────────────────────────────────────────────────────┐\\n│ Backends (Dynamic Secret Generation) │\\n│ │\\n│ - PostgreSQL/MySQL (database credentials) │\\n│ - AWS IAM (temporary access keys) │\\n│ - Azure AD (service principals) │\\n│ - SSH CA (signed certificates) │\\n│ - PKI (X.509 certificates) │\\n└─────────────────────────────────────────────────────────────┘","breadcrumbs":"ADR-014: SecretumVault Integration » Architecture Diagram","id":"1680","title":"Architecture Diagram"},"1681":{"body":"SecretumVault Provides : ✅ Dynamic secret generation with configurable TTL ✅ Secret versioning and rollback capabilities ✅ Fine-grained access control (Cedar policies) ✅ Complete audit trail (all operations logged) ✅ Automatic secret rotation policies ✅ High availability (Raft consensus) ✅ Encryption at rest (AES-256-GCM) ✅ Plugin architecture for secret backends ✅ RESTful and gRPC APIs ✅ Rust implementation (performance, safety) Integration with Provisioning System : ✅ Rust client library (native integration) ✅ Nushell commands via CLI wrapper ✅ Nickel configuration references secrets ✅ Cedar policies control secret access ✅ Orchestrator manages secret lifecycle ✅ SSH integration for temporal keys ✅ KMS integration for encryption keys","breadcrumbs":"ADR-014: SecretumVault Integration » Implementation Characteristics","id":"1681","title":"Implementation Characteristics"},"1682":{"body":"","breadcrumbs":"ADR-014: SecretumVault Integration » Rationale","id":"1682","title":"Rationale"},"1683":{"body":"Aspect SOPS + Age (current) HashiCorp Vault SecretumVault (chosen) Dynamic Secrets ❌ Static only ✅ Full support ✅ Full support Rust Native ⚠️ External CLI ❌ Go binary ✅ Pure Rust Cedar Integration ❌ None ❌ Custom policies ✅ Native Cedar Audit Trail ❌ Git only ✅ Comprehensive ✅ Comprehensive Secret Rotation ❌ Manual ✅ Automatic ✅ Automatic Open Source ✅ Yes ⚠️ MPL 2.0 (BSL now) ✅ Yes Self-Hosted ✅ Yes ✅ Yes ✅ Yes License ✅ Permissive ⚠️ BSL (proprietary) ✅ Permissive Versioning ⚠️ Git commits ✅ Built-in ✅ Built-in High Availability ❌ Single file ✅ Raft cluster ✅ Raft cluster Performance ✅ Fast (local) ⚠️ Network latency ✅ Rust performance","breadcrumbs":"ADR-014: SecretumVault Integration » Why SecretumVault Is Required","id":"1683","title":"Why SecretumVault Is Required"},"1684":{"body":"SOPS is excellent for static secrets in git , but inadequate for: Dynamic Credentials : Cannot generate temporary DB passwords Audit Trail : Git commits are insufficient for compliance Rotation Policies : Manual rotation is error-prone Access Control : No runtime policy enforcement Secret Lifecycle : Cannot track usage or revoke access Multi-System Integration : Limited to files, not API-accessible Complementary Approach : SOPS: Configuration files with long-lived secrets (gitops workflow) SecretumVault: Runtime dynamic secrets, short-lived credentials, audit trail","breadcrumbs":"ADR-014: SecretumVault Integration » Why Not Continue with SOPS Alone","id":"1684","title":"Why Not Continue with SOPS Alone"},"1685":{"body":"HashiCorp Vault Limitations : License Change : BSL (Business Source License) - proprietary for production Not Rust Native : Go binary, subprocess overhead Custom Policy Language : HCL policies, not Cedar (provisioning standard) Complex Deployment : Heavy operational burden Vendor Lock-In : HashiCorp ecosystem dependency SecretumVault Advantages : Rust Native : Zero-cost integration, no subprocess spawning Cedar Policies : Consistent with ADR-008 authorization model Lightweight : Smaller binary, lower resource usage Open Source : Permissive license, community-driven Provisioning-First : Designed for IaC workflows","breadcrumbs":"ADR-014: SecretumVault Integration » Why SecretumVault Over HashiCorp Vault","id":"1685","title":"Why SecretumVault Over HashiCorp Vault"},"1686":{"body":"ADR-009 (Security System) : SOPS: Static config encryption (unchanged) Age: Key management for SOPS (unchanged) SecretumVault: Dynamic secrets, runtime access control (new) ADR-008 (Cedar Authorization) : Cedar policies control SecretumVault secret access Fine-grained permissions: read:secret:database/prod/password Audit trail records Cedar policy decisions SSH Temporal Keys : SecretumVault SSH CA signs user certificates Short-lived certificates (1-24 hours) Audit trail of SSH access","breadcrumbs":"ADR-014: SecretumVault Integration » Integration with Existing Security Architecture","id":"1686","title":"Integration with Existing Security Architecture"},"1687":{"body":"","breadcrumbs":"ADR-014: SecretumVault Integration » Consequences","id":"1687","title":"Consequences"},"1688":{"body":"Security Posture : Centralized secrets with audit trail and rotation Compliance : Complete audit logs for regulatory requirements Operational Excellence : Automatic rotation, dynamic credentials Developer Experience : Simple API for secret access Performance : Rust implementation, zero-cost abstractions Consistency : Cedar policies across entire system (auth + secrets) Observability : Metrics, logs, traces for secret access Disaster Recovery : Secret versioning enables rollback","breadcrumbs":"ADR-014: SecretumVault Integration » Positive","id":"1688","title":"Positive"},"1689":{"body":"Infrastructure Complexity : Additional service to deploy and operate High Availability Requirements : Raft cluster needs 3+ nodes Migration Effort : Existing SOPS secrets need migration path Learning Curve : Operators must learn vault concepts Dependency Risk : Critical path service (secrets unavailable = system down)","breadcrumbs":"ADR-014: SecretumVault Integration » Negative","id":"1689","title":"Negative"},"169":{"body":"For complete command documentation, see: CLI Reference","breadcrumbs":"Getting Started » Command Reference","id":"169","title":"Command Reference"},"1690":{"body":"High Availability : # Deploy SecretumVault cluster (3 nodes)\\nprovisioning deploy secretum-vault --ha --replicas 3 # Automatic leader election via Raft\\n# Clients auto-reconnect to leader Migration from SOPS : # Phase 1: Import existing SOPS secrets into SecretumVault\\nprovisioning secrets migrate --from-sops config/secrets.yaml # Phase 2: Update Nickel configs to reference vault paths\\n# Phase 3: Deprecate SOPS for runtime secrets (keep for config files) Fallback Strategy : // Graceful degradation if vault unavailable\\nlet secret = match vault_client.get_secret(\\"database/password\\").await { Ok(s) => s, Err(VaultError::Unavailable) => { // Fallback to SOPS for read-only operations warn!(\\"Vault unavailable, using SOPS fallback\\"); sops_decrypt(\\"config/secrets.yaml\\", \\"database.password\\")? }, Err(e) => return Err(e),\\n}; Operational Monitoring : # prometheus metrics\\nsecretum_vault_request_duration_seconds\\nsecretum_vault_secret_lease_expiry\\nsecretum_vault_auth_failures_total\\nsecretum_vault_raft_leader_changes # Alerts: Vault unavailable, high auth failure rate, lease expiry","breadcrumbs":"ADR-014: SecretumVault Integration » Mitigation Strategies","id":"1690","title":"Mitigation Strategies"},"1691":{"body":"","breadcrumbs":"ADR-014: SecretumVault Integration » Alternatives Considered","id":"1691","title":"Alternatives Considered"},"1692":{"body":"Pros : No new infrastructure, simple Cons : No dynamic secrets, no audit trail, manual rotation Decision : REJECTED - Insufficient for production security","breadcrumbs":"ADR-014: SecretumVault Integration » Alternative 1: Continue with SOPS Only","id":"1692","title":"Alternative 1: Continue with SOPS Only"},"1693":{"body":"Pros : Mature, feature-rich, widely adopted Cons : BSL license, Go binary, HCL policies (not Cedar), complex deployment Decision : REJECTED - License and integration concerns","breadcrumbs":"ADR-014: SecretumVault Integration » Alternative 2: HashiCorp Vault","id":"1693","title":"Alternative 2: HashiCorp Vault"},"1694":{"body":"Pros : Fully managed, high availability Cons : Vendor lock-in, multi-cloud complexity, cost at scale Decision : REJECTED - Against open-source and multi-cloud principles","breadcrumbs":"ADR-014: SecretumVault Integration » Alternative 3: Cloud Provider Native (AWS Secrets Manager, Azure Key Vault)","id":"1694","title":"Alternative 3: Cloud Provider Native (AWS Secrets Manager, Azure Key Vault)"},"1695":{"body":"Pros : Enterprise features Cons : Proprietary, expensive, poor API integration Decision : REJECTED - Not suitable for IaC automation","breadcrumbs":"ADR-014: SecretumVault Integration » Alternative 4: CyberArk, 1Password, and Others","id":"1695","title":"Alternative 4: CyberArk, 1Password, and Others"},"1696":{"body":"Pros : Full control, tailored to needs Cons : High maintenance burden, security risk, reinventing wheel Decision : REJECTED - SecretumVault provides this already","breadcrumbs":"ADR-014: SecretumVault Integration » Alternative 5: Build Custom Secrets Manager","id":"1696","title":"Alternative 5: Build Custom Secrets Manager"},"1697":{"body":"","breadcrumbs":"ADR-014: SecretumVault Integration » Implementation Details","id":"1697","title":"Implementation Details"},"1698":{"body":"# Deploy via provisioning system\\nprovisioning deploy secretum-vault \\\\ --ha \\\\ --replicas 3 \\\\ --storage postgres \\\\ --tls-cert /path/to/cert.pem \\\\ --tls-key /path/to/key.pem # Initialize and unseal\\nprovisioning vault init\\nprovisioning vault unseal --key-shares 5 --key-threshold 3","breadcrumbs":"ADR-014: SecretumVault Integration » SecretumVault Deployment","id":"1698","title":"SecretumVault Deployment"},"1699":{"body":"// provisioning/core/libs/secretum-client/src/lib.rs use secretum_vault::{Client, SecretEngine, Auth}; pub struct VaultClient { client: Client,\\n} impl VaultClient { pub async fn new(addr: &str, token: &str) -> Result { let client = Client::new(addr) .auth(Auth::Token(token)) .tls_config(TlsConfig::from_files(\\"ca.pem\\", \\"cert.pem\\", \\"key.pem\\"))? .build()?; Ok(Self { client }) } pub async fn get_secret(&self, path: &str) -> Result { self.client.kv2().get(path).await } pub async fn create_dynamic_db_credentials(&self, role: &str) -> Result { self.client.database().generate_credentials(role).await } pub async fn sign_ssh_key(&self, public_key: &str, ttl: Duration) -> Result { self.client.ssh().sign_key(public_key, ttl).await }\\n}","breadcrumbs":"ADR-014: SecretumVault Integration » Rust Client Library","id":"1699","title":"Rust Client Library"},"17":{"body":"Extensions and packages distributed as OCI artifacts, enabling: Industry-standard packaging Efficient caching and bandwidth Version pinning and rollback Air-gapped deployments","breadcrumbs":"Home » OCI-Native Distribution","id":"17","title":"OCI-Native Distribution"},"170":{"body":"If you encounter issues, see: Troubleshooting Guide","breadcrumbs":"Getting Started » Troubleshooting","id":"170","title":"Troubleshooting"},"1700":{"body":"# Nushell commands via Rust CLI wrapper\\nprovisioning secrets get database/prod/password\\nprovisioning secrets set api/keys/stripe --value \\"sk_live_xyz\\"\\nprovisioning secrets rotate database/prod/password\\nprovisioning secrets lease renew lease_id_12345\\nprovisioning secrets list database/","breadcrumbs":"ADR-014: SecretumVault Integration » Nushell Integration","id":"1700","title":"Nushell Integration"},"1701":{"body":"# provisioning/schemas/database.ncl\\n{ database = { host = \\"postgres.example.com\\", port = 5432, username = secrets.get \\"database/prod/username\\", password = secrets.get \\"database/prod/password\\", }\\n} # Nickel function: secrets.get resolves to SecretumVault API call","breadcrumbs":"ADR-014: SecretumVault Integration » Nickel Configuration Integration","id":"1701","title":"Nickel Configuration Integration"},"1702":{"body":"// policy: developers can read dev secrets, not prod\\npermit( principal in Group::\\"developers\\", action == Action::\\"read\\", resource in Secret::\\"database/dev\\"\\n); forbid( principal in Group::\\"developers\\", action == Action::\\"read\\", resource in Secret::\\"database/prod\\"\\n); // policy: CI/CD can generate dynamic DB credentials\\npermit( principal == Service::\\"github-actions\\", action == Action::\\"generate\\", resource in Secret::\\"database/dynamic\\"\\n) when { context.ttl <= duration(\\"1h\\")\\n};","breadcrumbs":"ADR-014: SecretumVault Integration » Cedar Policy for Secret Access","id":"1702","title":"Cedar Policy for Secret Access"},"1703":{"body":"// Application requests temporary DB credentials\\nlet creds = vault_client .database() .generate_credentials(\\"postgres-readonly\\") .await?; println!(\\"Username: {}\\", creds.username); // v-app-abcd1234\\nprintln!(\\"Password: {}\\", creds.password); // random-secure-password\\nprintln!(\\"TTL: {}\\", creds.lease_duration); // 1h // Credentials automatically revoked after TTL\\n// No manual cleanup needed","breadcrumbs":"ADR-014: SecretumVault Integration » Dynamic Database Credentials","id":"1703","title":"Dynamic Database Credentials"},"1704":{"body":"# secretum-vault config\\n[[rotation_policies]]\\npath = \\"database/prod/password\\"\\nschedule = \\"0 0 * * 0\\" # Weekly on Sunday midnight\\nmax_age = \\"30d\\" [[rotation_policies]]\\npath = \\"api/keys/stripe\\"\\nschedule = \\"0 0 1 * *\\" # Monthly on 1st\\nmax_age = \\"90d\\"","breadcrumbs":"ADR-014: SecretumVault Integration » Secret Rotation Automation","id":"1704","title":"Secret Rotation Automation"},"1705":{"body":"{ \\"timestamp\\": \\"2025-01-08T12:34:56Z\\", \\"type\\": \\"request\\", \\"auth\\": { \\"client_token\\": \\"sha256:abc123...\\", \\"accessor\\": \\"hmac:def456...\\", \\"display_name\\": \\"service-orchestrator\\", \\"policies\\": [\\"default\\", \\"service-policy\\"] }, \\"request\\": { \\"operation\\": \\"read\\", \\"path\\": \\"secret/data/database/prod/password\\", \\"remote_address\\": \\"10.0.1.5\\" }, \\"response\\": { \\"status\\": 200 }, \\"cedar_policy\\": { \\"decision\\": \\"permit\\", \\"policy_id\\": \\"allow-orchestrator-read-secrets\\" }\\n}","breadcrumbs":"ADR-014: SecretumVault Integration » Audit Log Format","id":"1705","title":"Audit Log Format"},"1706":{"body":"Unit Tests : #[tokio::test]\\nasync fn test_get_secret() { let vault = mock_vault_client(); let secret = vault.get_secret(\\"test/secret\\").await.unwrap(); assert_eq!(secret.value, \\"expected-value\\");\\n} #[tokio::test]\\nasync fn test_dynamic_credentials_generation() { let vault = mock_vault_client(); let creds = vault.create_dynamic_db_credentials(\\"postgres-readonly\\").await.unwrap(); assert!(creds.username.starts_with(\\"v-\\")); assert_eq!(creds.lease_duration, Duration::from_secs(3600));\\n} Integration Tests : # Test vault deployment\\nprovisioning deploy secretum-vault --test-mode\\nprovisioning vault init\\nprovisioning vault unseal # Test secret operations\\nprovisioning secrets set test/secret --value \\"test-value\\"\\nprovisioning secrets get test/secret | assert \\"test-value\\" # Test dynamic credentials\\nprovisioning secrets db-creds postgres-readonly | jq \'.username\' | assert-contains \\"v-\\" # Test rotation\\nprovisioning secrets rotate test/secret Security Tests : #[tokio::test]\\nasync fn test_unauthorized_access_denied() { let vault = vault_client_with_limited_token(); let result = vault.get_secret(\\"database/prod/password\\").await; assert!(matches!(result, Err(VaultError::PermissionDenied)));\\n}","breadcrumbs":"ADR-014: SecretumVault Integration » Testing Strategy","id":"1706","title":"Testing Strategy"},"1707":{"body":"Provisioning Config : # provisioning/config/config.defaults.toml\\n[secrets]\\nprovider = \\"secretum-vault\\" # \\"secretum-vault\\" | \\"sops\\" | \\"env\\"\\nvault_addr = \\"https://vault.example.com:8200\\"\\nvault_namespace = \\"provisioning\\"\\nvault_mount = \\"secret\\" [secrets.tls]\\nca_cert = \\"/etc/provisioning/vault-ca.pem\\"\\nclient_cert = \\"/etc/provisioning/vault-client.pem\\"\\nclient_key = \\"/etc/provisioning/vault-client-key.pem\\" [secrets.cache]\\nenabled = true\\nttl = \\"5m\\"\\nmax_size = \\"100MB\\" Environment Variables : export VAULT_ADDR=\\"https://vault.example.com:8200\\"\\nexport VAULT_TOKEN=\\"s.abc123def456...\\"\\nexport VAULT_NAMESPACE=\\"provisioning\\"\\nexport VAULT_CACERT=\\"/etc/provisioning/vault-ca.pem\\"","breadcrumbs":"ADR-014: SecretumVault Integration » Configuration Integration","id":"1707","title":"Configuration Integration"},"1708":{"body":"Phase 1: Deploy SecretumVault Deploy vault cluster in HA mode Initialize and configure backends Set up Cedar policies Phase 2: Migrate Static Secrets Import SOPS secrets into vault KV store Update Nickel configs to reference vault paths Verify secret access via new API Phase 3: Enable Dynamic Secrets Configure database secret engine Configure SSH CA secret engine Update applications to use dynamic credentials Phase 4: Deprecate SOPS for Runtime SOPS remains for gitops config files Runtime secrets exclusively from vault Audit trail enforcement Phase 5: Automation Automatic rotation policies Lease renewal automation Monitoring and alerting","breadcrumbs":"ADR-014: SecretumVault Integration » Migration Path","id":"1708","title":"Migration Path"},"1709":{"body":"User Guides : docs/user/secrets-management.md - Using SecretumVault docs/user/dynamic-credentials.md - Dynamic secret workflows docs/user/secret-rotation.md - Rotation policies and procedures Operations Documentation : docs/operations/vault-deployment.md - Deploying and configuring vault docs/operations/vault-backup-restore.md - Backup and disaster recovery docs/operations/vault-monitoring.md - Metrics, logs, alerts Developer Documentation : docs/development/secrets-api.md - Rust client library usage docs/development/cedar-secret-policies.md - Writing Cedar policies for secrets Secret engine development guide Security Documentation : docs/security/secrets-architecture.md - Security architecture overview docs/security/audit-logging.md - Audit trail and compliance Threat model and risk assessment","breadcrumbs":"ADR-014: SecretumVault Integration » Documentation Requirements","id":"1709","title":"Documentation Requirements"},"171":{"body":"Let\'s walk through a complete example of setting up a web application infrastructure:","breadcrumbs":"Getting Started » Real-World Example","id":"171","title":"Real-World Example"},"1710":{"body":"SecretumVault GitHub (hypothetical, replace with actual) HashiCorp Vault Documentation (for comparison) ADR-008: Cedar Authorization (policy integration) ADR-009: Security System Complete (current security architecture) Raft Consensus Algorithm Cedar Policy Language SOPS: https://github.com/getsops/sops Age Encryption: https://age-encryption.org/ Status : Accepted Last Updated : 2025-01-08 Implementation : Planned Priority : High (Security and compliance) Estimated Complexity : Complex","breadcrumbs":"ADR-014: SecretumVault Integration » References","id":"1710","title":"References"},"1711":{"body":"","breadcrumbs":"ADR-015: AI Integration Architecture » ADR-015: AI Integration Architecture for Intelligent Infrastructure Provisioning","id":"1711","title":"ADR-015: AI Integration Architecture for Intelligent Infrastructure Provisioning"},"1712":{"body":"Accepted - 2025-01-08","breadcrumbs":"ADR-015: AI Integration Architecture » Status","id":"1712","title":"Status"},"1713":{"body":"The provisioning platform has evolved to include complex workflows for infrastructure configuration, deployment, and management. Current interaction patterns require deep technical knowledge of Nickel schemas, cloud provider APIs, networking concepts, and security best practices. This creates barriers to entry and slows down infrastructure provisioning for operators who are not infrastructure experts.","breadcrumbs":"ADR-015: AI Integration Architecture » Context","id":"1713","title":"Context"},"1714":{"body":"Current state challenges : Knowledge Barrier : Deep Nickel, cloud, and networking expertise required Understanding Nickel type system and contracts Knowing cloud provider resource relationships Configuring security policies correctly Debugging deployment failures Manual Configuration : All configs hand-written Repetitive boilerplate for common patterns Easy to make mistakes (typos, missing fields) No intelligent suggestions or autocomplete Trial-and-error debugging Limited Assistance : No contextual help Documentation is separate from workflow No explanation of validation errors No suggestions for fixing issues No learning from past deployments Troubleshooting Difficulty : Manual log analysis Deployment failures require expert analysis No automated root cause detection No suggested fixes based on similar issues Long time-to-resolution","breadcrumbs":"ADR-015: AI Integration Architecture » The Infrastructure Complexity Problem","id":"1714","title":"The Infrastructure Complexity Problem"},"1715":{"body":"Natural Language to Configuration : User: \\"Create a production PostgreSQL cluster with encryption and daily backups\\" AI: Generates validated Nickel configuration AI-Assisted Form Filling : User starts typing in typdialog web form AI suggests values based on context AI explains validation errors in plain language Intelligent Troubleshooting : Deployment fails AI analyzes logs and suggests fixes AI generates corrected configuration Configuration Optimization : AI analyzes workload patterns AI suggests performance improvements AI detects security misconfigurations Learning from Operations : AI indexes past deployments AI suggests configurations based on similar workloads AI predicts potential issues","breadcrumbs":"ADR-015: AI Integration Architecture » AI Integration Opportunities","id":"1715","title":"AI Integration Opportunities"},"1716":{"body":"The system integrates multiple AI components: typdialog-ai : AI-assisted form interactions typdialog-ag : AI agents for autonomous operations typdialog-prov-gen : AI-powered configuration generation platform/crates/ai-service : Core AI service backend platform/crates/mcp-server : Model Context Protocol server platform/crates/rag : Retrieval-Augmented Generation system","breadcrumbs":"ADR-015: AI Integration Architecture » AI Components Overview","id":"1716","title":"AI Components Overview"},"1717":{"body":"✅ Natural Language Understanding : Parse user intent from free-form text ✅ Schema-Aware Generation : Generate valid Nickel configurations ✅ Context Retrieval : Access documentation, schemas, past deployments ✅ Security Enforcement : Cedar policies control AI access ✅ Human-in-the-Loop : All AI actions require human approval ✅ Audit Trail : Complete logging of AI operations ✅ Multi-Provider Support : OpenAI, Anthropic, local models ✅ Cost Control : Rate limiting and budget management ✅ Observability : Trace AI decisions and reasoning","breadcrumbs":"ADR-015: AI Integration Architecture » Requirements for AI Integration","id":"1717","title":"Requirements for AI Integration"},"1718":{"body":"Integrate a comprehensive AI system consisting of: AI-Assisted Interfaces (typdialog-ai) Autonomous AI Agents (typdialog-ag) AI Configuration Generator (typdialog-prov-gen) Core AI Infrastructure (ai-service, mcp-server, rag) All AI components are schema-aware , security-enforced , and human-supervised .","breadcrumbs":"ADR-015: AI Integration Architecture » Decision","id":"1718","title":"Decision"},"1719":{"body":"┌─────────────────────────────────────────────────────────────────┐\\n│ User Interfaces │\\n│ │\\n│ Natural Language: \\"Create production K8s cluster in AWS\\" │\\n│ Typdialog Forms: AI-assisted field suggestions │\\n│ CLI: provisioning ai generate-config \\"description\\" │\\n└────────────┬────────────────────────────────────────────────────┘ │ ▼\\n┌─────────────────────────────────────────────────────────────────┐\\n│ AI Frontend Layer │\\n│ ┌───────────────────────────────────────────────────────┐ │\\n│ │ typdialog-ai (AI-Assisted Forms) │ │\\n│ │ - Natural language form filling │ │\\n│ │ - Real-time AI suggestions │ │\\n│ │ - Validation error explanations │ │\\n│ │ - Context-aware autocomplete │ │\\n│ ├───────────────────────────────────────────────────────┤ │\\n│ │ typdialog-ag (AI Agents) │ │\\n│ │ - Autonomous task execution │ │\\n│ │ - Multi-step workflow automation │ │\\n│ │ - Learning from feedback │ │\\n│ │ - Agent collaboration │ │\\n│ ├───────────────────────────────────────────────────────┤ │\\n│ │ typdialog-prov-gen (Config Generator) │ │\\n│ │ - Natural language → Nickel config │ │\\n│ │ - Template-based generation │ │\\n│ │ - Best practice injection │ │\\n│ │ - Validation and refinement │ │\\n│ └───────────────────────────────────────────────────────┘ │\\n└────────────┬────────────────────────────────────────────────────┘ │ ▼\\n┌────────────────────────────────────────────────────────────────┐\\n│ Core AI Infrastructure (platform/crates/) │\\n│ ┌───────────────────────────────────────────────────────┐ │\\n│ │ ai-service (Central AI Service) │ │\\n│ │ │ │\\n│ │ - Request routing and orchestration │ │\\n│ │ - Authentication and authorization (Cedar) │ │\\n│ │ - Rate limiting and cost control │ │\\n│ │ - Caching and optimization │ │\\n│ │ - Audit logging and observability │ │\\n│ │ - Multi-provider abstraction │ │\\n│ └─────────────┬─────────────────────┬───────────────────┘ │\\n│ │ │ │\\n│ ▼ ▼ │\\n│ ┌─────────────────────┐ ┌─────────────────────┐ │\\n│ │ mcp-server │ │ rag │ │\\n│ │ (Model Context │ │ (Retrieval-Aug Gen) │ │\\n│ │ Protocol) │ │ │ │\\n│ │ │ │ ┌─────────────────┐ │ │\\n│ │ - LLM integration │ │ │ Vector Store │ │ │\\n│ │ - Tool calling │ │ │ (Qdrant/Milvus) │ │ │\\n│ │ - Context mgmt │ │ └─────────────────┘ │ │\\n│ │ - Multi-provider │ │ ┌─────────────────┐ │ │\\n│ │ (OpenAI, │ │ │ Embeddings │ │ │\\n│ │ Anthropic, │ │ │ (text-embed) │ │ │\\n│ │ Local models) │ │ └─────────────────┘ │ │\\n│ │ │ │ ┌─────────────────┐ │ │\\n│ │ Tools: │ │ │ Index: │ │ │\\n│ │ - nickel_validate │ │ │ - Nickel schemas│ │ │\\n│ │ - schema_query │ │ │ - Documentation │ │ │\\n│ │ - config_generate │ │ │ - Past deploys │ │ │\\n│ │ - cedar_check │ │ │ - Best practices│ │ │\\n│ └─────────────────────┘ │ └─────────────────┘ │ │\\n│ │ │ │\\n│ │ Query: \\"How to │ │\\n│ │ configure Postgres │ │\\n│ │ with encryption?\\" │ │\\n│ │ │ │\\n│ │ Retrieval: Relevant │ │\\n│ │ docs + examples │ │\\n│ └─────────────────────┘ │\\n└────────────┬───────────────────────────────────────────────────┘ │ ▼\\n┌─────────────────────────────────────────────────────────────────┐\\n│ Integration Points │\\n│ │\\n│ ┌─────────────┐ ┌──────────────┐ ┌─────────────────────┐ │\\n│ │ Nickel │ │ SecretumVault│ │ Cedar Authorization │ │\\n│ │ Validation │ │ (Secrets) │ │ (AI Policies) │ │\\n│ └─────────────┘ └──────────────┘ └─────────────────────┘ │\\n│ │\\n│ ┌─────────────┐ ┌──────────────┐ ┌─────────────────────┐ │\\n│ │ Orchestrator│ │ Typdialog │ │ Audit Logging │ │\\n│ │ (Deploy) │ │ (Forms) │ │ (All AI Ops) │ │\\n│ └─────────────┘ └──────────────┘ └─────────────────────┘ │\\n└─────────────────────────────────────────────────────────────────┘ │ ▼\\n┌─────────────────────────────────────────────────────────────────┐\\n│ Output: Validated Nickel Configuration │\\n│ │\\n│ ✅ Schema-validated │\\n│ ✅ Security-checked (Cedar policies) │\\n│ ✅ Human-approved │\\n│ ✅ Audit-logged │\\n│ ✅ Ready for deployment │\\n└─────────────────────────────────────────────────────────────────┘","breadcrumbs":"ADR-015: AI Integration Architecture » Architecture Diagram","id":"1719","title":"Architecture Diagram"},"172":{"body":"# Create project workspace\\nmkdir ~/webapp-infrastructure\\ncd ~/webapp-infrastructure # Generate base infrastructure\\nprovisioning generate infra --new webapp","breadcrumbs":"Getting Started » Step 1: Plan Your Infrastructure","id":"172","title":"Step 1: Plan Your Infrastructure"},"1720":{"body":"typdialog-ai (AI-Assisted Forms): Real-time form field suggestions based on context Natural language form filling Validation error explanations in plain English Context-aware autocomplete for configuration values Integration with typdialog web UI typdialog-ag (AI Agents): Autonomous task execution (multi-step workflows) Agent collaboration (multiple agents working together) Learning from user feedback and past operations Goal-oriented behavior (achieve outcome, not just execute steps) Safety boundaries (cannot deploy without approval) typdialog-prov-gen (Config Generator): Natural language → Nickel configuration Template-based generation with customization Best practice injection (security, performance, HA) Iterative refinement based on validation feedback Integration with Nickel schema system ai-service (Core AI Service): Central request router for all AI operations Authentication and authorization (Cedar policies) Rate limiting and cost control Caching (reduce LLM API calls) Audit logging (all AI operations) Multi-provider abstraction (OpenAI, Anthropic, local) mcp-server (Model Context Protocol): LLM integration (OpenAI, Anthropic, local models) Tool calling framework (nickel_validate, schema_query, etc.) Context management (conversation history, schemas) Streaming responses for real-time feedback Error handling and retries rag (Retrieval-Augmented Generation): Vector store (Qdrant/Milvus) for embeddings Document indexing (Nickel schemas, docs, deployments) Semantic search (find relevant context) Embedding generation (text-embedding-3-large) Query expansion and reranking","breadcrumbs":"ADR-015: AI Integration Architecture » Component Responsibilities","id":"1720","title":"Component Responsibilities"},"1721":{"body":"","breadcrumbs":"ADR-015: AI Integration Architecture » Rationale","id":"1721","title":"Rationale"},"1722":{"body":"Aspect Manual Config AI-Assisted (chosen) Learning Curve 🔴 Steep 🟢 Gentle Time to Deploy 🔴 Hours 🟢 Minutes Error Rate 🔴 High 🟢 Low (validated) Documentation Access 🔴 Separate 🟢 Contextual Troubleshooting 🔴 Manual 🟢 AI-assisted Best Practices ⚠️ Manual enforcement ✅ Auto-injected Consistency ⚠️ Varies by operator ✅ Standardized Scalability 🔴 Limited by expertise 🟢 AI scales knowledge","breadcrumbs":"ADR-015: AI Integration Architecture » Why AI Integration Is Essential","id":"1722","title":"Why AI Integration Is Essential"},"1723":{"body":"Traditional AI code generation fails for infrastructure because: Generic AI (like GitHub Copilot):\\n❌ Generates syntactically correct but semantically wrong configs\\n❌ Doesn\'t understand cloud provider constraints\\n❌ No validation against schemas\\n❌ No security policy enforcement\\n❌ Hallucinated resource names/IDs Schema-aware AI (our approach): # Nickel schema provides ground truth\\n{ Database = { engine | [| \'postgres, \'mysql, \'mongodb |], version | String, storage_gb | Number, backup_retention_days | Number, }\\n} # AI generates ONLY valid configs\\n# AI knows:\\n# - Valid engine values (\'postgres\', not \'postgresql\')\\n# - Required fields (all listed above)\\n# - Type constraints (storage_gb is Number, not String)\\n# - Nickel contracts (if defined) Result : AI cannot generate invalid configs.","breadcrumbs":"ADR-015: AI Integration Architecture » Why Schema-Aware AI Is Critical","id":"1723","title":"Why Schema-Aware AI Is Critical"},"1724":{"body":"LLMs alone have limitations: Pure LLM:\\n❌ Knowledge cutoff (no recent updates)\\n❌ Hallucinations (invents plausible-sounding configs)\\n❌ No project-specific knowledge\\n❌ No access to past deployments RAG-enhanced LLM : Query: \\"How to configure Postgres with encryption?\\" RAG retrieves:\\n- Nickel schema: provisioning/schemas/database.ncl\\n- Documentation: docs/user/database-encryption.md\\n- Past deployment: workspaces/prod/postgres-encrypted.ncl\\n- Best practice: .claude/patterns/secure-database.md LLM generates answer WITH retrieved context:\\n✅ Accurate (based on actual schemas)\\n✅ Project-specific (uses our patterns)\\n✅ Proven (learned from past deployments)\\n✅ Secure (follows our security guidelines)","breadcrumbs":"ADR-015: AI Integration Architecture » Why RAG (Retrieval-Augmented Generation) Is Essential","id":"1724","title":"Why RAG (Retrieval-Augmented Generation) Is Essential"},"1725":{"body":"AI-generated infrastructure configs require human approval: // All AI operations require approval\\npub async fn ai_generate_config(request: GenerateRequest) -> Result { let ai_generated = ai_service.generate(request).await?; // Validate against Nickel schema let validation = nickel_validate(&ai_generated)?; if !validation.is_valid() { return Err(\\"AI generated invalid config\\"); } // Check Cedar policies let authorized = cedar_authorize( principal: user, action: \\"approve_ai_config\\", resource: ai_generated, )?; if !authorized { return Err(\\"User not authorized to approve AI config\\"); } // Require explicit human approval let approval = prompt_user_approval(&ai_generated).await?; if !approval.approved { audit_log(\\"AI config rejected by user\\", &ai_generated); return Err(\\"User rejected AI-generated config\\"); } audit_log(\\"AI config approved by user\\", &ai_generated); Ok(ai_generated)\\n} Why : Infrastructure changes have real-world cost and security impact AI can make mistakes (hallucinations, misunderstandings) Compliance requires human accountability Learning opportunity (human reviews teach AI)","breadcrumbs":"ADR-015: AI Integration Architecture » Why Human-in-the-Loop Is Non-Negotiable","id":"1725","title":"Why Human-in-the-Loop Is Non-Negotiable"},"1726":{"body":"No single LLM provider is best for all tasks: Provider Best For Considerations Anthropic (Claude) Long context, accuracy ✅ Best for complex configs OpenAI (GPT-4) Tool calling, speed ✅ Best for quick suggestions Local (Llama, Mistral) Privacy, cost ✅ Best for air-gapped envs Strategy : Complex config generation → Claude (long context) Real-time form suggestions → GPT-4 (fast) Air-gapped deployments → Local models (privacy)","breadcrumbs":"ADR-015: AI Integration Architecture » Why Multi-Provider Support Matters","id":"1726","title":"Why Multi-Provider Support Matters"},"1727":{"body":"","breadcrumbs":"ADR-015: AI Integration Architecture » Consequences","id":"1727","title":"Consequences"},"1728":{"body":"Accessibility : Non-experts can provision infrastructure Productivity : 10x faster configuration creation Quality : AI injects best practices automatically Consistency : Standardized configurations across teams Learning : Users learn from AI explanations Troubleshooting : AI-assisted debugging reduces MTTR Documentation : Contextual help embedded in workflow Safety : Schema validation prevents invalid configs Security : Cedar policies control AI access Auditability : Complete trail of AI operations","breadcrumbs":"ADR-015: AI Integration Architecture » Positive","id":"1728","title":"Positive"},"1729":{"body":"Dependency : Requires LLM API access (or local models) Cost : LLM API calls have per-token cost Latency : AI responses take 1-5 seconds Accuracy : AI can still make mistakes (needs validation) Trust : Users must understand AI limitations Complexity : Additional infrastructure to operate Privacy : Configs sent to LLM providers (unless local)","breadcrumbs":"ADR-015: AI Integration Architecture » Negative","id":"1729","title":"Negative"},"173":{"body":"Edit webapp/settings.ncl to define: 2 web servers for load balancing 1 database server Load balancer configuration","breadcrumbs":"Getting Started » Step 2: Customize Configuration","id":"173","title":"Step 2: Customize Configuration"},"1730":{"body":"Cost Control : [ai.rate_limiting]\\nrequests_per_minute = 60\\ntokens_per_day = 1000000\\ncost_limit_per_day = \\"100.00\\" # USD [ai.caching]\\nenabled = true\\nttl = \\"1h\\"\\n# Cache similar queries to reduce API calls Latency Optimization : // Streaming responses for real-time feedback\\npub async fn ai_generate_stream(request: GenerateRequest) -> impl Stream { ai_service .generate_stream(request) .await .map(|chunk| chunk.text)\\n} Privacy (Local Models) : [ai]\\nprovider = \\"local\\"\\nmodel_path = \\"/opt/provisioning/models/llama-3-70b\\" # No data leaves the network Validation (Defense in Depth) : AI generates config ↓\\nNickel schema validation (syntax, types, contracts) ↓\\nCedar policy check (security, compliance) ↓\\nHuman approval (final gate) ↓\\nDeployment Observability : [ai.observability]\\ntrace_all_requests = true\\nstore_conversations = true\\nconversation_retention = \\"30d\\" # Every AI operation logged:\\n# - Input prompt\\n# - Retrieved context (RAG)\\n# - Generated output\\n# - Validation results\\n# - Human approval decision","breadcrumbs":"ADR-015: AI Integration Architecture » Mitigation Strategies","id":"1730","title":"Mitigation Strategies"},"1731":{"body":"","breadcrumbs":"ADR-015: AI Integration Architecture » Alternatives Considered","id":"1731","title":"Alternatives Considered"},"1732":{"body":"Pros : Simpler, no LLM dependencies Cons : Steep learning curve, slow provisioning, manual troubleshooting Decision : REJECTED - Poor user experience (10x slower provisioning, high error rate)","breadcrumbs":"ADR-015: AI Integration Architecture » Alternative 1: No AI Integration","id":"1732","title":"Alternative 1: No AI Integration"},"1733":{"body":"Pros : Existing tools, well-known UX Cons : Not schema-aware, generates invalid configs, no validation Decision : REJECTED - Inadequate for infrastructure (correctness critical)","breadcrumbs":"ADR-015: AI Integration Architecture » Alternative 2: Generic AI Code Generation (GitHub Copilot approach)","id":"1733","title":"Alternative 2: Generic AI Code Generation (GitHub Copilot approach)"},"1734":{"body":"Pros : Lower risk (AI doesn\'t generate configs) Cons : Missed opportunity for 10x productivity gains Decision : REJECTED - Too conservative","breadcrumbs":"ADR-015: AI Integration Architecture » Alternative 3: AI Only for Documentation/Search","id":"1734","title":"Alternative 3: AI Only for Documentation/Search"},"1735":{"body":"Pros : Maximum automation Cons : Unacceptable risk for infrastructure changes Decision : REJECTED - Safety and compliance requirements","breadcrumbs":"ADR-015: AI Integration Architecture » Alternative 4: Fully Autonomous AI (No Human Approval)","id":"1735","title":"Alternative 4: Fully Autonomous AI (No Human Approval)"},"1736":{"body":"Pros : Simpler integration Cons : Vendor lock-in, no flexibility for different use cases Decision : REJECTED - Multi-provider abstraction provides flexibility","breadcrumbs":"ADR-015: AI Integration Architecture » Alternative 5: Single LLM Provider Lock-in","id":"1736","title":"Alternative 5: Single LLM Provider Lock-in"},"1737":{"body":"","breadcrumbs":"ADR-015: AI Integration Architecture » Implementation Details","id":"1737","title":"Implementation Details"},"1738":{"body":"// platform/crates/ai-service/src/lib.rs #[async_trait]\\npub trait AIService { async fn generate_config( &self, prompt: &str, schema: &NickelSchema, context: Option, ) -> Result; async fn suggest_field_value( &self, field: &FieldDefinition, partial_input: &str, form_context: &FormContext, ) -> Result>; async fn explain_validation_error( &self, error: &ValidationError, config: &Config, ) -> Result; async fn troubleshoot_deployment( &self, deployment_id: &str, logs: &DeploymentLogs, ) -> Result;\\n} pub struct AIServiceImpl { mcp_client: MCPClient, rag: RAGService, cedar: CedarEngine, audit: AuditLogger, rate_limiter: RateLimiter, cache: Cache,\\n} impl AIService for AIServiceImpl { async fn generate_config( &self, prompt: &str, schema: &NickelSchema, context: Option, ) -> Result { // Check authorization self.cedar.authorize( principal: current_user(), action: \\"ai:generate_config\\", resource: schema, )?; // Rate limiting self.rate_limiter.check(current_user()).await?; // Retrieve relevant context via RAG let rag_context = match context { Some(ctx) => ctx, None => self.rag.retrieve(prompt, schema).await?, }; // Generate config via MCP let generated = self.mcp_client.generate( prompt: prompt, schema: schema, context: rag_context, tools: &[\\"nickel_validate\\", \\"schema_query\\"], ).await?; // Validate generated config let validation = nickel_validate(&generated.config)?; if !validation.is_valid() { return Err(AIError::InvalidGeneration(validation.errors)); } // Audit log self.audit.log(AIOperation::GenerateConfig { user: current_user(), prompt: prompt, schema: schema.name(), generated: &generated.config, validation: validation, }); Ok(GeneratedConfig { config: generated.config, explanation: generated.explanation, confidence: generated.confidence, validation: validation, }) }\\n}","breadcrumbs":"ADR-015: AI Integration Architecture » AI Service API","id":"1738","title":"AI Service API"},"1739":{"body":"// platform/crates/mcp-server/src/lib.rs pub struct MCPClient { provider: Box, tools: ToolRegistry,\\n} #[async_trait]\\npub trait LLMProvider { async fn generate(&self, request: GenerateRequest) -> Result; async fn generate_stream(&self, request: GenerateRequest) -> Result>;\\n} // Tool definitions for LLM\\npub struct ToolRegistry { tools: HashMap,\\n} impl ToolRegistry { pub fn new() -> Self { let mut tools = HashMap::new(); tools.insert(\\"nickel_validate\\", Tool { name: \\"nickel_validate\\", description: \\"Validate Nickel configuration against schema\\", parameters: json!({ \\"type\\": \\"object\\", \\"properties\\": { \\"config\\": {\\"type\\": \\"string\\"}, \\"schema_path\\": {\\"type\\": \\"string\\"}, }, \\"required\\": [\\"config\\", \\"schema_path\\"], }), handler: Box::new(|params| async { let config = params[\\"config\\"].as_str().unwrap(); let schema = params[\\"schema_path\\"].as_str().unwrap(); nickel_validate_tool(config, schema).await }), }); tools.insert(\\"schema_query\\", Tool { name: \\"schema_query\\", description: \\"Query Nickel schema for field information\\", parameters: json!({ \\"type\\": \\"object\\", \\"properties\\": { \\"schema_path\\": {\\"type\\": \\"string\\"}, \\"query\\": {\\"type\\": \\"string\\"}, }, \\"required\\": [\\"schema_path\\"], }), handler: Box::new(|params| async { let schema = params[\\"schema_path\\"].as_str().unwrap(); let query = params.get(\\"query\\").and_then(|v| v.as_str()); schema_query_tool(schema, query).await }), }); Self { tools } }\\n}","breadcrumbs":"ADR-015: AI Integration Architecture » MCP Server Integration","id":"1739","title":"MCP Server Integration"},"174":{"body":"# Validate configuration\\nprovisioning validate config --infra webapp # Preview deployment\\nprovisioning server create --infra webapp --check # Deploy servers\\nprovisioning server create --infra webapp","breadcrumbs":"Getting Started » Step 3: Deploy Base Infrastructure","id":"174","title":"Step 3: Deploy Base Infrastructure"},"1740":{"body":"// platform/crates/rag/src/lib.rs pub struct RAGService { vector_store: Box, embeddings: EmbeddingModel, indexer: DocumentIndexer,\\n} impl RAGService { pub async fn index_all(&self) -> Result<()> { // Index Nickel schemas self.index_schemas(\\"provisioning/schemas\\").await?; // Index documentation self.index_docs(\\"docs\\").await?; // Index past deployments self.index_deployments(\\"workspaces\\").await?; // Index best practices self.index_patterns(\\".claude/patterns\\").await?; Ok(()) } pub async fn retrieve( &self, query: &str, schema: &NickelSchema, ) -> Result { // Generate query embedding let query_embedding = self.embeddings.embed(query).await?; // Search vector store let results = self.vector_store.search( embedding: query_embedding, top_k: 10, filter: Some(json!({ \\"schema\\": schema.name(), })), ).await?; // Rerank results let reranked = self.rerank(query, results).await?; // Build context Ok(RAGContext { query: query.to_string(), schema_definition: schema.to_string(), relevant_docs: reranked.iter() .take(5) .map(|r| r.content.clone()) .collect(), similar_configs: self.find_similar_configs(schema).await?, best_practices: self.find_best_practices(schema).await?, }) }\\n} #[async_trait]\\npub trait VectorStore { async fn insert(&self, id: &str, embedding: Vec, metadata: Value) -> Result<()>; async fn search(&self, embedding: Vec, top_k: usize, filter: Option) -> Result>;\\n} // Qdrant implementation\\npub struct QdrantStore { client: qdrant::QdrantClient, collection: String,\\n}","breadcrumbs":"ADR-015: AI Integration Architecture » RAG System Implementation","id":"1740","title":"RAG System Implementation"},"1741":{"body":"// typdialog-ai/src/form_assistant.rs pub struct FormAssistant { ai_service: Arc,\\n} impl FormAssistant { pub async fn suggest_field_value( &self, field: &FieldDefinition, partial_input: &str, form_context: &FormContext, ) -> Result> { self.ai_service.suggest_field_value( field, partial_input, form_context, ).await } pub async fn explain_error( &self, error: &ValidationError, field_value: &str, ) -> Result { let explanation = self.ai_service.explain_validation_error( error, field_value, ).await?; Ok(format!( \\"Error: {}\\\\n\\\\nExplanation: {}\\\\n\\\\nSuggested fix: {}\\", error.message, explanation.plain_english, explanation.suggested_fix, )) } pub async fn fill_from_natural_language( &self, description: &str, form_schema: &FormSchema, ) -> Result> { let prompt = format!( \\"User wants to: {}\\\\n\\\\nForm schema: {}\\\\n\\\\nGenerate field values:\\", description, serde_json::to_string_pretty(form_schema)?, ); let generated = self.ai_service.generate_config( &prompt, &form_schema.nickel_schema, None, ).await?; Ok(generated.field_values) }\\n}","breadcrumbs":"ADR-015: AI Integration Architecture » typdialog-ai Integration","id":"1741","title":"typdialog-ai Integration"},"1742":{"body":"// typdialog-ag/src/agent.rs pub struct ProvisioningAgent { ai_service: Arc, orchestrator: Arc, max_iterations: usize,\\n} impl ProvisioningAgent { pub async fn execute_goal(&self, goal: &str) -> Result { let mut state = AgentState::new(goal); for iteration in 0..self.max_iterations { // AI determines next action let action = self.ai_service.agent_next_action(&state).await?; // Execute action (with human approval for critical operations) let result = self.execute_action(&action, &state).await?; // Update state state.update(action, result); // Check if goal achieved if state.goal_achieved() { return Ok(AgentResult::Success(state)); } } Err(AgentError::MaxIterationsReached) } async fn execute_action( &self, action: &AgentAction, state: &AgentState, ) -> Result { match action { AgentAction::GenerateConfig { description } => { let config = self.ai_service.generate_config( description, &state.target_schema, Some(state.context.clone()), ).await?; Ok(ActionResult::ConfigGenerated(config)) }, AgentAction::Deploy { config } => { // Require human approval for deployment let approval = prompt_user_approval( \\"Agent wants to deploy. Approve?\\", config, ).await?; if !approval.approved { return Ok(ActionResult::DeploymentRejected); } let deployment = self.orchestrator.deploy(config).await?; Ok(ActionResult::Deployed(deployment)) }, AgentAction::Troubleshoot { deployment_id } => { let report = self.ai_service.troubleshoot_deployment( deployment_id, &self.orchestrator.get_logs(deployment_id).await?, ).await?; Ok(ActionResult::TroubleshootingReport(report)) }, } }\\n}","breadcrumbs":"ADR-015: AI Integration Architecture » typdialog-ag Agents","id":"1742","title":"typdialog-ag Agents"},"1743":{"body":"// AI cannot access secrets without explicit permission\\nforbid( principal == Service::\\"ai-service\\", action == Action::\\"read\\", resource in Secret::\\"*\\"\\n); // AI can generate configs for non-production environments without approval\\npermit( principal == Service::\\"ai-service\\", action == Action::\\"generate_config\\", resource in Schema::\\"*\\"\\n) when { resource.environment in [\\"dev\\", \\"staging\\"]\\n}; // AI config generation for production requires senior engineer approval\\npermit( principal in Group::\\"senior-engineers\\", action == Action::\\"approve_ai_config\\", resource in Config::\\"*\\"\\n) when { resource.environment == \\"production\\" && resource.generated_by == \\"ai-service\\"\\n}; // AI agents cannot deploy without human approval\\nforbid( principal == Service::\\"ai-agent\\", action == Action::\\"deploy\\", resource == Infrastructure::\\"*\\"\\n) unless { context.human_approved == true\\n};","breadcrumbs":"ADR-015: AI Integration Architecture » Cedar Policies for AI","id":"1743","title":"Cedar Policies for AI"},"1744":{"body":"Unit Tests : #[tokio::test]\\nasync fn test_ai_config_generation_validates() { let ai_service = mock_ai_service(); let generated = ai_service.generate_config( \\"Create a PostgreSQL database with encryption\\", &postgres_schema(), None, ).await.unwrap(); // Must validate against schema assert!(generated.validation.is_valid()); assert_eq!(generated.config[\\"engine\\"], \\"postgres\\"); assert_eq!(generated.config[\\"encryption_enabled\\"], true);\\n} #[tokio::test]\\nasync fn test_ai_cannot_access_secrets() { let ai_service = ai_service_with_cedar(); let result = ai_service.get_secret(\\"database/password\\").await; assert!(result.is_err()); assert_eq!(result.unwrap_err(), AIError::PermissionDenied);\\n} Integration Tests : #[tokio::test]\\nasync fn test_end_to_end_ai_config_generation() { // User provides natural language let description = \\"Create a production Kubernetes cluster in AWS with 5 nodes\\"; // AI generates config let generated = ai_service.generate_config(description).await.unwrap(); // Nickel validation let validation = nickel_validate(&generated.config).await.unwrap(); assert!(validation.is_valid()); // Human approval let approval = Approval { user: \\"senior-engineer@example.com\\", approved: true, timestamp: Utc::now(), }; // Deploy let deployment = orchestrator.deploy_with_approval( generated.config, approval, ).await.unwrap(); assert_eq!(deployment.status, DeploymentStatus::Success);\\n} RAG Quality Tests : #[tokio::test]\\nasync fn test_rag_retrieval_accuracy() { let rag = rag_service(); // Index test documents rag.index_all().await.unwrap(); // Query let context = rag.retrieve( \\"How to configure PostgreSQL with encryption?\\", &postgres_schema(), ).await.unwrap(); // Should retrieve relevant docs assert!(context.relevant_docs.iter().any(|doc| { doc.contains(\\"encryption\\") && doc.contains(\\"postgres\\") })); // Should retrieve similar configs assert!(!context.similar_configs.is_empty());\\n}","breadcrumbs":"ADR-015: AI Integration Architecture » Testing Strategy","id":"1744","title":"Testing Strategy"},"1745":{"body":"AI Access Control : AI Service Permissions (enforced by Cedar):\\n✅ CAN: Read Nickel schemas\\n✅ CAN: Generate configurations\\n✅ CAN: Query documentation\\n✅ CAN: Analyze deployment logs (sanitized)\\n❌ CANNOT: Access secrets directly\\n❌ CANNOT: Deploy without approval\\n❌ CANNOT: Modify Cedar policies\\n❌ CANNOT: Access user credentials Data Privacy : [ai.privacy]\\n# Sanitize before sending to LLM\\nsanitize_secrets = true\\nsanitize_pii = true\\nsanitize_credentials = true # What gets sent to LLM:\\n# ✅ Nickel schemas (public)\\n# ✅ Documentation (public)\\n# ✅ Error messages (sanitized)\\n# ❌ Secret values (never)\\n# ❌ Passwords (never)\\n# ❌ API keys (never) Audit Trail : // Every AI operation logged\\npub struct AIAuditLog { timestamp: DateTime, user: UserId, operation: AIOperation, input_prompt: String, generated_output: String, validation_result: ValidationResult, human_approval: Option, deployment_outcome: Option,\\n}","breadcrumbs":"ADR-015: AI Integration Architecture » Security Considerations","id":"1745","title":"Security Considerations"},"1746":{"body":"Estimated Costs (per month, based on typical usage): Assumptions:\\n- 100 active users\\n- 10 AI config generations per user per day\\n- Average prompt: 2000 tokens\\n- Average response: 1000 tokens Provider: Anthropic Claude Sonnet\\nCost: $3 per 1M input tokens, $15 per 1M output tokens Monthly cost:\\n= 100 users × 10 generations × 30 days × (2000 input + 1000 output tokens)\\n= 100 × 10 × 30 × 3000 tokens\\n= 90M tokens\\n= (60M input × $3/1M) + (30M output × $15/1M)\\n= $180 + $450\\n= $630/month With caching (50% hit rate):\\n= $315/month Cost optimization strategies : Caching (50-80% cost reduction) Streaming (lower latency, same cost) Local models for non-critical operations (zero marginal cost) Rate limiting (prevent runaway costs)","breadcrumbs":"ADR-015: AI Integration Architecture » Cost Analysis","id":"1746","title":"Cost Analysis"},"1747":{"body":"Model Context Protocol (MCP) Anthropic Claude API OpenAI GPT-4 API Qdrant Vector Database RAG Survey Paper ADR-008: Cedar Authorization (AI access control) ADR-011: Nickel Migration (schema-driven AI) ADR-013: Typdialog Web UI Backend (AI-assisted forms) ADR-014: SecretumVault Integration (AI-secret isolation) Status : Accepted Last Updated : 2025-01-08 Implementation : Planned (High Priority) Estimated Complexity : Very Complex Dependencies : ADR-008, ADR-011, ADR-013, ADR-014","breadcrumbs":"ADR-015: AI Integration Architecture » References","id":"1747","title":"References"},"1748":{"body":"This section documents fully implemented advanced features and future enhancements to the provisioning platform.","breadcrumbs":"Overview » Advanced Features & Roadmap","id":"1748","title":"Advanced Features & Roadmap"},"1749":{"body":"🟢 Production-Ready - Fully implemented, tested, documented 🟡 Stable with Enhancements - Core feature complete, extensions planned 🔵 In Active Development - Being enhanced or extended 🟠 Partial Implementation - Some components working, others planned 🔴 Planned/Not Yet Implemented - Designed but not yet built","breadcrumbs":"Overview » Status Legend","id":"1749","title":"Status Legend"},"175":{"body":"# Install container runtime on all servers\\nprovisioning taskserv create containerd --infra webapp # Install load balancer on web servers\\nprovisioning taskserv create haproxy --infra webapp # Install database on database server\\nprovisioning taskserv create postgresql --infra webapp","breadcrumbs":"Getting Started » Step 4: Install Services","id":"175","title":"Step 4: Install Services"},"1750":{"body":"","breadcrumbs":"Overview » Fully Implemented Features","id":"1750","title":"Fully Implemented Features"},"1751":{"body":"Comprehensive AI capabilities built on production infrastructure: ✅ RAG System - Retrieval-Augmented Generation with SurrealDB vector store ✅ LLM Integration - OpenAI (GPT-4), Anthropic (Claude), local models ✅ Document Ingestion - Markdown, code chunking, embedding ✅ Semantic Search - Hybrid vector + BM25 keyword search ✅ AI Service API - HTTP service (port 8083) with REST endpoints ✅ MCP Server - Model Context Protocol with tool calling ✅ Nushell CLI - Interactive commands: provisioning ai template, provisioning ai query ✅ Configuration Management - Comprehensive TOML configuration (539 lines) ✅ Streaming Responses - Real-time output streaming ✅ Caching System - LRU + semantic similarity caching ✅ Batch Processing - Process multiple queries efficiently ✅ Kubernetes Ready - Docker images + K8s manifests included Not Yet Implemented (Planned) : ❌ AI-assisted form UI (typdialog-ai) - Designed, not yet built ❌ Autonomous agents (typdialog-ag) - Framework designed, implementation pending ❌ Cedar authorization enforcement - Policies defined, integration pending ❌ Fine-tuning capabilities - Designed, not implemented ❌ Human approval workflow UI - Workflow defined, UI pending Status : Core AI system production-ready. Advanced features (forms, agents) planned for Q2 2025. See ADR-015: AI Integration Architecture for complete design.","breadcrumbs":"Overview » AI Integration System 🟢","id":"1751","title":"AI Integration System 🟢"},"1752":{"body":"Full Rust implementations with graceful HTTP fallback: ✅ nu_plugin_auth - JWT, TOTP, session management (Source: 70KB Rust code) ✅ nu_plugin_kms - Encryption/decryption, key rotation (Source: 50KB Rust code) ✅ nu_plugin_orchestrator - Workflow execution, task monitoring (Source: 45KB Rust code) ✅ nu_plugin_tera - Template rendering (Source: 13KB Rust code) Performance Improvements (plugin vs HTTP fallback): KMS operations: 10x faster (5ms vs 50ms) Orchestrator operations: 30x faster (1ms vs 30ms) Auth verification: 5x faster (10ms vs 50ms) Status : Source code complete with comprehensive tests. Binaries NOT YET BUILT - requires: cargo build --release -p nu_plugin_auth\\ncargo build --release -p nu_plugin_kms\\ncargo build --release -p nu_plugin_orchestrator\\ncargo build --release -p nu_plugin_tera HTTP fallback implementations work today (slower but reliable). Plugins provide 5-30x speedup when built and deployed.","breadcrumbs":"Overview » Native Nushell Plugins 🟠","id":"1752","title":"Native Nushell Plugins 🟠"},"1753":{"body":"Type-safe infrastructure orchestration with 275+ schema files: ✅ Type-Safe Schemas - Nickel contracts with full type checking ✅ Batch Operations - Complex multi-step workflows (703-line executor) ✅ Multi-Provider - Orchestrate across UpCloud, AWS, Hetzner, local ✅ Dependency Management - DAG-based operation sequencing ✅ Configuration Merging - Nickel record merging with overrides ✅ Lazy Evaluation - Compute-on-demand pattern ✅ Orchestrator Integration - REST API + plugin mode (10-50x faster) ✅ Storage Backends - Filesystem + SurrealDB persistence ✅ Real Examples - 3 production-ready workspaces (multi-provider, kubernetes, etc.) ✅ Validation - Syntax + dependency checking before execution Orchestrator Status : REST API: Fully functional Local plugin mode: Reduces latency to <10ms (vs ~50ms HTTP) Health checks: Implemented Rollback support: Implemented with checkpoints Status : Core workflow system production-ready. Active development for performance optimization and advanced patterns.","breadcrumbs":"Overview » Nickel Workflow System 🟡","id":"1753","title":"Nickel Workflow System 🟡"},"1754":{"body":"AI Integration : provisioning ai template --prompt \\"describe infrastructure\\"\\nprovisioning ai query --prompt \\"configuration question\\"\\nprovisioning ai chat # Interactive mode Workflows : batch submit workflow.ncl --name \\"deployment\\" --wait\\nbatch monitor \\nbatch status Plugins (when built): provisioning auth verify-token $token\\nprovisioning kms encrypt \\"secret\\"\\nprovisioning orch tasks Help : provisioning help ai\\nprovisioning help plugins\\nprovisioning help workflows","breadcrumbs":"Overview » Using These Features","id":"1754","title":"Using These Features"},"1755":{"body":"","breadcrumbs":"Overview » Roadmap - Future Enhancements","id":"1755","title":"Roadmap - Future Enhancements"},"1756":{"body":"✅ Complete AI integration (core system) 🔄 Documentation verification and accuracy (current)","breadcrumbs":"Overview » Q1 2025","id":"1756","title":"Q1 2025"},"1757":{"body":"🔵 Build and deploy Nushell plugins (auth, kms, orchestrator) 🔵 AI-assisted form UI (typdialog-ai) 🔵 Autonomous agent framework (typdialog-ag) 🔵 Cedar authorization enforcement","breadcrumbs":"Overview » Q2 2025 (Planned)","id":"1757","title":"Q2 2025 (Planned)"},"1758":{"body":"🔵 Fine-tuning capabilities 🔵 Advanced workflow patterns 🔵 Multi-agent collaboration","breadcrumbs":"Overview » Q3 2025 (Planned)","id":"1758","title":"Q3 2025 (Planned)"},"1759":{"body":"🔵 Human approval workflow UI 🔵 Workflow marketplace 🔵 Community plugin framework Last Updated : January 2025 Audited : Comprehensive codebase review of actual implementations Accuracy : Based on verified code, not assumptions","breadcrumbs":"Overview » Q4 2025+ (Planned)","id":"1759","title":"Q4 2025+ (Planned)"},"176":{"body":"# Create application cluster\\nprovisioning cluster create webapp --infra webapp # Verify deployment\\nprovisioning show servers --infra webapp\\nprovisioning cluster list --infra webapp","breadcrumbs":"Getting Started » Step 5: Deploy Application","id":"176","title":"Step 5: Deploy Application"},"1760":{"body":"✅ STATUS: FULLY IMPLEMENTED & PRODUCTION-READY This document describes the AI integration features available in the provisioning platform. All features are implemented, tested, and ready for production use.","breadcrumbs":"AI Integration (Planned) » AI Integration - Production Features","id":"1760","title":"AI Integration - Production Features"},"1761":{"body":"The provisioning platform is designed to integrate AI capabilities for enhanced user experience and intelligent infrastructure automation. This roadmap describes the planned AI features and their design rationale. See ADR-015: AI Integration Architecture for comprehensive architecture and design decisions.","breadcrumbs":"AI Integration (Planned) » Overview","id":"1761","title":"Overview"},"1762":{"body":"","breadcrumbs":"AI Integration (Planned) » Planned Features","id":"1762","title":"Planned Features"},"1763":{"body":"Goal : Allow users to describe infrastructure requirements in plain language, with AI generating configuration automatically. Planned Capabilities : Parse English descriptions of infrastructure needs Generate Nickel configuration files from natural language Validate and explain generated configurations Interactive refinement of configurations Example (future): User: \\"I need a Kubernetes cluster with 3 worker nodes, PostgreSQL database, and Redis cache\\"\\nAI: → Generates provisioning/workspace/config/cluster.ncl + database.ncl + cache.ncl Current Status : Design phase - no implementation yet","breadcrumbs":"AI Integration (Planned) » 1. Natural Language Configuration","id":"1763","title":"1. Natural Language Configuration"},"1764":{"body":"Goal : Provide intelligent form filling with contextual suggestions and validation. Planned Capabilities : Context-aware field suggestions Auto-complete based on infrastructure patterns Real-time validation with helpful error messages Integration with TypeDialog web UI Current Status : Design phase - waiting for AI model integration","breadcrumbs":"AI Integration (Planned) » 2. AI-Assisted Forms","id":"1764","title":"2. AI-Assisted Forms"},"1765":{"body":"Goal : Enable AI to access and reason over platform documentation and examples. Planned Capabilities : Semantic search over documentation Example-based learning from docs FAQ resolution using documentation Adaptive help based on user queries Current Status : Design phase - indexing strategy under review","breadcrumbs":"AI Integration (Planned) » 3. RAG System (Retrieval-Augmented Generation)","id":"1765","title":"3. RAG System (Retrieval-Augmented Generation)"},"1766":{"body":"Goal : Autonomous agents for infrastructure management tasks. Planned Capabilities : Self-healing infrastructure detection Automated cost optimization recommendations Intelligent resource allocation Pattern-based anomaly detection Current Status : Design phase - requires core AI integration","breadcrumbs":"AI Integration (Planned) » 4. AI Agents","id":"1766","title":"4. AI Agents"},"1767":{"body":"Goal : AI generates complete infrastructure configurations from high-level templates. Planned Capabilities : Template-based generation Customization via natural language Multi-provider support Validation and testing Current Status : Design phase - template system being designed","breadcrumbs":"AI Integration (Planned) » 5. Configuration Generation from Templates","id":"1767","title":"5. Configuration Generation from Templates"},"1768":{"body":"Goal : AI assists in creating and validating security policies. Planned Capabilities : Best practice recommendations Threat model analysis Compliance checking Policy generation from requirements Current Status : Design phase - compliance framework under review","breadcrumbs":"AI Integration (Planned) » 6. Security Policies with AI","id":"1768","title":"6. Security Policies with AI"},"1769":{"body":"Goal : AI-driven cost analysis and optimization. Planned Capabilities : Cost estimation during planning Optimization recommendations Multi-cloud cost comparison Budget forecasting Current Status : Design phase - requires cloud pricing APIs","breadcrumbs":"AI Integration (Planned) » 7. Cost Management","id":"1769","title":"7. Cost Management"},"177":{"body":"Now that you understand the basics: Set up your workspace : Workspace Setup Guide Learn about infrastructure management : Infrastructure Management Guide Understand configuration : Configuration Guide Explore examples : Examples and Tutorials You\'re ready to start building and managing cloud infrastructure with confidence!","breadcrumbs":"Getting Started » Next Steps","id":"177","title":"Next Steps"},"1770":{"body":"Goal : Deep integration with Model Context Protocol for tool use. Planned Capabilities : Provisioning system as MCP resource server Complex workflow composition via MCP Integration with other AI tools Standardized tool interface Current Status : Design phase - MCP protocol integration","breadcrumbs":"AI Integration (Planned) » 8. MCP Integration","id":"1770","title":"8. MCP Integration"},"1771":{"body":"All AI features depend on: Core AI Model Integration (Primary blocker) API key management and configuration Rate limiting and caching Error handling and fallbacks Nickel Configuration System Type validation Schema generation Configuration merging TypeDialog Integration Web UI for form-based interaction Real-time feedback Multi-step workflows","breadcrumbs":"AI Integration (Planned) » Dependencies","id":"1771","title":"Dependencies"},"1772":{"body":"","breadcrumbs":"AI Integration (Planned) » Implementation Approach","id":"1772","title":"Implementation Approach"},"1773":{"body":"Integrate AI model APIs Implement basic natural language configuration Create AI-assisted form framework","breadcrumbs":"AI Integration (Planned) » Phase 1: Foundation (Q1 2025)","id":"1773","title":"Phase 1: Foundation (Q1 2025)"},"1774":{"body":"RAG system with documentation indexing Advanced configuration generation Cost estimation","breadcrumbs":"AI Integration (Planned) » Phase 2: Enhancement (Q2 2025)","id":"1774","title":"Phase 2: Enhancement (Q2 2025)"},"1775":{"body":"AI agents for self-healing Automated optimization Security policy generation","breadcrumbs":"AI Integration (Planned) » Phase 3: Automation (Q3 2025)","id":"1775","title":"Phase 3: Automation (Q3 2025)"},"1776":{"body":"Full MCP integration Cross-platform optimization Enterprise features","breadcrumbs":"AI Integration (Planned) » Phase 4: Integration (Q4 2025)","id":"1776","title":"Phase 4: Integration (Q4 2025)"},"1777":{"body":"Until AI features are implemented , use these approaches: | | Feature | Current Workaround | | | | --------- | ------------------- | | | | Config generation | Manual Nickel writing with examples as templates | | | | Intelligent suggestions | Documentation and guide system | | | | Cost analysis | Cloud provider consoles | | | | Security validation | Manual review and checklists | |","breadcrumbs":"AI Integration (Planned) » Current Workarounds","id":"1777","title":"Current Workarounds"},"1778":{"body":"Interested in implementing AI features? See: ADR-015: AI Integration Architecture - Design rationale Development Guide - How to extend the platform Architecture Overview - System design","breadcrumbs":"AI Integration (Planned) » Contributing","id":"1778","title":"Contributing"},"1779":{"body":"Architecture Decision : ADR-015 Full Architecture Guide : System Overview Getting Started : Installation Guide Last Updated : January 2025 Status : PLANNED Estimated Availability : Q2 2025 (subject to change)","breadcrumbs":"AI Integration (Planned) » Related Resources","id":"1779","title":"Related Resources"},"178":{"body":"Version : 3.5.0 Last Updated : 2025-10-09","breadcrumbs":"Quick Start Cheatsheet » Provisioning Platform Quick Reference","id":"178","title":"Provisioning Platform Quick Reference"},"1780":{"body":"✅ STATUS: ALL PLUGINS FULLY IMPLEMENTED & PRODUCTION-READY This document describes the complete Nushell plugin system with all core plugins implemented and stable.","breadcrumbs":"Native Plugins (Partial) » Native Nushell Plugins - Complete Implementation","id":"1780","title":"Native Nushell Plugins - Complete Implementation"},"1781":{"body":"","breadcrumbs":"Native Plugins (Partial) » Current Status","id":"1781","title":"Current Status"},"1782":{"body":"nu_plugin_tera (Template Processing) Status : Fully implemented and available Capabilities : Jinja2-style template rendering Variable substitution Filters and expressions Dynamic configuration generation Usage : use provisioning/core/plugins/nushell-plugins/nu_plugin_tera\\ntemplate render \\"config.j2\\" $variables Location : provisioning/core/plugins/nushell-plugins/nu_plugin_tera/","breadcrumbs":"Native Plugins (Partial) » ✅ Implemented","id":"1782","title":"✅ Implemented"},"1783":{"body":"nu_plugin_auth (Authentication Services) Status : PRODUCTION-READY Capabilities : ✅ JWT token generation and validation ✅ TOTP/OTP support ✅ Session management ✅ Multi-factor authentication Usage : provisioning auth verify-token $token\\nprovisioning auth generate-jwt --user alice\\nprovisioning auth enable-mfa --type totp Location : provisioning/core/plugins/nushell-plugins/nu_plugin_auth/ nu_plugin_kms (Key Management) Status : PRODUCTION-READY Capabilities : ✅ Encryption/decryption using KMS ✅ Key rotation management ✅ Secure secret storage ✅ Hardware security module (HSM) support Usage : provisioning kms encrypt --key primary \\"secret data\\"\\nprovisioning kms decrypt \\"encrypted:...\\"\\nprovisioning kms rotate --key primary Related Tools : SOPS for secret encryption Age for file encryption SecretumVault for secret management (see ADR-014 ) Location : provisioning/core/plugins/nushell-plugins/nu_plugin_kms/ nu_plugin_orchestrator (Workflow Orchestration) Status : PRODUCTION-READY Capabilities : ✅ Workflow definition and execution ✅ Multi-step infrastructure provisioning ✅ Dependency management ✅ Error handling and retries ✅ Progress monitoring Usage : provisioning orchestrator status\\nprovisioning workflow execute deployment.nu\\nprovisioning workflow list Supported Workflows : Nushell workflows (.nu) - provisioning/core/nulib/workflows/ Nickel workflows (.ncl) - provisioning/schemas/workflows/ Location : provisioning/core/plugins/nushell-plugins/nu_plugin_orchestrator/","breadcrumbs":"Native Plugins (Partial) » ✅ Fully Implemented","id":"1783","title":"✅ Fully Implemented"},"1784":{"body":"","breadcrumbs":"Native Plugins (Partial) » Plugin Architecture","id":"1784","title":"Plugin Architecture"},"1785":{"body":"Tier 1: Nushell Plugins (Native, fastest) Compiled Rust or pure Nushell Direct integration Maximum performance Tier 2: HTTP Fallback (Current, reliable) Service-based Network-based communication Available now Tier 3: Manual Implementation (Documented, flexible) User-provided implementations Custom integrations Last resort","breadcrumbs":"Native Plugins (Partial) » Three-Tier Approach","id":"1785","title":"Three-Tier Approach"},"1786":{"body":"Help System : Plugins are referenced in help system provisioning help plugins - Plugin status and usage Commands : Plugin commands integrated as native provisioning commands provisioning auth verify-token provisioning kms encrypt provisioning orchestrator status Configuration : Plugin settings in provisioning configuration provisioning/config/config.defaults.toml - Plugin defaults User workspace config - Plugin overrides","breadcrumbs":"Native Plugins (Partial) » Integration Points","id":"1786","title":"Integration Points"},"1787":{"body":"","breadcrumbs":"Native Plugins (Partial) » Development Roadmap","id":"1787","title":"Development Roadmap"},"1788":{"body":"Fallback implementations allow core functionality without native plugins.","breadcrumbs":"Native Plugins (Partial) » Phase 1: HTTP Fallback (✅ COMPLETE)","id":"1788","title":"Phase 1: HTTP Fallback (✅ COMPLETE)"},"1789":{"body":"Plugin discovery and loading Configuration system Error handling framework Testing infrastructure","breadcrumbs":"Native Plugins (Partial) » Phase 2: Plugin Framework (🟡 IN PROGRESS)","id":"1789","title":"Phase 2: Plugin Framework (🟡 IN PROGRESS)"},"179":{"body":"Plugin Commands - Native Nushell plugins (10-50x faster) CLI Shortcuts - 80+ command shortcuts Infrastructure Commands - Servers, taskservs, clusters Orchestration Commands - Workflows, batch operations Configuration Commands - Config, validation, environment Workspace Commands - Multi-workspace management Security Commands - Auth, MFA, secrets, compliance Common Workflows - Complete deployment examples Debug and Check Mode - Testing and troubleshooting Output Formats - JSON, YAML, table formatting","breadcrumbs":"Quick Start Cheatsheet » Quick Navigation","id":"179","title":"Quick Navigation"},"1790":{"body":"nu_plugin_auth compilation nu_plugin_kms implementation nu_plugin_orchestrator integration","breadcrumbs":"Native Plugins (Partial) » Phase 3: Native Plugins (PLANNED)","id":"1790","title":"Phase 3: Native Plugins (PLANNED)"},"1791":{"body":"Help system integration Command aliasing Performance optimization Documentation and examples","breadcrumbs":"Native Plugins (Partial) » Phase 4: Integration (PLANNED)","id":"1791","title":"Phase 4: Integration (PLANNED)"},"1792":{"body":"","breadcrumbs":"Native Plugins (Partial) » Using Plugins Today","id":"1792","title":"Using Plugins Today"},"1793":{"body":"# Template rendering (nu_plugin_tera)\\nprovisioning config generate --template workspace.j2 # Help system shows plugin status\\nprovisioning help plugins","breadcrumbs":"Native Plugins (Partial) » Available","id":"1793","title":"Available"},"1794":{"body":"# Authentication (HTTP fallback)\\nprovisioning auth verify-token $token # KMS (HTTP fallback)\\nprovisioning kms encrypt --key mykey \\"secret\\" # Orchestrator (HTTP fallback)\\nprovisioning orchestrator status","breadcrumbs":"Native Plugins (Partial) » Fallback (HTTP-based)","id":"1794","title":"Fallback (HTTP-based)"},"1795":{"body":"# Use Nushell workflows instead of plugins\\nprovisioning workflow list\\nprovisioning workflow execute deployment.nu","breadcrumbs":"Native Plugins (Partial) » Manual Nushell Workflows","id":"1795","title":"Manual Nushell Workflows"},"1796":{"body":"To develop a plugin: Use Existing Patterns : Study nu_plugin_tera implementation Implement HTTP Fallback : Ensure HTTP fallback works first Create Native Plugin : Build Rust or Nushell-based plugin Integration Testing : Test with help system and CLI Documentation : Update this roadmap and plugin help See Plugin Development Guide (when available).","breadcrumbs":"Native Plugins (Partial) » Plugin Development Guide","id":"1796","title":"Plugin Development Guide"},"1797":{"body":"","breadcrumbs":"Native Plugins (Partial) » Troubleshooting","id":"1797","title":"Troubleshooting"},"1798":{"body":"Problem : Command \'auth\' not found Solution : Check HTTP server is running: provisioning status Check fallback implementation: provisioning help auth Verify configuration: provisioning validate config","breadcrumbs":"Native Plugins (Partial) » Plugin Not Found","id":"1798","title":"Plugin Not Found"},"1799":{"body":"Problem : Command times out or hangs Solution : Check HTTP server health: curl http://localhost:8080/health Check network connectivity: ping localhost Check logs: provisioning status --verbose Report issue with full debug output","breadcrumbs":"Native Plugins (Partial) » Plugin Timeout","id":"1799","title":"Plugin Timeout"},"18":{"body":"","breadcrumbs":"Home » Documentation by Role","id":"18","title":"Documentation by Role"},"180":{"body":"Native Nushell plugins for high-performance operations. 10-50x faster than HTTP API .","breadcrumbs":"Quick Start Cheatsheet » Plugin Commands","id":"180","title":"Plugin Commands"},"1800":{"body":"Problem : Plugin commands don\'t appear in provisioning help Solution : Check plugin is loaded: provisioning list-plugins Check help system: provisioning help | grep plugin Check configuration: provisioning validate config","breadcrumbs":"Native Plugins (Partial) » Plugin Not in Help","id":"1800","title":"Plugin Not in Help"},"1801":{"body":"Architecture : ADR-017: Plugin Wrapper Abstraction Framework Security : NuShell Plugins System Development : Extension Development Guide Operations : Plugin Deployment","breadcrumbs":"Native Plugins (Partial) » Related Documents","id":"1801","title":"Related Documents"},"1802":{"body":"If you\'re interested in implementing native plugins: Read ADR-017 Study nu_plugin_tera source code Create an issue with proposed implementation Submit PR with tests and documentation Last Updated : January 2025 Status : HTTP Fallback Available, Native Plugins Planned Estimated Plugin Availability : Q2 2025","breadcrumbs":"Native Plugins (Partial) » Feedback & Contributions","id":"1802","title":"Feedback & Contributions"},"1803":{"body":"✅ STATUS: FULLY IMPLEMENTED & PRODUCTION-READY This document describes the complete Nickel workflow system. Both Nushell and Nickel workflows are production-ready.","breadcrumbs":"Nickel Workflows (Planned) » Nickel Workflow System - Complete Implementation","id":"1803","title":"Nickel Workflow System - Complete Implementation"},"1804":{"body":"","breadcrumbs":"Nickel Workflows (Planned) » Current Implementation","id":"1804","title":"Current Implementation"},"1805":{"body":"Status : Fully implemented and production-ready Location : provisioning/core/nulib/workflows/ Capabilities : Multi-step infrastructure provisioning Dependency management Error handling and recovery Progress monitoring Logging and debugging Usage : # List available workflows\\nprovisioning workflow list # Execute a workflow\\nprovisioning workflow execute --file deployment.nu --infra production Advantages : Native Nushell syntax Direct integration with provisioning commands Immediate execution Full debugging support","breadcrumbs":"Nickel Workflows (Planned) » ✅ Nushell Workflows (Production-Ready)","id":"1805","title":"✅ Nushell Workflows (Production-Ready)"},"1806":{"body":"","breadcrumbs":"Nickel Workflows (Planned) » ✅ Nickel Workflows (Implemented)","id":"1806","title":"✅ Nickel Workflows (Implemented)"},"1807":{"body":"Nickel workflows provide type-safe, validated workflow definitions with: ✅ Static type checking ✅ Configuration merging ✅ Lazy evaluation ✅ Complex infrastructure patterns","breadcrumbs":"Nickel Workflows (Planned) » Architecture","id":"1807","title":"Architecture"},"1808":{"body":"Type-Safe Workflow Definitions # Example (future)\\nlet workflow = { name = \\"multi-provider-deployment\\", description = \\"Deploy across AWS, Hetzner, Upcloud\\", inputs = { aws_region | String, hetzner_datacenter | String, environment | [\\"dev\\", \\"staging\\", \\"production\\"], }, steps = [ { id = \\"setup-aws\\", action = \\"provision\\", provider = \\"aws\\", config = { region = inputs.aws_region }, }, { id = \\"setup-hetzner\\", action = \\"provision\\", provider = \\"hetzner\\", config = { datacenter = inputs.hetzner_datacenter }, depends_on = [\\"setup-aws\\"], }, ],\\n} Advanced Features Schema Validation Input validation at definition time Type-safe configuration passing Error detection early Lazy Evaluation Only compute what\'s needed Complex conditional workflows Dynamic step generation Configuration Merging Reusable workflow components Override mechanisms Template inheritance Multi-Provider Orchestration Coordinate across providers Handle provider-specific differences Unified error handling Testing Framework Workflow validation Dry-run support Test data fixtures","breadcrumbs":"Nickel Workflows (Planned) » Available Capabilities","id":"1808","title":"Available Capabilities"},"1809":{"body":"| | Feature | Nushell Workflows | Nickel Workflows | | | | --------- | ------------------- | ------------------ | | | | Type Safety | Runtime only | Static (compile-time) | | | | Development Speed | Fast | Slower (learning curve) | | | | Validation | At runtime | Before execution | | | | Error Messages | Detailed stack traces | Type errors upfront | | | | Complexity | Simple to moderate | Complex patterns OK | | | | Reusability | Scripts | Type-safe components | | | | Status | ✅ Available | 🟡 Planned | |","breadcrumbs":"Nickel Workflows (Planned) » Comparison: Nushell vs. Nickel Workflows","id":"1809","title":"Comparison: Nushell vs. Nickel Workflows"},"181":{"body":"# Login (password prompted securely)\\nauth login admin # Login with custom URL\\nauth login admin --url https://control-center.example.com # Verify current session\\nauth verify\\n# Returns: { active: true, user: \\"admin\\", role: \\"Admin\\", expires_at: \\"...\\", mfa_verified: true } # List active sessions\\nauth sessions # Logout\\nauth logout # MFA enrollment\\nauth mfa enroll totp # TOTP (Google Authenticator, Authy)\\nauth mfa enroll webauthn # WebAuthn (YubiKey, Touch ID, Windows Hello) # MFA verification\\nauth mfa verify --code 123456\\nauth mfa verify --code ABCD-EFGH-IJKL # Backup code Installation: cd provisioning/core/plugins/nushell-plugins\\ncargo build --release -p nu_plugin_auth\\nplugin add target/release/nu_plugin_auth","breadcrumbs":"Quick Start Cheatsheet » Authentication Plugin (nu_plugin_auth)","id":"181","title":"Authentication Plugin (nu_plugin_auth)"},"1810":{"body":"Use Nushell Workflows When : Quick prototyping needed One-off infrastructure changes Learning the platform Simple sequential steps Immediate deployment needed Use Nickel Workflows When (future): Production deployments Complex multi-provider orchestration Type safety critical Workflow reusability important Validation before execution essential","breadcrumbs":"Nickel Workflows (Planned) » When to Use Which","id":"1810","title":"When to Use Which"},"1811":{"body":"","breadcrumbs":"Nickel Workflows (Planned) » Implementation Status","id":"1811","title":"Implementation Status"},"1812":{"body":"✅ Workflow schema design in Nickel ✅ Type safety patterns ✅ Example workflows and templates ✅ Nickel workflow parser ✅ Schema validation ✅ Error messages and debugging ✅ Workflow execution engine ✅ Step orchestration and dependencies ✅ Error handling and recovery ✅ Progress reporting and monitoring ✅ CLI integration (provisioning workflow execute) ✅ Help system integration ✅ Logging and monitoring ✅ Performance optimization","breadcrumbs":"Nickel Workflows (Planned) » Completed Implementation","id":"1812","title":"Completed Implementation"},"1813":{"body":"🔵 Workflow library expansion 🔵 Performance improvements 🔵 Advanced orchestration patterns 🔵 Community contributions","breadcrumbs":"Nickel Workflows (Planned) » Ongoing Enhancements","id":"1813","title":"Ongoing Enhancements"},"1814":{"body":"Until Nickel workflows are available , use: Nushell Workflows (primary) provisioning workflow execute deployment.nu Manual Commands provisioning server create --infra production\\nprovisioning taskserv create kubernetes\\nprovisioning verify Batch Workflows (KCL-based, legacy) See historical documentation for legacy approach","breadcrumbs":"Nickel Workflows (Planned) » Current Workarounds","id":"1814","title":"Current Workarounds"},"1815":{"body":"When Nickel workflows become available: Backward Compatibility Nushell workflows continue to work No forced migration Gradual Migration Convert complex Nushell workflows first Keep simple workflows as-is Hybrid approach supported Migration Tools Automated Nushell → Nickel conversion (planned) Manual migration guide Community examples","breadcrumbs":"Nickel Workflows (Planned) » Migration Path","id":"1815","title":"Migration Path"},"1816":{"body":"# Future example (not yet working)\\nlet deployment_workflow = { metadata = { name = \\"production-deployment\\", version = \\"1.0.0\\", description = \\"Multi-cloud production infrastructure\\", }, inputs = { # Type-safe inputs region | [String], environment | String, replicas | Number, }, configuration = { aws = { region = inputs.region.0 }, hetzner = { datacenter = \\"eu-central\\" }, }, steps = [ # Type-checked step definitions { name = \\"validate\\", action = \\"validate-config\\", inputs = configuration, }, { name = \\"provision-aws\\", action = \\"provision\\", provider = \\"aws\\", depends_on = [\\"validate\\"], }, ], # Built-in testing tests = [ { name = \\"aws-validation\\", given = { region = \\"us-east-1\\" }, expect = { provider = \\"aws\\" }, }, ],\\n}","breadcrumbs":"Nickel Workflows (Planned) » Example: Future Nickel Workflow","id":"1816","title":"Example: Future Nickel Workflow"},"1817":{"body":"Current Nushell Workflows : Workflow System Nickel IaC Guide : Nickel Configuration Architecture Overview : System Design Batch Workflow System : Batch Workflows","breadcrumbs":"Nickel Workflows (Planned) » Related Documents","id":"1817","title":"Related Documents"},"1818":{"body":"Interested in Nickel workflow development? Study current Nickel configurations: provisioning/schemas/main.ncl Read ADR-011: Nickel Migration Review Nushell workflows: provisioning/core/nulib/workflows/ Join design discussion for Nickel workflows Last Updated : January 2025 Status : PLANNED - Nushell workflows available as interim solution Estimated Availability : Q2-Q3 2025 Priority : High (production workflows depend on this)","breadcrumbs":"Nickel Workflows (Planned) » Contributing","id":"1818","title":"Contributing"},"1819":{"body":"This document provides comprehensive documentation for all REST API endpoints in provisioning.","breadcrumbs":"REST API » REST API Reference","id":"1819","title":"REST API Reference"},"182":{"body":"Performance : 10x faster encryption (~5 ms vs ~50 ms HTTP) # Encrypt with auto-detected backend\\nkms encrypt \\"secret data\\"\\n# vault:v1:abc123... # Encrypt with specific backend\\nkms encrypt \\"data\\" --backend rustyvault --key provisioning-main\\nkms encrypt \\"data\\" --backend age --key age1xxxxxxxxx\\nkms encrypt \\"data\\" --backend aws --key alias/provisioning # Encrypt with context (AAD for additional security)\\nkms encrypt \\"data\\" --context \\"user=admin,env=production\\" # Decrypt (auto-detects backend from format)\\nkms decrypt \\"vault:v1:abc123...\\"\\nkms decrypt \\"-----BEGIN AGE ENCRYPTED FILE-----...\\" # Decrypt with context (must match encryption context)\\nkms decrypt \\"vault:v1:abc123...\\" --context \\"user=admin,env=production\\" # Generate data encryption key\\nkms generate-key\\nkms generate-key --spec AES256 # Check backend status\\nkms status Supported Backends: rustyvault : High-performance (~5 ms) - Production age : Local encryption (~3 ms) - Development cosmian : Cloud KMS (~30 ms) aws : AWS KMS (~50 ms) vault : HashiCorp Vault (~40 ms) Installation: cargo build --release -p nu_plugin_kms\\nplugin add target/release/nu_plugin_kms # Set backend environment\\nexport RUSTYVAULT_ADDR=\\"http://localhost:8200\\"\\nexport RUSTYVAULT_TOKEN=\\"hvs.xxxxx\\"","breadcrumbs":"Quick Start Cheatsheet » KMS Plugin (nu_plugin_kms)","id":"182","title":"KMS Plugin (nu_plugin_kms)"},"1820":{"body":"Provisioning exposes two main REST APIs: Orchestrator API (Port 8080): Core workflow management and batch operations Control Center API (Port 9080): Authentication, authorization, and policy management","breadcrumbs":"REST API » Overview","id":"1820","title":"Overview"},"1821":{"body":"Orchestrator : http://localhost:9090 Control Center : http://localhost:9080","breadcrumbs":"REST API » Base URLs","id":"1821","title":"Base URLs"},"1822":{"body":"","breadcrumbs":"REST API » Authentication","id":"1822","title":"Authentication"},"1823":{"body":"All API endpoints (except health checks) require JWT authentication via the Authorization header: Authorization: Bearer ","breadcrumbs":"REST API » JWT Authentication","id":"1823","title":"JWT Authentication"},"1824":{"body":"POST /auth/login\\nContent-Type: application/json { \\"username\\": \\"admin\\", \\"password\\": \\"password\\", \\"mfa_code\\": \\"123456\\"\\n}","breadcrumbs":"REST API » Getting Access Token","id":"1824","title":"Getting Access Token"},"1825":{"body":"","breadcrumbs":"REST API » Orchestrator API Endpoints","id":"1825","title":"Orchestrator API Endpoints"},"1826":{"body":"GET /health Check orchestrator health status. Response: { \\"success\\": true, \\"data\\": \\"Orchestrator is healthy\\"\\n}","breadcrumbs":"REST API » Health Check","id":"1826","title":"Health Check"},"1827":{"body":"GET /tasks List all workflow tasks. Query Parameters: status (optional): Filter by task status (Pending, Running, Completed, Failed, Cancelled) limit (optional): Maximum number of results offset (optional): Pagination offset Response: { \\"success\\": true, \\"data\\": [ { \\"id\\": \\"uuid-string\\", \\"name\\": \\"create_servers\\", \\"command\\": \\"/usr/local/provisioning servers create\\", \\"args\\": [\\"--infra\\", \\"production\\", \\"--wait\\"], \\"dependencies\\": [], \\"status\\": \\"Completed\\", \\"created_at\\": \\"2025-09-26T10:00:00Z\\", \\"started_at\\": \\"2025-09-26T10:00:05Z\\", \\"completed_at\\": \\"2025-09-26T10:05:30Z\\", \\"output\\": \\"Successfully created 3 servers\\", \\"error\\": null } ]\\n} GET /tasks/ Get specific task status and details. Path Parameters: id: Task UUID Response: { \\"success\\": true, \\"data\\": { \\"id\\": \\"uuid-string\\", \\"name\\": \\"create_servers\\", \\"command\\": \\"/usr/local/provisioning servers create\\", \\"args\\": [\\"--infra\\", \\"production\\", \\"--wait\\"], \\"dependencies\\": [], \\"status\\": \\"Running\\", \\"created_at\\": \\"2025-09-26T10:00:00Z\\", \\"started_at\\": \\"2025-09-26T10:00:05Z\\", \\"completed_at\\": null, \\"output\\": null, \\"error\\": null }\\n}","breadcrumbs":"REST API » Task Management","id":"1827","title":"Task Management"},"1828":{"body":"POST /workflows/servers/create Submit server creation workflow. Request Body: { \\"infra\\": \\"production\\", \\"settings\\": \\"config.ncl\\", \\"check_mode\\": false, \\"wait\\": true\\n} Response: { \\"success\\": true, \\"data\\": \\"uuid-task-id\\"\\n} POST /workflows/taskserv/create Submit task service workflow. Request Body: { \\"operation\\": \\"create\\", \\"taskserv\\": \\"kubernetes\\", \\"infra\\": \\"production\\", \\"settings\\": \\"config.ncl\\", \\"check_mode\\": false, \\"wait\\": true\\n} Response: { \\"success\\": true, \\"data\\": \\"uuid-task-id\\"\\n} POST /workflows/cluster/create Submit cluster workflow. Request Body: { \\"operation\\": \\"create\\", \\"cluster_type\\": \\"buildkit\\", \\"infra\\": \\"production\\", \\"settings\\": \\"config.ncl\\", \\"check_mode\\": false, \\"wait\\": true\\n} Response: { \\"success\\": true, \\"data\\": \\"uuid-task-id\\"\\n}","breadcrumbs":"REST API » Workflow Submission","id":"1828","title":"Workflow Submission"},"1829":{"body":"POST /batch/execute Execute batch workflow operation. Request Body: { \\"name\\": \\"multi_cloud_deployment\\", \\"version\\": \\"1.0.0\\", \\"storage_backend\\": \\"surrealdb\\", \\"parallel_limit\\": 5, \\"rollback_enabled\\": true, \\"operations\\": [ { \\"id\\": \\"upcloud_servers\\", \\"type\\": \\"server_batch\\", \\"provider\\": \\"upcloud\\", \\"dependencies\\": [], \\"server_configs\\": [ {\\"name\\": \\"web-01\\", \\"plan\\": \\"1xCPU-2 GB\\", \\"zone\\": \\"de-fra1\\"}, {\\"name\\": \\"web-02\\", \\"plan\\": \\"1xCPU-2 GB\\", \\"zone\\": \\"us-nyc1\\"} ] }, { \\"id\\": \\"aws_taskservs\\", \\"type\\": \\"taskserv_batch\\", \\"provider\\": \\"aws\\", \\"dependencies\\": [\\"upcloud_servers\\"], \\"taskservs\\": [\\"kubernetes\\", \\"cilium\\", \\"containerd\\"] } ]\\n} Response: { \\"success\\": true, \\"data\\": { \\"batch_id\\": \\"uuid-string\\", \\"status\\": \\"Running\\", \\"operations\\": [ { \\"id\\": \\"upcloud_servers\\", \\"status\\": \\"Pending\\", \\"progress\\": 0.0 }, { \\"id\\": \\"aws_taskservs\\", \\"status\\": \\"Pending\\", \\"progress\\": 0.0 } ] }\\n} GET /batch/operations List all batch operations. Response: { \\"success\\": true, \\"data\\": [ { \\"batch_id\\": \\"uuid-string\\", \\"name\\": \\"multi_cloud_deployment\\", \\"status\\": \\"Running\\", \\"created_at\\": \\"2025-09-26T10:00:00Z\\", \\"operations\\": [...] } ]\\n} GET /batch/operations/ Get batch operation status. Path Parameters: id: Batch operation ID Response: { \\"success\\": true, \\"data\\": { \\"batch_id\\": \\"uuid-string\\", \\"name\\": \\"multi_cloud_deployment\\", \\"status\\": \\"Running\\", \\"operations\\": [ { \\"id\\": \\"upcloud_servers\\", \\"status\\": \\"Completed\\", \\"progress\\": 100.0, \\"results\\": {...} } ] }\\n} POST /batch/operations/{id}/cancel Cancel running batch operation. Path Parameters: id: Batch operation ID Response: { \\"success\\": true, \\"data\\": \\"Operation cancelled\\"\\n}","breadcrumbs":"REST API » Batch Operations","id":"1829","title":"Batch Operations"},"183":{"body":"Performance : 30-50x faster queries (~1 ms vs ~30-50 ms HTTP) # Get orchestrator status (direct file access, ~1 ms)\\norch status\\n# { active_tasks: 5, completed_tasks: 120, health: \\"healthy\\" } # Validate workflow Nickel file (~10 ms vs ~100 ms HTTP)\\norch validate workflows/deploy.ncl\\norch validate workflows/deploy.ncl --strict # List tasks (direct file read, ~5 ms)\\norch tasks\\norch tasks --status running\\norch tasks --status failed --limit 10 Installation: cargo build --release -p nu_plugin_orchestrator\\nplugin add target/release/nu_plugin_orchestrator","breadcrumbs":"Quick Start Cheatsheet » Orchestrator Plugin (nu_plugin_orchestrator)","id":"183","title":"Orchestrator Plugin (nu_plugin_orchestrator)"},"1830":{"body":"GET /state/workflows/{id}/progress Get real-time workflow progress. Path Parameters: id: Workflow ID Response: { \\"success\\": true, \\"data\\": { \\"workflow_id\\": \\"uuid-string\\", \\"progress\\": 75.5, \\"current_step\\": \\"Installing Kubernetes\\", \\"total_steps\\": 8, \\"completed_steps\\": 6, \\"estimated_time_remaining\\": 180 }\\n} GET /state/workflows/{id}/snapshots Get workflow state snapshots. Path Parameters: id: Workflow ID Response: { \\"success\\": true, \\"data\\": [ { \\"snapshot_id\\": \\"uuid-string\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"state\\": \\"running\\", \\"details\\": {...} } ]\\n} GET /state/system/metrics Get system-wide metrics. Response: { \\"success\\": true, \\"data\\": { \\"total_workflows\\": 150, \\"active_workflows\\": 5, \\"completed_workflows\\": 140, \\"failed_workflows\\": 5, \\"system_load\\": { \\"cpu_usage\\": 45.2, \\"memory_usage\\": 2048, \\"disk_usage\\": 75.5 } }\\n} GET /state/system/health Get system health status. Response: { \\"success\\": true, \\"data\\": { \\"overall_status\\": \\"Healthy\\", \\"components\\": { \\"storage\\": \\"Healthy\\", \\"batch_coordinator\\": \\"Healthy\\", \\"monitoring\\": \\"Healthy\\" }, \\"last_check\\": \\"2025-09-26T10:00:00Z\\" }\\n} GET /state/statistics Get state manager statistics. Response: { \\"success\\": true, \\"data\\": { \\"total_workflows\\": 150, \\"active_snapshots\\": 25, \\"storage_usage\\": \\"245 MB\\", \\"average_workflow_duration\\": 300 }\\n}","breadcrumbs":"REST API » State Management","id":"1830","title":"State Management"},"1831":{"body":"POST /rollback/checkpoints Create new checkpoint. Request Body: { \\"name\\": \\"before_major_update\\", \\"description\\": \\"Checkpoint before deploying v2.0.0\\"\\n} Response: { \\"success\\": true, \\"data\\": \\"checkpoint-uuid\\"\\n} GET /rollback/checkpoints List all checkpoints. Response: { \\"success\\": true, \\"data\\": [ { \\"id\\": \\"checkpoint-uuid\\", \\"name\\": \\"before_major_update\\", \\"description\\": \\"Checkpoint before deploying v2.0.0\\", \\"created_at\\": \\"2025-09-26T10:00:00Z\\", \\"size\\": \\"150 MB\\" } ]\\n} GET /rollback/checkpoints/ Get specific checkpoint details. Path Parameters: id: Checkpoint ID Response: { \\"success\\": true, \\"data\\": { \\"id\\": \\"checkpoint-uuid\\", \\"name\\": \\"before_major_update\\", \\"description\\": \\"Checkpoint before deploying v2.0.0\\", \\"created_at\\": \\"2025-09-26T10:00:00Z\\", \\"size\\": \\"150 MB\\", \\"operations_count\\": 25 }\\n} POST /rollback/execute Execute rollback operation. Request Body: { \\"checkpoint_id\\": \\"checkpoint-uuid\\"\\n} Or for partial rollback: { \\"operation_ids\\": [\\"op-1\\", \\"op-2\\", \\"op-3\\"]\\n} Response: { \\"success\\": true, \\"data\\": { \\"rollback_id\\": \\"rollback-uuid\\", \\"success\\": true, \\"operations_executed\\": 25, \\"operations_failed\\": 0, \\"duration\\": 45.5 }\\n} POST /rollback/restore/ Restore system state from checkpoint. Path Parameters: id: Checkpoint ID Response: { \\"success\\": true, \\"data\\": \\"State restored from checkpoint checkpoint-uuid\\"\\n} GET /rollback/statistics Get rollback system statistics. Response: { \\"success\\": true, \\"data\\": { \\"total_checkpoints\\": 10, \\"total_rollbacks\\": 3, \\"success_rate\\": 100.0, \\"average_rollback_time\\": 30.5 }\\n}","breadcrumbs":"REST API » Rollback and Recovery","id":"1831","title":"Rollback and Recovery"},"1832":{"body":"","breadcrumbs":"REST API » Control Center API Endpoints","id":"1832","title":"Control Center API Endpoints"},"1833":{"body":"POST /auth/login Authenticate user and get JWT token. Request Body: { \\"username\\": \\"admin\\", \\"password\\": \\"secure_password\\", \\"mfa_code\\": \\"123456\\"\\n} Response: { \\"success\\": true, \\"data\\": { \\"token\\": \\"jwt-token-string\\", \\"expires_at\\": \\"2025-09-26T18:00:00Z\\", \\"user\\": { \\"id\\": \\"user-uuid\\", \\"username\\": \\"admin\\", \\"email\\": \\"admin@example.com\\", \\"roles\\": [\\"admin\\", \\"operator\\"] } }\\n} POST /auth/refresh Refresh JWT token. Request Body: { \\"token\\": \\"current-jwt-token\\"\\n} Response: { \\"success\\": true, \\"data\\": { \\"token\\": \\"new-jwt-token\\", \\"expires_at\\": \\"2025-09-26T18:00:00Z\\" }\\n} POST /auth/logout Logout and invalidate token. Response: { \\"success\\": true, \\"data\\": \\"Successfully logged out\\"\\n}","breadcrumbs":"REST API » Authentication","id":"1833","title":"Authentication"},"1834":{"body":"GET /users List all users. Query Parameters: role (optional): Filter by role enabled (optional): Filter by enabled status Response: { \\"success\\": true, \\"data\\": [ { \\"id\\": \\"user-uuid\\", \\"username\\": \\"admin\\", \\"email\\": \\"admin@example.com\\", \\"roles\\": [\\"admin\\"], \\"enabled\\": true, \\"created_at\\": \\"2025-09-26T10:00:00Z\\", \\"last_login\\": \\"2025-09-26T12:00:00Z\\" } ]\\n} POST /users Create new user. Request Body: { \\"username\\": \\"newuser\\", \\"email\\": \\"newuser@example.com\\", \\"password\\": \\"secure_password\\", \\"roles\\": [\\"operator\\"], \\"enabled\\": true\\n} Response: { \\"success\\": true, \\"data\\": { \\"id\\": \\"new-user-uuid\\", \\"username\\": \\"newuser\\", \\"email\\": \\"newuser@example.com\\", \\"roles\\": [\\"operator\\"], \\"enabled\\": true }\\n} PUT /users/ Update existing user. Path Parameters: id: User ID Request Body: { \\"email\\": \\"updated@example.com\\", \\"roles\\": [\\"admin\\", \\"operator\\"], \\"enabled\\": false\\n} Response: { \\"success\\": true, \\"data\\": \\"User updated successfully\\"\\n} DELETE /users/ Delete user. Path Parameters: id: User ID Response: { \\"success\\": true, \\"data\\": \\"User deleted successfully\\"\\n}","breadcrumbs":"REST API » User Management","id":"1834","title":"User Management"},"1835":{"body":"GET /policies List all policies. Response: { \\"success\\": true, \\"data\\": [ { \\"id\\": \\"policy-uuid\\", \\"name\\": \\"admin_access_policy\\", \\"version\\": \\"1.0.0\\", \\"rules\\": [...], \\"created_at\\": \\"2025-09-26T10:00:00Z\\", \\"enabled\\": true } ]\\n} POST /policies Create new policy. Request Body: { \\"name\\": \\"new_policy\\", \\"version\\": \\"1.0.0\\", \\"rules\\": [ { \\"effect\\": \\"Allow\\", \\"resource\\": \\"servers:*\\", \\"action\\": [\\"create\\", \\"read\\"], \\"condition\\": \\"user.role == \'admin\'\\" } ]\\n} Response: { \\"success\\": true, \\"data\\": { \\"id\\": \\"new-policy-uuid\\", \\"name\\": \\"new_policy\\", \\"version\\": \\"1.0.0\\" }\\n} PUT /policies/ Update policy. Path Parameters: id: Policy ID Request Body: { \\"name\\": \\"updated_policy\\", \\"rules\\": [...]\\n} Response: { \\"success\\": true, \\"data\\": \\"Policy updated successfully\\"\\n}","breadcrumbs":"REST API » Policy Management","id":"1835","title":"Policy Management"},"1836":{"body":"GET /audit/logs Get audit logs. Query Parameters: user_id (optional): Filter by user action (optional): Filter by action resource (optional): Filter by resource from (optional): Start date (ISO 8601) to (optional): End date (ISO 8601) limit (optional): Maximum results offset (optional): Pagination offset Response: { \\"success\\": true, \\"data\\": [ { \\"id\\": \\"audit-log-uuid\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"user_id\\": \\"user-uuid\\", \\"action\\": \\"server.create\\", \\"resource\\": \\"servers/web-01\\", \\"result\\": \\"success\\", \\"details\\": {...} } ]\\n}","breadcrumbs":"REST API » Audit Logging","id":"1836","title":"Audit Logging"},"1837":{"body":"All endpoints may return error responses in this format: { \\"success\\": false, \\"error\\": \\"Detailed error message\\"\\n}","breadcrumbs":"REST API » Error Responses","id":"1837","title":"Error Responses"},"1838":{"body":"200 OK: Successful request 201 Created: Resource created successfully 400 Bad Request: Invalid request parameters 401 Unauthorized: Authentication required or invalid 403 Forbidden: Permission denied 404 Not Found: Resource not found 422 Unprocessable Entity: Validation error 500 Internal Server Error: Server error","breadcrumbs":"REST API » HTTP Status Codes","id":"1838","title":"HTTP Status Codes"},"1839":{"body":"API endpoints are rate-limited: Authentication: 5 requests per minute per IP General APIs: 100 requests per minute per user Batch operations: 10 requests per minute per user Rate limit headers are included in responses: X-RateLimit-Limit: 100\\nX-RateLimit-Remaining: 95\\nX-RateLimit-Reset: 1632150000","breadcrumbs":"REST API » Rate Limiting","id":"1839","title":"Rate Limiting"},"184":{"body":"Operation HTTP API Plugin Speedup KMS Encrypt ~50 ms ~5 ms 10x KMS Decrypt ~50 ms ~5 ms 10x Orch Status ~30 ms ~1 ms 30x Orch Validate ~100 ms ~10 ms 10x Orch Tasks ~50 ms ~5 ms 10x Auth Verify ~50 ms ~10 ms 5x","breadcrumbs":"Quick Start Cheatsheet » Plugin Performance Comparison","id":"184","title":"Plugin Performance Comparison"},"1840":{"body":"","breadcrumbs":"REST API » Monitoring Endpoints","id":"1840","title":"Monitoring Endpoints"},"1841":{"body":"Prometheus-compatible metrics endpoint. Response: # HELP orchestrator_tasks_total Total number of tasks\\n# TYPE orchestrator_tasks_total counter\\norchestrator_tasks_total{status=\\"completed\\"} 150\\norchestrator_tasks_total{status=\\"failed\\"} 5 # HELP orchestrator_task_duration_seconds Task execution duration\\n# TYPE orchestrator_task_duration_seconds histogram\\norchestrator_task_duration_seconds_bucket{le=\\"10\\"} 50\\norchestrator_task_duration_seconds_bucket{le=\\"30\\"} 120\\norchestrator_task_duration_seconds_bucket{le=\\"+Inf\\"} 155","breadcrumbs":"REST API » GET /metrics","id":"1841","title":"GET /metrics"},"1842":{"body":"Real-time event streaming via WebSocket connection. Connection: const ws = new WebSocket(\'ws://localhost:9090/ws?token=jwt-token\'); ws.onmessage = function(event) { const data = JSON.parse(event.data); console.log(\'Event:\', data);\\n}; Event Format: { \\"event_type\\": \\"TaskStatusChanged\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"data\\": { \\"task_id\\": \\"uuid-string\\", \\"status\\": \\"completed\\" }, \\"metadata\\": { \\"task_id\\": \\"uuid-string\\", \\"status\\": \\"completed\\" }\\n}","breadcrumbs":"REST API » WebSocket /ws","id":"1842","title":"WebSocket /ws"},"1843":{"body":"","breadcrumbs":"REST API » SDK Examples","id":"1843","title":"SDK Examples"},"1844":{"body":"import requests class ProvisioningClient: def __init__(self, base_url, token): self.base_url = base_url self.headers = { \'Authorization\': f\'Bearer {token}\', \'Content-Type\': \'application/json\' } def create_server_workflow(self, infra, settings, check_mode=False): payload = { \'infra\': infra, \'settings\': settings, \'check_mode\': check_mode, \'wait\': True } response = requests.post( f\'{self.base_url}/workflows/servers/create\', json=payload, headers=self.headers ) return response.json() def get_task_status(self, task_id): response = requests.get( f\'{self.base_url}/tasks/{task_id}\', headers=self.headers ) return response.json() # Usage\\nclient = ProvisioningClient(\'http://localhost:9090\', \'your-jwt-token\')\\nresult = client.create_server_workflow(\'production\', \'config.ncl\')\\nprint(f\\"Task ID: {result[\'data\']}\\")","breadcrumbs":"REST API » Python SDK Example","id":"1844","title":"Python SDK Example"},"1845":{"body":"const axios = require(\'axios\'); class ProvisioningClient { constructor(baseUrl, token) { this.client = axios.create({ baseURL: baseUrl, headers: { \'Authorization\': `Bearer ${token}`, \'Content-Type\': \'application/json\' } }); } async createServerWorkflow(infra, settings, checkMode = false) { const response = await this.client.post(\'/workflows/servers/create\', { infra, settings, check_mode: checkMode, wait: true }); return response.data; } async getTaskStatus(taskId) { const response = await this.client.get(`/tasks/${taskId}`); return response.data; }\\n} // Usage\\nconst client = new ProvisioningClient(\'http://localhost:9090\', \'your-jwt-token\');\\nconst result = await client.createServerWorkflow(\'production\', \'config.ncl\');\\nconsole.log(`Task ID: ${result.data}`);","breadcrumbs":"REST API » JavaScript/Node.js SDK Example","id":"1845","title":"JavaScript/Node.js SDK Example"},"1846":{"body":"The system supports webhooks for external integrations:","breadcrumbs":"REST API » Webhook Integration","id":"1846","title":"Webhook Integration"},"1847":{"body":"Configure webhooks in the system configuration: [webhooks]\\nenabled = true\\nendpoints = [ { url = \\"https://your-system.com/webhook\\" events = [\\"task.completed\\", \\"task.failed\\", \\"batch.completed\\"] secret = \\"webhook-secret\\" }\\n]","breadcrumbs":"REST API » Webhook Configuration","id":"1847","title":"Webhook Configuration"},"1848":{"body":"{ \\"event\\": \\"task.completed\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"data\\": { \\"task_id\\": \\"uuid-string\\", \\"status\\": \\"completed\\", \\"output\\": \\"Task completed successfully\\" }, \\"signature\\": \\"sha256=calculated-signature\\"\\n}","breadcrumbs":"REST API » Webhook Payload","id":"1848","title":"Webhook Payload"},"1849":{"body":"For endpoints that return lists, use pagination parameters: limit: Maximum number of items per page (default: 50, max: 1000) offset: Number of items to skip Pagination metadata is included in response headers: X-Total-Count: 1500\\nX-Limit: 50\\nX-Offset: 100\\nLink: ; rel=\\"next\\"","breadcrumbs":"REST API » Pagination","id":"1849","title":"Pagination"},"185":{"body":"","breadcrumbs":"Quick Start Cheatsheet » CLI Shortcuts","id":"185","title":"CLI Shortcuts"},"1850":{"body":"The API uses header-based versioning: Accept: application/vnd.provisioning.v1+json Current version: v1","breadcrumbs":"REST API » API Versioning","id":"1850","title":"API Versioning"},"1851":{"body":"Use the included test suite to validate API functionality: # Run API integration tests\\ncd src/orchestrator\\ncargo test --test api_tests # Run load tests\\ncargo test --test load_tests --release","breadcrumbs":"REST API » Testing","id":"1851","title":"Testing"},"1852":{"body":"This document provides comprehensive documentation for the WebSocket API used for real-time monitoring, event streaming, and live updates in provisioning.","breadcrumbs":"WebSocket » WebSocket API Reference","id":"1852","title":"WebSocket API Reference"},"1853":{"body":"The WebSocket API enables real-time communication between clients and the provisioning orchestrator, providing: Live workflow progress updates System health monitoring Event streaming Real-time metrics Interactive debugging sessions","breadcrumbs":"WebSocket » Overview","id":"1853","title":"Overview"},"1854":{"body":"","breadcrumbs":"WebSocket » WebSocket Endpoints","id":"1854","title":"WebSocket Endpoints"},"1855":{"body":"ws://localhost:9090/ws The main WebSocket endpoint for real-time events and monitoring. Connection Parameters: token: JWT authentication token (required) events: Comma-separated list of event types to subscribe to (optional) batch_size: Maximum number of events per message (default: 10) compression: Enable message compression (default: false) Example Connection: const ws = new WebSocket(\'ws://localhost:9090/ws?token=jwt-token&events=task,batch,system\');","breadcrumbs":"WebSocket » Primary WebSocket Endpoint","id":"1855","title":"Primary WebSocket Endpoint"},"1856":{"body":"ws://localhost:9090/metrics Real-time metrics streaming endpoint. Features: Live system metrics Performance data Resource utilization Custom metric streams ws://localhost:9090/logs Live log streaming endpoint. Features: Real-time log tailing Log level filtering Component-specific logs Search and filtering","breadcrumbs":"WebSocket » Specialized WebSocket Endpoints","id":"1856","title":"Specialized WebSocket Endpoints"},"1857":{"body":"","breadcrumbs":"WebSocket » Authentication","id":"1857","title":"Authentication"},"1858":{"body":"All WebSocket connections require authentication via JWT token: // Include token in connection URL\\nconst ws = new WebSocket(\'ws://localhost:9090/ws?token=\' + jwtToken); // Or send token after connection\\nws.onopen = function() { ws.send(JSON.stringify({ type: \'auth\', token: jwtToken }));\\n};","breadcrumbs":"WebSocket » JWT Token Authentication","id":"1858","title":"JWT Token Authentication"},"1859":{"body":"Initial Connection : Client connects with token parameter Token Validation : Server validates JWT token Authorization : Server checks token permissions Subscription : Client subscribes to event types Event Stream : Server begins streaming events","breadcrumbs":"WebSocket » Connection Authentication Flow","id":"1859","title":"Connection Authentication Flow"},"186":{"body":"# Server shortcuts\\nprovisioning s # server (same as \'provisioning server\')\\nprovisioning s create # Create servers\\nprovisioning s delete # Delete servers\\nprovisioning s list # List servers\\nprovisioning s ssh web-01 # SSH into server # Taskserv shortcuts\\nprovisioning t # taskserv (same as \'provisioning taskserv\')\\nprovisioning task # taskserv (alias)\\nprovisioning t create kubernetes\\nprovisioning t delete kubernetes\\nprovisioning t list\\nprovisioning t generate kubernetes\\nprovisioning t check-updates # Cluster shortcuts\\nprovisioning cl # cluster (same as \'provisioning cluster\')\\nprovisioning cl create buildkit\\nprovisioning cl delete buildkit\\nprovisioning cl list # Infrastructure shortcuts\\nprovisioning i # infra (same as \'provisioning infra\')\\nprovisioning infras # infra (alias)\\nprovisioning i list\\nprovisioning i validate","breadcrumbs":"Quick Start Cheatsheet » Infrastructure Shortcuts","id":"186","title":"Infrastructure Shortcuts"},"1860":{"body":"","breadcrumbs":"WebSocket » Event Types and Schemas","id":"1860","title":"Event Types and Schemas"},"1861":{"body":"Task Status Changed Fired when a workflow task status changes. { \\"event_type\\": \\"TaskStatusChanged\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"data\\": { \\"task_id\\": \\"uuid-string\\", \\"name\\": \\"create_servers\\", \\"status\\": \\"Running\\", \\"previous_status\\": \\"Pending\\", \\"progress\\": 45.5 }, \\"metadata\\": { \\"task_id\\": \\"uuid-string\\", \\"workflow_type\\": \\"server_creation\\", \\"infra\\": \\"production\\" }\\n} Batch Operation Update Fired when batch operation status changes. { \\"event_type\\": \\"BatchOperationUpdate\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"data\\": { \\"batch_id\\": \\"uuid-string\\", \\"name\\": \\"multi_cloud_deployment\\", \\"status\\": \\"Running\\", \\"progress\\": 65.0, \\"operations\\": [ { \\"id\\": \\"upcloud_servers\\", \\"status\\": \\"Completed\\", \\"progress\\": 100.0 }, { \\"id\\": \\"aws_taskservs\\", \\"status\\": \\"Running\\", \\"progress\\": 30.0 } ] }, \\"metadata\\": { \\"total_operations\\": 5, \\"completed_operations\\": 2, \\"failed_operations\\": 0 }\\n} System Health Update Fired when system health status changes. { \\"event_type\\": \\"SystemHealthUpdate\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"data\\": { \\"overall_status\\": \\"Healthy\\", \\"components\\": { \\"storage\\": { \\"status\\": \\"Healthy\\", \\"last_check\\": \\"2025-09-26T09:59:55Z\\" }, \\"batch_coordinator\\": { \\"status\\": \\"Warning\\", \\"last_check\\": \\"2025-09-26T09:59:55Z\\", \\"message\\": \\"High memory usage\\" } }, \\"metrics\\": { \\"cpu_usage\\": 45.2, \\"memory_usage\\": 2048, \\"disk_usage\\": 75.5, \\"active_workflows\\": 5 } }, \\"metadata\\": { \\"check_interval\\": 30, \\"next_check\\": \\"2025-09-26T10:00:30Z\\" }\\n} Workflow Progress Update Fired when workflow progress changes. { \\"event_type\\": \\"WorkflowProgressUpdate\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"data\\": { \\"workflow_id\\": \\"uuid-string\\", \\"name\\": \\"kubernetes_deployment\\", \\"progress\\": 75.0, \\"current_step\\": \\"Installing CNI\\", \\"total_steps\\": 8, \\"completed_steps\\": 6, \\"estimated_time_remaining\\": 120, \\"step_details\\": { \\"step_name\\": \\"Installing CNI\\", \\"step_progress\\": 45.0, \\"step_message\\": \\"Downloading Cilium components\\" } }, \\"metadata\\": { \\"infra\\": \\"production\\", \\"provider\\": \\"upcloud\\", \\"started_at\\": \\"2025-09-26T09:45:00Z\\" }\\n} Log Entry Real-time log streaming. { \\"event_type\\": \\"LogEntry\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"data\\": { \\"level\\": \\"INFO\\", \\"message\\": \\"Server web-01 created successfully\\", \\"component\\": \\"server-manager\\", \\"task_id\\": \\"uuid-string\\", \\"details\\": { \\"server_id\\": \\"server-uuid\\", \\"hostname\\": \\"web-01\\", \\"ip_address\\": \\"10.0.1.100\\" } }, \\"metadata\\": { \\"source\\": \\"orchestrator\\", \\"thread\\": \\"worker-1\\" }\\n} Metric Update Real-time metrics streaming. { \\"event_type\\": \\"MetricUpdate\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"data\\": { \\"metric_name\\": \\"workflow_duration\\", \\"metric_type\\": \\"histogram\\", \\"value\\": 180.5, \\"labels\\": { \\"workflow_type\\": \\"server_creation\\", \\"status\\": \\"completed\\", \\"infra\\": \\"production\\" } }, \\"metadata\\": { \\"interval\\": 15, \\"aggregation\\": \\"average\\" }\\n}","breadcrumbs":"WebSocket » Core Event Types","id":"1861","title":"Core Event Types"},"1862":{"body":"Applications can define custom event types: { \\"event_type\\": \\"CustomApplicationEvent\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"data\\": { // Custom event data }, \\"metadata\\": { \\"custom_field\\": \\"custom_value\\" }\\n}","breadcrumbs":"WebSocket » Custom Event Types","id":"1862","title":"Custom Event Types"},"1863":{"body":"","breadcrumbs":"WebSocket » Client-Side JavaScript API","id":"1863","title":"Client-Side JavaScript API"},"1864":{"body":"class ProvisioningWebSocket { constructor(baseUrl, token, options = {}) { this.baseUrl = baseUrl; this.token = token; this.options = { reconnect: true, reconnectInterval: 5000, maxReconnectAttempts: 10, ...options }; this.ws = null; this.reconnectAttempts = 0; this.eventHandlers = new Map(); } connect() { const wsUrl = `${this.baseUrl}/ws?token=${this.token}`; this.ws = new WebSocket(wsUrl); this.ws.onopen = (event) => { console.log(\'WebSocket connected\'); this.reconnectAttempts = 0; this.emit(\'connected\', event); }; this.ws.onmessage = (event) => { try { const message = JSON.parse(event.data); this.handleMessage(message); } catch (error) { console.error(\'Failed to parse WebSocket message:\', error); } }; this.ws.onclose = (event) => { console.log(\'WebSocket disconnected\'); this.emit(\'disconnected\', event); if (this.options.reconnect && this.reconnectAttempts < this.options.maxReconnectAttempts) { setTimeout(() => { this.reconnectAttempts++; console.log(`Reconnecting... (${this.reconnectAttempts}/${this.options.maxReconnectAttempts})`); this.connect(); }, this.options.reconnectInterval); } }; this.ws.onerror = (error) => { console.error(\'WebSocket error:\', error); this.emit(\'error\', error); }; } handleMessage(message) { if (message.event_type) { this.emit(message.event_type, message); this.emit(\'message\', message); } } on(eventType, handler) { if (!this.eventHandlers.has(eventType)) { this.eventHandlers.set(eventType, []); } this.eventHandlers.get(eventType).push(handler); } off(eventType, handler) { const handlers = this.eventHandlers.get(eventType); if (handlers) { const index = handlers.indexOf(handler); if (index > -1) { handlers.splice(index, 1); } } } emit(eventType, data) { const handlers = this.eventHandlers.get(eventType); if (handlers) { handlers.forEach(handler => { try { handler(data); } catch (error) { console.error(`Error in event handler for ${eventType}:`, error); } }); } } send(message) { if (this.ws && this.ws.readyState === WebSocket.OPEN) { this.ws.send(JSON.stringify(message)); } else { console.warn(\'WebSocket not connected, message not sent\'); } } disconnect() { this.options.reconnect = false; if (this.ws) { this.ws.close(); } } subscribe(eventTypes) { this.send({ type: \'subscribe\', events: Array.isArray(eventTypes) ? eventTypes : [eventTypes] }); } unsubscribe(eventTypes) { this.send({ type: \'unsubscribe\', events: Array.isArray(eventTypes) ? eventTypes : [eventTypes] }); }\\n} // Usage example\\nconst ws = new ProvisioningWebSocket(\'ws://localhost:9090\', \'your-jwt-token\'); ws.on(\'TaskStatusChanged\', (event) => { console.log(`Task ${event.data.task_id} status: ${event.data.status}`); updateTaskUI(event.data);\\n}); ws.on(\'WorkflowProgressUpdate\', (event) => { console.log(`Workflow progress: ${event.data.progress}%`); updateProgressBar(event.data.progress);\\n}); ws.on(\'SystemHealthUpdate\', (event) => { console.log(\'System health:\', event.data.overall_status); updateHealthIndicator(event.data);\\n}); ws.connect(); // Subscribe to specific events\\nws.subscribe([\'TaskStatusChanged\', \'WorkflowProgressUpdate\']);","breadcrumbs":"WebSocket » Connection Management","id":"1864","title":"Connection Management"},"1865":{"body":"class ProvisioningDashboard { constructor(wsUrl, token) { this.ws = new ProvisioningWebSocket(wsUrl, token); this.setupEventHandlers(); this.connect(); } setupEventHandlers() { this.ws.on(\'TaskStatusChanged\', this.handleTaskUpdate.bind(this)); this.ws.on(\'BatchOperationUpdate\', this.handleBatchUpdate.bind(this)); this.ws.on(\'SystemHealthUpdate\', this.handleHealthUpdate.bind(this)); this.ws.on(\'WorkflowProgressUpdate\', this.handleProgressUpdate.bind(this)); this.ws.on(\'LogEntry\', this.handleLogEntry.bind(this)); } connect() { this.ws.connect(); } handleTaskUpdate(event) { const taskCard = document.getElementById(`task-${event.data.task_id}`); if (taskCard) { taskCard.querySelector(\'.status\').textContent = event.data.status; taskCard.querySelector(\'.status\').className = `status ${event.data.status.toLowerCase()}`; if (event.data.progress) { const progressBar = taskCard.querySelector(\'.progress-bar\'); progressBar.style.width = `${event.data.progress}%`; } } } handleBatchUpdate(event) { const batchCard = document.getElementById(`batch-${event.data.batch_id}`); if (batchCard) { batchCard.querySelector(\'.batch-progress\').style.width = `${event.data.progress}%`; event.data.operations.forEach(op => { const opElement = batchCard.querySelector(`[data-operation=\\"${op.id}\\"]`); if (opElement) { opElement.querySelector(\'.operation-status\').textContent = op.status; opElement.querySelector(\'.operation-progress\').style.width = `${op.progress}%`; } }); } } handleHealthUpdate(event) { const healthIndicator = document.getElementById(\'health-indicator\'); healthIndicator.className = `health-indicator ${event.data.overall_status.toLowerCase()}`; healthIndicator.textContent = event.data.overall_status; const metricsPanel = document.getElementById(\'metrics-panel\'); metricsPanel.innerHTML = `
CPU: ${event.data.metrics.cpu_usage}%
Memory: ${Math.round(event.data.metrics.memory_usage / 1024 / 1024)}MB
Disk: ${event.data.metrics.disk_usage}%
Active Workflows: ${event.data.metrics.active_workflows}
`; } handleProgressUpdate(event) { const workflowCard = document.getElementById(`workflow-${event.data.workflow_id}`); if (workflowCard) { const progressBar = workflowCard.querySelector(\'.workflow-progress\'); const stepInfo = workflowCard.querySelector(\'.step-info\'); progressBar.style.width = `${event.data.progress}%`; stepInfo.textContent = `${event.data.current_step} (${event.data.completed_steps}/${event.data.total_steps})`; if (event.data.estimated_time_remaining) { const timeRemaining = workflowCard.querySelector(\'.time-remaining\'); timeRemaining.textContent = `${Math.round(event.data.estimated_time_remaining / 60)} min remaining`; } } } handleLogEntry(event) { const logContainer = document.getElementById(\'log-container\'); const logEntry = document.createElement(\'div\'); logEntry.className = `log-entry log-${event.data.level.toLowerCase()}`; logEntry.innerHTML = ` ${new Date(event.timestamp).toLocaleTimeString()} ${event.data.level} ${event.data.component} ${event.data.message} `; logContainer.appendChild(logEntry); // Auto-scroll to bottom logContainer.scrollTop = logContainer.scrollHeight; // Limit log entries to prevent memory issues const maxLogEntries = 1000; if (logContainer.children.length > maxLogEntries) { logContainer.removeChild(logContainer.firstChild); } }\\n} // Initialize dashboard\\nconst dashboard = new ProvisioningDashboard(\'ws://localhost:9090\', jwtToken);","breadcrumbs":"WebSocket » Real-Time Dashboard Example","id":"1865","title":"Real-Time Dashboard Example"},"1866":{"body":"","breadcrumbs":"WebSocket » Server-Side Implementation","id":"1866","title":"Server-Side Implementation"},"1867":{"body":"The orchestrator implements WebSocket support using Axum and Tokio: use axum::{ extract::{ws::WebSocket, ws::WebSocketUpgrade, Query, State}, response::Response,\\n};\\nuse serde::{Deserialize, Serialize};\\nuse std::collections::HashMap;\\nuse tokio::sync::broadcast; #[derive(Debug, Deserialize)]\\npub struct WsQuery { token: String, events: Option, batch_size: Option, compression: Option,\\n} #[derive(Debug, Clone, Serialize)]\\npub struct WebSocketMessage { pub event_type: String, pub timestamp: chrono::DateTime, pub data: serde_json::Value, pub metadata: HashMap,\\n} pub async fn websocket_handler( ws: WebSocketUpgrade, Query(params): Query, State(state): State,\\n) -> Response { // Validate JWT token let claims = match state.auth_service.validate_token(¶ms.token) { Ok(claims) => claims, Err(_) => return Response::builder() .status(401) .body(\\"Unauthorized\\".into()) .unwrap(), }; ws.on_upgrade(move |socket| handle_socket(socket, params, claims, state))\\n} async fn handle_socket( socket: WebSocket, params: WsQuery, claims: Claims, state: SharedState,\\n) { let (mut sender, mut receiver) = socket.split(); // Subscribe to event stream let mut event_rx = state.monitoring_system.subscribe_to_events().await; // Parse requested event types let requested_events: Vec = params.events .unwrap_or_default() .split(\',\') .map(|s| s.trim().to_string()) .filter(|s| !s.is_empty()) .collect(); // Handle incoming messages from client let sender_task = tokio::spawn(async move { while let Some(msg) = receiver.next().await { if let Ok(msg) = msg { if let Ok(text) = msg.to_text() { if let Ok(client_msg) = serde_json::from_str::(text) { handle_client_message(client_msg, &state).await; } } } } }); // Handle outgoing messages to client let receiver_task = tokio::spawn(async move { let mut batch = Vec::new(); let batch_size = params.batch_size.unwrap_or(10); while let Ok(event) = event_rx.recv().await { // Filter events based on subscription if !requested_events.is_empty() && !requested_events.contains(&event.event_type) { continue; } // Check permissions if !has_event_permission(&claims, &event.event_type) { continue; } batch.push(event); // Send batch when full or after timeout if batch.len() >= batch_size { send_event_batch(&mut sender, &batch).await; batch.clear(); } } }); // Wait for either task to complete tokio::select! { _ = sender_task => {}, _ = receiver_task => {}, }\\n} #[derive(Debug, Deserialize)]\\nstruct ClientMessage { #[serde(rename = \\"type\\")] msg_type: String, token: Option, events: Option>,\\n} async fn handle_client_message(msg: ClientMessage, state: &SharedState) { match msg.msg_type.as_str() { \\"subscribe\\" => { // Handle event subscription }, \\"unsubscribe\\" => { // Handle event unsubscription }, \\"auth\\" => { // Handle re-authentication }, _ => { // Unknown message type } }\\n} async fn send_event_batch(sender: &mut SplitSink, batch: &[WebSocketMessage]) { let batch_msg = serde_json::json!({ \\"type\\": \\"batch\\", \\"events\\": batch }); if let Ok(msg_text) = serde_json::to_string(&batch_msg) { if let Err(e) = sender.send(Message::Text(msg_text)).await { eprintln!(\\"Failed to send WebSocket message: {}\\", e); } }\\n} fn has_event_permission(claims: &Claims, event_type: &str) -> bool { // Check if user has permission to receive this event type match event_type { \\"SystemHealthUpdate\\" => claims.role.contains(&\\"admin\\".to_string()), \\"LogEntry\\" => claims.role.contains(&\\"admin\\".to_string()) || claims.role.contains(&\\"developer\\".to_string()), _ => true, // Most events are accessible to all authenticated users }\\n}","breadcrumbs":"WebSocket » Rust WebSocket Handler","id":"1867","title":"Rust WebSocket Handler"},"1868":{"body":"","breadcrumbs":"WebSocket » Event Filtering and Subscriptions","id":"1868","title":"Event Filtering and Subscriptions"},"1869":{"body":"// Subscribe to specific event types\\nws.subscribe([\'TaskStatusChanged\', \'WorkflowProgressUpdate\']); // Subscribe with filters\\nws.send({ type: \'subscribe\', events: [\'TaskStatusChanged\'], filters: { task_name: \'create_servers\', status: [\'Running\', \'Completed\', \'Failed\'] }\\n}); // Advanced filtering\\nws.send({ type: \'subscribe\', events: [\'LogEntry\'], filters: { level: [\'ERROR\', \'WARN\'], component: [\'server-manager\', \'batch-coordinator\'], since: \'2025-09-26T10:00:00Z\' }\\n});","breadcrumbs":"WebSocket » Client-Side Filtering","id":"1869","title":"Client-Side Filtering"},"187":{"body":"# Workflow shortcuts\\nprovisioning wf # workflow (same as \'provisioning workflow\')\\nprovisioning flow # workflow (alias)\\nprovisioning wf list\\nprovisioning wf status \\nprovisioning wf monitor \\nprovisioning wf stats\\nprovisioning wf cleanup # Batch shortcuts\\nprovisioning bat # batch (same as \'provisioning batch\')\\nprovisioning batch submit workflows/example.ncl\\nprovisioning bat list\\nprovisioning bat status \\nprovisioning bat monitor \\nprovisioning bat rollback \\nprovisioning bat cancel \\nprovisioning bat stats # Orchestrator shortcuts\\nprovisioning orch # orchestrator (same as \'provisioning orchestrator\')\\nprovisioning orch start\\nprovisioning orch stop\\nprovisioning orch status\\nprovisioning orch health\\nprovisioning orch logs","breadcrumbs":"Quick Start Cheatsheet » Orchestration Shortcuts","id":"187","title":"Orchestration Shortcuts"},"1870":{"body":"Events can be filtered on the server side based on: User permissions and roles Event type subscriptions Custom filter criteria Rate limiting","breadcrumbs":"WebSocket » Server-Side Event Filtering","id":"1870","title":"Server-Side Event Filtering"},"1871":{"body":"","breadcrumbs":"WebSocket » Error Handling and Reconnection","id":"1871","title":"Error Handling and Reconnection"},"1872":{"body":"ws.on(\'error\', (error) => { console.error(\'WebSocket error:\', error); // Handle specific error types if (error.code === 1006) { // Abnormal closure, attempt reconnection setTimeout(() => ws.connect(), 5000); } else if (error.code === 1008) { // Policy violation, check token refreshTokenAndReconnect(); }\\n}); ws.on(\'disconnected\', (event) => { console.log(`WebSocket disconnected: ${event.code} - ${event.reason}`); // Handle different close codes switch (event.code) { case 1000: // Normal closure console.log(\'Connection closed normally\'); break; case 1001: // Going away console.log(\'Server is shutting down\'); break; case 4001: // Custom: Token expired refreshTokenAndReconnect(); break; default: // Attempt reconnection for other errors if (shouldReconnect()) { scheduleReconnection(); } }\\n});","breadcrumbs":"WebSocket » Connection Errors","id":"1872","title":"Connection Errors"},"1873":{"body":"class ProvisioningWebSocket { constructor(baseUrl, token, options = {}) { // ... existing code ... this.heartbeatInterval = options.heartbeatInterval || 30000; this.heartbeatTimer = null; } connect() { // ... existing connection code ... this.ws.onopen = (event) => { console.log(\'WebSocket connected\'); this.startHeartbeat(); this.emit(\'connected\', event); }; this.ws.onclose = (event) => { this.stopHeartbeat(); // ... existing close handling ... }; } startHeartbeat() { this.heartbeatTimer = setInterval(() => { if (this.ws && this.ws.readyState === WebSocket.OPEN) { this.send({ type: \'ping\' }); } }, this.heartbeatInterval); } stopHeartbeat() { if (this.heartbeatTimer) { clearInterval(this.heartbeatTimer); this.heartbeatTimer = null; } } handleMessage(message) { if (message.type === \'pong\') { // Heartbeat response received return; } // ... existing message handling ... }\\n}","breadcrumbs":"WebSocket » Heartbeat and Keep-Alive","id":"1873","title":"Heartbeat and Keep-Alive"},"1874":{"body":"","breadcrumbs":"WebSocket » Performance Considerations","id":"1874","title":"Performance Considerations"},"1875":{"body":"To improve performance, the server can batch multiple events into single WebSocket messages: { \\"type\\": \\"batch\\", \\"timestamp\\": \\"2025-09-26T10:00:00Z\\", \\"events\\": [ { \\"event_type\\": \\"TaskStatusChanged\\", \\"data\\": { ... } }, { \\"event_type\\": \\"WorkflowProgressUpdate\\", \\"data\\": { ... } } ]\\n}","breadcrumbs":"WebSocket » Message Batching","id":"1875","title":"Message Batching"},"1876":{"body":"Enable message compression for large events: const ws = new WebSocket(\'ws://localhost:9090/ws?token=jwt&compression=true\');","breadcrumbs":"WebSocket » Compression","id":"1876","title":"Compression"},"1877":{"body":"The server implements rate limiting to prevent abuse: Maximum connections per user: 10 Maximum messages per second: 100 Maximum subscription events: 50","breadcrumbs":"WebSocket » Rate Limiting","id":"1877","title":"Rate Limiting"},"1878":{"body":"","breadcrumbs":"WebSocket » Security Considerations","id":"1878","title":"Security Considerations"},"1879":{"body":"All connections require valid JWT tokens Tokens are validated on connection and periodically renewed Event access is controlled by user roles and permissions","breadcrumbs":"WebSocket » Authentication and Authorization","id":"1879","title":"Authentication and Authorization"},"188":{"body":"# Module shortcuts\\nprovisioning mod # module (same as \'provisioning module\')\\nprovisioning mod discover taskserv\\nprovisioning mod discover provider\\nprovisioning mod discover cluster\\nprovisioning mod load taskserv workspace kubernetes\\nprovisioning mod list taskserv workspace\\nprovisioning mod unload taskserv workspace kubernetes\\nprovisioning mod sync-kcl # Layer shortcuts\\nprovisioning lyr # layer (same as \'provisioning layer\')\\nprovisioning lyr explain\\nprovisioning lyr show\\nprovisioning lyr test\\nprovisioning lyr stats # Version shortcuts\\nprovisioning version check\\nprovisioning version show\\nprovisioning version updates\\nprovisioning version apply \\nprovisioning version taskserv # Package shortcuts\\nprovisioning pack core\\nprovisioning pack provider upcloud\\nprovisioning pack list\\nprovisioning pack clean","breadcrumbs":"Quick Start Cheatsheet » Development Shortcuts","id":"188","title":"Development Shortcuts"},"1880":{"body":"All incoming messages are validated against schemas Malformed messages are rejected Rate limiting prevents DoS attacks","breadcrumbs":"WebSocket » Message Validation","id":"1880","title":"Message Validation"},"1881":{"body":"All event data is sanitized before transmission Sensitive information is filtered based on user permissions PII and secrets are never transmitted This WebSocket API provides a robust, real-time communication channel for monitoring and managing provisioning with comprehensive security and performance features.","breadcrumbs":"WebSocket » Data Sanitization","id":"1881","title":"Data Sanitization"},"1882":{"body":"This document provides comprehensive guidance for developing extensions for provisioning, including providers, task services, and cluster configurations.","breadcrumbs":"Extensions » Extension Development API","id":"1882","title":"Extension Development API"},"1883":{"body":"Provisioning supports three types of extensions: Providers : Cloud infrastructure providers (AWS, UpCloud, Local, etc.) Task Services : Infrastructure components (Kubernetes, Cilium, Containerd, etc.) Clusters : Complete deployment configurations (BuildKit, CI/CD, etc.) All extensions follow a standardized structure and API for seamless integration.","breadcrumbs":"Extensions » Overview","id":"1883","title":"Overview"},"1884":{"body":"","breadcrumbs":"Extensions » Extension Structure","id":"1884","title":"Extension Structure"},"1885":{"body":"extension-name/\\n├── manifest.toml # Extension metadata\\n├── schemas/ # Nickel configuration files\\n│ ├── main.ncl # Main schema\\n│ ├── settings.ncl # Settings schema\\n│ ├── version.ncl # Version configuration\\n│ └── contracts.ncl # Contract definitions\\n├── nulib/ # Nushell library modules\\n│ ├── mod.nu # Main module\\n│ ├── create.nu # Creation operations\\n│ ├── delete.nu # Deletion operations\\n│ └── utils.nu # Utility functions\\n├── templates/ # Jinja2 templates\\n│ ├── config.j2 # Configuration templates\\n│ └── scripts/ # Script templates\\n├── generate/ # Code generation scripts\\n│ └── generate.nu # Generation commands\\n├── README.md # Extension documentation\\n└── metadata.toml # Extension metadata","breadcrumbs":"Extensions » Standard Directory Layout","id":"1885","title":"Standard Directory Layout"},"1886":{"body":"","breadcrumbs":"Extensions » Provider Extension API","id":"1886","title":"Provider Extension API"},"1887":{"body":"All providers must implement the following interface: Core Operations create-server(config: record) -> record delete-server(server_id: string) -> null list-servers() -> list get-server-info(server_id: string) -> record start-server(server_id: string) -> null stop-server(server_id: string) -> null reboot-server(server_id: string) -> null Pricing and Plans get-pricing() -> list get-plans() -> list get-zones() -> list SSH and Access get-ssh-access(server_id: string) -> record configure-firewall(server_id: string, rules: list) -> null","breadcrumbs":"Extensions » Provider Interface","id":"1887","title":"Provider Interface"},"1888":{"body":"Nickel Configuration Schema Create schemas/settings.ncl: # Provider settings schema\\n{ ProviderSettings = { # Authentication configuration auth | { method | \\"api_key\\" | \\"certificate\\" | \\"oauth\\" | \\"basic\\", api_key | String = null, api_secret | String = null, username | String = null, password | String = null, certificate_path | String = null, private_key_path | String = null, }, # API configuration api | { base_url | String, version | String = \\"v1\\", timeout | Number = 30, retries | Number = 3, }, # Default server configuration defaults: { plan?: str zone?: str os?: str ssh_keys?: [str] firewall_rules?: [FirewallRule] } # Provider-specific settings features: { load_balancer?: bool = false storage_encryption?: bool = true backup?: bool = true monitoring?: bool = false }\\n} schema FirewallRule { direction: \\"ingress\\" | \\"egress\\" protocol: \\"tcp\\" | \\"udp\\" | \\"icmp\\" port?: str source?: str destination?: str action: \\"allow\\" | \\"deny\\"\\n} schema ServerConfig { hostname: str plan: str zone: str os: str = \\"ubuntu-22.04\\" ssh_keys: [str] = [] tags?: {str: str} = {} firewall_rules?: [FirewallRule] = [] storage?: { size?: int type?: str encrypted?: bool = true } network?: { public_ip?: bool = true private_network?: str bandwidth?: int }\\n} Nushell Implementation Create nulib/mod.nu: use std log # Provider name and version\\nexport const PROVIDER_NAME = \\"my-provider\\"\\nexport const PROVIDER_VERSION = \\"1.0.0\\" # Import sub-modules\\nuse create.nu *\\nuse delete.nu *\\nuse utils.nu * # Provider interface implementation\\nexport def \\"provider-info\\" [] -> record { { name: $PROVIDER_NAME, version: $PROVIDER_VERSION, type: \\"provider\\", interface: \\"API\\", supported_operations: [ \\"create-server\\", \\"delete-server\\", \\"list-servers\\", \\"get-server-info\\", \\"start-server\\", \\"stop-server\\" ], required_auth: [\\"api_key\\", \\"api_secret\\"], supported_os: [\\"ubuntu-22.04\\", \\"debian-11\\", \\"centos-8\\"], regions: (get-zones).name }\\n} export def \\"validate-config\\" [config: record] -> record { mut errors = [] mut warnings = [] # Validate authentication if ($config | get -o \\"auth.api_key\\" | is-empty) { $errors = ($errors | append \\"Missing API key\\") } if ($config | get -o \\"auth.api_secret\\" | is-empty) { $errors = ($errors | append \\"Missing API secret\\") } # Validate API configuration let api_url = ($config | get -o \\"api.base_url\\") if ($api_url | is-empty) { $errors = ($errors | append \\"Missing API base URL\\") } else { try { http get $\\"($api_url)/health\\" | ignore } catch { $warnings = ($warnings | append \\"API endpoint not reachable\\") } } { valid: ($errors | is-empty), errors: $errors, warnings: $warnings }\\n} export def \\"test-connection\\" [config: record] -> record { try { let api_url = ($config | get \\"api.base_url\\") let response = (http get $\\"($api_url)/account\\" --headers { Authorization: $\\"Bearer ($config | get \'auth.api_key\')\\" }) { success: true, account_info: $response, message: \\"Connection successful\\" } } catch {|e| { success: false, error: ($e | get msg), message: \\"Connection failed\\" } }\\n} Create nulib/create.nu: use std log\\nuse utils.nu * export def \\"create-server\\" [ config: record # Server configuration --check # Check mode only --wait # Wait for completion\\n] -> record { log info $\\"Creating server: ($config.hostname)\\" if $check { return { action: \\"create-server\\", hostname: $config.hostname, check_mode: true, would_create: true, estimated_time: \\"2-5 minutes\\" } } # Validate configuration let validation = (validate-server-config $config) if not $validation.valid { error make { msg: $\\"Invalid server configuration: ($validation.errors | str join \', \')\\" } } # Prepare API request let api_config = (get-api-config) let request_body = { hostname: $config.hostname, plan: $config.plan, zone: $config.zone, os: $config.os, ssh_keys: $config.ssh_keys, tags: $config.tags, firewall_rules: $config.firewall_rules } try { let response = (http post $\\"($api_config.base_url)/servers\\" --headers { Authorization: $\\"Bearer ($api_config.auth.api_key)\\" Content-Type: \\"application/json\\" } $request_body) let server_id = ($response | get id) log info $\\"Server creation initiated: ($server_id)\\" if $wait { let final_status = (wait-for-server-ready $server_id) { success: true, server_id: $server_id, hostname: $config.hostname, status: $final_status, ip_addresses: (get-server-ips $server_id), ssh_access: (get-ssh-access $server_id) } } else { { success: true, server_id: $server_id, hostname: $config.hostname, status: \\"creating\\", message: \\"Server creation in progress\\" } } } catch {|e| error make { msg: $\\"Server creation failed: ($e | get msg)\\" } }\\n} def validate-server-config [config: record] -> record { mut errors = [] # Required fields if ($config | get -o hostname | is-empty) { $errors = ($errors | append \\"Hostname is required\\") } if ($config | get -o plan | is-empty) { $errors = ($errors | append \\"Plan is required\\") } if ($config | get -o zone | is-empty) { $errors = ($errors | append \\"Zone is required\\") } # Validate plan exists let available_plans = (get-plans) if not ($config.plan in ($available_plans | get name)) { $errors = ($errors | append $\\"Invalid plan: ($config.plan)\\") } # Validate zone exists let available_zones = (get-zones) if not ($config.zone in ($available_zones | get name)) { $errors = ($errors | append $\\"Invalid zone: ($config.zone)\\") } { valid: ($errors | is-empty), errors: $errors }\\n} def wait-for-server-ready [server_id: string] -> string { mut attempts = 0 let max_attempts = 60 # 10 minutes while $attempts < $max_attempts { let server_info = (get-server-info $server_id) let status = ($server_info | get status) match $status { \\"running\\" => { return \\"running\\" }, \\"error\\" => { error make { msg: \\"Server creation failed\\" } }, _ => { log info $\\"Server status: ($status), waiting...\\" sleep 10sec $attempts = $attempts + 1 } } } error make { msg: \\"Server creation timeout\\" }\\n}","breadcrumbs":"Extensions » Provider Development Template","id":"1888","title":"Provider Development Template"},"1889":{"body":"Add provider metadata in metadata.toml: [extension]\\nname = \\"my-provider\\"\\ntype = \\"provider\\"\\nversion = \\"1.0.0\\"\\ndescription = \\"Custom cloud provider integration\\"\\nauthor = \\"Your Name \\"\\nlicense = \\"MIT\\" [compatibility]\\nprovisioning_version = \\">=2.0.0\\"\\nnushell_version = \\">=0.107.0\\"\\nnickel_version = \\">=1.15.0\\" [capabilities]\\nserver_management = true\\nload_balancer = false\\nstorage_encryption = true\\nbackup = true\\nmonitoring = false [authentication]\\nmethods = [\\"api_key\\", \\"certificate\\"]\\nrequired_fields = [\\"api_key\\", \\"api_secret\\"] [regions]\\ndefault = \\"us-east-1\\"\\navailable = [\\"us-east-1\\", \\"us-west-2\\", \\"eu-west-1\\"] [support]\\ndocumentation = \\"https://docs.example.com/provider\\"\\nissues = \\"https://github.com/example/provider/issues\\"","breadcrumbs":"Extensions » Provider Registration","id":"1889","title":"Provider Registration"},"189":{"body":"# Workspace shortcuts\\nprovisioning ws # workspace (same as \'provisioning workspace\')\\nprovisioning ws init\\nprovisioning ws create \\nprovisioning ws validate\\nprovisioning ws info\\nprovisioning ws list\\nprovisioning ws migrate\\nprovisioning ws switch # Switch active workspace\\nprovisioning ws active # Show active workspace # Template shortcuts\\nprovisioning tpl # template (same as \'provisioning template\')\\nprovisioning tmpl # template (alias)\\nprovisioning tpl list\\nprovisioning tpl types\\nprovisioning tpl show \\nprovisioning tpl apply \\nprovisioning tpl validate ","breadcrumbs":"Quick Start Cheatsheet » Workspace Shortcuts","id":"189","title":"Workspace Shortcuts"},"1890":{"body":"","breadcrumbs":"Extensions » Task Service Extension API","id":"1890","title":"Task Service Extension API"},"1891":{"body":"Task services must implement: Core Operations install(config: record) -> record uninstall(config: record) -> null configure(config: record) -> null status() -> record restart() -> null upgrade(version: string) -> record Version Management get-current-version() -> string get-available-versions() -> list check-updates() -> record","breadcrumbs":"Extensions » Task Service Interface","id":"1891","title":"Task Service Interface"},"1892":{"body":"Nickel Schema Create schemas/version.ncl: # Task service version configuration\\n{ taskserv_version = { name | String = \\"my-service\\", version | String = \\"1.0.0\\", # Version source configuration source | { type | String = \\"github\\", repository | String, release_pattern | String = \\"v{version}\\", }, # Installation configuration install | { method | String = \\"binary\\", binary_name | String, binary_path | String = \\"/usr/local/bin\\", config_path | String = \\"/etc/my-service\\", data_path | String = \\"/var/lib/my-service\\", }, # Dependencies dependencies | [ { name | String, version | String = \\">=1.0.0\\", } ], # Service configuration service | { type | String = \\"systemd\\", user | String = \\"my-service\\", group | String = \\"my-service\\", ports | [Number] = [8080, 9090], }, # Health check configuration health_check | { endpoint | String, interval | Number = 30, timeout | Number = 5, retries | Number = 3, }, }\\n} Nushell Implementation Create nulib/mod.nu: use std log\\nuse ../../../lib_provisioning * export const SERVICE_NAME = \\"my-service\\"\\nexport const SERVICE_VERSION = \\"1.0.0\\" export def \\"taskserv-info\\" [] -> record { { name: $SERVICE_NAME, version: $SERVICE_VERSION, type: \\"taskserv\\", category: \\"application\\", description: \\"Custom application service\\", dependencies: [\\"containerd\\"], ports: [8080, 9090], config_files: [\\"/etc/my-service/config.yaml\\"], data_directories: [\\"/var/lib/my-service\\"] }\\n} export def \\"install\\" [ config: record = {} --check # Check mode only --version: string # Specific version to install\\n] -> record { let install_version = if ($version | is-not-empty) { $version } else { (get-latest-version) } log info $\\"Installing ($SERVICE_NAME) version ($install_version)\\" if $check { return { action: \\"install\\", service: $SERVICE_NAME, version: $install_version, check_mode: true, would_install: true, requirements_met: (check-requirements) } } # Check system requirements let req_check = (check-requirements) if not $req_check.met { error make { msg: $\\"Requirements not met: ($req_check.missing | str join \', \')\\" } } # Download and install let binary_path = (download-binary $install_version) install-binary $binary_path create-user-and-directories generate-config $config install-systemd-service # Start service systemctl start $SERVICE_NAME systemctl enable $SERVICE_NAME # Verify installation let health = (check-health) if not $health.healthy { error make { msg: \\"Service failed health check after installation\\" } } { success: true, service: $SERVICE_NAME, version: $install_version, status: \\"running\\", health: $health }\\n} export def \\"uninstall\\" [ --force # Force removal even if running --keep-data # Keep data directories\\n] -> null { log info $\\"Uninstalling ($SERVICE_NAME)\\" # Stop and disable service try { systemctl stop $SERVICE_NAME systemctl disable $SERVICE_NAME } catch { log warning \\"Failed to stop systemd service\\" } # Remove binary try { rm -f $\\"/usr/local/bin/($SERVICE_NAME)\\" } catch { log warning \\"Failed to remove binary\\" } # Remove configuration try { rm -rf $\\"/etc/($SERVICE_NAME)\\" } catch { log warning \\"Failed to remove configuration\\" } # Remove data directories (unless keeping) if not $keep_data { try { rm -rf $\\"/var/lib/($SERVICE_NAME)\\" } catch { log warning \\"Failed to remove data directories\\" } } # Remove systemd service file try { rm -f $\\"/etc/systemd/system/($SERVICE_NAME).service\\" systemctl daemon-reload } catch { log warning \\"Failed to remove systemd service\\" } log info $\\"($SERVICE_NAME) uninstalled successfully\\"\\n} export def \\"status\\" [] -> record { let systemd_status = try { systemctl is-active $SERVICE_NAME | str trim } catch { \\"unknown\\" } let health = (check-health) let version = (get-current-version) { service: $SERVICE_NAME, version: $version, systemd_status: $systemd_status, health: $health, uptime: (get-service-uptime), memory_usage: (get-memory-usage), cpu_usage: (get-cpu-usage) }\\n} def check-requirements [] -> record { mut missing = [] mut met = true # Check for containerd if not (which containerd | is-not-empty) { $missing = ($missing | append \\"containerd\\") $met = false } # Check for systemctl if not (which systemctl | is-not-empty) { $missing = ($missing | append \\"systemctl\\") $met = false } { met: $met, missing: $missing }\\n} def check-health [] -> record { try { let response = (http get \\"http://localhost:9090/health\\") { healthy: true, status: ($response | get status), last_check: (date now) } } catch { { healthy: false, error: \\"Health endpoint not responding\\", last_check: (date now) } }\\n}","breadcrumbs":"Extensions » Task Service Development Template","id":"1892","title":"Task Service Development Template"},"1893":{"body":"","breadcrumbs":"Extensions » Cluster Extension API","id":"1893","title":"Cluster Extension API"},"1894":{"body":"Clusters orchestrate multiple components: Core Operations create(config: record) -> record delete(config: record) -> null status() -> record scale(replicas: int) -> record upgrade(version: string) -> record Component Management list-components() -> list component-status(name: string) -> record restart-component(name: string) -> null","breadcrumbs":"Extensions » Cluster Interface","id":"1894","title":"Cluster Interface"},"1895":{"body":"Nickel Configuration Create schemas/cluster.ncl: # Cluster configuration schema\\n{ ClusterConfig = { # Cluster metadata name | String, version | String = \\"1.0.0\\", description | String = \\"\\", # Components to deploy components | [Component], # Resource requirements resources | { min_nodes | Number = 1, cpu_per_node | String = \\"2\\", memory_per_node | String = \\"4Gi\\", storage_per_node | String = \\"20Gi\\", }, # Network configuration network | { cluster_cidr | String = \\"10.244.0.0/16\\", service_cidr | String = \\"10.96.0.0/12\\", dns_domain | String = \\"cluster.local\\", }, # Feature flags features | { monitoring | Bool = true, logging | Bool = true, ingress | Bool = false, storage | Bool = true, }, }, Component = { name | String, type | String | \\"taskserv\\" | \\"application\\" | \\"infrastructure\\", version | String = \\"\\", enabled | Bool = true, dependencies | [String] = [], config | {} = {}, resources | { cpu | String = \\"\\", memory | String = \\"\\", storage | String = \\"\\", replicas | Number = 1, } = {}, }, # Example cluster configuration buildkit_cluster = { name = \\"buildkit\\", version = \\"1.0.0\\", description = \\"Container build cluster with BuildKit and registry\\", components = [ { name = \\"containerd\\", type = \\"taskserv\\", version = \\"1.7.0\\", enabled = true, dependencies = [], }, { name = \\"buildkit\\", type = \\"taskserv\\", version = \\"0.12.0\\", enabled = true, dependencies = [\\"containerd\\"], config = { worker_count = 4, cache_size = \\"10Gi\\", registry_mirrors = [\\"registry:5000\\"], }, }, { name = \\"registry\\", type = \\"application\\", version = \\"2.8.0\\", enabled = true, dependencies = [], config = { storage_driver = \\"filesystem\\", storage_path = \\"/var/lib/registry\\", auth_enabled = false, }, resources = { cpu = \\"500m\\", memory = \\"1Gi\\", storage = \\"50Gi\\", replicas = 1, }, }, ], resources = { min_nodes = 1, cpu_per_node = \\"4\\", memory_per_node = \\"8Gi\\", storage_per_node = \\"100Gi\\", }, features = { monitoring = true, logging = true, ingress = false, storage = true, }, },\\n} Nushell Implementation Create nulib/mod.nu: use std log\\nuse ../../../lib_provisioning * export const CLUSTER_NAME = \\"my-cluster\\"\\nexport const CLUSTER_VERSION = \\"1.0.0\\" export def \\"cluster-info\\" [] -> record { { name: $CLUSTER_NAME, version: $CLUSTER_VERSION, type: \\"cluster\\", category: \\"build\\", description: \\"Custom application cluster\\", components: (get-cluster-components), required_resources: { min_nodes: 1, cpu_per_node: \\"2\\", memory_per_node: \\"4Gi\\", storage_per_node: \\"20Gi\\" } }\\n} export def \\"create\\" [ config: record = {} --check # Check mode only --wait # Wait for completion\\n] -> record { log info $\\"Creating cluster: ($CLUSTER_NAME)\\" if $check { return { action: \\"create-cluster\\", cluster: $CLUSTER_NAME, check_mode: true, would_create: true, components: (get-cluster-components), requirements_check: (check-cluster-requirements) } } # Validate cluster requirements let req_check = (check-cluster-requirements) if not $req_check.met { error make { msg: $\\"Cluster requirements not met: ($req_check.issues | str join \', \')\\" } } # Get component deployment order let components = (get-cluster-components) let deployment_order = (resolve-component-dependencies $components) mut deployment_status = [] # Deploy components in dependency order for component in $deployment_order { log info $\\"Deploying component: ($component.name)\\" try { let result = match $component.type { \\"taskserv\\" => { taskserv create $component.name --config $component.config --wait }, \\"application\\" => { deploy-application $component }, _ => { error make { msg: $\\"Unknown component type: ($component.type)\\" } } } $deployment_status = ($deployment_status | append { component: $component.name, status: \\"deployed\\", result: $result }) } catch {|e| log error $\\"Failed to deploy ($component.name): ($e.msg)\\" $deployment_status = ($deployment_status | append { component: $component.name, status: \\"failed\\", error: $e.msg }) # Rollback on failure rollback-cluster-deployment $deployment_status error make { msg: $\\"Cluster deployment failed at component: ($component.name)\\" } } } # Configure cluster networking and integrations configure-cluster-networking $config setup-cluster-monitoring $config # Wait for all components to be ready if $wait { wait-for-cluster-ready } { success: true, cluster: $CLUSTER_NAME, components: $deployment_status, endpoints: (get-cluster-endpoints), status: \\"running\\" }\\n} export def \\"delete\\" [ config: record = {} --force # Force deletion\\n] -> null { log info $\\"Deleting cluster: ($CLUSTER_NAME)\\" let components = (get-cluster-components) let deletion_order = ($components | reverse) # Delete in reverse order for component in $deletion_order { log info $\\"Removing component: ($component.name)\\" try { match $component.type { \\"taskserv\\" => { taskserv delete $component.name --force=$force }, \\"application\\" => { remove-application $component --force=$force }, _ => { log warning $\\"Unknown component type: ($component.type)\\" } } } catch {|e| log error $\\"Failed to remove ($component.name): ($e.msg)\\" if not $force { error make { msg: $\\"Component removal failed: ($component.name)\\" } } } } # Clean up cluster-level resources cleanup-cluster-networking cleanup-cluster-monitoring cleanup-cluster-storage log info $\\"Cluster ($CLUSTER_NAME) deleted successfully\\"\\n} def get-cluster-components [] -> list { [ { name: \\"containerd\\", type: \\"taskserv\\", version: \\"1.7.0\\", dependencies: [] }, { name: \\"my-service\\", type: \\"taskserv\\", version: \\"1.0.0\\", dependencies: [\\"containerd\\"] }, { name: \\"registry\\", type: \\"application\\", version: \\"2.8.0\\", dependencies: [] } ]\\n} def resolve-component-dependencies [components: list] -> list { # Topological sort of components based on dependencies mut sorted = [] mut remaining = $components while ($remaining | length) > 0 { let no_deps = ($remaining | where {|comp| ($comp.dependencies | all {|dep| $dep in ($sorted | get name) }) }) if ($no_deps | length) == 0 { error make { msg: \\"Circular dependency detected in cluster components\\" } } $sorted = ($sorted | append $no_deps) $remaining = ($remaining | where {|comp| not ($comp.name in ($no_deps | get name)) }) } $sorted\\n}","breadcrumbs":"Extensions » Cluster Development Template","id":"1895","title":"Cluster Development Template"},"1896":{"body":"","breadcrumbs":"Extensions » Extension Registration and Discovery","id":"1896","title":"Extension Registration and Discovery"},"1897":{"body":"Extensions are registered in the system through: Directory Structure : Placed in appropriate directories (providers/, taskservs/, cluster/) Metadata Files : metadata.toml with extension information Schema Files : schemas/ directory with Nickel schema files","breadcrumbs":"Extensions » Extension Registry","id":"1897","title":"Extension Registry"},"1898":{"body":"register-extension(path: string, type: string) -> record Registers a new extension with the system. Parameters: path: Path to extension directory type: Extension type (provider, taskserv, cluster) unregister-extension(name: string, type: string) -> null Removes extension from the registry. list-registered-extensions(type?: string) -> list Lists all registered extensions, optionally filtered by type.","breadcrumbs":"Extensions » Registration API","id":"1898","title":"Registration API"},"1899":{"body":"Validation Rules Structure Validation : Required files and directories exist Schema Validation : Nickel schemas are valid Interface Validation : Required functions are implemented Dependency Validation : Dependencies are available Version Validation : Version constraints are met validate-extension(path: string, type: string) -> record Validates extension structure and implementation.","breadcrumbs":"Extensions » Extension Validation","id":"1899","title":"Extension Validation"},"19":{"body":"Start with Installation Guide Read Getting Started Follow From Scratch Guide Reference Quickstart Cheatsheet","breadcrumbs":"Home » For New Users","id":"19","title":"For New Users"},"190":{"body":"# Environment shortcuts\\nprovisioning e # env (same as \'provisioning env\')\\nprovisioning val # validate (same as \'provisioning validate\')\\nprovisioning st # setup (same as \'provisioning setup\')\\nprovisioning config # setup (alias) # Show shortcuts\\nprovisioning show settings\\nprovisioning show servers\\nprovisioning show config # Initialization\\nprovisioning init # All environment\\nprovisioning allenv # Show all config and environment","breadcrumbs":"Quick Start Cheatsheet » Configuration Shortcuts","id":"190","title":"Configuration Shortcuts"},"1900":{"body":"","breadcrumbs":"Extensions » Testing Extensions","id":"1900","title":"Testing Extensions"},"1901":{"body":"Extensions should include comprehensive tests: Unit Tests Create tests/unit_tests.nu: use std testing export def test_provider_config_validation [] { let config = { auth: { api_key: \\"test-key\\", api_secret: \\"test-secret\\" }, api: { base_url: \\"https://api.test.com\\" } } let result = (validate-config $config) assert ($result.valid == true) assert ($result.errors | is-empty)\\n} export def test_server_creation_check_mode [] { let config = { hostname: \\"test-server\\", plan: \\"1xCPU-1 GB\\", zone: \\"test-zone\\" } let result = (create-server $config --check) assert ($result.check_mode == true) assert ($result.would_create == true)\\n} Integration Tests Create tests/integration_tests.nu: use std testing export def test_full_server_lifecycle [] { # Test server creation let create_config = { hostname: \\"integration-test\\", plan: \\"1xCPU-1 GB\\", zone: \\"test-zone\\" } let server = (create-server $create_config --wait) assert ($server.success == true) let server_id = $server.server_id # Test server info retrieval let info = (get-server-info $server_id) assert ($info.hostname == \\"integration-test\\") assert ($info.status == \\"running\\") # Test server deletion delete-server $server_id # Verify deletion let final_info = try { get-server-info $server_id } catch { null } assert ($final_info == null)\\n}","breadcrumbs":"Extensions » Test Framework","id":"1901","title":"Test Framework"},"1902":{"body":"# Run unit tests\\nnu tests/unit_tests.nu # Run integration tests\\nnu tests/integration_tests.nu # Run all tests\\nnu tests/run_all_tests.nu","breadcrumbs":"Extensions » Running Tests","id":"1902","title":"Running Tests"},"1903":{"body":"","breadcrumbs":"Extensions » Documentation Requirements","id":"1903","title":"Documentation Requirements"},"1904":{"body":"Each extension must include: README.md : Overview, installation, and usage API.md : Detailed API documentation EXAMPLES.md : Usage examples and tutorials CHANGELOG.md : Version history and changes","breadcrumbs":"Extensions » Extension Documentation","id":"1904","title":"Extension Documentation"},"1905":{"body":"# Extension Name API ## Overview\\nBrief description of the extension and its purpose. ## Installation\\nSteps to install and configure the extension. ## Configuration\\nConfiguration schema and options. ## API Reference\\nDetailed API documentation with examples. ## Examples\\nCommon usage patterns and examples. ## Troubleshooting\\nCommon issues and solutions.","breadcrumbs":"Extensions » API Documentation Template","id":"1905","title":"API Documentation Template"},"1906":{"body":"","breadcrumbs":"Extensions » Best Practices","id":"1906","title":"Best Practices"},"1907":{"body":"Follow Naming Conventions : Use consistent naming for functions and variables Error Handling : Implement comprehensive error handling and recovery Logging : Use structured logging for debugging and monitoring Configuration Validation : Validate all inputs and configurations Documentation : Document all public APIs and configurations Testing : Include comprehensive unit and integration tests Versioning : Follow semantic versioning principles Security : Implement secure credential handling and API calls","breadcrumbs":"Extensions » Development Guidelines","id":"1907","title":"Development Guidelines"},"1908":{"body":"Caching : Cache expensive operations and API calls Parallel Processing : Use parallel execution where possible Resource Management : Clean up resources properly Batch Operations : Batch API calls when possible Health Monitoring : Implement health checks and monitoring","breadcrumbs":"Extensions » Performance Considerations","id":"1908","title":"Performance Considerations"},"1909":{"body":"Credential Management : Store credentials securely Input Validation : Validate and sanitize all inputs Access Control : Implement proper access controls Audit Logging : Log all security-relevant operations Encryption : Encrypt sensitive data in transit and at rest This extension development API provides a comprehensive framework for building robust, scalable, and maintainable extensions for provisioning.","breadcrumbs":"Extensions » Security Best Practices","id":"1909","title":"Security Best Practices"},"191":{"body":"# List shortcuts\\nprovisioning l # list (same as \'provisioning list\')\\nprovisioning ls # list (alias)\\nprovisioning list # list (full) # SSH operations\\nprovisioning ssh # SOPS operations\\nprovisioning sops # Edit encrypted file # Cache management\\nprovisioning cache clear\\nprovisioning cache stats # Provider operations\\nprovisioning providers list\\nprovisioning providers info # Nushell session\\nprovisioning nu # Start Nushell with provisioning library loaded # QR code generation\\nprovisioning qr # Nushell information\\nprovisioning nuinfo # Plugin management\\nprovisioning plugin # plugin (same as \'provisioning plugin\')\\nprovisioning plugins # plugin (alias)\\nprovisioning plugin list\\nprovisioning plugin test nu_plugin_kms","breadcrumbs":"Quick Start Cheatsheet » Utility Shortcuts","id":"191","title":"Utility Shortcuts"},"1910":{"body":"This document provides comprehensive documentation for the official SDKs and client libraries available for provisioning.","breadcrumbs":"SDKs » SDK Documentation","id":"1910","title":"SDK Documentation"},"1911":{"body":"Provisioning provides SDKs in multiple languages to facilitate integration:","breadcrumbs":"SDKs » Available SDKs","id":"1911","title":"Available SDKs"},"1912":{"body":"Python SDK (provisioning-client) - Full-featured Python client JavaScript/TypeScript SDK (@provisioning/client) - Node.js and browser support Go SDK (go-provisioning-client) - Go client library Rust SDK (provisioning-rs) - Native Rust integration","breadcrumbs":"SDKs » Official SDKs","id":"1912","title":"Official SDKs"},"1913":{"body":"Java SDK - Community-maintained Java client C# SDK - .NET client library PHP SDK - PHP client library","breadcrumbs":"SDKs » Community SDKs","id":"1913","title":"Community SDKs"},"1914":{"body":"","breadcrumbs":"SDKs » Python SDK","id":"1914","title":"Python SDK"},"1915":{"body":"# Install from PyPI\\npip install provisioning-client # Or install development version\\npip install git+https://github.com/provisioning-systems/python-client.git","breadcrumbs":"SDKs » Installation","id":"1915","title":"Installation"},"1916":{"body":"from provisioning_client import ProvisioningClient\\nimport asyncio async def main(): # Initialize client client = ProvisioningClient( base_url=\\"http://localhost:9090\\", auth_url=\\"http://localhost:8081\\", username=\\"admin\\", password=\\"your-password\\" ) try: # Authenticate token = await client.authenticate() print(f\\"Authenticated with token: {token[:20]}...\\") # Create a server workflow task_id = client.create_server_workflow( infra=\\"production\\", settings=\\"prod-settings.ncl\\", wait=False ) print(f\\"Server workflow created: {task_id}\\") # Wait for completion task = client.wait_for_task_completion(task_id, timeout=600) print(f\\"Task completed with status: {task.status}\\") if task.status == \\"Completed\\": print(f\\"Output: {task.output}\\") elif task.status == \\"Failed\\": print(f\\"Error: {task.error}\\") except Exception as e: print(f\\"Error: {e}\\") if __name__ == \\"__main__\\": asyncio.run(main())","breadcrumbs":"SDKs » Quick Start","id":"1916","title":"Quick Start"},"1917":{"body":"WebSocket Integration async def monitor_workflows(): client = ProvisioningClient() await client.authenticate() # Set up event handlers async def on_task_update(event): print(f\\"Task {event[\'data\'][\'task_id\']} status: {event[\'data\'][\'status\']}\\") async def on_progress_update(event): print(f\\"Progress: {event[\'data\'][\'progress\']}% - {event[\'data\'][\'current_step\']}\\") client.on_event(\'TaskStatusChanged\', on_task_update) client.on_event(\'WorkflowProgressUpdate\', on_progress_update) # Connect to WebSocket await client.connect_websocket([\'TaskStatusChanged\', \'WorkflowProgressUpdate\']) # Keep connection alive await asyncio.sleep(3600) # Monitor for 1 hour Batch Operations async def execute_batch_deployment(): client = ProvisioningClient() await client.authenticate() batch_config = { \\"name\\": \\"production_deployment\\", \\"version\\": \\"1.0.0\\", \\"storage_backend\\": \\"surrealdb\\", \\"parallel_limit\\": 5, \\"rollback_enabled\\": True, \\"operations\\": [ { \\"id\\": \\"servers\\", \\"type\\": \\"server_batch\\", \\"provider\\": \\"upcloud\\", \\"dependencies\\": [], \\"config\\": { \\"server_configs\\": [ {\\"name\\": \\"web-01\\", \\"plan\\": \\"2xCPU-4 GB\\", \\"zone\\": \\"de-fra1\\"}, {\\"name\\": \\"web-02\\", \\"plan\\": \\"2xCPU-4 GB\\", \\"zone\\": \\"de-fra1\\"} ] } }, { \\"id\\": \\"kubernetes\\", \\"type\\": \\"taskserv_batch\\", \\"provider\\": \\"upcloud\\", \\"dependencies\\": [\\"servers\\"], \\"config\\": { \\"taskservs\\": [\\"kubernetes\\", \\"cilium\\", \\"containerd\\"] } } ] } # Execute batch operation batch_result = await client.execute_batch_operation(batch_config) print(f\\"Batch operation started: {batch_result[\'batch_id\']}\\") # Monitor progress while True: status = await client.get_batch_status(batch_result[\'batch_id\']) print(f\\"Batch status: {status[\'status\']} - {status.get(\'progress\', 0)}%\\") if status[\'status\'] in [\'Completed\', \'Failed\', \'Cancelled\']: break await asyncio.sleep(10) print(f\\"Batch operation finished: {status[\'status\']}\\") Error Handling with Retries from provisioning_client.exceptions import ( ProvisioningAPIError, AuthenticationError, ValidationError, RateLimitError\\n)\\nfrom tenacity import retry, stop_after_attempt, wait_exponential class RobustProvisioningClient(ProvisioningClient): @retry( stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10) ) async def create_server_workflow_with_retry(self, **kwargs): try: return await self.create_server_workflow(**kwargs) except RateLimitError as e: print(f\\"Rate limited, retrying in {e.retry_after} seconds...\\") await asyncio.sleep(e.retry_after) raise except AuthenticationError: print(\\"Authentication failed, re-authenticating...\\") await self.authenticate() raise except ValidationError as e: print(f\\"Validation error: {e}\\") # Don\'t retry validation errors raise except ProvisioningAPIError as e: print(f\\"API error: {e}\\") raise # Usage\\nasync def robust_workflow(): client = RobustProvisioningClient() try: task_id = await client.create_server_workflow_with_retry( infra=\\"production\\", settings=\\"config.ncl\\" ) print(f\\"Workflow created successfully: {task_id}\\") except Exception as e: print(f\\"Failed after retries: {e}\\")","breadcrumbs":"SDKs » Advanced Usage","id":"1917","title":"Advanced Usage"},"1918":{"body":"ProvisioningClient Class class ProvisioningClient: def __init__(self, base_url: str = \\"http://localhost:9090\\", auth_url: str = \\"http://localhost:8081\\", username: str = None, password: str = None, token: str = None): \\"\\"\\"Initialize the provisioning client\\"\\"\\" async def authenticate(self) -> str: \\"\\"\\"Authenticate and get JWT token\\"\\"\\" def create_server_workflow(self, infra: str, settings: str = \\"config.ncl\\", check_mode: bool = False, wait: bool = False) -> str: \\"\\"\\"Create a server provisioning workflow\\"\\"\\" def create_taskserv_workflow(self, operation: str, taskserv: str, infra: str, settings: str = \\"config.ncl\\", check_mode: bool = False, wait: bool = False) -> str: \\"\\"\\"Create a task service workflow\\"\\"\\" def get_task_status(self, task_id: str) -> WorkflowTask: \\"\\"\\"Get the status of a specific task\\"\\"\\" def wait_for_task_completion(self, task_id: str, timeout: int = 300, poll_interval: int = 5) -> WorkflowTask: \\"\\"\\"Wait for a task to complete\\"\\"\\" async def connect_websocket(self, event_types: List[str] = None): \\"\\"\\"Connect to WebSocket for real-time updates\\"\\"\\" def on_event(self, event_type: str, handler: Callable): \\"\\"\\"Register an event handler\\"\\"\\"","breadcrumbs":"SDKs » API Reference","id":"1918","title":"API Reference"},"1919":{"body":"","breadcrumbs":"SDKs » JavaScript/TypeScript SDK","id":"1919","title":"JavaScript/TypeScript SDK"},"192":{"body":"# Generate shortcuts\\nprovisioning g # generate (same as \'provisioning generate\')\\nprovisioning gen # generate (alias)\\nprovisioning g server\\nprovisioning g taskserv \\nprovisioning g cluster \\nprovisioning g infra --new \\nprovisioning g new ","breadcrumbs":"Quick Start Cheatsheet » Generation Shortcuts","id":"192","title":"Generation Shortcuts"},"1920":{"body":"# npm\\nnpm install @provisioning/client # yarn\\nyarn add @provisioning/client # pnpm\\npnpm add @provisioning/client","breadcrumbs":"SDKs » Installation","id":"1920","title":"Installation"},"1921":{"body":"import { ProvisioningClient } from \'@provisioning/client\'; async function main() { const client = new ProvisioningClient({ baseUrl: \'http://localhost:9090\', authUrl: \'http://localhost:8081\', username: \'admin\', password: \'your-password\' }); try { // Authenticate await client.authenticate(); console.log(\'Authentication successful\'); // Create server workflow const taskId = await client.createServerWorkflow({ infra: \'production\', settings: \'prod-settings.ncl\' }); console.log(`Server workflow created: ${taskId}`); // Wait for completion const task = await client.waitForTaskCompletion(taskId); console.log(`Task completed with status: ${task.status}`); } catch (error) { console.error(\'Error:\', error.message); }\\n} main();","breadcrumbs":"SDKs » Quick Start","id":"1921","title":"Quick Start"},"1922":{"body":"import React, { useState, useEffect } from \'react\';\\nimport { ProvisioningClient } from \'@provisioning/client\'; interface Task { id: string; name: string; status: string; progress?: number;\\n} const WorkflowDashboard: React.FC = () => { const [client] = useState(() => new ProvisioningClient({ baseUrl: process.env.REACT_APP_API_URL, username: process.env.REACT_APP_USERNAME, password: process.env.REACT_APP_PASSWORD })); const [tasks, setTasks] = useState([]); const [connected, setConnected] = useState(false); useEffect(() => { const initClient = async () => { try { await client.authenticate(); // Set up WebSocket event handlers client.on(\'TaskStatusChanged\', (event: any) => { setTasks(prev => prev.map(task => task.id === event.data.task_id ? { ...task, status: event.data.status, progress: event.data.progress } : task )); }); client.on(\'websocketConnected\', () => { setConnected(true); }); client.on(\'websocketDisconnected\', () => { setConnected(false); }); // Connect WebSocket await client.connectWebSocket([\'TaskStatusChanged\', \'WorkflowProgressUpdate\']); // Load initial tasks const initialTasks = await client.listTasks(); setTasks(initialTasks); } catch (error) { console.error(\'Failed to initialize client:\', error); } }; initClient(); return () => { client.disconnectWebSocket(); }; }, [client]); const createServerWorkflow = async () => { try { const taskId = await client.createServerWorkflow({ infra: \'production\', settings: \'config.ncl\' }); // Add to tasks list setTasks(prev => [...prev, { id: taskId, name: \'Server Creation\', status: \'Pending\' }]); } catch (error) { console.error(\'Failed to create workflow:\', error); } }; return (

Workflow Dashboard

{connected ? \'🟢 Connected\' : \'🔴 Disconnected\'}
{tasks.map(task => (

{task.name}

{task.status} {task.progress && (
{task.progress}%
)}
))}
);\\n}; export default WorkflowDashboard;","breadcrumbs":"SDKs » React Integration","id":"1922","title":"React Integration"},"1923":{"body":"#!/usr/bin/env node import { Command } from \'commander\';\\nimport { ProvisioningClient } from \'@provisioning/client\';\\nimport chalk from \'chalk\';\\nimport ora from \'ora\'; const program = new Command(); program .name(\'provisioning-cli\') .description(\'CLI tool for provisioning\') .version(\'1.0.0\'); program .command(\'create-server\') .description(\'Create a server workflow\') .requiredOption(\'-i, --infra \', \'Infrastructure target\') .option(\'-s, --settings \', \'Settings file\', \'config.ncl\') .option(\'-c, --check\', \'Check mode only\') .option(\'-w, --wait\', \'Wait for completion\') .action(async (options) => { const client = new ProvisioningClient({ baseUrl: process.env.PROVISIONING_API_URL, username: process.env.PROVISIONING_USERNAME, password: process.env.PROVISIONING_PASSWORD }); const spinner = ora(\'Authenticating...\').start(); try { await client.authenticate(); spinner.text = \'Creating server workflow...\'; const taskId = await client.createServerWorkflow({ infra: options.infra, settings: options.settings, check_mode: options.check, wait: false }); spinner.succeed(`Server workflow created: ${chalk.green(taskId)}`); if (options.wait) { spinner.start(\'Waiting for completion...\'); // Set up progress updates client.on(\'TaskStatusChanged\', (event: any) => { if (event.data.task_id === taskId) { spinner.text = `Status: ${event.data.status}`; } }); client.on(\'WorkflowProgressUpdate\', (event: any) => { if (event.data.workflow_id === taskId) { spinner.text = `${event.data.progress}% - ${event.data.current_step}`; } }); await client.connectWebSocket([\'TaskStatusChanged\', \'WorkflowProgressUpdate\']); const task = await client.waitForTaskCompletion(taskId); if (task.status === \'Completed\') { spinner.succeed(chalk.green(\'Workflow completed successfully!\')); if (task.output) { console.log(chalk.gray(\'Output:\'), task.output); } } else { spinner.fail(chalk.red(`Workflow failed: ${task.error}`)); process.exit(1); } } } catch (error) { spinner.fail(chalk.red(`Error: ${error.message}`)); process.exit(1); } }); program .command(\'list-tasks\') .description(\'List all tasks\') .option(\'-s, --status \', \'Filter by status\') .action(async (options) => { const client = new ProvisioningClient(); try { await client.authenticate(); const tasks = await client.listTasks(options.status); console.log(chalk.bold(\'Tasks:\')); tasks.forEach(task => { const statusColor = task.status === \'Completed\' ? \'green\' : task.status === \'Failed\' ? \'red\' : task.status === \'Running\' ? \'yellow\' : \'gray\'; console.log(` ${task.id} - ${task.name} [${chalk[statusColor](task.status)}]`); }); } catch (error) { console.error(chalk.red(`Error: ${error.message}`)); process.exit(1); } }); program .command(\'monitor\') .description(\'Monitor workflows in real-time\') .action(async () => { const client = new ProvisioningClient(); try { await client.authenticate(); console.log(chalk.bold(\'🔍 Monitoring workflows...\')); console.log(chalk.gray(\'Press Ctrl+C to stop\')); client.on(\'TaskStatusChanged\', (event: any) => { const timestamp = new Date().toLocaleTimeString(); const statusColor = event.data.status === \'Completed\' ? \'green\' : event.data.status === \'Failed\' ? \'red\' : event.data.status === \'Running\' ? \'yellow\' : \'gray\'; console.log(`[${chalk.gray(timestamp)}] Task ${event.data.task_id} → ${chalk[statusColor](event.data.status)}`); }); client.on(\'WorkflowProgressUpdate\', (event: any) => { const timestamp = new Date().toLocaleTimeString(); console.log(`[${chalk.gray(timestamp)}] ${event.data.workflow_id}: ${event.data.progress}% - ${event.data.current_step}`); }); await client.connectWebSocket([\'TaskStatusChanged\', \'WorkflowProgressUpdate\']); // Keep the process running process.on(\'SIGINT\', () => { console.log(chalk.yellow(\'\\\\nStopping monitor...\')); client.disconnectWebSocket(); process.exit(0); }); // Keep alive setInterval(() => {}, 1000); } catch (error) { console.error(chalk.red(`Error: ${error.message}`)); process.exit(1); } }); program.parse();","breadcrumbs":"SDKs » Node.js CLI Tool","id":"1923","title":"Node.js CLI Tool"},"1924":{"body":"interface ProvisioningClientOptions { baseUrl?: string; authUrl?: string; username?: string; password?: string; token?: string;\\n} class ProvisioningClient extends EventEmitter { constructor(options: ProvisioningClientOptions); async authenticate(): Promise; async createServerWorkflow(config: { infra: string; settings?: string; check_mode?: boolean; wait?: boolean; }): Promise; async createTaskservWorkflow(config: { operation: string; taskserv: string; infra: string; settings?: string; check_mode?: boolean; wait?: boolean; }): Promise; async getTaskStatus(taskId: string): Promise; async listTasks(statusFilter?: string): Promise; async waitForTaskCompletion( taskId: string, timeout?: number, pollInterval?: number ): Promise; async connectWebSocket(eventTypes?: string[]): Promise; disconnectWebSocket(): void; async executeBatchOperation(batchConfig: BatchConfig): Promise; async getBatchStatus(batchId: string): Promise;\\n}","breadcrumbs":"SDKs » API Reference","id":"1924","title":"API Reference"},"1925":{"body":"","breadcrumbs":"SDKs » Go SDK","id":"1925","title":"Go SDK"},"1926":{"body":"go get github.com/provisioning-systems/go-client","breadcrumbs":"SDKs » Installation","id":"1926","title":"Installation"},"1927":{"body":"package main import ( \\"context\\" \\"fmt\\" \\"log\\" \\"time\\" \\"github.com/provisioning-systems/go-client\\"\\n) func main() { // Initialize client client, err := provisioning.NewClient(&provisioning.Config{ BaseURL: \\"http://localhost:9090\\", AuthURL: \\"http://localhost:8081\\", Username: \\"admin\\", Password: \\"your-password\\", }) if err != nil { log.Fatalf(\\"Failed to create client: %v\\", err) } ctx := context.Background() // Authenticate token, err := client.Authenticate(ctx) if err != nil { log.Fatalf(\\"Authentication failed: %v\\", err) } fmt.Printf(\\"Authenticated with token: %.20s...\\\\n\\", token) // Create server workflow taskID, err := client.CreateServerWorkflow(ctx, &provisioning.CreateServerRequest{ Infra: \\"production\\", Settings: \\"prod-settings.ncl\\", Wait: false, }) if err != nil { log.Fatalf(\\"Failed to create workflow: %v\\", err) } fmt.Printf(\\"Server workflow created: %s\\\\n\\", taskID) // Wait for completion task, err := client.WaitForTaskCompletion(ctx, taskID, 10*time.Minute) if err != nil { log.Fatalf(\\"Failed to wait for completion: %v\\", err) } fmt.Printf(\\"Task completed with status: %s\\\\n\\", task.Status) if task.Status == \\"Completed\\" { fmt.Printf(\\"Output: %s\\\\n\\", task.Output) } else if task.Status == \\"Failed\\" { fmt.Printf(\\"Error: %s\\\\n\\", task.Error) }\\n}","breadcrumbs":"SDKs » Quick Start","id":"1927","title":"Quick Start"},"1928":{"body":"package main import ( \\"context\\" \\"fmt\\" \\"log\\" \\"os\\" \\"os/signal\\" \\"github.com/provisioning-systems/go-client\\"\\n) func main() { client, err := provisioning.NewClient(&provisioning.Config{ BaseURL: \\"http://localhost:9090\\", Username: \\"admin\\", Password: \\"password\\", }) if err != nil { log.Fatalf(\\"Failed to create client: %v\\", err) } ctx := context.Background() // Authenticate _, err = client.Authenticate(ctx) if err != nil { log.Fatalf(\\"Authentication failed: %v\\", err) } // Set up WebSocket connection ws, err := client.ConnectWebSocket(ctx, []string{ \\"TaskStatusChanged\\", \\"WorkflowProgressUpdate\\", }) if err != nil { log.Fatalf(\\"Failed to connect WebSocket: %v\\", err) } defer ws.Close() // Handle events go func() { for event := range ws.Events() { switch event.Type { case \\"TaskStatusChanged\\": fmt.Printf(\\"Task %s status changed to: %s\\\\n\\", event.Data[\\"task_id\\"], event.Data[\\"status\\"]) case \\"WorkflowProgressUpdate\\": fmt.Printf(\\"Workflow progress: %v%% - %s\\\\n\\", event.Data[\\"progress\\"], event.Data[\\"current_step\\"]) } } }() // Wait for interrupt c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) <-c fmt.Println(\\"Shutting down...\\")\\n}","breadcrumbs":"SDKs » WebSocket Integration","id":"1928","title":"WebSocket Integration"},"1929":{"body":"package main import ( \\"context\\" \\"fmt\\" \\"time\\" \\"github.com/provisioning-systems/go-client\\" \\"github.com/cenkalti/backoff/v4\\"\\n) type ResilientClient struct { *provisioning.Client\\n} func NewResilientClient(config *provisioning.Config) (*ResilientClient, error) { client, err := provisioning.NewClient(config) if err != nil { return nil, err } return &ResilientClient{Client: client}, nil\\n} func (c *ResilientClient) CreateServerWorkflowWithRetry( ctx context.Context, req *provisioning.CreateServerRequest,\\n) (string, error) { var taskID string operation := func() error { var err error taskID, err = c.CreateServerWorkflow(ctx, req) // Don\'t retry validation errors if provisioning.IsValidationError(err) { return backoff.Permanent(err) } return err } exponentialBackoff := backoff.NewExponentialBackOff() exponentialBackoff.MaxElapsedTime = 5 * time.Minute err := backoff.Retry(operation, exponentialBackoff) if err != nil { return \\"\\", fmt.Errorf(\\"failed after retries: %w\\", err) } return taskID, nil\\n} func main() { client, err := NewResilientClient(&provisioning.Config{ BaseURL: \\"http://localhost:9090\\", Username: \\"admin\\", Password: \\"password\\", }) if err != nil { log.Fatalf(\\"Failed to create client: %v\\", err) } ctx := context.Background() // Authenticate with retry _, err = client.Authenticate(ctx) if err != nil { log.Fatalf(\\"Authentication failed: %v\\", err) } // Create workflow with retry taskID, err := client.CreateServerWorkflowWithRetry(ctx, &provisioning.CreateServerRequest{ Infra: \\"production\\", Settings: \\"config.ncl\\", }) if err != nil { log.Fatalf(\\"Failed to create workflow: %v\\", err) } fmt.Printf(\\"Workflow created successfully: %s\\\\n\\", taskID)\\n}","breadcrumbs":"SDKs » HTTP Client with Retry Logic","id":"1929","title":"HTTP Client with Retry Logic"},"193":{"body":"# Common actions\\nprovisioning c # create (same as \'provisioning create\')\\nprovisioning d # delete (same as \'provisioning delete\')\\nprovisioning u # update (same as \'provisioning update\') # Pricing shortcuts\\nprovisioning price # Show server pricing\\nprovisioning cost # price (alias)\\nprovisioning costs # price (alias) # Create server + taskservs (combo command)\\nprovisioning cst # create-server-task\\nprovisioning csts # create-server-task (alias)","breadcrumbs":"Quick Start Cheatsheet » Action Shortcuts","id":"193","title":"Action Shortcuts"},"1930":{"body":"","breadcrumbs":"SDKs » Rust SDK","id":"1930","title":"Rust SDK"},"1931":{"body":"Add to your Cargo.toml: [dependencies]\\nprovisioning-rs = \\"2.0.0\\"\\ntokio = { version = \\"1.0\\", features = [\\"full\\"] }","breadcrumbs":"SDKs » Installation","id":"1931","title":"Installation"},"1932":{"body":"use provisioning_rs::{ProvisioningClient, Config, CreateServerRequest};\\nuse tokio; #[tokio::main]\\nasync fn main() -> Result<(), Box> { // Initialize client let config = Config { base_url: \\"http://localhost:9090\\".to_string(), auth_url: Some(\\"http://localhost:8081\\".to_string()), username: Some(\\"admin\\".to_string()), password: Some(\\"your-password\\".to_string()), token: None, }; let mut client = ProvisioningClient::new(config); // Authenticate let token = client.authenticate().await?; println!(\\"Authenticated with token: {}...\\", &token[..20]); // Create server workflow let request = CreateServerRequest { infra: \\"production\\".to_string(), settings: Some(\\"prod-settings.ncl\\".to_string()), check_mode: false, wait: false, }; let task_id = client.create_server_workflow(request).await?; println!(\\"Server workflow created: {}\\", task_id); // Wait for completion let task = client.wait_for_task_completion(&task_id, std::time::Duration::from_secs(600)).await?; println!(\\"Task completed with status: {:?}\\", task.status); match task.status { TaskStatus::Completed => { if let Some(output) = task.output { println!(\\"Output: {}\\", output); } }, TaskStatus::Failed => { if let Some(error) = task.error { println!(\\"Error: {}\\", error); } }, _ => {} } Ok(())\\n}","breadcrumbs":"SDKs » Quick Start","id":"1932","title":"Quick Start"},"1933":{"body":"use provisioning_rs::{ProvisioningClient, Config, WebSocketEvent};\\nuse futures_util::StreamExt;\\nuse tokio; #[tokio::main]\\nasync fn main() -> Result<(), Box> { let config = Config { base_url: \\"http://localhost:9090\\".to_string(), username: Some(\\"admin\\".to_string()), password: Some(\\"password\\".to_string()), ..Default::default() }; let mut client = ProvisioningClient::new(config); // Authenticate client.authenticate().await?; // Connect WebSocket let mut ws = client.connect_websocket(vec![ \\"TaskStatusChanged\\".to_string(), \\"WorkflowProgressUpdate\\".to_string(), ]).await?; // Handle events tokio::spawn(async move { while let Some(event) = ws.next().await { match event { Ok(WebSocketEvent::TaskStatusChanged { data }) => { println!(\\"Task {} status changed to: {}\\", data.task_id, data.status); }, Ok(WebSocketEvent::WorkflowProgressUpdate { data }) => { println!(\\"Workflow progress: {}% - {}\\", data.progress, data.current_step); }, Ok(WebSocketEvent::SystemHealthUpdate { data }) => { println!(\\"System health: {}\\", data.overall_status); }, Err(e) => { eprintln!(\\"WebSocket error: {}\\", e); break; } } } }); // Keep the main thread alive tokio::signal::ctrl_c().await?; println!(\\"Shutting down...\\"); Ok(())\\n}","breadcrumbs":"SDKs » WebSocket Integration","id":"1933","title":"WebSocket Integration"},"1934":{"body":"use provisioning_rs::{BatchOperationRequest, BatchOperation}; #[tokio::main]\\nasync fn main() -> Result<(), Box> { let mut client = ProvisioningClient::new(config); client.authenticate().await?; // Define batch operation let batch_request = BatchOperationRequest { name: \\"production_deployment\\".to_string(), version: \\"1.0.0\\".to_string(), storage_backend: \\"surrealdb\\".to_string(), parallel_limit: 5, rollback_enabled: true, operations: vec![ BatchOperation { id: \\"servers\\".to_string(), operation_type: \\"server_batch\\".to_string(), provider: \\"upcloud\\".to_string(), dependencies: vec![], config: serde_json::json!({ \\"server_configs\\": [ {\\"name\\": \\"web-01\\", \\"plan\\": \\"2xCPU-4 GB\\", \\"zone\\": \\"de-fra1\\"}, {\\"name\\": \\"web-02\\", \\"plan\\": \\"2xCPU-4 GB\\", \\"zone\\": \\"de-fra1\\"} ] }), }, BatchOperation { id: \\"kubernetes\\".to_string(), operation_type: \\"taskserv_batch\\".to_string(), provider: \\"upcloud\\".to_string(), dependencies: vec![\\"servers\\".to_string()], config: serde_json::json!({ \\"taskservs\\": [\\"kubernetes\\", \\"cilium\\", \\"containerd\\"] }), }, ], }; // Execute batch operation let batch_result = client.execute_batch_operation(batch_request).await?; println!(\\"Batch operation started: {}\\", batch_result.batch_id); // Monitor progress loop { let status = client.get_batch_status(&batch_result.batch_id).await?; println!(\\"Batch status: {} - {}%\\", status.status, status.progress.unwrap_or(0.0)); match status.status.as_str() { \\"Completed\\" | \\"Failed\\" | \\"Cancelled\\" => break, _ => tokio::time::sleep(std::time::Duration::from_secs(10)).await, } } Ok(())\\n}","breadcrumbs":"SDKs » Batch Operations","id":"1934","title":"Batch Operations"},"1935":{"body":"","breadcrumbs":"SDKs » Best Practices","id":"1935","title":"Best Practices"},"1936":{"body":"Token Management : Store tokens securely and implement automatic refresh Environment Variables : Use environment variables for credentials HTTPS : Always use HTTPS in production environments Token Expiration : Handle token expiration gracefully","breadcrumbs":"SDKs » Authentication and Security","id":"1936","title":"Authentication and Security"},"1937":{"body":"Specific Exceptions : Handle specific error types appropriately Retry Logic : Implement exponential backoff for transient failures Circuit Breakers : Use circuit breakers for resilient integrations Logging : Log errors with appropriate context","breadcrumbs":"SDKs » Error Handling","id":"1937","title":"Error Handling"},"1938":{"body":"Connection Pooling : Reuse HTTP connections Async Operations : Use asynchronous operations where possible Batch Operations : Group related operations for efficiency Caching : Cache frequently accessed data appropriately","breadcrumbs":"SDKs » Performance Optimization","id":"1938","title":"Performance Optimization"},"1939":{"body":"Reconnection : Implement automatic reconnection with backoff Event Filtering : Subscribe only to needed event types Error Handling : Handle WebSocket errors gracefully Resource Cleanup : Properly close WebSocket connections","breadcrumbs":"SDKs » WebSocket Connections","id":"1939","title":"WebSocket Connections"},"194":{"body":"","breadcrumbs":"Quick Start Cheatsheet » Infrastructure Commands","id":"194","title":"Infrastructure Commands"},"1940":{"body":"Unit Tests : Test SDK functionality with mocked responses Integration Tests : Test against real API endpoints Error Scenarios : Test error handling paths Load Testing : Validate performance under load This comprehensive SDK documentation provides developers with everything needed to integrate with provisioning using their preferred programming language, complete with examples, best practices, and detailed API references.","breadcrumbs":"SDKs » Testing","id":"1940","title":"Testing"},"1941":{"body":"This document provides comprehensive examples and patterns for integrating with provisioning APIs, including client libraries, SDKs, error handling strategies, and performance optimization.","breadcrumbs":"Integration Examples » Integration Examples","id":"1941","title":"Integration Examples"},"1942":{"body":"Provisioning offers multiple integration points: REST APIs for workflow management WebSocket APIs for real-time monitoring Configuration APIs for system setup Extension APIs for custom providers and services","breadcrumbs":"Integration Examples » Overview","id":"1942","title":"Overview"},"1943":{"body":"","breadcrumbs":"Integration Examples » Complete Integration Examples","id":"1943","title":"Complete Integration Examples"},"1944":{"body":"Full-Featured Python Client import asyncio\\nimport json\\nimport logging\\nimport time\\nimport requests\\nimport websockets\\nfrom typing import Dict, List, Optional, Callable\\nfrom dataclasses import dataclass\\nfrom enum import Enum class TaskStatus(Enum): PENDING = \\"Pending\\" RUNNING = \\"Running\\" COMPLETED = \\"Completed\\" FAILED = \\"Failed\\" CANCELLED = \\"Cancelled\\" @dataclass\\nclass WorkflowTask: id: str name: str status: TaskStatus created_at: str started_at: Optional[str] = None completed_at: Optional[str] = None output: Optional[str] = None error: Optional[str] = None progress: Optional[float] = None class ProvisioningAPIError(Exception): \\"\\"\\"Base exception for provisioning API errors\\"\\"\\" pass class AuthenticationError(ProvisioningAPIError): \\"\\"\\"Authentication failed\\"\\"\\" pass class ValidationError(ProvisioningAPIError): \\"\\"\\"Request validation failed\\"\\"\\" pass class ProvisioningClient: \\"\\"\\" Complete Python client for provisioning Features: - REST API integration - WebSocket support for real-time updates - Automatic token refresh - Retry logic with exponential backoff - Comprehensive error handling \\"\\"\\" def __init__(self, base_url: str = \\"http://localhost:9090\\", auth_url: str = \\"http://localhost:8081\\", username: str = None, password: str = None, token: str = None): self.base_url = base_url self.auth_url = auth_url self.username = username self.password = password self.token = token self.session = requests.Session() self.websocket = None self.event_handlers = {} # Setup logging self.logger = logging.getLogger(__name__) # Configure session with retries from requests.adapters import HTTPAdapter from urllib3.util.retry import Retry retry_strategy = Retry( total=3, status_forcelist=[429, 500, 502, 503, 504], method_whitelist=[\\"HEAD\\", \\"GET\\", \\"OPTIONS\\"], backoff_factor=1 ) adapter = HTTPAdapter(max_retries=retry_strategy) self.session.mount(\\"http://\\", adapter) self.session.mount(\\"https://\\", adapter) async def authenticate(self) -> str: \\"\\"\\"Authenticate and get JWT token\\"\\"\\" if self.token: return self.token if not self.username or not self.password: raise AuthenticationError(\\"Username and password required for authentication\\") auth_data = { \\"username\\": self.username, \\"password\\": self.password } try: response = requests.post(f\\"{self.auth_url}/auth/login\\", json=auth_data) response.raise_for_status() result = response.json() if not result.get(\'success\'): raise AuthenticationError(result.get(\'error\', \'Authentication failed\')) self.token = result[\'data\'][\'token\'] self.session.headers.update({ \'Authorization\': f\'Bearer {self.token}\' }) self.logger.info(\\"Authentication successful\\") return self.token except requests.RequestException as e: raise AuthenticationError(f\\"Authentication request failed: {e}\\") def _make_request(self, method: str, endpoint: str, **kwargs) -> Dict: \\"\\"\\"Make authenticated HTTP request with error handling\\"\\"\\" if not self.token: raise AuthenticationError(\\"Not authenticated. Call authenticate() first.\\") url = f\\"{self.base_url}{endpoint}\\" try: response = self.session.request(method, url, **kwargs) response.raise_for_status() result = response.json() if not result.get(\'success\'): error_msg = result.get(\'error\', \'Request failed\') if response.status_code == 400: raise ValidationError(error_msg) else: raise ProvisioningAPIError(error_msg) return result[\'data\'] except requests.RequestException as e: self.logger.error(f\\"Request failed: {method} {url} - {e}\\") raise ProvisioningAPIError(f\\"Request failed: {e}\\") # Workflow Management Methods def create_server_workflow(self, infra: str, settings: str = \\"config.ncl\\", check_mode: bool = False, wait: bool = False) -> str: \\"\\"\\"Create a server provisioning workflow\\"\\"\\" data = { \\"infra\\": infra, \\"settings\\": settings, \\"check_mode\\": check_mode, \\"wait\\": wait } task_id = self._make_request(\\"POST\\", \\"/workflows/servers/create\\", json=data) self.logger.info(f\\"Server workflow created: {task_id}\\") return task_id def create_taskserv_workflow(self, operation: str, taskserv: str, infra: str, settings: str = \\"config.ncl\\", check_mode: bool = False, wait: bool = False) -> str: \\"\\"\\"Create a task service workflow\\"\\"\\" data = { \\"operation\\": operation, \\"taskserv\\": taskserv, \\"infra\\": infra, \\"settings\\": settings, \\"check_mode\\": check_mode, \\"wait\\": wait } task_id = self._make_request(\\"POST\\", \\"/workflows/taskserv/create\\", json=data) self.logger.info(f\\"Taskserv workflow created: {task_id}\\") return task_id def create_cluster_workflow(self, operation: str, cluster_type: str, infra: str, settings: str = \\"config.ncl\\", check_mode: bool = False, wait: bool = False) -> str: \\"\\"\\"Create a cluster workflow\\"\\"\\" data = { \\"operation\\": operation, \\"cluster_type\\": cluster_type, \\"infra\\": infra, \\"settings\\": settings, \\"check_mode\\": check_mode, \\"wait\\": wait } task_id = self._make_request(\\"POST\\", \\"/workflows/cluster/create\\", json=data) self.logger.info(f\\"Cluster workflow created: {task_id}\\") return task_id def get_task_status(self, task_id: str) -> WorkflowTask: \\"\\"\\"Get the status of a specific task\\"\\"\\" data = self._make_request(\\"GET\\", f\\"/tasks/{task_id}\\") return WorkflowTask( id=data[\'id\'], name=data[\'name\'], status=TaskStatus(data[\'status\']), created_at=data[\'created_at\'], started_at=data.get(\'started_at\'), completed_at=data.get(\'completed_at\'), output=data.get(\'output\'), error=data.get(\'error\'), progress=data.get(\'progress\') ) def list_tasks(self, status_filter: Optional[str] = None) -> List[WorkflowTask]: \\"\\"\\"List all tasks, optionally filtered by status\\"\\"\\" params = {} if status_filter: params[\'status\'] = status_filter data = self._make_request(\\"GET\\", \\"/tasks\\", params=params) return [ WorkflowTask( id=task[\'id\'], name=task[\'name\'], status=TaskStatus(task[\'status\']), created_at=task[\'created_at\'], started_at=task.get(\'started_at\'), completed_at=task.get(\'completed_at\'), output=task.get(\'output\'), error=task.get(\'error\') ) for task in data ] def wait_for_task_completion(self, task_id: str, timeout: int = 300, poll_interval: int = 5) -> WorkflowTask: \\"\\"\\"Wait for a task to complete\\"\\"\\" start_time = time.time() while time.time() - start_time < timeout: task = self.get_task_status(task_id) if task.status in [TaskStatus.COMPLETED, TaskStatus.FAILED, TaskStatus.CANCELLED]: self.logger.info(f\\"Task {task_id} finished with status: {task.status}\\") return task self.logger.debug(f\\"Task {task_id} status: {task.status}\\") time.sleep(poll_interval) raise TimeoutError(f\\"Task {task_id} did not complete within {timeout} seconds\\") # Batch Operations def execute_batch_operation(self, batch_config: Dict) -> Dict: \\"\\"\\"Execute a batch operation\\"\\"\\" return self._make_request(\\"POST\\", \\"/batch/execute\\", json=batch_config) def get_batch_status(self, batch_id: str) -> Dict: \\"\\"\\"Get batch operation status\\"\\"\\" return self._make_request(\\"GET\\", f\\"/batch/operations/{batch_id}\\") def cancel_batch_operation(self, batch_id: str) -> str: \\"\\"\\"Cancel a running batch operation\\"\\"\\" return self._make_request(\\"POST\\", f\\"/batch/operations/{batch_id}/cancel\\") # System Health and Monitoring def get_system_health(self) -> Dict: \\"\\"\\"Get system health status\\"\\"\\" return self._make_request(\\"GET\\", \\"/state/system/health\\") def get_system_metrics(self) -> Dict: \\"\\"\\"Get system metrics\\"\\"\\" return self._make_request(\\"GET\\", \\"/state/system/metrics\\") # WebSocket Integration async def connect_websocket(self, event_types: List[str] = None): \\"\\"\\"Connect to WebSocket for real-time updates\\"\\"\\" if not self.token: await self.authenticate() ws_url = f\\"ws://localhost:9090/ws?token={self.token}\\" if event_types: ws_url += f\\"&events={\',\'.join(event_types)}\\" try: self.websocket = await websockets.connect(ws_url) self.logger.info(\\"WebSocket connected\\") # Start listening for messages asyncio.create_task(self._websocket_listener()) except Exception as e: self.logger.error(f\\"WebSocket connection failed: {e}\\") raise async def _websocket_listener(self): \\"\\"\\"Listen for WebSocket messages\\"\\"\\" try: async for message in self.websocket: try: data = json.loads(message) await self._handle_websocket_message(data) except json.JSONDecodeError: self.logger.error(f\\"Invalid JSON received: {message}\\") except Exception as e: self.logger.error(f\\"WebSocket listener error: {e}\\") async def _handle_websocket_message(self, data: Dict): \\"\\"\\"Handle incoming WebSocket messages\\"\\"\\" event_type = data.get(\'event_type\') if event_type and event_type in self.event_handlers: for handler in self.event_handlers[event_type]: try: await handler(data) except Exception as e: self.logger.error(f\\"Error in event handler for {event_type}: {e}\\") def on_event(self, event_type: str, handler: Callable): \\"\\"\\"Register an event handler\\"\\"\\" if event_type not in self.event_handlers: self.event_handlers[event_type] = [] self.event_handlers[event_type].append(handler) async def disconnect_websocket(self): \\"\\"\\"Disconnect from WebSocket\\"\\"\\" if self.websocket: await self.websocket.close() self.websocket = None self.logger.info(\\"WebSocket disconnected\\") # Usage Example\\nasync def main(): # Initialize client client = ProvisioningClient( username=\\"admin\\", password=\\"password\\" ) try: # Authenticate await client.authenticate() # Create a server workflow task_id = client.create_server_workflow( infra=\\"production\\", settings=\\"prod-settings.ncl\\", wait=False ) print(f\\"Server workflow created: {task_id}\\") # Set up WebSocket event handlers async def on_task_update(event): print(f\\"Task update: {event[\'data\'][\'task_id\']} -> {event[\'data\'][\'status\']}\\") async def on_system_health(event): print(f\\"System health: {event[\'data\'][\'overall_status\']}\\") client.on_event(\'TaskStatusChanged\', on_task_update) client.on_event(\'SystemHealthUpdate\', on_system_health) # Connect to WebSocket await client.connect_websocket([\'TaskStatusChanged\', \'SystemHealthUpdate\']) # Wait for task completion final_task = client.wait_for_task_completion(task_id, timeout=600) print(f\\"Task completed with status: {final_task.status}\\") if final_task.status == TaskStatus.COMPLETED: print(f\\"Output: {final_task.output}\\") elif final_task.status == TaskStatus.FAILED: print(f\\"Error: {final_task.error}\\") except ProvisioningAPIError as e: print(f\\"API Error: {e}\\") except Exception as e: print(f\\"Unexpected error: {e}\\") finally: await client.disconnect_websocket() if __name__ == \\"__main__\\": asyncio.run(main())","breadcrumbs":"Integration Examples » Python Integration","id":"1944","title":"Python Integration"},"1945":{"body":"Complete JavaScript/TypeScript Client import axios, { AxiosInstance, AxiosResponse } from \'axios\';\\nimport WebSocket from \'ws\';\\nimport { EventEmitter } from \'events\'; interface Task { id: string; name: string; status: \'Pending\' | \'Running\' | \'Completed\' | \'Failed\' | \'Cancelled\'; created_at: string; started_at?: string; completed_at?: string; output?: string; error?: string; progress?: number;\\n} interface BatchConfig { name: string; version: string; storage_backend: string; parallel_limit: number; rollback_enabled: boolean; operations: Array<{ id: string; type: string; provider: string; dependencies: string[]; [key: string]: any; }>;\\n} interface WebSocketEvent { event_type: string; timestamp: string; data: any; metadata: Record;\\n} class ProvisioningClient extends EventEmitter { private httpClient: AxiosInstance; private authClient: AxiosInstance; private websocket?: WebSocket; private token?: string; private reconnectAttempts = 0; private maxReconnectAttempts = 10; private reconnectInterval = 5000; constructor( private baseUrl = \'http://localhost:9090\', private authUrl = \'http://localhost:8081\', private username?: string, private password?: string, token?: string ) { super(); this.token = token; // Setup HTTP clients this.httpClient = axios.create({ baseURL: baseUrl, timeout: 30000, }); this.authClient = axios.create({ baseURL: authUrl, timeout: 10000, }); // Setup request interceptors this.setupInterceptors(); } private setupInterceptors(): void { // Request interceptor to add auth token this.httpClient.interceptors.request.use((config) => { if (this.token) { config.headers.Authorization = `Bearer ${this.token}`; } return config; }); // Response interceptor for error handling this.httpClient.interceptors.response.use( (response) => response, async (error) => { if (error.response?.status === 401 && this.username && this.password) { // Token expired, try to refresh try { await this.authenticate(); // Retry the original request const originalRequest = error.config; originalRequest.headers.Authorization = `Bearer ${this.token}`; return this.httpClient.request(originalRequest); } catch (authError) { this.emit(\'authError\', authError); throw error; } } throw error; } ); } async authenticate(): Promise { if (this.token) { return this.token; } if (!this.username || !this.password) { throw new Error(\'Username and password required for authentication\'); } try { const response = await this.authClient.post(\'/auth/login\', { username: this.username, password: this.password, }); const result = response.data; if (!result.success) { throw new Error(result.error || \'Authentication failed\'); } this.token = result.data.token; console.log(\'Authentication successful\'); this.emit(\'authenticated\', this.token); return this.token; } catch (error) { console.error(\'Authentication failed:\', error); throw new Error(`Authentication failed: ${error.message}`); } } private async makeRequest(method: string, endpoint: string, data?: any): Promise { try { const response: AxiosResponse = await this.httpClient.request({ method, url: endpoint, data, }); const result = response.data; if (!result.success) { throw new Error(result.error || \'Request failed\'); } return result.data; } catch (error) { console.error(`Request failed: ${method} ${endpoint}`, error); throw error; } } // Workflow Management Methods async createServerWorkflow(config: { infra: string; settings?: string; check_mode?: boolean; wait?: boolean; }): Promise { const data = { infra: config.infra, settings: config.settings || \'config.ncl\', check_mode: config.check_mode || false, wait: config.wait || false, }; const taskId = await this.makeRequest(\'POST\', \'/workflows/servers/create\', data); console.log(`Server workflow created: ${taskId}`); this.emit(\'workflowCreated\', { type: \'server\', taskId }); return taskId; } async createTaskservWorkflow(config: { operation: string; taskserv: string; infra: string; settings?: string; check_mode?: boolean; wait?: boolean; }): Promise { const data = { operation: config.operation, taskserv: config.taskserv, infra: config.infra, settings: config.settings || \'config.ncl\', check_mode: config.check_mode || false, wait: config.wait || false, }; const taskId = await this.makeRequest(\'POST\', \'/workflows/taskserv/create\', data); console.log(`Taskserv workflow created: ${taskId}`); this.emit(\'workflowCreated\', { type: \'taskserv\', taskId }); return taskId; } async createClusterWorkflow(config: { operation: string; cluster_type: string; infra: string; settings?: string; check_mode?: boolean; wait?: boolean; }): Promise { const data = { operation: config.operation, cluster_type: config.cluster_type, infra: config.infra, settings: config.settings || \'config.ncl\', check_mode: config.check_mode || false, wait: config.wait || false, }; const taskId = await this.makeRequest(\'POST\', \'/workflows/cluster/create\', data); console.log(`Cluster workflow created: ${taskId}`); this.emit(\'workflowCreated\', { type: \'cluster\', taskId }); return taskId; } async getTaskStatus(taskId: string): Promise { return this.makeRequest(\'GET\', `/tasks/${taskId}`); } async listTasks(statusFilter?: string): Promise { const params = statusFilter ? `?status=${statusFilter}` : \'\'; return this.makeRequest(\'GET\', `/tasks${params}`); } async waitForTaskCompletion( taskId: string, timeout = 300000, // 5 minutes pollInterval = 5000 // 5 seconds ): Promise { return new Promise((resolve, reject) => { const startTime = Date.now(); const poll = async () => { try { const task = await this.getTaskStatus(taskId); if ([\'Completed\', \'Failed\', \'Cancelled\'].includes(task.status)) { console.log(`Task ${taskId} finished with status: ${task.status}`); resolve(task); return; } if (Date.now() - startTime > timeout) { reject(new Error(`Task ${taskId} did not complete within ${timeout}ms`)); return; } console.log(`Task ${taskId} status: ${task.status}`); this.emit(\'taskProgress\', task); setTimeout(poll, pollInterval); } catch (error) { reject(error); } }; poll(); }); } // Batch Operations async executeBatchOperation(batchConfig: BatchConfig): Promise { const result = await this.makeRequest(\'POST\', \'/batch/execute\', batchConfig); console.log(`Batch operation started: ${result.batch_id}`); this.emit(\'batchStarted\', result); return result; } async getBatchStatus(batchId: string): Promise { return this.makeRequest(\'GET\', `/batch/operations/${batchId}`); } async cancelBatchOperation(batchId: string): Promise { return this.makeRequest(\'POST\', `/batch/operations/${batchId}/cancel`); } // System Monitoring async getSystemHealth(): Promise { return this.makeRequest(\'GET\', \'/state/system/health\'); } async getSystemMetrics(): Promise { return this.makeRequest(\'GET\', \'/state/system/metrics\'); } // WebSocket Integration async connectWebSocket(eventTypes?: string[]): Promise { if (!this.token) { await this.authenticate(); } let wsUrl = `ws://localhost:9090/ws?token=${this.token}`; if (eventTypes && eventTypes.length > 0) { wsUrl += `&events=${eventTypes.join(\',\')}`; } return new Promise((resolve, reject) => { this.websocket = new WebSocket(wsUrl); this.websocket.on(\'open\', () => { console.log(\'WebSocket connected\'); this.reconnectAttempts = 0; this.emit(\'websocketConnected\'); resolve(); }); this.websocket.on(\'message\', (data: WebSocket.Data) => { try { const event: WebSocketEvent = JSON.parse(data.toString()); this.handleWebSocketMessage(event); } catch (error) { console.error(\'Failed to parse WebSocket message:\', error); } }); this.websocket.on(\'close\', (code: number, reason: string) => { console.log(`WebSocket disconnected: ${code} - ${reason}`); this.emit(\'websocketDisconnected\', { code, reason }); if (this.reconnectAttempts < this.maxReconnectAttempts) { setTimeout(() => { this.reconnectAttempts++; console.log(`Reconnecting... (${this.reconnectAttempts}/${this.maxReconnectAttempts})`); this.connectWebSocket(eventTypes); }, this.reconnectInterval); } }); this.websocket.on(\'error\', (error: Error) => { console.error(\'WebSocket error:\', error); this.emit(\'websocketError\', error); reject(error); }); }); } private handleWebSocketMessage(event: WebSocketEvent): void { console.log(`WebSocket event: ${event.event_type}`); // Emit specific event this.emit(event.event_type, event); // Emit general event this.emit(\'websocketMessage\', event); // Handle specific event types switch (event.event_type) { case \'TaskStatusChanged\': this.emit(\'taskStatusChanged\', event.data); break; case \'WorkflowProgressUpdate\': this.emit(\'workflowProgress\', event.data); break; case \'SystemHealthUpdate\': this.emit(\'systemHealthUpdate\', event.data); break; case \'BatchOperationUpdate\': this.emit(\'batchUpdate\', event.data); break; } } disconnectWebSocket(): void { if (this.websocket) { this.websocket.close(); this.websocket = undefined; console.log(\'WebSocket disconnected\'); } } // Utility Methods async healthCheck(): Promise { try { const response = await this.httpClient.get(\'/health\'); return response.data.success; } catch (error) { return false; } }\\n} // Usage Example\\nasync function main() { const client = new ProvisioningClient( \'http://localhost:9090\', \'http://localhost:8081\', \'admin\', \'password\' ); try { // Authenticate await client.authenticate(); // Set up event listeners client.on(\'taskStatusChanged\', (task) => { console.log(`Task ${task.task_id} status changed to: ${task.status}`); }); client.on(\'workflowProgress\', (progress) => { console.log(`Workflow progress: ${progress.progress}% - ${progress.current_step}`); }); client.on(\'systemHealthUpdate\', (health) => { console.log(`System health: ${health.overall_status}`); }); // Connect WebSocket await client.connectWebSocket([\'TaskStatusChanged\', \'WorkflowProgressUpdate\', \'SystemHealthUpdate\']); // Create workflows const serverTaskId = await client.createServerWorkflow({ infra: \'production\', settings: \'prod-settings.ncl\', }); const taskservTaskId = await client.createTaskservWorkflow({ operation: \'create\', taskserv: \'kubernetes\', infra: \'production\', }); // Wait for completion const [serverTask, taskservTask] = await Promise.all([ client.waitForTaskCompletion(serverTaskId), client.waitForTaskCompletion(taskservTaskId), ]); console.log(\'All workflows completed\'); console.log(`Server task: ${serverTask.status}`); console.log(`Taskserv task: ${taskservTask.status}`); // Create batch operation const batchConfig: BatchConfig = { name: \'test_deployment\', version: \'1.0.0\', storage_backend: \'filesystem\', parallel_limit: 3, rollback_enabled: true, operations: [ { id: \'servers\', type: \'server_batch\', provider: \'upcloud\', dependencies: [], server_configs: [ { name: \'web-01\', plan: \'1xCPU-2 GB\', zone: \'de-fra1\' }, { name: \'web-02\', plan: \'1xCPU-2 GB\', zone: \'de-fra1\' }, ], }, { id: \'taskservs\', type: \'taskserv_batch\', provider: \'upcloud\', dependencies: [\'servers\'], taskservs: [\'kubernetes\', \'cilium\'], }, ], }; const batchResult = await client.executeBatchOperation(batchConfig); console.log(`Batch operation started: ${batchResult.batch_id}`); // Monitor batch operation const monitorBatch = setInterval(async () => { try { const batchStatus = await client.getBatchStatus(batchResult.batch_id); console.log(`Batch status: ${batchStatus.status} - ${batchStatus.progress}%`); if ([\'Completed\', \'Failed\', \'Cancelled\'].includes(batchStatus.status)) { clearInterval(monitorBatch); console.log(`Batch operation finished: ${batchStatus.status}`); } } catch (error) { console.error(\'Error checking batch status:\', error); clearInterval(monitorBatch); } }, 10000); } catch (error) { console.error(\'Integration example failed:\', error); } finally { client.disconnectWebSocket(); }\\n} // Run example\\nif (require.main === module) { main().catch(console.error);\\n} export { ProvisioningClient, Task, BatchConfig };","breadcrumbs":"Integration Examples » Node.js/JavaScript Integration","id":"1945","title":"Node.js/JavaScript Integration"},"1946":{"body":"","breadcrumbs":"Integration Examples » Error Handling Strategies","id":"1946","title":"Error Handling Strategies"},"1947":{"body":"class ProvisioningErrorHandler: \\"\\"\\"Centralized error handling for provisioning operations\\"\\"\\" def __init__(self, client: ProvisioningClient): self.client = client self.retry_strategies = { \'network_error\': self._exponential_backoff, \'rate_limit\': self._rate_limit_backoff, \'server_error\': self._server_error_strategy, \'auth_error\': self._auth_error_strategy, } async def execute_with_retry(self, operation: Callable, *args, **kwargs): \\"\\"\\"Execute operation with intelligent retry logic\\"\\"\\" max_attempts = 3 attempt = 0 while attempt < max_attempts: try: return await operation(*args, **kwargs) except Exception as e: attempt += 1 error_type = self._classify_error(e) if attempt >= max_attempts: self._log_final_failure(operation.__name__, e, attempt) raise retry_strategy = self.retry_strategies.get(error_type, self._default_retry) wait_time = retry_strategy(attempt, e) self._log_retry_attempt(operation.__name__, e, attempt, wait_time) await asyncio.sleep(wait_time) def _classify_error(self, error: Exception) -> str: \\"\\"\\"Classify error type for appropriate retry strategy\\"\\"\\" if isinstance(error, requests.ConnectionError): return \'network_error\' elif isinstance(error, requests.HTTPError): if error.response.status_code == 429: return \'rate_limit\' elif 500 <= error.response.status_code < 600: return \'server_error\' elif error.response.status_code == 401: return \'auth_error\' return \'unknown\' def _exponential_backoff(self, attempt: int, error: Exception) -> float: \\"\\"\\"Exponential backoff for network errors\\"\\"\\" return min(2 ** attempt + random.uniform(0, 1), 60) def _rate_limit_backoff(self, attempt: int, error: Exception) -> float: \\"\\"\\"Handle rate limiting with appropriate backoff\\"\\"\\" retry_after = getattr(error.response, \'headers\', {}).get(\'Retry-After\') if retry_after: return float(retry_after) return 60 # Default to 60 seconds def _server_error_strategy(self, attempt: int, error: Exception) -> float: \\"\\"\\"Handle server errors\\"\\"\\" return min(10 * attempt, 60) def _auth_error_strategy(self, attempt: int, error: Exception) -> float: \\"\\"\\"Handle authentication errors\\"\\"\\" # Re-authenticate before retry asyncio.create_task(self.client.authenticate()) return 5 def _default_retry(self, attempt: int, error: Exception) -> float: \\"\\"\\"Default retry strategy\\"\\"\\" return min(5 * attempt, 30) # Usage example\\nasync def robust_workflow_execution(): client = ProvisioningClient() handler = ProvisioningErrorHandler(client) try: # Execute with automatic retry task_id = await handler.execute_with_retry( client.create_server_workflow, infra=\\"production\\", settings=\\"config.ncl\\" ) # Wait for completion with retry task = await handler.execute_with_retry( client.wait_for_task_completion, task_id, timeout=600 ) return task except Exception as e: # Log detailed error information logger.error(f\\"Workflow execution failed after all retries: {e}\\") # Implement fallback strategy return await fallback_workflow_strategy()","breadcrumbs":"Integration Examples » Comprehensive Error Handling","id":"1947","title":"Comprehensive Error Handling"},"1948":{"body":"class CircuitBreaker { private failures = 0; private nextAttempt = Date.now(); private state: \'CLOSED\' | \'OPEN\' | \'HALF_OPEN\' = \'CLOSED\'; constructor( private threshold = 5, private timeout = 60000, // 1 minute private monitoringPeriod = 10000 // 10 seconds ) {} async execute(operation: () => Promise): Promise { if (this.state === \'OPEN\') { if (Date.now() < this.nextAttempt) { throw new Error(\'Circuit breaker is OPEN\'); } this.state = \'HALF_OPEN\'; } try { const result = await operation(); this.onSuccess(); return result; } catch (error) { this.onFailure(); throw error; } } private onSuccess(): void { this.failures = 0; this.state = \'CLOSED\'; } private onFailure(): void { this.failures++; if (this.failures >= this.threshold) { this.state = \'OPEN\'; this.nextAttempt = Date.now() + this.timeout; } } getState(): string { return this.state; } getFailures(): number { return this.failures; }\\n} // Usage with ProvisioningClient\\nclass ResilientProvisioningClient { private circuitBreaker = new CircuitBreaker(); constructor(private client: ProvisioningClient) {} async createServerWorkflow(config: any): Promise { return this.circuitBreaker.execute(async () => { return this.client.createServerWorkflow(config); }); } async getTaskStatus(taskId: string): Promise { return this.circuitBreaker.execute(async () => { return this.client.getTaskStatus(taskId); }); }\\n}","breadcrumbs":"Integration Examples » Circuit Breaker Pattern","id":"1948","title":"Circuit Breaker Pattern"},"1949":{"body":"","breadcrumbs":"Integration Examples » Performance Optimization","id":"1949","title":"Performance Optimization"},"195":{"body":"# Create servers\\nprovisioning server create\\nprovisioning server create --check # Dry-run mode\\nprovisioning server create --yes # Skip confirmation # Delete servers\\nprovisioning server delete\\nprovisioning server delete --check\\nprovisioning server delete --yes # List servers\\nprovisioning server list\\nprovisioning server list --infra wuji\\nprovisioning server list --out json # SSH into server\\nprovisioning server ssh web-01\\nprovisioning server ssh db-01 # Show pricing\\nprovisioning server price\\nprovisioning server price --provider upcloud","breadcrumbs":"Quick Start Cheatsheet » Server Management","id":"195","title":"Server Management"},"1950":{"body":"import asyncio\\nimport aiohttp\\nfrom cachetools import TTLCache\\nimport time class OptimizedProvisioningClient: \\"\\"\\"High-performance client with connection pooling and caching\\"\\"\\" def __init__(self, base_url: str, max_connections: int = 100): self.base_url = base_url self.session = None self.cache = TTLCache(maxsize=1000, ttl=300) # 5-minute cache self.max_connections = max_connections async def __aenter__(self): \\"\\"\\"Async context manager entry\\"\\"\\" connector = aiohttp.TCPConnector( limit=self.max_connections, limit_per_host=20, keepalive_timeout=30, enable_cleanup_closed=True ) timeout = aiohttp.ClientTimeout(total=30, connect=5) self.session = aiohttp.ClientSession( connector=connector, timeout=timeout, headers={\'User-Agent\': \'ProvisioningClient/2.0.0\'} ) return self async def __aexit__(self, exc_type, exc_val, exc_tb): \\"\\"\\"Async context manager exit\\"\\"\\" if self.session: await self.session.close() async def get_task_status_cached(self, task_id: str) -> dict: \\"\\"\\"Get task status with caching\\"\\"\\" cache_key = f\\"task_status:{task_id}\\" # Check cache first if cache_key in self.cache: return self.cache[cache_key] # Fetch from API result = await self._make_request(\'GET\', f\'/tasks/{task_id}\') # Cache completed tasks for longer if result.get(\'status\') in [\'Completed\', \'Failed\', \'Cancelled\']: self.cache[cache_key] = result return result async def batch_get_task_status(self, task_ids: list) -> dict: \\"\\"\\"Get multiple task statuses in parallel\\"\\"\\" tasks = [self.get_task_status_cached(task_id) for task_id in task_ids] results = await asyncio.gather(*tasks, return_exceptions=True) return { task_id: result for task_id, result in zip(task_ids, results) if not isinstance(result, Exception) } async def _make_request(self, method: str, endpoint: str, **kwargs): \\"\\"\\"Optimized HTTP request method\\"\\"\\" url = f\\"{self.base_url}{endpoint}\\" start_time = time.time() async with self.session.request(method, url, **kwargs) as response: request_time = time.time() - start_time # Log slow requests if request_time > 5.0: print(f\\"Slow request: {method} {endpoint} took {request_time:.2f}s\\") response.raise_for_status() result = await response.json() if not result.get(\'success\'): raise Exception(result.get(\'error\', \'Request failed\')) return result[\'data\'] # Usage example\\nasync def high_performance_workflow(): async with OptimizedProvisioningClient(\'http://localhost:9090\') as client: # Create multiple workflows in parallel workflow_tasks = [ client.create_server_workflow({\'infra\': f\'server-{i}\'}) for i in range(10) ] task_ids = await asyncio.gather(*workflow_tasks) print(f\\"Created {len(task_ids)} workflows\\") # Monitor all tasks efficiently while True: # Batch status check statuses = await client.batch_get_task_status(task_ids) completed = [ task_id for task_id, status in statuses.items() if status.get(\'status\') in [\'Completed\', \'Failed\', \'Cancelled\'] ] print(f\\"Completed: {len(completed)}/{len(task_ids)}\\") if len(completed) == len(task_ids): break await asyncio.sleep(10)","breadcrumbs":"Integration Examples » Connection Pooling and Caching","id":"1950","title":"Connection Pooling and Caching"},"1951":{"body":"class WebSocketPool { constructor(maxConnections = 5) { this.maxConnections = maxConnections; this.connections = new Map(); this.connectionQueue = []; } async getConnection(token, eventTypes = []) { const key = `${token}:${eventTypes.sort().join(\',\')}`; if (this.connections.has(key)) { return this.connections.get(key); } if (this.connections.size >= this.maxConnections) { // Wait for available connection await this.waitForAvailableSlot(); } const connection = await this.createConnection(token, eventTypes); this.connections.set(key, connection); return connection; } async createConnection(token, eventTypes) { const ws = new WebSocket(`ws://localhost:9090/ws?token=${token}&events=${eventTypes.join(\',\')}`); return new Promise((resolve, reject) => { ws.onopen = () => resolve(ws); ws.onerror = (error) => reject(error); ws.onclose = () => { // Remove from pool when closed for (const [key, conn] of this.connections.entries()) { if (conn === ws) { this.connections.delete(key); break; } } }; }); } async waitForAvailableSlot() { return new Promise((resolve) => { this.connectionQueue.push(resolve); }); } releaseConnection(ws) { if (this.connectionQueue.length > 0) { const waitingResolver = this.connectionQueue.shift(); waitingResolver(); } }\\n}","breadcrumbs":"Integration Examples » WebSocket Connection Pooling","id":"1951","title":"WebSocket Connection Pooling"},"1952":{"body":"","breadcrumbs":"Integration Examples » SDK Documentation","id":"1952","title":"SDK Documentation"},"1953":{"body":"The Python SDK provides a comprehensive interface for provisioning: Installation pip install provisioning-client Quick Start from provisioning_client import ProvisioningClient # Initialize client\\nclient = ProvisioningClient( base_url=\\"http://localhost:9090\\", username=\\"admin\\", password=\\"password\\"\\n) # Create workflow\\ntask_id = await client.create_server_workflow( infra=\\"production\\", settings=\\"config.ncl\\"\\n) # Wait for completion\\ntask = await client.wait_for_task_completion(task_id)\\nprint(f\\"Workflow completed: {task.status}\\") Advanced Usage # Use with async context manager\\nasync with ProvisioningClient() as client: # Batch operations batch_config = { \\"name\\": \\"deployment\\", \\"operations\\": [...] } batch_result = await client.execute_batch_operation(batch_config) # Real-time monitoring await client.connect_websocket([\'TaskStatusChanged\']) client.on_event(\'TaskStatusChanged\', handle_task_update)","breadcrumbs":"Integration Examples » Python SDK","id":"1953","title":"Python SDK"},"1954":{"body":"Installation npm install @provisioning/client Usage import { ProvisioningClient } from \'@provisioning/client\'; const client = new ProvisioningClient({ baseUrl: \'http://localhost:9090\', username: \'admin\', password: \'password\'\\n}); // Create workflow\\nconst taskId = await client.createServerWorkflow({ infra: \'production\', settings: \'config.ncl\'\\n}); // Monitor progress\\nclient.on(\'workflowProgress\', (progress) => { console.log(`Progress: ${progress.progress}%`);\\n}); await client.connectWebSocket();","breadcrumbs":"Integration Examples » JavaScript/TypeScript SDK","id":"1954","title":"JavaScript/TypeScript SDK"},"1955":{"body":"","breadcrumbs":"Integration Examples » Common Integration Patterns","id":"1955","title":"Common Integration Patterns"},"1956":{"body":"class WorkflowPipeline: \\"\\"\\"Orchestrate complex multi-step workflows\\"\\"\\" def __init__(self, client: ProvisioningClient): self.client = client self.steps = [] def add_step(self, name: str, operation: Callable, dependencies: list = None): \\"\\"\\"Add a step to the pipeline\\"\\"\\" self.steps.append({ \'name\': name, \'operation\': operation, \'dependencies\': dependencies or [], \'status\': \'pending\', \'result\': None }) async def execute(self): \\"\\"\\"Execute the pipeline\\"\\"\\" completed_steps = set() while len(completed_steps) < len(self.steps): # Find steps ready to execute ready_steps = [ step for step in self.steps if (step[\'status\'] == \'pending\' and all(dep in completed_steps for dep in step[\'dependencies\'])) ] if not ready_steps: raise Exception(\\"Pipeline deadlock detected\\") # Execute ready steps in parallel tasks = [] for step in ready_steps: step[\'status\'] = \'running\' tasks.append(self._execute_step(step)) # Wait for completion results = await asyncio.gather(*tasks, return_exceptions=True) for step, result in zip(ready_steps, results): if isinstance(result, Exception): step[\'status\'] = \'failed\' step[\'error\'] = str(result) raise Exception(f\\"Step {step[\'name\']} failed: {result}\\") else: step[\'status\'] = \'completed\' step[\'result\'] = result completed_steps.add(step[\'name\']) async def _execute_step(self, step): \\"\\"\\"Execute a single step\\"\\"\\" try: return await step[\'operation\']() except Exception as e: print(f\\"Step {step[\'name\']} failed: {e}\\") raise # Usage example\\nasync def complex_deployment(): client = ProvisioningClient() pipeline = WorkflowPipeline(client) # Define deployment steps pipeline.add_step(\'servers\', lambda: client.create_server_workflow({ \'infra\': \'production\' })) pipeline.add_step(\'kubernetes\', lambda: client.create_taskserv_workflow({ \'operation\': \'create\', \'taskserv\': \'kubernetes\', \'infra\': \'production\' }), dependencies=[\'servers\']) pipeline.add_step(\'cilium\', lambda: client.create_taskserv_workflow({ \'operation\': \'create\', \'taskserv\': \'cilium\', \'infra\': \'production\' }), dependencies=[\'kubernetes\']) # Execute pipeline await pipeline.execute() print(\\"Deployment pipeline completed successfully\\")","breadcrumbs":"Integration Examples » Workflow Orchestration Pipeline","id":"1956","title":"Workflow Orchestration Pipeline"},"1957":{"body":"class EventDrivenWorkflowManager { constructor(client) { this.client = client; this.workflows = new Map(); this.setupEventHandlers(); } setupEventHandlers() { this.client.on(\'TaskStatusChanged\', this.handleTaskStatusChange.bind(this)); this.client.on(\'WorkflowProgressUpdate\', this.handleProgressUpdate.bind(this)); this.client.on(\'SystemHealthUpdate\', this.handleHealthUpdate.bind(this)); } async createWorkflow(config) { const workflowId = generateUUID(); const workflow = { id: workflowId, config, tasks: [], status: \'pending\', progress: 0, events: [] }; this.workflows.set(workflowId, workflow); // Start workflow execution await this.executeWorkflow(workflow); return workflowId; } async executeWorkflow(workflow) { try { workflow.status = \'running\'; // Create initial tasks based on configuration const taskId = await this.client.createServerWorkflow(workflow.config); workflow.tasks.push({ id: taskId, type: \'server_creation\', status: \'pending\' }); this.emit(\'workflowStarted\', { workflowId: workflow.id, taskId }); } catch (error) { workflow.status = \'failed\'; workflow.error = error.message; this.emit(\'workflowFailed\', { workflowId: workflow.id, error }); } } handleTaskStatusChange(event) { // Find workflows containing this task for (const [workflowId, workflow] of this.workflows) { const task = workflow.tasks.find(t => t.id === event.data.task_id); if (task) { task.status = event.data.status; this.updateWorkflowProgress(workflow); // Trigger next steps based on task completion if (event.data.status === \'Completed\') { this.triggerNextSteps(workflow, task); } } } } updateWorkflowProgress(workflow) { const completedTasks = workflow.tasks.filter(t => [\'Completed\', \'Failed\'].includes(t.status) ).length; workflow.progress = (completedTasks / workflow.tasks.length) * 100; if (completedTasks === workflow.tasks.length) { const failedTasks = workflow.tasks.filter(t => t.status === \'Failed\'); workflow.status = failedTasks.length > 0 ? \'failed\' : \'completed\'; this.emit(\'workflowCompleted\', { workflowId: workflow.id, status: workflow.status }); } } async triggerNextSteps(workflow, completedTask) { // Define workflow dependencies and next steps const nextSteps = this.getNextSteps(workflow, completedTask); for (const nextStep of nextSteps) { try { const taskId = await this.executeWorkflowStep(nextStep); workflow.tasks.push({ id: taskId, type: nextStep.type, status: \'pending\', dependencies: [completedTask.id] }); } catch (error) { console.error(`Failed to trigger next step: ${error.message}`); } } } getNextSteps(workflow, completedTask) { // Define workflow logic based on completed task type switch (completedTask.type) { case \'server_creation\': return [ { type: \'kubernetes_installation\', taskserv: \'kubernetes\' }, { type: \'monitoring_setup\', taskserv: \'prometheus\' } ]; case \'kubernetes_installation\': return [ { type: \'networking_setup\', taskserv: \'cilium\' } ]; default: return []; } }\\n} This comprehensive integration documentation provides developers with everything needed to successfully integrate with provisioning, including complete client implementations, error handling strategies, performance optimizations, and common integration patterns.","breadcrumbs":"Integration Examples » Event-Driven Architecture","id":"1957","title":"Event-Driven Architecture"},"1958":{"body":"API documentation for creating and using infrastructure providers.","breadcrumbs":"Provider API » Provider API Reference","id":"1958","title":"Provider API Reference"},"1959":{"body":"Providers handle cloud-specific operations and resource provisioning. The provisioning platform supports multiple cloud providers through a unified API.","breadcrumbs":"Provider API » Overview","id":"1959","title":"Overview"},"196":{"body":"# Create taskserv\\nprovisioning taskserv create kubernetes\\nprovisioning taskserv create kubernetes --check\\nprovisioning taskserv create kubernetes --infra wuji # Delete taskserv\\nprovisioning taskserv delete kubernetes\\nprovisioning taskserv delete kubernetes --check # List taskservs\\nprovisioning taskserv list\\nprovisioning taskserv list --infra wuji # Generate taskserv configuration\\nprovisioning taskserv generate kubernetes\\nprovisioning taskserv generate kubernetes --out yaml # Check for updates\\nprovisioning taskserv check-updates\\nprovisioning taskserv check-updates --taskserv kubernetes","breadcrumbs":"Quick Start Cheatsheet » Taskserv Management","id":"196","title":"Taskserv Management"},"1960":{"body":"UpCloud - European cloud provider AWS - Amazon Web Services Local - Local development environment","breadcrumbs":"Provider API » Supported Providers","id":"1960","title":"Supported Providers"},"1961":{"body":"All providers must implement the following interface:","breadcrumbs":"Provider API » Provider Interface","id":"1961","title":"Provider Interface"},"1962":{"body":"# Provider initialization\\nexport def init [] -> record { ... } # Server operations\\nexport def create-servers [plan: record] -> list { ... }\\nexport def delete-servers [ids: list] -> bool { ... }\\nexport def list-servers [] -> table { ... } # Resource information\\nexport def get-server-plans [] -> table { ... }\\nexport def get-regions [] -> list { ... }\\nexport def get-pricing [plan: string] -> record { ... }","breadcrumbs":"Provider API » Required Functions","id":"1962","title":"Required Functions"},"1963":{"body":"Each provider requires configuration in Nickel format: # Example: UpCloud provider configuration\\n{ provider = { name = \\"upcloud\\", type = \\"cloud\\", enabled = true, config = { username = \\"{{env.UPCLOUD_USERNAME}}\\", password = \\"{{env.UPCLOUD_PASSWORD}}\\", default_zone = \\"de-fra1\\", }, }\\n}","breadcrumbs":"Provider API » Provider Configuration","id":"1963","title":"Provider Configuration"},"1964":{"body":"","breadcrumbs":"Provider API » Creating a Custom Provider","id":"1964","title":"Creating a Custom Provider"},"1965":{"body":"provisioning/extensions/providers/my-provider/\\n├── nulib/\\n│ └── my_provider.nu # Provider implementation\\n├── schemas/\\n│ ├── main.ncl # Nickel schema\\n│ └── defaults.ncl # Default configuration\\n└── README.md # Provider documentation","breadcrumbs":"Provider API » 1. Directory Structure","id":"1965","title":"1. Directory Structure"},"1966":{"body":"# my_provider.nu\\nexport def init [] { { name: \\"my-provider\\" type: \\"cloud\\" ready: true }\\n} export def create-servers [plan: record] { # Implementation here []\\n} export def list-servers [] { # Implementation here []\\n} # ... other required functions","breadcrumbs":"Provider API » 2. Implementation Template","id":"1966","title":"2. Implementation Template"},"1967":{"body":"# main.ncl\\n{ MyProvider = { # My custom provider schema name | String = \\"my-provider\\", type | String | \\"cloud\\" | \\"local\\" = \\"cloud\\", config | MyProviderConfig, }, MyProviderConfig = { api_key | String, region | String = \\"us-east-1\\", },\\n}","breadcrumbs":"Provider API » 3. Nickel Schema","id":"1967","title":"3. Nickel Schema"},"1968":{"body":"Providers are automatically discovered from: provisioning/extensions/providers/*/nu/*.nu User workspace: workspace/extensions/providers/*/nu/*.nu # Discover available providers\\nprovisioning module discover providers # Load provider\\nprovisioning module load providers workspace my-provider","breadcrumbs":"Provider API » Provider Discovery","id":"1968","title":"Provider Discovery"},"1969":{"body":"","breadcrumbs":"Provider API » Provider API Examples","id":"1969","title":"Provider API Examples"},"197":{"body":"# Create cluster\\nprovisioning cluster create buildkit\\nprovisioning cluster create buildkit --check\\nprovisioning cluster create buildkit --infra wuji # Delete cluster\\nprovisioning cluster delete buildkit\\nprovisioning cluster delete buildkit --check # List clusters\\nprovisioning cluster list\\nprovisioning cluster list --infra wuji","breadcrumbs":"Quick Start Cheatsheet » Cluster Management","id":"197","title":"Cluster Management"},"1970":{"body":"use my_provider.nu * let plan = { count: 3 size: \\"medium\\" zone: \\"us-east-1\\"\\n} create-servers $plan","breadcrumbs":"Provider API » Create Servers","id":"1970","title":"Create Servers"},"1971":{"body":"list-servers | where status == \\"running\\" | select hostname ip_address","breadcrumbs":"Provider API » List Servers","id":"1971","title":"List Servers"},"1972":{"body":"get-pricing \\"small\\" | to yaml","breadcrumbs":"Provider API » Get Pricing","id":"1972","title":"Get Pricing"},"1973":{"body":"Use the test environment system to test providers: # Test provider without real resources\\nprovisioning test env single my-provider --check","breadcrumbs":"Provider API » Testing Providers","id":"1973","title":"Testing Providers"},"1974":{"body":"For complete provider development guide, see: Provider Development - Quick start guide Extension Development - Complete extension guide Integration Examples - Example implementations","breadcrumbs":"Provider API » Provider Development Guide","id":"1974","title":"Provider Development Guide"},"1975":{"body":"Provider API follows semantic versioning: Major : Breaking changes Minor : New features, backward compatible Patch : Bug fixes Current API version: 2.0.0 For more examples, see Integration Examples .","breadcrumbs":"Provider API » API Stability","id":"1975","title":"API Stability"},"1976":{"body":"API documentation for Nushell library functions in the provisioning platform.","breadcrumbs":"NuShell API » Nushell API Reference","id":"1976","title":"Nushell API Reference"},"1977":{"body":"The provisioning platform provides a comprehensive Nushell library with reusable functions for infrastructure automation.","breadcrumbs":"NuShell API » Overview","id":"1977","title":"Overview"},"1978":{"body":"","breadcrumbs":"NuShell API » Core Modules","id":"1978","title":"Core Modules"},"1979":{"body":"Location : provisioning/core/nulib/lib_provisioning/config/ get-config - Retrieve configuration values validate-config - Validate configuration files load-config - Load configuration from file","breadcrumbs":"NuShell API » Configuration Module","id":"1979","title":"Configuration Module"},"198":{"body":"","breadcrumbs":"Quick Start Cheatsheet » Orchestration Commands","id":"198","title":"Orchestration Commands"},"1980":{"body":"Location : provisioning/core/nulib/lib_provisioning/servers/ create-servers - Create server infrastructure list-servers - List all provisioned servers delete-servers - Remove servers","breadcrumbs":"NuShell API » Server Module","id":"1980","title":"Server Module"},"1981":{"body":"Location : provisioning/core/nulib/lib_provisioning/taskservs/ install-taskserv - Install infrastructure service list-taskservs - List installed services generate-taskserv-config - Generate service configuration","breadcrumbs":"NuShell API » Task Service Module","id":"1981","title":"Task Service Module"},"1982":{"body":"Location : provisioning/core/nulib/lib_provisioning/workspace/ init-workspace - Initialize new workspace get-active-workspace - Get current workspace switch-workspace - Switch to different workspace","breadcrumbs":"NuShell API » Workspace Module","id":"1982","title":"Workspace Module"},"1983":{"body":"Location : provisioning/core/nulib/lib_provisioning/providers/ discover-providers - Find available providers load-provider - Load provider module list-providers - List loaded providers","breadcrumbs":"NuShell API » Provider Module","id":"1983","title":"Provider Module"},"1984":{"body":"","breadcrumbs":"NuShell API » Diagnostics & Utilities","id":"1984","title":"Diagnostics & Utilities"},"1985":{"body":"Location : provisioning/core/nulib/lib_provisioning/diagnostics/ system-status - Check system health (13+ checks) health-check - Deep validation (7 areas) next-steps - Get progressive guidance deployment-phase - Check deployment progress","breadcrumbs":"NuShell API » Diagnostics Module","id":"1985","title":"Diagnostics Module"},"1986":{"body":"Location : provisioning/core/nulib/lib_provisioning/utils/hints.nu show-next-step - Display next step suggestion show-doc-link - Show documentation link show-example - Display command example","breadcrumbs":"NuShell API » Hints Module","id":"1986","title":"Hints Module"},"1987":{"body":"# Load provisioning library\\nuse provisioning/core/nulib/lib_provisioning * # Check system status\\nsystem-status | table # Create servers\\ncreate-servers --plan \\"3-node-cluster\\" --check # Install kubernetes\\ninstall-taskserv kubernetes --check # Get next steps\\nnext-steps","breadcrumbs":"NuShell API » Usage Example","id":"1987","title":"Usage Example"},"1988":{"body":"All API functions follow these conventions: Explicit types : All parameters have type annotations Early returns : Validate first, fail fast Pure functions : No side effects (mutations marked with !) Pipeline-friendly : Output designed for Nu pipelines","breadcrumbs":"NuShell API » API Conventions","id":"1988","title":"API Conventions"},"1989":{"body":"See Nushell Best Practices for coding guidelines.","breadcrumbs":"NuShell API » Best Practices","id":"1989","title":"Best Practices"},"199":{"body":"# Submit server creation workflow\\nnu -c \\"use core/nulib/workflows/server_create.nu *; server_create_workflow \'wuji\' \'\' [] --check\\" # Submit taskserv workflow\\nnu -c \\"use core/nulib/workflows/taskserv.nu *; taskserv create \'kubernetes\' \'wuji\' --check\\" # Submit cluster workflow\\nnu -c \\"use core/nulib/workflows/cluster.nu *; cluster create \'buildkit\' \'wuji\' --check\\" # List all workflows\\nprovisioning workflow list\\nnu -c \\"use core/nulib/workflows/management.nu *; workflow list\\" # Get workflow statistics\\nprovisioning workflow stats\\nnu -c \\"use core/nulib/workflows/management.nu *; workflow stats\\" # Monitor workflow in real-time\\nprovisioning workflow monitor \\nnu -c \\"use core/nulib/workflows/management.nu *; workflow monitor \\" # Check orchestrator health\\nprovisioning workflow orchestrator\\nnu -c \\"use core/nulib/workflows/management.nu *; workflow orchestrator\\" # Get specific workflow status\\nprovisioning workflow status \\nnu -c \\"use core/nulib/workflows/management.nu *; workflow status \\"","breadcrumbs":"Quick Start Cheatsheet » Workflow Management","id":"199","title":"Workflow Management"},"1990":{"body":"Browse the complete source code: Core library : provisioning/core/nulib/lib_provisioning/ Module index : provisioning/core/nulib/lib_provisioning/mod.nu For integration examples, see Integration Examples .","breadcrumbs":"NuShell API » Source Code","id":"1990","title":"Source Code"},"1991":{"body":"This document describes the path resolution system used throughout the provisioning infrastructure for discovering configurations, extensions, and resolving workspace paths.","breadcrumbs":"Path Resolution » Path Resolution API","id":"1991","title":"Path Resolution API"},"1992":{"body":"The path resolution system provides a hierarchical and configurable mechanism for: Configuration file discovery and loading Extension discovery (providers, task services, clusters) Workspace and project path management Environment variable interpolation Cross-platform path handling","breadcrumbs":"Path Resolution » Overview","id":"1992","title":"Overview"},"1993":{"body":"The system follows a specific hierarchy for loading configuration files: 1. System defaults (config.defaults.toml)\\n2. User configuration (config.user.toml)\\n3. Project configuration (config.project.toml)\\n4. Infrastructure config (infra/config.toml)\\n5. Environment config (config.{env}.toml)\\n6. Runtime overrides (CLI arguments, ENV vars)","breadcrumbs":"Path Resolution » Configuration Resolution Hierarchy","id":"1993","title":"Configuration Resolution Hierarchy"},"1994":{"body":"The system searches for configuration files in these locations: # Default search paths (in order)\\n/usr/local/provisioning/config.defaults.toml\\n$HOME/.config/provisioning/config.user.toml\\n$PWD/config.project.toml\\n$PROVISIONING_KLOUD_PATH/config.infra.toml\\n$PWD/config.{PROVISIONING_ENV}.toml","breadcrumbs":"Path Resolution » Configuration Search Paths","id":"1994","title":"Configuration Search Paths"},"1995":{"body":"","breadcrumbs":"Path Resolution » Path Resolution API","id":"1995","title":"Path Resolution API"},"1996":{"body":"resolve-config-path(pattern: string, search_paths: list) -> string Resolves configuration file paths using the search hierarchy. Parameters: pattern: File pattern to search for (for example, \\"config.*.toml\\") search_paths: Additional paths to search (optional) Returns: Full path to the first matching configuration file Empty string if no file found Example: use path-resolution.nu *\\nlet config_path = (resolve-config-path \\"config.user.toml\\" [])\\n# Returns: \\"/home/user/.config/provisioning/config.user.toml\\" resolve-extension-path(type: string, name: string) -> record Discovers extension paths (providers, taskservs, clusters). Parameters: type: Extension type (\\"provider\\", \\"taskserv\\", \\"cluster\\") name: Extension name (for example, \\"upcloud\\", \\"kubernetes\\", \\"buildkit\\") Returns: { base_path: \\"/usr/local/provisioning/providers/upcloud\\", schemas_path: \\"/usr/local/provisioning/providers/upcloud/schemas\\", nulib_path: \\"/usr/local/provisioning/providers/upcloud/nulib\\", templates_path: \\"/usr/local/provisioning/providers/upcloud/templates\\", exists: true\\n} resolve-workspace-paths() -> record Gets current workspace path configuration. Returns: { base: \\"/usr/local/provisioning\\", current_infra: \\"/workspace/infra/production\\", kloud_path: \\"/workspace/kloud\\", providers: \\"/usr/local/provisioning/providers\\", taskservs: \\"/usr/local/provisioning/taskservs\\", clusters: \\"/usr/local/provisioning/cluster\\", extensions: \\"/workspace/extensions\\"\\n}","breadcrumbs":"Path Resolution » Core Functions","id":"1996","title":"Core Functions"},"1997":{"body":"The system supports variable interpolation in configuration paths: Supported Variables {{paths.base}} - Base provisioning path {{paths.kloud}} - Current kloud path {{env.HOME}} - User home directory {{env.PWD}} - Current working directory {{now.date}} - Current date (YYYY-MM-DD) {{now.time}} - Current time (HH:MM:SS) {{git.branch}} - Current git branch {{git.commit}} - Current git commit hash interpolate-path(template: string, context: record) -> string Interpolates variables in path templates. Parameters: template: Path template with variables context: Variable context record Example: let template = \\"{{paths.base}}/infra/{{env.USER}}/{{git.branch}}\\"\\nlet result = (interpolate-path $template { paths: { base: \\"/usr/local/provisioning\\" }, env: { USER: \\"admin\\" }, git: { branch: \\"main\\" }\\n})\\n# Returns: \\"/usr/local/provisioning/infra/admin/main\\"","breadcrumbs":"Path Resolution » Path Interpolation","id":"1997","title":"Path Interpolation"},"1998":{"body":"","breadcrumbs":"Path Resolution » Extension Discovery API","id":"1998","title":"Extension Discovery API"},"1999":{"body":"discover-providers() -> list Discovers all available providers. Returns: [ { name: \\"upcloud\\", path: \\"/usr/local/provisioning/providers/upcloud\\", type: \\"provider\\", version: \\"1.2.0\\", enabled: true, has_schemas: true, has_nulib: true, has_templates: true }, { name: \\"aws\\", path: \\"/usr/local/provisioning/providers/aws\\", type: \\"provider\\", version: \\"2.1.0\\", enabled: true, has_schemas: true, has_nulib: true, has_templates: true }\\n] get-provider-config(name: string) -> record Gets provider-specific configuration and paths. Parameters: name: Provider name Returns: { name: \\"upcloud\\", base_path: \\"/usr/local/provisioning/providers/upcloud\\", config: { api_url: \\"https://api.upcloud.com/1.3\\", auth_method: \\"basic\\", interface: \\"API\\" }, paths: { schemas: \\"/usr/local/provisioning/providers/upcloud/schemas\\", nulib: \\"/usr/local/provisioning/providers/upcloud/nulib\\", templates: \\"/usr/local/provisioning/providers/upcloud/templates\\" }, metadata: { version: \\"1.2.0\\", description: \\"UpCloud provider for server provisioning\\" }\\n}","breadcrumbs":"Path Resolution » Provider Discovery","id":"1999","title":"Provider Discovery"},"2":{"body":"Document Description Audience Installation Guide Install and configure the system New Users Getting Started First steps and basic concepts New Users Quick Reference Command cheat sheet All Users From Scratch Guide Complete deployment walkthrough New Users","breadcrumbs":"Home » 🚀 Getting Started","id":"2","title":"🚀 Getting Started"},"20":{"body":"Review System Overview Study Design Principles Read relevant ADRs Follow Development Guide Reference Nickel Quick Reference","breadcrumbs":"Home » For Developers","id":"20","title":"For Developers"},"200":{"body":"# Submit batch workflow from Nickel\\nprovisioning batch submit workflows/example_batch.ncl\\nnu -c \\"use core/nulib/workflows/batch.nu *; batch submit workflows/example_batch.ncl\\" # Monitor batch workflow progress\\nprovisioning batch monitor \\nnu -c \\"use core/nulib/workflows/batch.nu *; batch monitor \\" # List batch workflows with filtering\\nprovisioning batch list\\nprovisioning batch list --status Running\\nnu -c \\"use core/nulib/workflows/batch.nu *; batch list --status Running\\" # Get detailed batch status\\nprovisioning batch status \\nnu -c \\"use core/nulib/workflows/batch.nu *; batch status \\" # Initiate rollback for failed workflow\\nprovisioning batch rollback \\nnu -c \\"use core/nulib/workflows/batch.nu *; batch rollback \\" # Cancel running batch\\nprovisioning batch cancel # Show batch workflow statistics\\nprovisioning batch stats\\nnu -c \\"use core/nulib/workflows/batch.nu *; batch stats\\"","breadcrumbs":"Quick Start Cheatsheet » Batch Operations","id":"200","title":"Batch Operations"},"2000":{"body":"discover-taskservs() -> list Discovers all available task services. Returns: [ { name: \\"kubernetes\\", path: \\"/usr/local/provisioning/taskservs/kubernetes\\", type: \\"taskserv\\", category: \\"orchestration\\", version: \\"1.28.0\\", enabled: true }, { name: \\"cilium\\", path: \\"/usr/local/provisioning/taskservs/cilium\\", type: \\"taskserv\\", category: \\"networking\\", version: \\"1.14.0\\", enabled: true }\\n] get-taskserv-config(name: string) -> record Gets task service configuration and version information. Parameters: name: Task service name Returns: { name: \\"kubernetes\\", path: \\"/usr/local/provisioning/taskservs/kubernetes\\", version: { current: \\"1.28.0\\", available: \\"1.28.2\\", update_available: true, source: \\"github\\", release_url: \\"https://github.com/kubernetes/kubernetes/releases\\" }, config: { category: \\"orchestration\\", dependencies: [\\"containerd\\"], supports_versions: [\\"1.26.x\\", \\"1.27.x\\", \\"1.28.x\\"] }\\n}","breadcrumbs":"Path Resolution » Task Service Discovery","id":"2000","title":"Task Service Discovery"},"2001":{"body":"discover-clusters() -> list Discovers all available cluster configurations. Returns: [ { name: \\"buildkit\\", path: \\"/usr/local/provisioning/cluster/buildkit\\", type: \\"cluster\\", category: \\"build\\", components: [\\"buildkit\\", \\"registry\\", \\"storage\\"], enabled: true }\\n]","breadcrumbs":"Path Resolution » Cluster Discovery","id":"2001","title":"Cluster Discovery"},"2002":{"body":"","breadcrumbs":"Path Resolution » Environment Management API","id":"2002","title":"Environment Management API"},"2003":{"body":"detect-environment() -> string Automatically detects the current environment based on: PROVISIONING_ENV environment variable Git branch patterns (main → prod, develop → dev, etc.) Directory structure analysis Configuration file presence Returns: Environment name string (dev, test, prod, etc.) get-environment-config(env: string) -> record Gets environment-specific configuration. Parameters: env: Environment name Returns: { name: \\"production\\", paths: { base: \\"/opt/provisioning\\", kloud: \\"/data/kloud\\", logs: \\"/var/log/provisioning\\" }, providers: { default: \\"upcloud\\", allowed: [\\"upcloud\\", \\"aws\\"] }, features: { debug: false, telemetry: true, rollback: true }\\n}","breadcrumbs":"Path Resolution » Environment Detection","id":"2003","title":"Environment Detection"},"2004":{"body":"switch-environment(env: string, validate: bool = true) -> null Switches to a different environment and updates path resolution. Parameters: env: Target environment name validate: Whether to validate environment configuration Effects: Updates PROVISIONING_ENV environment variable Reconfigures path resolution for new environment Validates environment configuration if requested","breadcrumbs":"Path Resolution » Environment Switching","id":"2004","title":"Environment Switching"},"2005":{"body":"","breadcrumbs":"Path Resolution » Workspace Management API","id":"2005","title":"Workspace Management API"},"2006":{"body":"discover-workspaces() -> list Discovers available workspaces and infrastructure directories. Returns: [ { name: \\"production\\", path: \\"/workspace/infra/production\\", type: \\"infrastructure\\", provider: \\"upcloud\\", settings: \\"settings.ncl\\", valid: true }, { name: \\"development\\", path: \\"/workspace/infra/development\\", type: \\"infrastructure\\", provider: \\"local\\", settings: \\"dev-settings.ncl\\", valid: true }\\n] set-current-workspace(path: string) -> null Sets the current workspace for path resolution. Parameters: path: Workspace directory path Effects: Updates CURRENT_INFRA_PATH environment variable Reconfigures workspace-relative path resolution","breadcrumbs":"Path Resolution » Workspace Discovery","id":"2006","title":"Workspace Discovery"},"2007":{"body":"analyze-project-structure(path: string = $PWD) -> record Analyzes project structure and identifies components. Parameters: path: Project root path (defaults to current directory) Returns: { root: \\"/workspace/project\\", type: \\"provisioning_workspace\\", components: { providers: [ { name: \\"upcloud\\", path: \\"providers/upcloud\\" }, { name: \\"aws\\", path: \\"providers/aws\\" } ], taskservs: [ { name: \\"kubernetes\\", path: \\"taskservs/kubernetes\\" }, { name: \\"cilium\\", path: \\"taskservs/cilium\\" } ], clusters: [ { name: \\"buildkit\\", path: \\"cluster/buildkit\\" } ], infrastructure: [ { name: \\"production\\", path: \\"infra/production\\" }, { name: \\"staging\\", path: \\"infra/staging\\" } ] }, config_files: [ \\"config.defaults.toml\\", \\"config.user.toml\\", \\"config.prod.toml\\" ]\\n}","breadcrumbs":"Path Resolution » Project Structure Analysis","id":"2007","title":"Project Structure Analysis"},"2008":{"body":"","breadcrumbs":"Path Resolution » Caching and Performance","id":"2008","title":"Caching and Performance"},"2009":{"body":"The path resolution system includes intelligent caching: cache-paths(duration: duration = 5 min) -> null Enables path caching for the specified duration. Parameters: duration: Cache validity duration invalidate-path-cache() -> null Invalidates the path resolution cache. get-cache-stats() -> record Gets path resolution cache statistics. Returns: { enabled: true, size: 150, hit_rate: 0.85, last_invalidated: \\"2025-09-26T10:00:00Z\\"\\n}","breadcrumbs":"Path Resolution » Path Caching","id":"2009","title":"Path Caching"},"201":{"body":"# Start orchestrator in background\\ncd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background # Check orchestrator status\\n./scripts/start-orchestrator.nu --check\\nprovisioning orchestrator status # Stop orchestrator\\n./scripts/start-orchestrator.nu --stop\\nprovisioning orchestrator stop # View logs\\ntail -f provisioning/platform/orchestrator/data/orchestrator.log\\nprovisioning orchestrator logs","breadcrumbs":"Quick Start Cheatsheet » Orchestrator Management","id":"201","title":"Orchestrator Management"},"2010":{"body":"","breadcrumbs":"Path Resolution » Cross-Platform Compatibility","id":"2010","title":"Cross-Platform Compatibility"},"2011":{"body":"normalize-path(path: string) -> string Normalizes paths for cross-platform compatibility. Parameters: path: Input path (may contain mixed separators) Returns: Normalized path using platform-appropriate separators Example: # On Windows\\nnormalize-path \\"path/to/file\\" # Returns: \\"path\\\\to\\\\file\\" # On Unix\\nnormalize-path \\"path\\\\to\\\\file\\" # Returns: \\"path/to/file\\" join-paths(segments: list) -> string Safely joins path segments using platform separators. Parameters: segments: List of path segments Returns: Joined path string","breadcrumbs":"Path Resolution » Path Normalization","id":"2011","title":"Path Normalization"},"2012":{"body":"","breadcrumbs":"Path Resolution » Configuration Validation API","id":"2012","title":"Configuration Validation API"},"2013":{"body":"validate-paths(config: record) -> record Validates all paths in configuration. Parameters: config: Configuration record Returns: { valid: true, errors: [], warnings: [ { path: \\"paths.extensions\\", message: \\"Path does not exist\\" } ], checks_performed: 15\\n} validate-extension-structure(type: string, path: string) -> record Validates extension directory structure. Parameters: type: Extension type (provider, taskserv, cluster) path: Extension base path Returns: { valid: true, required_files: [ { file: \\"manifest.toml\\", exists: true }, { file: \\"schemas/main.ncl\\", exists: true }, { file: \\"nulib/mod.nu\\", exists: true } ], optional_files: [ { file: \\"templates/server.j2\\", exists: false } ]\\n}","breadcrumbs":"Path Resolution » Path Validation","id":"2013","title":"Path Validation"},"2014":{"body":"","breadcrumbs":"Path Resolution » Command-Line Interface","id":"2014","title":"Command-Line Interface"},"2015":{"body":"The path resolution API is exposed via Nushell commands: # Show current path configuration\\nprovisioning show paths # Discover available extensions\\nprovisioning discover providers\\nprovisioning discover taskservs\\nprovisioning discover clusters # Validate path configuration\\nprovisioning validate paths # Switch environments\\nprovisioning env switch prod # Set workspace\\nprovisioning workspace set /path/to/infra","breadcrumbs":"Path Resolution » Path Resolution Commands","id":"2015","title":"Path Resolution Commands"},"2016":{"body":"","breadcrumbs":"Path Resolution » Integration Examples","id":"2016","title":"Integration Examples"},"2017":{"body":"import subprocess\\nimport json class PathResolver: def __init__(self, provisioning_path=\\"/usr/local/bin/provisioning\\"): self.cmd = provisioning_path def get_paths(self): result = subprocess.run([ \\"nu\\", \\"-c\\", f\\"use {self.cmd} *; show-config --section=paths --format=json\\" ], capture_output=True, text=True) return json.loads(result.stdout) def discover_providers(self): result = subprocess.run([ \\"nu\\", \\"-c\\", f\\"use {self.cmd} *; discover providers --format=json\\" ], capture_output=True, text=True) return json.loads(result.stdout) # Usage\\nresolver = PathResolver()\\npaths = resolver.get_paths()\\nproviders = resolver.discover_providers()","breadcrumbs":"Path Resolution » Python Integration","id":"2017","title":"Python Integration"},"2018":{"body":"const { exec } = require(\'child_process\');\\nconst util = require(\'util\');\\nconst execAsync = util.promisify(exec); class PathResolver { constructor(provisioningPath = \'/usr/local/bin/provisioning\') { this.cmd = provisioningPath; } async getPaths() { const { stdout } = await execAsync( `nu -c \\"use ${this.cmd} *; show-config --section=paths --format=json\\"` ); return JSON.parse(stdout); } async discoverExtensions(type) { const { stdout } = await execAsync( `nu -c \\"use ${this.cmd} *; discover ${type} --format=json\\"` ); return JSON.parse(stdout); }\\n} // Usage\\nconst resolver = new PathResolver();\\nconst paths = await resolver.getPaths();\\nconst providers = await resolver.discoverExtensions(\'providers\');","breadcrumbs":"Path Resolution » JavaScript/Node.js Integration","id":"2018","title":"JavaScript/Node.js Integration"},"2019":{"body":"","breadcrumbs":"Path Resolution » Error Handling","id":"2019","title":"Error Handling"},"202":{"body":"","breadcrumbs":"Quick Start Cheatsheet » Configuration Commands","id":"202","title":"Configuration Commands"},"2020":{"body":"Configuration File Not Found Error: Configuration file not found in search paths\\nSearched: [\\"/usr/local/provisioning/config.defaults.toml\\", ...] Extension Not Found Error: Provider \'missing-provider\' not found\\nAvailable providers: [\\"upcloud\\", \\"aws\\", \\"local\\"] Invalid Path Template Error: Invalid template variable: {{invalid.var}}\\nValid variables: [\\"paths.*\\", \\"env.*\\", \\"now.*\\", \\"git.*\\"] Environment Not Found Error: Environment \'staging\' not configured\\nAvailable environments: [\\"dev\\", \\"test\\", \\"prod\\"]","breadcrumbs":"Path Resolution » Common Error Scenarios","id":"2020","title":"Common Error Scenarios"},"2021":{"body":"The system provides graceful fallbacks: Missing configuration files use system defaults Invalid paths fall back to safe defaults Extension discovery continues if some paths are inaccessible Environment detection falls back to \'local\' if detection fails","breadcrumbs":"Path Resolution » Error Recovery","id":"2021","title":"Error Recovery"},"2022":{"body":"","breadcrumbs":"Path Resolution » Performance Considerations","id":"2022","title":"Performance Considerations"},"2023":{"body":"Use Path Caching : Enable caching for frequently accessed paths Batch Discovery : Discover all extensions at once rather than individually Lazy Loading : Load extension configurations only when needed Environment Detection : Cache environment detection results","breadcrumbs":"Path Resolution » Best Practices","id":"2023","title":"Best Practices"},"2024":{"body":"Monitor path resolution performance: # Get resolution statistics\\nprovisioning debug path-stats # Monitor cache performance\\nprovisioning debug cache-stats # Profile path resolution\\nprovisioning debug profile-paths","breadcrumbs":"Path Resolution » Monitoring","id":"2024","title":"Monitoring"},"2025":{"body":"","breadcrumbs":"Path Resolution » Security Considerations","id":"2025","title":"Security Considerations"},"2026":{"body":"The system includes protections against path traversal attacks: All paths are normalized and validated Relative paths are resolved within safe boundaries Symlinks are validated before following","breadcrumbs":"Path Resolution » Path Traversal Protection","id":"2026","title":"Path Traversal Protection"},"2027":{"body":"Path resolution respects file system permissions: Configuration files require read access Extension directories require read/execute access Workspace directories may require write access for operations This path resolution API provides a comprehensive and flexible system for managing the complex path requirements of multi-provider, multi-environment infrastructure provisioning.","breadcrumbs":"Path Resolution » Access Control","id":"2027","title":"Access Control"},"2028":{"body":"This guide focuses on creating extensions tailored to specific infrastructure requirements, business needs, and organizational constraints.","breadcrumbs":"Infrastructure-Specific Extensions » Infrastructure-Specific Extension Development","id":"2028","title":"Infrastructure-Specific Extension Development"},"2029":{"body":"Overview Infrastructure Assessment Custom Taskserv Development Provider-Specific Extensions Multi-Environment Management Integration Patterns Real-World Examples","breadcrumbs":"Infrastructure-Specific Extensions » Table of Contents","id":"2029","title":"Table of Contents"},"203":{"body":"# Show environment variables\\nprovisioning env # Show all environment and configuration\\nprovisioning allenv # Validate configuration\\nprovisioning validate config\\nprovisioning validate infra # Setup wizard\\nprovisioning setup","breadcrumbs":"Quick Start Cheatsheet » Environment and Validation","id":"203","title":"Environment and Validation"},"2030":{"body":"Infrastructure-specific extensions address unique requirements that generic modules cannot cover: Company-specific applications and services Compliance and security requirements Legacy system integrations Custom networking configurations Specialized monitoring and alerting Multi-cloud and hybrid deployments","breadcrumbs":"Infrastructure-Specific Extensions » Overview","id":"2030","title":"Overview"},"2031":{"body":"","breadcrumbs":"Infrastructure-Specific Extensions » Infrastructure Assessment","id":"2031","title":"Infrastructure Assessment"},"2032":{"body":"Before creating custom extensions, assess your infrastructure requirements: 1. Application Inventory # Document existing applications\\ncat > infrastructure-assessment.yaml << EOF\\napplications: - name: \\"legacy-billing-system\\" type: \\"monolith\\" runtime: \\"java-8\\" database: \\"oracle-11g\\" integrations: [\\"ldap\\", \\"file-storage\\", \\"email\\"] compliance: [\\"pci-dss\\", \\"sox\\"] - name: \\"customer-portal\\" type: \\"microservices\\" runtime: \\"nodejs-16\\" database: \\"postgresql-13\\" integrations: [\\"redis\\", \\"elasticsearch\\", \\"s3\\"] compliance: [\\"gdpr\\", \\"hipaa\\"] infrastructure: - type: \\"on-premise\\" location: \\"datacenter-primary\\" capabilities: [\\"kubernetes\\", \\"vmware\\", \\"storage-array\\"] - type: \\"cloud\\" provider: \\"aws\\" regions: [\\"us-east-1\\", \\"eu-west-1\\"] services: [\\"eks\\", \\"rds\\", \\"s3\\", \\"cloudfront\\"] compliance_requirements: - \\"PCI DSS Level 1\\" - \\"SOX compliance\\" - \\"GDPR data protection\\" - \\"HIPAA safeguards\\" network_requirements: - \\"air-gapped environments\\" - \\"private subnet isolation\\" - \\"vpn connectivity\\" - \\"load balancer integration\\"\\nEOF 2. Gap Analysis # Analyze what standard modules don\'t cover\\n./provisioning/core/cli/module-loader discover taskservs > available-modules.txt # Create gap analysis\\ncat > gap-analysis.md << EOF\\n# Infrastructure Gap Analysis ## Standard Modules Available\\n$(cat available-modules.txt) ## Missing Capabilities\\n- [ ] Legacy Oracle database integration\\n- [ ] Company-specific LDAP authentication\\n- [ ] Custom monitoring for legacy systems\\n- [ ] Compliance reporting automation\\n- [ ] Air-gapped deployment workflows\\n- [ ] Multi-datacenter replication ## Custom Extensions Needed\\n1. **oracle-db-taskserv**: Oracle database with company settings\\n2. **company-ldap-taskserv**: LDAP integration with custom schema\\n3. **compliance-monitor-taskserv**: Automated compliance checking\\n4. **airgap-deployment-cluster**: Air-gapped deployment patterns\\n5. **company-monitoring-taskserv**: Custom monitoring dashboard\\nEOF","breadcrumbs":"Infrastructure-Specific Extensions » Identifying Extension Needs","id":"2032","title":"Identifying Extension Needs"},"2033":{"body":"Business Requirements Template \\"\\"\\"\\nBusiness Requirements Schema for Custom Extensions\\nUse this template to document requirements before development\\n\\"\\"\\" schema BusinessRequirements: \\"\\"\\"Document business requirements for custom extensions\\"\\"\\" # Project information project_name: str stakeholders: [str] timeline: str budget_constraints?: str # Functional requirements functional_requirements: [FunctionalRequirement] # Non-functional requirements performance_requirements: PerformanceRequirements security_requirements: SecurityRequirements compliance_requirements: [str] # Integration requirements existing_systems: [ExistingSystem] required_integrations: [Integration] # Operational requirements monitoring_requirements: [str] backup_requirements: [str] disaster_recovery_requirements: [str] schema FunctionalRequirement: id: str description: str priority: \\"high\\" | \\"medium\\" | \\"low\\" acceptance_criteria: [str] schema PerformanceRequirements: max_response_time: str throughput_requirements: str availability_target: str scalability_requirements: str schema SecurityRequirements: authentication_method: str authorization_model: str encryption_requirements: [str] audit_requirements: [str] network_security: [str] schema ExistingSystem: name: str type: str version: str api_available: bool integration_method: str schema Integration: target_system: str integration_type: \\"api\\" | \\"database\\" | \\"file\\" | \\"message_queue\\" data_format: str frequency: str direction: \\"inbound\\" | \\"outbound\\" | \\"bidirectional\\"","breadcrumbs":"Infrastructure-Specific Extensions » Requirements Gathering","id":"2033","title":"Requirements Gathering"},"2034":{"body":"","breadcrumbs":"Infrastructure-Specific Extensions » Custom Taskserv Development","id":"2034","title":"Custom Taskserv Development"},"2035":{"body":"Example: Legacy ERP System Integration # Create company-specific taskserv\\nmkdir -p extensions/taskservs/company-specific/legacy-erp/nickel\\ncd extensions/taskservs/company-specific/legacy-erp/nickel Create legacy-erp.ncl: \\"\\"\\"\\nLegacy ERP System Taskserv\\nHandles deployment and management of company\'s legacy ERP system\\n\\"\\"\\" import provisioning.lib as lib\\nimport provisioning.dependencies as deps\\nimport provisioning.defaults as defaults # ERP system configuration\\nschema LegacyERPConfig: \\"\\"\\"Configuration for legacy ERP system\\"\\"\\" # Application settings erp_version: str = \\"12.2.0\\" installation_mode: \\"standalone\\" | \\"cluster\\" | \\"ha\\" = \\"ha\\" # Database configuration database_type: \\"oracle\\" | \\"sqlserver\\" = \\"oracle\\" database_version: str = \\"19c\\" database_size: str = \\"500Gi\\" database_backup_retention: int = 30 # Network configuration erp_port: int = 8080 database_port: int = 1521 ssl_enabled: bool = True internal_network_only: bool = True # Integration settings ldap_server: str file_share_path: str email_server: str # Compliance settings audit_logging: bool = True encryption_at_rest: bool = True encryption_in_transit: bool = True data_retention_years: int = 7 # Resource allocation app_server_resources: ERPResourceConfig database_resources: ERPResourceConfig # Backup configuration backup_schedule: str = \\"0 2 * * *\\" # Daily at 2 AM backup_retention_policy: BackupRetentionPolicy check: erp_port > 0 and erp_port < 65536, \\"ERP port must be valid\\" database_port > 0 and database_port < 65536, \\"Database port must be valid\\" data_retention_years > 0, \\"Data retention must be positive\\" len(ldap_server) > 0, \\"LDAP server required\\" schema ERPResourceConfig: \\"\\"\\"Resource configuration for ERP components\\"\\"\\" cpu_request: str memory_request: str cpu_limit: str memory_limit: str storage_size: str storage_class: str = \\"fast-ssd\\" schema BackupRetentionPolicy: \\"\\"\\"Backup retention policy for ERP system\\"\\"\\" daily_backups: int = 7 weekly_backups: int = 4 monthly_backups: int = 12 yearly_backups: int = 7 # Environment-specific resource configurations\\nerp_resource_profiles = { \\"development\\": { app_server_resources = { cpu_request = \\"1\\" memory_request = \\"4Gi\\" cpu_limit = \\"2\\" memory_limit = \\"8Gi\\" storage_size = \\"50Gi\\" storage_class = \\"standard\\" } database_resources = { cpu_request = \\"2\\" memory_request = \\"8Gi\\" cpu_limit = \\"4\\" memory_limit = \\"16Gi\\" storage_size = \\"100Gi\\" storage_class = \\"standard\\" } }, \\"production\\": { app_server_resources = { cpu_request = \\"4\\" memory_request = \\"16Gi\\" cpu_limit = \\"8\\" memory_limit = \\"32Gi\\" storage_size = \\"200Gi\\" storage_class = \\"fast-ssd\\" } database_resources = { cpu_request = \\"8\\" memory_request = \\"32Gi\\" cpu_limit = \\"16\\" memory_limit = \\"64Gi\\" storage_size = \\"2Ti\\" storage_class = \\"fast-ssd\\" } }\\n} # Taskserv definition\\nschema LegacyERPTaskserv(lib.TaskServDef): \\"\\"\\"Legacy ERP Taskserv Definition\\"\\"\\" name: str = \\"legacy-erp\\" config: LegacyERPConfig environment: \\"development\\" | \\"staging\\" | \\"production\\" # Dependencies for legacy ERP\\nlegacy_erp_dependencies: deps.TaskservDependencies = { name = \\"legacy-erp\\" # Infrastructure dependencies requires = [\\"kubernetes\\", \\"storage-class\\"] optional = [\\"monitoring\\", \\"backup-agent\\", \\"log-aggregator\\"] conflicts = [\\"modern-erp\\"] # Services provided provides = [\\"erp-api\\", \\"erp-ui\\", \\"erp-reports\\", \\"erp-integration\\"] # Resource requirements resources = { cpu = \\"8\\" memory = \\"32Gi\\" disk = \\"2Ti\\" network = True privileged = True # Legacy systems often need privileged access } # Health checks health_checks = [ { command = \\"curl -k https://localhost:9090/health\\" interval = 60 timeout = 30 retries = 3 }, { command = \\"sqlplus system/password@localhost:1521/XE <<< \'SELECT 1 FROM DUAL;\'\\" interval = 300 timeout = 60 retries = 2 } ] # Installation phases phases = [ { name = \\"pre-install\\" order = 1 parallel = False required = True }, { name = \\"database-setup\\" order = 2 parallel = False required = True }, { name = \\"application-install\\" order = 3 parallel = False required = True }, { name = \\"integration-setup\\" order = 4 parallel = True required = False }, { name = \\"compliance-validation\\" order = 5 parallel = False required = True } ] # Compatibility os_support = [\\"linux\\"] arch_support = [\\"amd64\\"] timeout = 3600 # 1 hour for legacy system deployment\\n} # Default configuration\\nlegacy_erp_default: LegacyERPTaskserv = { name = \\"legacy-erp\\" environment = \\"production\\" config = { erp_version = \\"12.2.0\\" installation_mode = \\"ha\\" database_type = \\"oracle\\" database_version = \\"19c\\" database_size = \\"1Ti\\" database_backup_retention = 30 erp_port = 8080 database_port = 1521 ssl_enabled = True internal_network_only = True # Company-specific settings ldap_server = \\"ldap.company.com\\" file_share_path = \\"/mnt/company-files\\" email_server = \\"smtp.company.com\\" # Compliance settings audit_logging = True encryption_at_rest = True encryption_in_transit = True data_retention_years = 7 # Production resources app_server_resources = erp_resource_profiles.production.app_server_resources database_resources = erp_resource_profiles.production.database_resources backup_schedule = \\"0 2 * * *\\" backup_retention_policy = { daily_backups = 7 weekly_backups = 4 monthly_backups = 12 yearly_backups = 7 } }\\n} # Export for provisioning system\\n{ config: legacy_erp_default, dependencies: legacy_erp_dependencies, profiles: erp_resource_profiles\\n}","breadcrumbs":"Infrastructure-Specific Extensions » Company-Specific Application Taskserv","id":"2035","title":"Company-Specific Application Taskserv"},"2036":{"body":"Create compliance-monitor.ncl: \\"\\"\\"\\nCompliance Monitoring Taskserv\\nAutomated compliance checking and reporting for regulated environments\\n\\"\\"\\" import provisioning.lib as lib\\nimport provisioning.dependencies as deps schema ComplianceMonitorConfig: \\"\\"\\"Configuration for compliance monitoring system\\"\\"\\" # Compliance frameworks enabled_frameworks: [ComplianceFramework] # Monitoring settings scan_frequency: str = \\"0 0 * * *\\" # Daily real_time_monitoring: bool = True # Reporting settings report_frequency: str = \\"0 0 * * 0\\" # Weekly report_recipients: [str] report_format: \\"pdf\\" | \\"html\\" | \\"json\\" = \\"pdf\\" # Alerting configuration alert_severity_threshold: \\"low\\" | \\"medium\\" | \\"high\\" = \\"medium\\" alert_channels: [AlertChannel] # Data retention audit_log_retention_days: int = 2555 # 7 years report_retention_days: int = 365 # Integration settings siem_integration: bool = True siem_endpoint?: str check: audit_log_retention_days >= 2555, \\"Audit logs must be retained for at least 7 years\\" len(report_recipients) > 0, \\"At least one report recipient required\\" schema ComplianceFramework: \\"\\"\\"Compliance framework configuration\\"\\"\\" name: \\"pci-dss\\" | \\"sox\\" | \\"gdpr\\" | \\"hipaa\\" | \\"iso27001\\" | \\"nist\\" version: str enabled: bool = True custom_controls?: [ComplianceControl] schema ComplianceControl: \\"\\"\\"Custom compliance control\\"\\"\\" id: str description: str check_command: str severity: \\"low\\" | \\"medium\\" | \\"high\\" | \\"critical\\" remediation_guidance: str schema AlertChannel: \\"\\"\\"Alert channel configuration\\"\\"\\" type: \\"email\\" | \\"slack\\" | \\"teams\\" | \\"webhook\\" | \\"sms\\" endpoint: str severity_filter: [\\"low\\", \\"medium\\", \\"high\\", \\"critical\\"] # Taskserv definition\\nschema ComplianceMonitorTaskserv(lib.TaskServDef): \\"\\"\\"Compliance Monitor Taskserv Definition\\"\\"\\" name: str = \\"compliance-monitor\\" config: ComplianceMonitorConfig # Dependencies\\ncompliance_monitor_dependencies: deps.TaskservDependencies = { name = \\"compliance-monitor\\" # Dependencies requires = [\\"kubernetes\\"] optional = [\\"monitoring\\", \\"logging\\", \\"backup\\"] provides = [\\"compliance-reports\\", \\"audit-logs\\", \\"compliance-api\\"] # Resource requirements resources = { cpu = \\"500m\\" memory = \\"1Gi\\" disk = \\"50Gi\\" network = True privileged = False } # Health checks health_checks = [ { command = \\"curl -f http://localhost:9090/health\\" interval = 30 timeout = 10 retries = 3 }, { command = \\"compliance-check --dry-run\\" interval = 300 timeout = 60 retries = 1 } ] # Compatibility os_support = [\\"linux\\"] arch_support = [\\"amd64\\", \\"arm64\\"]\\n} # Default configuration with common compliance frameworks\\ncompliance_monitor_default: ComplianceMonitorTaskserv = { name = \\"compliance-monitor\\" config = { enabled_frameworks = [ { name = \\"pci-dss\\" version = \\"3.2.1\\" enabled = True }, { name = \\"sox\\" version = \\"2002\\" enabled = True }, { name = \\"gdpr\\" version = \\"2018\\" enabled = True } ] scan_frequency = \\"0 */6 * * *\\" # Every 6 hours real_time_monitoring = True report_frequency = \\"0 0 * * 1\\" # Weekly on Monday report_recipients = [\\"compliance@company.com\\", \\"security@company.com\\"] report_format = \\"pdf\\" alert_severity_threshold = \\"medium\\" alert_channels = [ { type = \\"email\\" endpoint = \\"security-alerts@company.com\\" severity_filter = [\\"medium\\", \\"high\\", \\"critical\\"] }, { type = \\"slack\\" endpoint = \\"https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX\\" severity_filter = [\\"high\\", \\"critical\\"] } ] audit_log_retention_days = 2555 report_retention_days = 365 siem_integration = True siem_endpoint = \\"https://siem.company.com/api/events\\" }\\n} # Export configuration\\n{ config: compliance_monitor_default, dependencies: compliance_monitor_dependencies\\n}","breadcrumbs":"Infrastructure-Specific Extensions » Compliance-Focused Taskserv","id":"2036","title":"Compliance-Focused Taskserv"},"2037":{"body":"","breadcrumbs":"Infrastructure-Specific Extensions » Provider-Specific Extensions","id":"2037","title":"Provider-Specific Extensions"},"2038":{"body":"When working with specialized or private cloud providers: # Create custom provider extension\\nmkdir -p extensions/providers/company-private-cloud/nickel\\ncd extensions/providers/company-private-cloud/nickel Create provision_company-private-cloud.ncl: \\"\\"\\"\\nCompany Private Cloud Provider\\nIntegration with company\'s private cloud infrastructure\\n\\"\\"\\" import provisioning.defaults as defaults\\nimport provisioning.server as server schema CompanyPrivateCloudConfig: \\"\\"\\"Company private cloud configuration\\"\\"\\" # API configuration api_endpoint: str = \\"https://cloud-api.company.com\\" api_version: str = \\"v2\\" auth_token: str # Network configuration management_network: str = \\"10.0.0.0/24\\" production_network: str = \\"10.1.0.0/16\\" dmz_network: str = \\"10.2.0.0/24\\" # Resource pools compute_cluster: str = \\"production-cluster\\" storage_cluster: str = \\"storage-cluster\\" # Compliance settings encryption_required: bool = True audit_all_operations: bool = True # Company-specific settings cost_center: str department: str project_code: str check: len(api_endpoint) > 0, \\"API endpoint required\\" len(auth_token) > 0, \\"Authentication token required\\" len(cost_center) > 0, \\"Cost center required for billing\\" schema CompanyPrivateCloudServer(server.Server): \\"\\"\\"Server configuration for company private cloud\\"\\"\\" # Instance configuration instance_class: \\"standard\\" | \\"compute-optimized\\" | \\"memory-optimized\\" | \\"storage-optimized\\" = \\"standard\\" instance_size: \\"small\\" | \\"medium\\" | \\"large\\" | \\"xlarge\\" | \\"2xlarge\\" = \\"medium\\" # Storage configuration root_disk_type: \\"ssd\\" | \\"nvme\\" | \\"spinning\\" = \\"ssd\\" root_disk_size: int = 50 additional_storage?: [CompanyCloudStorage] # Network configuration network_segment: \\"management\\" | \\"production\\" | \\"dmz\\" = \\"production\\" security_groups: [str] = [\\"default\\"] # Compliance settings encrypted_storage: bool = True backup_enabled: bool = True monitoring_enabled: bool = True # Company metadata cost_center: str department: str project_code: str environment: \\"dev\\" | \\"test\\" | \\"staging\\" | \\"prod\\" = \\"prod\\" check: root_disk_size >= 20, \\"Root disk must be at least 20 GB\\" len(cost_center) > 0, \\"Cost center required\\" len(department) > 0, \\"Department required\\" schema CompanyCloudStorage: \\"\\"\\"Additional storage configuration\\"\\"\\" size: int type: \\"ssd\\" | \\"nvme\\" | \\"spinning\\" | \\"archive\\" = \\"ssd\\" mount_point: str encrypted: bool = True backup_enabled: bool = True # Instance size configurations\\ninstance_specs = { \\"small\\": { vcpus = 2 memory_gb = 4 network_performance = \\"moderate\\" }, \\"medium\\": { vcpus = 4 memory_gb = 8 network_performance = \\"good\\" }, \\"large\\": { vcpus = 8 memory_gb = 16 network_performance = \\"high\\" }, \\"xlarge\\": { vcpus = 16 memory_gb = 32 network_performance = \\"high\\" }, \\"2xlarge\\": { vcpus = 32 memory_gb = 64 network_performance = \\"very-high\\" }\\n} # Provider defaults\\ncompany_private_cloud_defaults: defaults.ServerDefaults = { lock = False time_zone = \\"UTC\\" running_wait = 20 running_timeout = 600 # Private cloud may be slower # Company-specific OS image storage_os_find = \\"name: company-ubuntu-20.04-hardened | arch: x86_64\\" # Network settings network_utility_ipv4 = True network_public_ipv4 = False # Private cloud, no public IPs # Security settings user = \\"company-admin\\" user_ssh_port = 22 fix_local_hosts = True # Company metadata labels = \\"provider: company-private-cloud, compliance: required\\"\\n} # Export provider configuration\\n{ config: CompanyPrivateCloudConfig, server: CompanyPrivateCloudServer, defaults: company_private_cloud_defaults, instance_specs: instance_specs\\n}","breadcrumbs":"Infrastructure-Specific Extensions » Custom Cloud Provider Integration","id":"2038","title":"Custom Cloud Provider Integration"},"2039":{"body":"","breadcrumbs":"Infrastructure-Specific Extensions » Multi-Environment Management","id":"2039","title":"Multi-Environment Management"},"204":{"body":"# System defaults\\nless provisioning/config/config.defaults.toml # User configuration\\nvim workspace/config/local-overrides.toml # Environment-specific configs\\nvim workspace/config/dev-defaults.toml\\nvim workspace/config/test-defaults.toml\\nvim workspace/config/prod-defaults.toml # Infrastructure-specific config\\nvim workspace/infra//config.toml","breadcrumbs":"Quick Start Cheatsheet » Configuration Files","id":"204","title":"Configuration Files"},"2040":{"body":"Create environment-specific extensions that handle different deployment patterns: # Create environment management extension\\nmkdir -p extensions/clusters/company-environments/nickel\\ncd extensions/clusters/company-environments/nickel Create company-environments.ncl: \\"\\"\\"\\nCompany Environment Management\\nStandardized environment configurations for different deployment stages\\n\\"\\"\\" import provisioning.cluster as cluster\\nimport provisioning.server as server schema CompanyEnvironment: \\"\\"\\"Standard company environment configuration\\"\\"\\" # Environment metadata name: str type: \\"development\\" | \\"testing\\" | \\"staging\\" | \\"production\\" | \\"disaster-recovery\\" region: str availability_zones: [str] # Network configuration vpc_cidr: str subnet_configuration: SubnetConfiguration # Security configuration security_profile: SecurityProfile # Compliance requirements compliance_level: \\"basic\\" | \\"standard\\" | \\"high\\" | \\"critical\\" data_classification: \\"public\\" | \\"internal\\" | \\"confidential\\" | \\"restricted\\" # Resource constraints resource_limits: ResourceLimits # Backup and DR configuration backup_configuration: BackupConfiguration disaster_recovery_configuration?: DRConfiguration # Monitoring and alerting monitoring_level: \\"basic\\" | \\"standard\\" | \\"enhanced\\" alert_routing: AlertRouting schema SubnetConfiguration: \\"\\"\\"Network subnet configuration\\"\\"\\" public_subnets: [str] private_subnets: [str] database_subnets: [str] management_subnets: [str] schema SecurityProfile: \\"\\"\\"Security configuration profile\\"\\"\\" encryption_at_rest: bool encryption_in_transit: bool network_isolation: bool access_logging: bool vulnerability_scanning: bool # Access control multi_factor_auth: bool privileged_access_management: bool network_segmentation: bool # Compliance controls audit_logging: bool data_loss_prevention: bool endpoint_protection: bool schema ResourceLimits: \\"\\"\\"Resource allocation limits for environment\\"\\"\\" max_cpu_cores: int max_memory_gb: int max_storage_tb: int max_instances: int # Cost controls max_monthly_cost: int cost_alerts_enabled: bool schema BackupConfiguration: \\"\\"\\"Backup configuration for environment\\"\\"\\" backup_frequency: str retention_policy: {str: int} cross_region_backup: bool encryption_enabled: bool schema DRConfiguration: \\"\\"\\"Disaster recovery configuration\\"\\"\\" dr_region: str rto_minutes: int # Recovery Time Objective rpo_minutes: int # Recovery Point Objective automated_failover: bool schema AlertRouting: \\"\\"\\"Alert routing configuration\\"\\"\\" business_hours_contacts: [str] after_hours_contacts: [str] escalation_policy: [EscalationLevel] schema EscalationLevel: \\"\\"\\"Alert escalation level\\"\\"\\" level: int delay_minutes: int contacts: [str] # Environment templates\\nenvironment_templates = { \\"development\\": { type = \\"development\\" compliance_level = \\"basic\\" data_classification = \\"internal\\" security_profile = { encryption_at_rest = False encryption_in_transit = False network_isolation = False access_logging = True vulnerability_scanning = False multi_factor_auth = False privileged_access_management = False network_segmentation = False audit_logging = False data_loss_prevention = False endpoint_protection = False } resource_limits = { max_cpu_cores = 50 max_memory_gb = 200 max_storage_tb = 10 max_instances = 20 max_monthly_cost = 5000 cost_alerts_enabled = True } monitoring_level = \\"basic\\" }, \\"production\\": { type = \\"production\\" compliance_level = \\"critical\\" data_classification = \\"confidential\\" security_profile = { encryption_at_rest = True encryption_in_transit = True network_isolation = True access_logging = True vulnerability_scanning = True multi_factor_auth = True privileged_access_management = True network_segmentation = True audit_logging = True data_loss_prevention = True endpoint_protection = True } resource_limits = { max_cpu_cores = 1000 max_memory_gb = 4000 max_storage_tb = 500 max_instances = 200 max_monthly_cost = 100000 cost_alerts_enabled = True } monitoring_level = \\"enhanced\\" disaster_recovery_configuration = { dr_region = \\"us-west-2\\" rto_minutes = 60 rpo_minutes = 15 automated_failover = True } }\\n} # Export environment templates\\n{ templates: environment_templates, schema: CompanyEnvironment\\n}","breadcrumbs":"Infrastructure-Specific Extensions » Environment-Specific Configuration Management","id":"2040","title":"Environment-Specific Configuration Management"},"2041":{"body":"","breadcrumbs":"Infrastructure-Specific Extensions » Integration Patterns","id":"2041","title":"Integration Patterns"},"2042":{"body":"Create integration patterns for common legacy system scenarios: # Create integration patterns\\nmkdir -p extensions/taskservs/integrations/legacy-bridge/nickel\\ncd extensions/taskservs/integrations/legacy-bridge/nickel Create legacy-bridge.ncl: \\"\\"\\"\\nLegacy System Integration Bridge\\nProvides standardized integration patterns for legacy systems\\n\\"\\"\\" import provisioning.lib as lib\\nimport provisioning.dependencies as deps schema LegacyBridgeConfig: \\"\\"\\"Configuration for legacy system integration bridge\\"\\"\\" # Bridge configuration bridge_name: str integration_type: \\"api\\" | \\"database\\" | \\"file\\" | \\"message-queue\\" | \\"etl\\" # Legacy system details legacy_system: LegacySystemInfo # Modern system details modern_system: ModernSystemInfo # Data transformation configuration data_transformation: DataTransformationConfig # Security configuration security_config: IntegrationSecurityConfig # Monitoring and alerting monitoring_config: IntegrationMonitoringConfig schema LegacySystemInfo: \\"\\"\\"Legacy system information\\"\\"\\" name: str type: \\"mainframe\\" | \\"as400\\" | \\"unix\\" | \\"windows\\" | \\"database\\" | \\"file-system\\" version: str # Connection details connection_method: \\"direct\\" | \\"vpn\\" | \\"dedicated-line\\" | \\"api-gateway\\" endpoint: str port?: int # Authentication auth_method: \\"password\\" | \\"certificate\\" | \\"kerberos\\" | \\"ldap\\" | \\"token\\" credentials_source: \\"vault\\" | \\"config\\" | \\"environment\\" # Data characteristics data_format: \\"fixed-width\\" | \\"csv\\" | \\"xml\\" | \\"json\\" | \\"binary\\" | \\"proprietary\\" character_encoding: str = \\"utf-8\\" # Operational characteristics availability_hours: str = \\"24/7\\" maintenance_windows: [MaintenanceWindow] schema ModernSystemInfo: \\"\\"\\"Modern system information\\"\\"\\" name: str type: \\"microservice\\" | \\"api\\" | \\"database\\" | \\"event-stream\\" | \\"file-store\\" # Connection details endpoint: str api_version?: str # Data format data_format: \\"json\\" | \\"xml\\" | \\"avro\\" | \\"protobuf\\" # Authentication auth_method: \\"oauth2\\" | \\"jwt\\" | \\"api-key\\" | \\"mutual-tls\\" schema DataTransformationConfig: \\"\\"\\"Data transformation configuration\\"\\"\\" transformation_rules: [TransformationRule] error_handling: ErrorHandlingConfig data_validation: DataValidationConfig schema TransformationRule: \\"\\"\\"Individual data transformation rule\\"\\"\\" source_field: str target_field: str transformation_type: \\"direct\\" | \\"calculated\\" | \\"lookup\\" | \\"conditional\\" transformation_expression?: str schema ErrorHandlingConfig: \\"\\"\\"Error handling configuration\\"\\"\\" retry_policy: RetryPolicy dead_letter_queue: bool = True error_notification: bool = True schema RetryPolicy: \\"\\"\\"Retry policy configuration\\"\\"\\" max_attempts: int = 3 initial_delay_seconds: int = 5 backoff_multiplier: float = 2.0 max_delay_seconds: int = 300 schema DataValidationConfig: \\"\\"\\"Data validation configuration\\"\\"\\" schema_validation: bool = True business_rules_validation: bool = True data_quality_checks: [DataQualityCheck] schema DataQualityCheck: \\"\\"\\"Data quality check definition\\"\\"\\" name: str check_type: \\"completeness\\" | \\"uniqueness\\" | \\"validity\\" | \\"consistency\\" threshold: float = 0.95 action_on_failure: \\"warn\\" | \\"stop\\" | \\"quarantine\\" schema IntegrationSecurityConfig: \\"\\"\\"Security configuration for integration\\"\\"\\" encryption_in_transit: bool = True encryption_at_rest: bool = True # Access control source_ip_whitelist?: [str] api_rate_limiting: bool = True # Audit and compliance audit_all_transactions: bool = True pii_data_handling: PIIHandlingConfig schema PIIHandlingConfig: \\"\\"\\"PII data handling configuration\\"\\"\\" pii_fields: [str] anonymization_enabled: bool = True retention_policy_days: int = 365 schema IntegrationMonitoringConfig: \\"\\"\\"Monitoring configuration for integration\\"\\"\\" metrics_collection: bool = True performance_monitoring: bool = True # SLA monitoring sla_targets: SLATargets # Alerting alert_on_failures: bool = True alert_on_performance_degradation: bool = True schema SLATargets: \\"\\"\\"SLA targets for integration\\"\\"\\" max_latency_ms: int = 5000 min_availability_percent: float = 99.9 max_error_rate_percent: float = 0.1 schema MaintenanceWindow: \\"\\"\\"Maintenance window definition\\"\\"\\" day_of_week: int # 0=Sunday, 6=Saturday start_time: str # HH:MM format duration_hours: int # Taskserv definition\\nschema LegacyBridgeTaskserv(lib.TaskServDef): \\"\\"\\"Legacy Bridge Taskserv Definition\\"\\"\\" name: str = \\"legacy-bridge\\" config: LegacyBridgeConfig # Dependencies\\nlegacy_bridge_dependencies: deps.TaskservDependencies = { name = \\"legacy-bridge\\" requires = [\\"kubernetes\\"] optional = [\\"monitoring\\", \\"logging\\", \\"vault\\"] provides = [\\"legacy-integration\\", \\"data-bridge\\"] resources = { cpu = \\"500m\\" memory = \\"1Gi\\" disk = \\"10Gi\\" network = True privileged = False } health_checks = [ { command = \\"curl -f http://localhost:9090/health\\" interval = 30 timeout = 10 retries = 3 }, { command = \\"integration-test --quick\\" interval = 300 timeout = 120 retries = 1 } ] os_support = [\\"linux\\"] arch_support = [\\"amd64\\", \\"arm64\\"]\\n} # Export configuration\\n{ config: LegacyBridgeTaskserv, dependencies: legacy_bridge_dependencies\\n}","breadcrumbs":"Infrastructure-Specific Extensions » Legacy System Integration","id":"2042","title":"Legacy System Integration"},"2043":{"body":"","breadcrumbs":"Infrastructure-Specific Extensions » Real-World Examples","id":"2043","title":"Real-World Examples"},"2044":{"body":"# Financial services specific extensions\\nmkdir -p extensions/taskservs/financial-services/{trading-system,risk-engine,compliance-reporter}/nickel","breadcrumbs":"Infrastructure-Specific Extensions » Example 1: Financial Services Company","id":"2044","title":"Example 1: Financial Services Company"},"2045":{"body":"# Healthcare specific extensions\\nmkdir -p extensions/taskservs/healthcare/{hl7-processor,dicom-storage,hipaa-audit}/nickel","breadcrumbs":"Infrastructure-Specific Extensions » Example 2: Healthcare Organization","id":"2045","title":"Example 2: Healthcare Organization"},"2046":{"body":"# Manufacturing specific extensions\\nmkdir -p extensions/taskservs/manufacturing/{iot-gateway,scada-bridge,quality-system}/nickel","breadcrumbs":"Infrastructure-Specific Extensions » Example 3: Manufacturing Company","id":"2046","title":"Example 3: Manufacturing Company"},"2047":{"body":"Loading Infrastructure-Specific Extensions # Load company-specific extensions\\ncd workspace/infra/production\\nmodule-loader load taskservs . [legacy-erp, compliance-monitor, legacy-bridge]\\nmodule-loader load providers . [company-private-cloud]\\nmodule-loader load clusters . [company-environments] # Verify loading\\nmodule-loader list taskservs .\\nmodule-loader validate . Using in Server Configuration # Import loaded extensions\\nimport .taskservs.legacy-erp.legacy-erp as erp\\nimport .taskservs.compliance-monitor.compliance-monitor as compliance\\nimport .providers.company-private-cloud as private_cloud # Configure servers with company-specific extensions\\ncompany_servers: [server.Server] = [ { hostname = \\"erp-prod-01\\" title = \\"Production ERP Server\\" # Use company private cloud # Provider-specific configuration goes here taskservs = [ { name = \\"legacy-erp\\" profile = \\"production\\" }, { name = \\"compliance-monitor\\" profile = \\"default\\" } ] }\\n] This comprehensive guide covers all aspects of creating infrastructure-specific extensions, from assessment and planning to implementation and deployment.","breadcrumbs":"Infrastructure-Specific Extensions » Usage Examples","id":"2047","title":"Usage Examples"},"2048":{"body":"Target Audience : Developers working on the provisioning CLI Last Updated : 2025-09-30 Related : ADR-006 CLI Refactoring","breadcrumbs":"Command Handler Guide » Command Handler Developer Guide","id":"2048","title":"Command Handler Developer Guide"},"2049":{"body":"The provisioning CLI uses a modular, domain-driven architecture that separates concerns into focused command handlers. This guide shows you how to work with this architecture.","breadcrumbs":"Command Handler Guide » Overview","id":"2049","title":"Overview"},"205":{"body":"# Configure HTTP client behavior\\n# In workspace/config/local-overrides.toml:\\n[http]\\nuse_curl = true # Use curl instead of ureq","breadcrumbs":"Quick Start Cheatsheet » HTTP Configuration","id":"205","title":"HTTP Configuration"},"2050":{"body":"Separation of Concerns : Routing, flag parsing, and business logic are separated Domain-Driven Design : Commands organized by domain (infrastructure, orchestration, etc.) DRY (Don\'t Repeat Yourself) : Centralized flag handling eliminates code duplication Single Responsibility : Each module has one clear purpose Open/Closed Principle : Easy to extend, no need to modify core routing","breadcrumbs":"Command Handler Guide » Key Architecture Principles","id":"2050","title":"Key Architecture Principles"},"2051":{"body":"provisioning/core/nulib/\\n├── provisioning (211 lines) - Main entry point\\n├── main_provisioning/\\n│ ├── flags.nu (139 lines) - Centralized flag handling\\n│ ├── dispatcher.nu (264 lines) - Command routing\\n│ ├── help_system.nu - Categorized help system\\n│ └── commands/ - Domain-focused handlers\\n│ ├── infrastructure.nu (117 lines) - Server, taskserv, cluster, infra\\n│ ├── orchestration.nu (64 lines) - Workflow, batch, orchestrator\\n│ ├── development.nu (72 lines) - Module, layer, version, pack\\n│ ├── workspace.nu (56 lines) - Workspace, template\\n│ ├── generation.nu (78 lines) - Generate commands\\n│ ├── utilities.nu (157 lines) - SSH, SOPS, cache, providers\\n│ └── configuration.nu (316 lines) - Env, show, init, validate","breadcrumbs":"Command Handler Guide » Architecture Components","id":"2051","title":"Architecture Components"},"2052":{"body":"","breadcrumbs":"Command Handler Guide » Adding New Commands","id":"2052","title":"Adding New Commands"},"2053":{"body":"Commands are organized by domain. Choose the appropriate handler: Domain Handler Responsibility infrastructure infrastructure.nu Server/taskserv/cluster/infra lifecycle orchestration orchestration.nu Workflow/batch operations, orchestrator control development development.nu Module discovery, layers, versions, packaging workspace workspace.nu Workspace and template management configuration configuration.nu Environment, settings, initialization utilities utilities.nu SSH, SOPS, cache, providers, utilities generation generation.nu Generate commands (server, taskserv, etc.)","breadcrumbs":"Command Handler Guide » Step 1: Choose the Right Domain Handler","id":"2053","title":"Step 1: Choose the Right Domain Handler"},"2054":{"body":"Example: Adding a new server command server status Edit provisioning/core/nulib/main_provisioning/commands/infrastructure.nu: # Add to the handle_infrastructure_command match statement\\nexport def handle_infrastructure_command [ command: string ops: string flags: record\\n] { set_debug_env $flags match $command { \\"server\\" => { handle_server $ops $flags } \\"taskserv\\" | \\"task\\" => { handle_taskserv $ops $flags } \\"cluster\\" => { handle_cluster $ops $flags } \\"infra\\" | \\"infras\\" => { handle_infra $ops $flags } _ => { print $\\"❌ Unknown infrastructure command: ($command)\\" print \\"\\" print \\"Available infrastructure commands:\\" print \\" server - Server operations (create, delete, list, ssh, status)\\" # Updated print \\" taskserv - Task service management\\" print \\" cluster - Cluster operations\\" print \\" infra - Infrastructure management\\" print \\"\\" print \\"Use \'provisioning help infrastructure\' for more details\\" exit 1 } }\\n} # Add the new command handler\\ndef handle_server [ops: string, flags: record] { let args = build_module_args $flags $ops run_module $args \\"server\\" --exec\\n} That\'s it! The command is now available as provisioning server status.","breadcrumbs":"Command Handler Guide » Step 2: Add Command to Handler","id":"2054","title":"Step 2: Add Command to Handler"},"2055":{"body":"If you want shortcuts like provisioning s status: Edit provisioning/core/nulib/main_provisioning/dispatcher.nu: export def get_command_registry []: nothing -> record { { # Infrastructure commands \\"s\\" => \\"infrastructure server\\" # Already exists \\"server\\" => \\"infrastructure server\\" # Already exists # Your new shortcut (if needed) # Example: \\"srv-status\\" => \\"infrastructure server status\\" # ... rest of registry }\\n} Note : Most shortcuts are already configured. You only need to add new shortcuts if you\'re creating completely new command categories.","breadcrumbs":"Command Handler Guide » Step 3: Add Shortcuts (Optional)","id":"2055","title":"Step 3: Add Shortcuts (Optional)"},"2056":{"body":"","breadcrumbs":"Command Handler Guide » Modifying Existing Handlers","id":"2056","title":"Modifying Existing Handlers"},"2057":{"body":"Let\'s say you want to add better error handling to the taskserv command: Before: def handle_taskserv [ops: string, flags: record] { let args = build_module_args $flags $ops run_module $args \\"taskserv\\" --exec\\n} After: def handle_taskserv [ops: string, flags: record] { # Validate taskserv name if provided let first_arg = ($ops | split row \\" \\" | get -o 0) if ($first_arg | is-not-empty) and $first_arg not-in [\\"create\\", \\"delete\\", \\"list\\", \\"generate\\", \\"check-updates\\", \\"help\\"] { # Check if taskserv exists let available_taskservs = (^$env.PROVISIONING_NAME module discover taskservs | from json) if $first_arg not-in $available_taskservs { print $\\"❌ Unknown taskserv: ($first_arg)\\" print \\"\\" print \\"Available taskservs:\\" $available_taskservs | each { |ts| print $\\" • ($ts)\\" } exit 1 } } let args = build_module_args $flags $ops run_module $args \\"taskserv\\" --exec\\n}","breadcrumbs":"Command Handler Guide » Example: Enhancing the taskserv Command","id":"2057","title":"Example: Enhancing the taskserv Command"},"2058":{"body":"","breadcrumbs":"Command Handler Guide » Working with Flags","id":"2058","title":"Working with Flags"},"2059":{"body":"The flags.nu module provides centralized flag handling: # Parse all flags into normalized record\\nlet parsed_flags = (parse_common_flags { version: $version, v: $v, info: $info, debug: $debug, check: $check, yes: $yes, wait: $wait, infra: $infra, # ... etc\\n}) # Build argument string for module execution\\nlet args = build_module_args $parsed_flags $ops # Set environment variables based on flags\\nset_debug_env $parsed_flags","breadcrumbs":"Command Handler Guide » Using Centralized Flag Handling","id":"2059","title":"Using Centralized Flag Handling"},"206":{"body":"","breadcrumbs":"Quick Start Cheatsheet » Workspace Commands","id":"206","title":"Workspace Commands"},"2060":{"body":"The parse_common_flags function normalizes these flags: Flag Record Field Description show_version Version display (--version, -v) show_info Info display (--info, -i) show_about About display (--about, -a) debug_mode Debug mode (--debug, -x) check_mode Check mode (--check, -c) auto_confirm Auto-confirm (--yes, -y) wait Wait for completion (--wait, -w) keep_storage Keep storage (--keepstorage) infra Infrastructure name (--infra) outfile Output file (--outfile) output_format Output format (--out) template Template name (--template) select Selection (--select) settings Settings file (--settings) new_infra New infra name (--new)","breadcrumbs":"Command Handler Guide » Available Flag Parsing","id":"2060","title":"Available Flag Parsing"},"2061":{"body":"If you need to add a new flag: Update main provisioning file to accept the flag Update flags.nu:parse_common_flags to normalize it Update flags.nu:build_module_args to pass it to modules Example: Adding --timeout flag # 1. In provisioning main file (parameter list)\\ndef main [ # ... existing parameters --timeout: int = 300 # Timeout in seconds # ... rest of parameters\\n] { # ... existing code let parsed_flags = (parse_common_flags { # ... existing flags timeout: $timeout })\\n} # 2. In flags.nu:parse_common_flags\\nexport def parse_common_flags [flags: record]: nothing -> record { { # ... existing normalizations timeout: ($flags.timeout? | default 300) }\\n} # 3. In flags.nu:build_module_args\\nexport def build_module_args [flags: record, extra: string = \\"\\"]: nothing -> string { # ... existing code let str_timeout = if ($flags.timeout != 300) { $\\"--timeout ($flags.timeout) \\" } else { \\"\\" } # ... rest of function $\\"($extra) ($use_check)($use_yes)($use_wait)($str_timeout)...\\"\\n}","breadcrumbs":"Command Handler Guide » Adding New Flags","id":"2061","title":"Adding New Flags"},"2062":{"body":"","breadcrumbs":"Command Handler Guide » Adding New Shortcuts","id":"2062","title":"Adding New Shortcuts"},"2063":{"body":"1-2 letters : Ultra-short for common commands (s for server, ws for workspace) 3-4 letters : Abbreviations (orch for orchestrator, tmpl for template) Aliases : Alternative names (task for taskserv, flow for workflow)","breadcrumbs":"Command Handler Guide » Shortcut Naming Conventions","id":"2063","title":"Shortcut Naming Conventions"},"2064":{"body":"Edit provisioning/core/nulib/main_provisioning/dispatcher.nu: export def get_command_registry []: nothing -> record { { # ... existing shortcuts # Add your new shortcut \\"db\\" => \\"infrastructure database\\" # New: db command \\"database\\" => \\"infrastructure database\\" # Full name # ... rest of registry }\\n} Important : After adding a shortcut, update the help system in help_system.nu to document it.","breadcrumbs":"Command Handler Guide » Example: Adding a New Shortcut","id":"2064","title":"Example: Adding a New Shortcut"},"2065":{"body":"","breadcrumbs":"Command Handler Guide » Testing Your Changes","id":"2065","title":"Testing Your Changes"},"2066":{"body":"# Run comprehensive test suite\\nnu tests/test_provisioning_refactor.nu","breadcrumbs":"Command Handler Guide » Running the Test Suite","id":"2066","title":"Running the Test Suite"},"2067":{"body":"The test suite validates: ✅ Main help display ✅ Category help (infrastructure, orchestration, development, workspace) ✅ Bi-directional help routing ✅ All command shortcuts ✅ Category shortcut help ✅ Command routing to correct handlers","breadcrumbs":"Command Handler Guide » Test Coverage","id":"2067","title":"Test Coverage"},"2068":{"body":"Edit tests/test_provisioning_refactor.nu: # Add your test function\\nexport def test_my_new_feature [] { print \\"\\\\n🧪 Testing my new feature...\\" let output = (run_provisioning \\"my-command\\" \\"test\\") assert_contains $output \\"Expected Output\\" \\"My command works\\"\\n} # Add to main test runner\\nexport def main [] { # ... existing tests let results = [ # ... existing test calls (try { test_my_new_feature; \\"passed\\" } catch { \\"failed\\" }) ] # ... rest of main\\n}","breadcrumbs":"Command Handler Guide » Adding Tests for Your Changes","id":"2068","title":"Adding Tests for Your Changes"},"2069":{"body":"# Test command execution\\nprovisioning/core/cli/provisioning my-command test --check # Test with debug mode\\nprovisioning/core/cli/provisioning --debug my-command test # Test help\\nprovisioning/core/cli/provisioning my-command help\\nprovisioning/core/cli/provisioning help my-command # Bi-directional","breadcrumbs":"Command Handler Guide » Manual Testing","id":"2069","title":"Manual Testing"},"207":{"body":"# List all workspaces\\nprovisioning workspace list # Show active workspace\\nprovisioning workspace active # Switch to another workspace\\nprovisioning workspace switch \\nprovisioning workspace activate # alias # Register new workspace\\nprovisioning workspace register \\nprovisioning workspace register --activate # Remove workspace from registry\\nprovisioning workspace remove \\nprovisioning workspace remove --force # Initialize new workspace\\nprovisioning workspace init\\nprovisioning workspace init --name production # Create new workspace\\nprovisioning workspace create # Validate workspace\\nprovisioning workspace validate # Show workspace info\\nprovisioning workspace info # Migrate workspace\\nprovisioning workspace migrate","breadcrumbs":"Quick Start Cheatsheet » Workspace Management","id":"207","title":"Workspace Management"},"2070":{"body":"","breadcrumbs":"Command Handler Guide » Common Patterns","id":"2070","title":"Common Patterns"},"2071":{"body":"Use Case : Command just needs to execute a module with standard flags def handle_simple_command [ops: string, flags: record] { let args = build_module_args $flags $ops run_module $args \\"module_name\\" --exec\\n}","breadcrumbs":"Command Handler Guide » Pattern 1: Simple Command Handler","id":"2071","title":"Pattern 1: Simple Command Handler"},"2072":{"body":"Use Case : Need to validate input before execution def handle_validated_command [ops: string, flags: record] { # Validate let first_arg = ($ops | split row \\" \\" | get -o 0) if ($first_arg | is-empty) { print \\"❌ Missing required argument\\" print \\"Usage: provisioning command \\" exit 1 } # Execute let args = build_module_args $flags $ops run_module $args \\"module_name\\" --exec\\n}","breadcrumbs":"Command Handler Guide » Pattern 2: Command with Validation","id":"2072","title":"Pattern 2: Command with Validation"},"2073":{"body":"Use Case : Command has multiple subcommands (like server create, server delete) def handle_complex_command [ops: string, flags: record] { let subcommand = ($ops | split row \\" \\" | get -o 0) let rest_ops = ($ops | split row \\" \\" | skip 1 | str join \\" \\") match $subcommand { \\"create\\" => { handle_create $rest_ops $flags } \\"delete\\" => { handle_delete $rest_ops $flags } \\"list\\" => { handle_list $rest_ops $flags } _ => { print \\"❌ Unknown subcommand: $subcommand\\" print \\"Available: create, delete, list\\" exit 1 } }\\n}","breadcrumbs":"Command Handler Guide » Pattern 3: Command with Subcommands","id":"2073","title":"Pattern 3: Command with Subcommands"},"2074":{"body":"Use Case : Command behavior changes based on flags def handle_flag_routed_command [ops: string, flags: record] { if $flags.check_mode { # Dry-run mode print \\"🔍 Check mode: simulating command...\\" let args = build_module_args $flags $ops run_module $args \\"module_name\\" # No --exec, returns output } else { # Normal execution let args = build_module_args $flags $ops run_module $args \\"module_name\\" --exec }\\n}","breadcrumbs":"Command Handler Guide » Pattern 4: Command with Flag-Based Routing","id":"2074","title":"Pattern 4: Command with Flag-Based Routing"},"2075":{"body":"","breadcrumbs":"Command Handler Guide » Best Practices","id":"2075","title":"Best Practices"},"2076":{"body":"Each handler should do one thing well : ✅ Good: handle_server manages all server operations ❌ Bad: handle_server also manages clusters and taskservs","breadcrumbs":"Command Handler Guide » 1. Keep Handlers Focused","id":"2076","title":"1. Keep Handlers Focused"},"2077":{"body":"# ❌ Bad\\nprint \\"Error\\" # ✅ Good\\nprint \\"❌ Unknown taskserv: kubernetes-invalid\\"\\nprint \\"\\"\\nprint \\"Available taskservs:\\"\\nprint \\" • kubernetes\\"\\nprint \\" • containerd\\"\\nprint \\" • cilium\\"\\nprint \\"\\"\\nprint \\"Use \'provisioning taskserv list\' to see all available taskservs\\"","breadcrumbs":"Command Handler Guide » 2. Use Descriptive Error Messages","id":"2077","title":"2. Use Descriptive Error Messages"},"2078":{"body":"Don\'t repeat code - use centralized functions: # ❌ Bad: Repeating flag handling\\ndef handle_bad [ops: string, flags: record] { let use_check = if $flags.check_mode { \\"--check \\" } else { \\"\\" } let use_yes = if $flags.auto_confirm { \\"--yes \\" } else { \\"\\" } let str_infra = if ($flags.infra | is-not-empty) { $\\"--infra ($flags.infra) \\" } else { \\"\\" } # ... 10 more lines of flag handling run_module $\\"($ops) ($use_check)($use_yes)($str_infra)...\\" \\"module\\" --exec\\n} # ✅ Good: Using centralized function\\ndef handle_good [ops: string, flags: record] { let args = build_module_args $flags $ops run_module $args \\"module\\" --exec\\n}","breadcrumbs":"Command Handler Guide » 3. Leverage Centralized Functions","id":"2078","title":"3. Leverage Centralized Functions"},"2079":{"body":"Update relevant documentation: ADR-006 : If architectural changes CLAUDE.md : If new commands or shortcuts help_system.nu : If new categories or commands This guide : If new patterns or conventions","breadcrumbs":"Command Handler Guide » 4. Document Your Changes","id":"2079","title":"4. Document Your Changes"},"208":{"body":"# View user preferences\\nprovisioning workspace preferences # Set user preference\\nprovisioning workspace set-preference editor vim\\nprovisioning workspace set-preference output_format yaml\\nprovisioning workspace set-preference confirm_delete true # Get user preference\\nprovisioning workspace get-preference editor User Config Location: macOS: ~/Library/Application Support/provisioning/user_config.yaml Linux: ~/.config/provisioning/user_config.yaml Windows: %APPDATA%\\\\provisioning\\\\user_config.yaml","breadcrumbs":"Quick Start Cheatsheet » User Preferences","id":"208","title":"User Preferences"},"2080":{"body":"Before committing: Run test suite: nu tests/test_provisioning_refactor.nu Test manual execution Test with --check flag Test with --debug flag Test help: both provisioning cmd help and provisioning help cmd Test shortcuts","breadcrumbs":"Command Handler Guide » 5. Test Thoroughly","id":"2080","title":"5. Test Thoroughly"},"2081":{"body":"","breadcrumbs":"Command Handler Guide » Troubleshooting","id":"2081","title":"Troubleshooting"},"2082":{"body":"Cause : Incorrect import path in handler Fix : Use relative imports with .nu extension: # ✅ Correct\\nuse ../flags.nu *\\nuse ../../lib_provisioning * # ❌ Wrong\\nuse ../main_provisioning/flags *\\nuse lib_provisioning *","breadcrumbs":"Command Handler Guide » Issue: \\"Module not found\\"","id":"2082","title":"Issue: \\"Module not found\\""},"2083":{"body":"Cause : Missing type signature format Fix : Use proper Nushell 0.107 type signature: # ✅ Correct\\nexport def my_function [param: string]: nothing -> string { \\"result\\"\\n} # ❌ Wrong\\nexport def my_function [param: string] -> string { \\"result\\"\\n}","breadcrumbs":"Command Handler Guide » Issue: \\"Parse mismatch: expected colon\\"","id":"2083","title":"Issue: \\"Parse mismatch: expected colon\\""},"2084":{"body":"Cause : Shortcut not in command registry Fix : Add to dispatcher.nu:get_command_registry: \\"myshortcut\\" => \\"domain command\\"","breadcrumbs":"Command Handler Guide » Issue: \\"Command not routing correctly\\"","id":"2084","title":"Issue: \\"Command not routing correctly\\""},"2085":{"body":"Cause : Not using build_module_args Fix : Use centralized flag builder: let args = build_module_args $flags $ops\\nrun_module $args \\"module\\" --exec","breadcrumbs":"Command Handler Guide » Issue: \\"Flags not being passed\\"","id":"2085","title":"Issue: \\"Flags not being passed\\""},"2086":{"body":"","breadcrumbs":"Command Handler Guide » Quick Reference","id":"2086","title":"Quick Reference"},"2087":{"body":"provisioning/core/nulib/\\n├── provisioning - Main entry, flag definitions\\n├── main_provisioning/\\n│ ├── flags.nu - Flag parsing (parse_common_flags, build_module_args)\\n│ ├── dispatcher.nu - Routing (get_command_registry, dispatch_command)\\n│ ├── help_system.nu - Help (provisioning-help, help-*)\\n│ └── commands/ - Domain handlers (handle_*_command)\\ntests/\\n└── test_provisioning_refactor.nu - Test suite\\ndocs/\\n├── architecture/\\n│ └── adr-006-provisioning-cli-refactoring.md - Architecture docs\\n└── development/ └── COMMAND_HANDLER_GUIDE.md - This guide","breadcrumbs":"Command Handler Guide » File Locations","id":"2087","title":"File Locations"},"2088":{"body":"# In flags.nu\\nparse_common_flags [flags: record]: nothing -> record\\nbuild_module_args [flags: record, extra: string = \\"\\"]: nothing -> string\\nset_debug_env [flags: record]\\nget_debug_flag [flags: record]: nothing -> string # In dispatcher.nu\\nget_command_registry []: nothing -> record\\ndispatch_command [args: list, flags: record] # In help_system.nu\\nprovisioning-help [category?: string]: nothing -> string\\nhelp-infrastructure []: nothing -> string\\nhelp-orchestration []: nothing -> string\\n# ... (one for each category) # In commands/*.nu\\nhandle_*_command [command: string, ops: string, flags: record]\\n# Example: handle_infrastructure_command, handle_workspace_command","breadcrumbs":"Command Handler Guide » Key Functions","id":"2088","title":"Key Functions"},"2089":{"body":"# Run full test suite\\nnu tests/test_provisioning_refactor.nu # Test specific command\\nprovisioning/core/cli/provisioning my-command test --check # Test with debug\\nprovisioning/core/cli/provisioning --debug my-command test # Test help\\nprovisioning/core/cli/provisioning help my-command\\nprovisioning/core/cli/provisioning my-command help # Bi-directional","breadcrumbs":"Command Handler Guide » Testing Commands","id":"2089","title":"Testing Commands"},"209":{"body":"","breadcrumbs":"Quick Start Cheatsheet » Security Commands","id":"209","title":"Security Commands"},"2090":{"body":"ADR-006: CLI Refactoring - Complete architectural decision record Project Structure - Overall project organization Workflow Development - Workflow system architecture Development Integration - Integration patterns","breadcrumbs":"Command Handler Guide » Further Reading","id":"2090","title":"Further Reading"},"2091":{"body":"When contributing command handler changes: Follow existing patterns - Use the patterns in this guide Update documentation - Keep docs in sync with code Add tests - Cover your new functionality Run test suite - Ensure nothing breaks Update CLAUDE.md - Document new commands/shortcuts For questions or issues, refer to ADR-006 or ask the team. This guide is part of the provisioning project documentation. Last updated: 2025-09-30","breadcrumbs":"Command Handler Guide » Contributing","id":"2091","title":"Contributing"},"2092":{"body":"This document outlines the recommended development workflows, coding practices, testing strategies, and debugging techniques for the provisioning project.","breadcrumbs":"Workflow » Development Workflow Guide","id":"2092","title":"Development Workflow Guide"},"2093":{"body":"Overview Development Setup Daily Development Workflow Code Organization Testing Strategies Debugging Techniques Integration Workflows Collaboration Guidelines Quality Assurance Best Practices","breadcrumbs":"Workflow » Table of Contents","id":"2093","title":"Table of Contents"},"2094":{"body":"The provisioning project employs a multi-language, multi-component architecture requiring specific development workflows to maintain consistency, quality, and efficiency. Key Technologies : Nushell : Primary scripting and automation language Rust : High-performance system components KCL : Configuration language and schemas TOML : Configuration files Jinja2 : Template engine Development Principles : Configuration-Driven : Never hardcode, always configure Hybrid Architecture : Rust for performance, Nushell for flexibility Test-First : Comprehensive testing at all levels Documentation-Driven : Code and APIs are self-documenting","breadcrumbs":"Workflow » Overview","id":"2094","title":"Overview"},"2095":{"body":"","breadcrumbs":"Workflow » Development Setup","id":"2095","title":"Development Setup"},"2096":{"body":"1. Clone and Navigate : # Clone repository\\ngit clone https://github.com/company/provisioning-system.git\\ncd provisioning-system # Navigate to workspace\\ncd workspace/tools 2. Initialize Workspace : # Initialize development workspace\\nnu workspace.nu init --user-name $USER --infra-name dev-env # Check workspace health\\nnu workspace.nu health --detailed --fix-issues 3. Configure Development Environment : # Create user configuration\\ncp workspace/config/local-overrides.toml.example workspace/config/$USER.toml # Edit configuration for development\\n$EDITOR workspace/config/$USER.toml 4. Set Up Build System : # Navigate to build tools\\ncd src/tools # Check build prerequisites\\nmake info # Perform initial build\\nmake dev-build","breadcrumbs":"Workflow » Initial Environment Setup","id":"2096","title":"Initial Environment Setup"},"2097":{"body":"Required Tools : # Install Nushell\\ncargo install nu # Install Nickel\\ncargo install nickel # Install additional tools\\ncargo install cross # Cross-compilation\\ncargo install cargo-audit # Security auditing\\ncargo install cargo-watch # File watching Optional Development Tools : # Install development enhancers\\ncargo install nu_plugin_tera # Template plugin\\ncargo install sops # Secrets management\\nbrew install k9s # Kubernetes management","breadcrumbs":"Workflow » Tool Installation","id":"2097","title":"Tool Installation"},"2098":{"body":"VS Code Setup (.vscode/settings.json): { \\"files.associations\\": { \\"*.nu\\": \\"shellscript\\", \\"*.ncl\\": \\"nickel\\", \\"*.toml\\": \\"toml\\" }, \\"nushell.shellPath\\": \\"/usr/local/bin/nu\\", \\"rust-analyzer.cargo.features\\": \\"all\\", \\"editor.formatOnSave\\": true, \\"editor.rulers\\": [100], \\"files.trimTrailingWhitespace\\": true\\n} Recommended Extensions : Nushell Language Support Rust Analyzer Nickel Language Support TOML Language Support Better TOML","breadcrumbs":"Workflow » IDE Configuration","id":"2098","title":"IDE Configuration"},"2099":{"body":"","breadcrumbs":"Workflow » Daily Development Workflow","id":"2099","title":"Daily Development Workflow"},"21":{"body":"Understand Mode System Learn Service Management Review Infrastructure Management Study OCI Registry","breadcrumbs":"Home » For Operators","id":"21","title":"For Operators"},"210":{"body":"# Login\\nprovisioning login admin # Logout\\nprovisioning logout # Show session status\\nprovisioning auth status # List active sessions\\nprovisioning auth sessions","breadcrumbs":"Quick Start Cheatsheet » Authentication (via CLI)","id":"210","title":"Authentication (via CLI)"},"2100":{"body":"1. Sync and Update : # Sync with upstream\\ngit pull origin main # Update workspace\\ncd workspace/tools\\nnu workspace.nu health --fix-issues # Check for updates\\nnu workspace.nu status --detailed 2. Review Current State : # Check current infrastructure\\nprovisioning show servers\\nprovisioning show settings # Review workspace status\\nnu workspace.nu status","breadcrumbs":"Workflow » Morning Routine","id":"2100","title":"Morning Routine"},"2101":{"body":"1. Feature Development : # Create feature branch\\ngit checkout -b feature/new-provider-support # Start development environment\\ncd workspace/tools\\nnu workspace.nu init --workspace-type development # Begin development\\n$EDITOR workspace/extensions/providers/new-provider/nulib/provider.nu 2. Incremental Testing : # Test syntax during development\\nnu --check workspace/extensions/providers/new-provider/nulib/provider.nu # Run unit tests\\nnu workspace/extensions/providers/new-provider/tests/unit/basic-test.nu # Integration testing\\nnu workspace.nu tools test-extension providers/new-provider 3. Build and Validate : # Quick development build\\ncd src/tools\\nmake dev-build # Validate changes\\nmake validate-all # Test distribution\\nmake test-dist","breadcrumbs":"Workflow » Development Cycle","id":"2101","title":"Development Cycle"},"2102":{"body":"Unit Testing : # Add test examples to functions\\ndef create-server [name: string] -> record { # @test: \\"test-server\\" -> {name: \\"test-server\\", status: \\"created\\"} # Implementation here\\n} Integration Testing : # Test with real infrastructure\\nnu workspace/extensions/providers/new-provider/nulib/provider.nu \\\\ create-server test-server --dry-run # Test with workspace isolation\\nPROVISIONING_WORKSPACE_USER=$USER provisioning server create test-server --check","breadcrumbs":"Workflow » Testing During Development","id":"2102","title":"Testing During Development"},"2103":{"body":"1. Commit Progress : # Stage changes\\ngit add . # Commit with descriptive message\\ngit commit -m \\"feat(provider): add new cloud provider support - Implement basic server creation\\n- Add configuration schema\\n- Include unit tests\\n- Update documentation\\" # Push to feature branch\\ngit push origin feature/new-provider-support 2. Workspace Maintenance : # Clean up development data\\nnu workspace.nu cleanup --type cache --age 1d # Backup current state\\nnu workspace.nu backup --auto-name --components config,extensions # Check workspace health\\nnu workspace.nu health","breadcrumbs":"Workflow » End-of-Day Routine","id":"2103","title":"End-of-Day Routine"},"2104":{"body":"","breadcrumbs":"Workflow » Code Organization","id":"2104","title":"Code Organization"},"2105":{"body":"File Organization : Extension Structure:\\n├── nulib/\\n│ ├── main.nu # Main entry point\\n│ ├── core/ # Core functionality\\n│ │ ├── api.nu # API interactions\\n│ │ ├── config.nu # Configuration handling\\n│ │ └── utils.nu # Utility functions\\n│ ├── commands/ # User commands\\n│ │ ├── create.nu # Create operations\\n│ │ ├── delete.nu # Delete operations\\n│ │ └── list.nu # List operations\\n│ └── tests/ # Test files\\n│ ├── unit/ # Unit tests\\n│ └── integration/ # Integration tests\\n└── templates/ # Template files ├── config.j2 # Configuration templates └── manifest.j2 # Manifest templates Function Naming Conventions : # Use kebab-case for commands\\ndef create-server [name: string] -> record { ... }\\ndef validate-config [config: record] -> bool { ... } # Use snake_case for internal functions\\ndef get_api_client [] -> record { ... }\\ndef parse_config_file [path: string] -> record { ... } # Use descriptive prefixes\\ndef check-server-status [server: string] -> string { ... }\\ndef get-server-info [server: string] -> record { ... }\\ndef list-available-zones [] -> list { ... } Error Handling Pattern : def create-server [ name: string --dry-run: bool = false\\n] -> record { # 1. Validate inputs if ($name | str length) == 0 { error make { msg: \\"Server name cannot be empty\\" label: { text: \\"empty name provided\\" span: (metadata $name).span } } } # 2. Check prerequisites let config = try { get-provider-config } catch { error make {msg: \\"Failed to load provider configuration\\"} } # 3. Perform operation if $dry_run { return {action: \\"create\\", server: $name, status: \\"dry-run\\"} } # 4. Return result {server: $name, status: \\"created\\", id: (generate-id)}\\n}","breadcrumbs":"Workflow » Nushell Code Structure","id":"2105","title":"Nushell Code Structure"},"2106":{"body":"Project Organization : src/\\n├── lib.rs # Library root\\n├── main.rs # Binary entry point\\n├── config/ # Configuration handling\\n│ ├── mod.rs\\n│ ├── loader.rs # Config loading\\n│ └── validation.rs # Config validation\\n├── api/ # HTTP API\\n│ ├── mod.rs\\n│ ├── handlers.rs # Request handlers\\n│ └── middleware.rs # Middleware components\\n└── orchestrator/ # Orchestration logic ├── mod.rs ├── workflow.rs # Workflow management └── task_queue.rs # Task queue management Error Handling : use anyhow::{Context, Result};\\nuse thiserror::Error; #[derive(Error, Debug)]\\npub enum ProvisioningError { #[error(\\"Configuration error: {message}\\")] Config { message: String }, #[error(\\"Network error: {source}\\")] Network { #[from] source: reqwest::Error, }, #[error(\\"Validation failed: {field}\\")] Validation { field: String },\\n} pub fn create_server(name: &str) -> Result { let config = load_config() .context(\\"Failed to load configuration\\")?; validate_server_name(name) .context(\\"Server name validation failed\\")?; let server = provision_server(name, &config) .context(\\"Failed to provision server\\")?; Ok(server)\\n}","breadcrumbs":"Workflow » Rust Code Structure","id":"2106","title":"Rust Code Structure"},"2107":{"body":"Schema Structure : # Base schema definitions\\nlet ServerConfig = { name | string, plan | string, zone | string, tags | { } | default = {},\\n} in\\nServerConfig # Provider-specific extensions\\nlet UpCloudServerConfig = { template | string | default = \\"Ubuntu Server 22.04 LTS (Jammy Jellyfish)\\", storage | number | default = 25,\\n} in\\nUpCloudServerConfig # Composition schemas\\nlet InfrastructureConfig = { servers | array, networks | array | default = [], load_balancers | array | default = [],\\n} in\\nInfrastructureConfig","breadcrumbs":"Workflow » Nickel Schema Organization","id":"2107","title":"Nickel Schema Organization"},"2108":{"body":"","breadcrumbs":"Workflow » Testing Strategies","id":"2108","title":"Testing Strategies"},"2109":{"body":"TDD Workflow : Write Test First : Define expected behavior Run Test (Fail) : Confirm test fails as expected Write Code : Implement minimal code to pass Run Test (Pass) : Confirm test now passes Refactor : Improve code while keeping tests green","breadcrumbs":"Workflow » Test-Driven Development","id":"2109","title":"Test-Driven Development"},"211":{"body":"# Enroll in TOTP (Google Authenticator, Authy)\\nprovisioning mfa totp enroll # Enroll in WebAuthn (YubiKey, Touch ID, Windows Hello)\\nprovisioning mfa webauthn enroll # Verify MFA code\\nprovisioning mfa totp verify --code 123456\\nprovisioning mfa webauthn verify # List registered devices\\nprovisioning mfa devices","breadcrumbs":"Quick Start Cheatsheet » Multi-Factor Authentication (MFA)","id":"211","title":"Multi-Factor Authentication (MFA)"},"2110":{"body":"Unit Test Pattern : # Function with embedded test\\ndef validate-server-name [name: string] -> bool { # @test: \\"valid-name\\" -> true # @test: \\"\\" -> false # @test: \\"name-with-spaces\\" -> false if ($name | str length) == 0 { return false } if ($name | str contains \\" \\") { return false } true\\n} # Separate test file\\n# tests/unit/server-validation-test.nu\\ndef test_validate_server_name [] { # Valid cases assert (validate-server-name \\"valid-name\\") assert (validate-server-name \\"server123\\") # Invalid cases assert not (validate-server-name \\"\\") assert not (validate-server-name \\"name with spaces\\") assert not (validate-server-name \\"name@with!special\\") print \\"✅ validate-server-name tests passed\\"\\n} Integration Test Pattern : # tests/integration/server-lifecycle-test.nu\\ndef test_complete_server_lifecycle [] { # Setup let test_server = \\"test-server-\\" + (date now | format date \\"%Y%m%d%H%M%S\\") try { # Test creation let create_result = (create-server $test_server --dry-run) assert ($create_result.status == \\"dry-run\\") # Test validation let validate_result = (validate-server-config $test_server) assert $validate_result print $\\"✅ Server lifecycle test passed for ($test_server)\\" } catch { |e| print $\\"❌ Server lifecycle test failed: ($e.msg)\\" exit 1 }\\n}","breadcrumbs":"Workflow » Nushell Testing","id":"2110","title":"Nushell Testing"},"2111":{"body":"Unit Testing : #[cfg(test)]\\nmod tests { use super::*; use tokio_test; #[test] fn test_validate_server_name() { assert!(validate_server_name(\\"valid-name\\")); assert!(validate_server_name(\\"server123\\")); assert!(!validate_server_name(\\"\\")); assert!(!validate_server_name(\\"name with spaces\\")); assert!(!validate_server_name(\\"name@special\\")); } #[tokio::test] async fn test_server_creation() { let config = test_config(); let result = create_server(\\"test-server\\", &config).await; assert!(result.is_ok()); let server = result.unwrap(); assert_eq!(server.name, \\"test-server\\"); assert_eq!(server.status, \\"created\\"); }\\n} Integration Testing : #[cfg(test)]\\nmod integration_tests { use super::*; use testcontainers::*; #[tokio::test] async fn test_full_workflow() { // Setup test environment let docker = clients::Cli::default(); let postgres = docker.run(images::postgres::Postgres::default()); let config = TestConfig { database_url: format!(\\"postgresql://localhost:{}/test\\", postgres.get_host_port_ipv4(5432)) }; // Test complete workflow let workflow = create_workflow(&config).await.unwrap(); let result = execute_workflow(workflow).await.unwrap(); assert_eq!(result.status, WorkflowStatus::Completed); }\\n}","breadcrumbs":"Workflow » Rust Testing","id":"2111","title":"Rust Testing"},"2112":{"body":"Schema Validation Testing : # Test Nickel schemas\\nnickel check schemas/ # Validate specific schemas\\nnickel typecheck schemas/server.ncl # Test with examples\\nnickel eval schemas/server.ncl","breadcrumbs":"Workflow » Nickel Testing","id":"2112","title":"Nickel Testing"},"2113":{"body":"Continuous Testing : # Watch for changes and run tests\\ncargo watch -x test -x check # Watch Nushell files\\nfind . -name \\"*.nu\\" | entr -r nu tests/run-all-tests.nu # Automated testing in workspace\\nnu workspace.nu tools test-all --watch","breadcrumbs":"Workflow » Test Automation","id":"2113","title":"Test Automation"},"2114":{"body":"","breadcrumbs":"Workflow » Debugging Techniques","id":"2114","title":"Debugging Techniques"},"2115":{"body":"Enable Debug Mode : # Environment variables\\nexport PROVISIONING_DEBUG=true\\nexport PROVISIONING_LOG_LEVEL=debug\\nexport RUST_LOG=debug\\nexport RUST_BACKTRACE=1 # Workspace debug\\nexport PROVISIONING_WORKSPACE_USER=$USER","breadcrumbs":"Workflow » Debug Configuration","id":"2115","title":"Debug Configuration"},"2116":{"body":"Debug Techniques : # Debug prints\\ndef debug-server-creation [name: string] { print $\\"🐛 Creating server: ($name)\\" let config = get-provider-config print $\\"🐛 Config loaded: ($config | to json)\\" let result = try { create-server-api $name $config } catch { |e| print $\\"🐛 API call failed: ($e.msg)\\" $e } print $\\"🐛 Result: ($result | to json)\\" $result\\n} # Conditional debugging\\ndef create-server [name: string] { if $env.PROVISIONING_DEBUG? == \\"true\\" { print $\\"Debug: Creating server ($name)\\" } # Implementation\\n} # Interactive debugging\\ndef debug-interactive [] { print \\"🐛 Entering debug mode...\\" print \\"Available commands: $env.PATH\\" print \\"Current config: \\" (get-config | to json) # Drop into interactive shell nu --interactive\\n} Error Investigation : # Comprehensive error handling\\ndef safe-server-creation [name: string] { try { create-server $name } catch { |e| # Log error details { timestamp: (date now | format date \\"%Y-%m-%d %H:%M:%S\\"), operation: \\"create-server\\", input: $name, error: $e.msg, debug: $e.debug?, env: { user: $env.USER, workspace: $env.PROVISIONING_WORKSPACE_USER?, debug: $env.PROVISIONING_DEBUG? } } | save --append logs/error-debug.json # Re-throw with context error make { msg: $\\"Server creation failed: ($e.msg)\\", label: {text: \\"failed here\\", span: $e.span?} } }\\n}","breadcrumbs":"Workflow » Nushell Debugging","id":"2116","title":"Nushell Debugging"},"2117":{"body":"Debug Logging : use tracing::{debug, info, warn, error, instrument}; #[instrument]\\npub async fn create_server(name: &str) -> Result { debug!(\\"Starting server creation for: {}\\", name); let config = load_config() .map_err(|e| { error!(\\"Failed to load config: {:?}\\", e); e })?; info!(\\"Configuration loaded successfully\\"); debug!(\\"Config details: {:?}\\", config); let server = provision_server(name, &config).await .map_err(|e| { error!(\\"Provisioning failed for {}: {:?}\\", name, e); e })?; info!(\\"Server {} created successfully\\", name); Ok(server)\\n} Interactive Debugging : // Use debugger breakpoints\\n#[cfg(debug_assertions)]\\n{ println!(\\"Debug: server creation starting\\"); dbg!(&config); // Add breakpoint here in IDE\\n}","breadcrumbs":"Workflow » Rust Debugging","id":"2117","title":"Rust Debugging"},"2118":{"body":"Log Monitoring : # Follow all logs\\ntail -f workspace/runtime/logs/$USER/*.log # Filter for errors\\ngrep -i error workspace/runtime/logs/$USER/*.log # Monitor specific component\\ntail -f workspace/runtime/logs/$USER/orchestrator.log | grep -i workflow # Structured log analysis\\njq \'.level == \\"ERROR\\"\' workspace/runtime/logs/$USER/structured.jsonl Debug Log Levels : # Different verbosity levels\\nPROVISIONING_LOG_LEVEL=trace provisioning server create test\\nPROVISIONING_LOG_LEVEL=debug provisioning server create test\\nPROVISIONING_LOG_LEVEL=info provisioning server create test","breadcrumbs":"Workflow » Log Analysis","id":"2118","title":"Log Analysis"},"2119":{"body":"","breadcrumbs":"Workflow » Integration Workflows","id":"2119","title":"Integration Workflows"},"212":{"body":"# Generate AWS STS credentials (15 min-12h TTL)\\nprovisioning secrets generate aws --ttl 1hr # Generate SSH key pair (Ed25519)\\nprovisioning secrets generate ssh --ttl 4hr # List active secrets\\nprovisioning secrets list # Revoke secret\\nprovisioning secrets revoke # Cleanup expired secrets\\nprovisioning secrets cleanup","breadcrumbs":"Quick Start Cheatsheet » Secrets Management","id":"212","title":"Secrets Management"},"2120":{"body":"Working with Legacy Components : # Test integration with existing system\\nprovisioning --version # Legacy system\\nsrc/core/nulib/provisioning --version # New system # Test workspace integration\\nPROVISIONING_WORKSPACE_USER=$USER provisioning server list # Validate configuration compatibility\\nprovisioning validate config\\nnu workspace.nu config validate","breadcrumbs":"Workflow » Existing System Integration","id":"2120","title":"Existing System Integration"},"2121":{"body":"REST API Testing : # Test orchestrator API\\ncurl -X GET http://localhost:9090/health\\ncurl -X GET http://localhost:9090/tasks # Test workflow creation\\ncurl -X POST http://localhost:9090/workflows/servers/create \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{\\"name\\": \\"test-server\\", \\"plan\\": \\"2xCPU-4 GB\\"}\' # Monitor workflow\\ncurl -X GET http://localhost:9090/workflows/batch/status/workflow-id","breadcrumbs":"Workflow » API Integration Testing","id":"2121","title":"API Integration Testing"},"2122":{"body":"SurrealDB Integration : # Test database connectivity\\nuse core/nulib/lib_provisioning/database/surreal.nu\\nlet db = (connect-database)\\n(test-connection $db) # Workflow state testing\\nlet workflow_id = (create-workflow-record \\"test-workflow\\")\\nlet status = (get-workflow-status $workflow_id)\\nassert ($status.status == \\"pending\\")","breadcrumbs":"Workflow » Database Integration","id":"2122","title":"Database Integration"},"2123":{"body":"Container Integration : # Test with Docker\\ndocker run --rm -v $(pwd):/work provisioning:dev provisioning --version # Test with Kubernetes\\nkubectl apply -f manifests/test-pod.yaml\\nkubectl logs test-pod # Validate in different environments\\nmake test-dist PLATFORM=docker\\nmake test-dist PLATFORM=kubernetes","breadcrumbs":"Workflow » External Tool Integration","id":"2123","title":"External Tool Integration"},"2124":{"body":"","breadcrumbs":"Workflow » Collaboration Guidelines","id":"2124","title":"Collaboration Guidelines"},"2125":{"body":"Branch Naming : feature/description - New features fix/description - Bug fixes docs/description - Documentation updates refactor/description - Code refactoring test/description - Test improvements Workflow : # Start new feature\\ngit checkout main\\ngit pull origin main\\ngit checkout -b feature/new-provider-support # Regular commits\\ngit add .\\ngit commit -m \\"feat(provider): implement server creation API\\" # Push and create PR\\ngit push origin feature/new-provider-support\\ngh pr create --title \\"Add new provider support\\" --body \\"...\\"","breadcrumbs":"Workflow » Branch Strategy","id":"2125","title":"Branch Strategy"},"2126":{"body":"Review Checklist : Code follows project conventions Tests are included and passing Documentation is updated No hardcoded values Error handling is comprehensive Performance considerations addressed Review Commands : # Test PR locally\\ngh pr checkout 123\\ncd src/tools && make ci-test # Run specific tests\\nnu workspace/extensions/providers/new-provider/tests/run-all.nu # Check code quality\\ncargo clippy -- -D warnings\\nnu --check $(find . -name \\"*.nu\\")","breadcrumbs":"Workflow » Code Review Process","id":"2126","title":"Code Review Process"},"2127":{"body":"Code Documentation : # Function documentation\\ndef create-server [ name: string # Server name (must be unique) plan: string # Server plan (for example, \\"2xCPU-4 GB\\") --dry-run: bool # Show what would be created without doing it\\n] -> record { # Returns server creation result # Creates a new server with the specified configuration # # Examples: # create-server \\"web-01\\" \\"2xCPU-4 GB\\" # create-server \\"test\\" \\"1xCPU-2 GB\\" --dry-run # Implementation\\n}","breadcrumbs":"Workflow » Documentation Requirements","id":"2127","title":"Documentation Requirements"},"2128":{"body":"Progress Updates : Daily standup participation Weekly architecture reviews PR descriptions with context Issue tracking with details Knowledge Sharing : Technical blog posts Architecture decision records Code review discussions Team documentation updates","breadcrumbs":"Workflow » Communication","id":"2128","title":"Communication"},"2129":{"body":"","breadcrumbs":"Workflow » Quality Assurance","id":"2129","title":"Quality Assurance"},"213":{"body":"# Connect to server with temporal key\\nprovisioning ssh connect server01 --ttl 1hr # Generate SSH key pair only\\nprovisioning ssh generate --ttl 4hr # List active SSH keys\\nprovisioning ssh list # Revoke SSH key\\nprovisioning ssh revoke ","breadcrumbs":"Quick Start Cheatsheet » SSH Temporal Keys","id":"213","title":"SSH Temporal Keys"},"2130":{"body":"Automated Quality Gates : # Pre-commit hooks\\npre-commit install # Manual quality check\\ncd src/tools\\nmake validate-all # Security audit\\ncargo audit Quality Metrics : Code coverage > 80% No critical security vulnerabilities All tests passing Documentation coverage complete Performance benchmarks met","breadcrumbs":"Workflow » Code Quality Checks","id":"2130","title":"Code Quality Checks"},"2131":{"body":"Performance Testing : # Benchmark builds\\nmake benchmark # Performance profiling\\ncargo flamegraph --bin provisioning-orchestrator # Load testing\\nab -n 1000 -c 10 http://localhost:9090/health Resource Monitoring : # Monitor during development\\nnu workspace/tools/runtime-manager.nu monitor --duration 5m # Check resource usage\\ndu -sh workspace/runtime/\\ndf -h","breadcrumbs":"Workflow » Performance Monitoring","id":"2131","title":"Performance Monitoring"},"2132":{"body":"","breadcrumbs":"Workflow » Best Practices","id":"2132","title":"Best Practices"},"2133":{"body":"Never Hardcode : # Bad\\ndef get-api-url [] { \\"https://api.upcloud.com\\" } # Good\\ndef get-api-url [] { get-config-value \\"providers.upcloud.api_url\\" \\"https://api.upcloud.com\\"\\n}","breadcrumbs":"Workflow » Configuration Management","id":"2133","title":"Configuration Management"},"2134":{"body":"Comprehensive Error Context : def create-server [name: string] { try { validate-server-name $name } catch { |e| error make { msg: $\\"Invalid server name \'($name)\': ($e.msg)\\", label: {text: \\"server name validation failed\\", span: $e.span?} } } try { provision-server $name } catch { |e| error make { msg: $\\"Server provisioning failed for \'($name)\': ($e.msg)\\", help: \\"Check provider credentials and quota limits\\" } }\\n}","breadcrumbs":"Workflow » Error Handling","id":"2134","title":"Error Handling"},"2135":{"body":"Clean Up Resources : def with-temporary-server [name: string, action: closure] { let server = (create-server $name) try { do $action $server } catch { |e| # Clean up on error delete-server $name $e } # Clean up on success delete-server $name\\n}","breadcrumbs":"Workflow » Resource Management","id":"2135","title":"Resource Management"},"2136":{"body":"Test Isolation : def test-with-isolation [test_name: string, test_action: closure] { let test_workspace = $\\"test-($test_name)-(date now | format date \'%Y%m%d%H%M%S\')\\" try { # Set up isolated environment $env.PROVISIONING_WORKSPACE_USER = $test_workspace nu workspace.nu init --user-name $test_workspace # Run test do $test_action print $\\"✅ Test ($test_name) passed\\" } catch { |e| print $\\"❌ Test ($test_name) failed: ($e.msg)\\" exit 1 } finally { # Clean up test environment nu workspace.nu cleanup --user-name $test_workspace --type all --force }\\n} This development workflow provides a comprehensive framework for efficient, quality-focused development while maintaining the project\'s architectural principles and ensuring smooth collaboration across the team.","breadcrumbs":"Workflow » Testing Best Practices","id":"2136","title":"Testing Best Practices"},"2137":{"body":"This document explains how the new project structure integrates with existing systems, API compatibility and versioning, database migration strategies, deployment considerations, and monitoring and observability.","breadcrumbs":"Integration » Integration Guide","id":"2137","title":"Integration Guide"},"2138":{"body":"Overview Existing System Integration API Compatibility and Versioning Database Migration Strategies Deployment Considerations Monitoring and Observability Legacy System Bridge Migration Pathways Troubleshooting Integration Issues","breadcrumbs":"Integration » Table of Contents","id":"2138","title":"Table of Contents"},"2139":{"body":"Provisioning has been designed with integration as a core principle, ensuring seamless compatibility between new development-focused components and existing production systems while providing clear migration pathways. Integration Principles : Backward Compatibility : All existing APIs and interfaces remain functional Gradual Migration : Systems can be migrated incrementally without disruption Dual Operation : New and legacy systems operate side-by-side during transition Zero Downtime : Migrations occur without service interruption Data Integrity : All data migrations are atomic and reversible Integration Architecture : Integration Ecosystem\\n┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐\\n│ Legacy Core │ ←→ │ Bridge Layer │ ←→ │ New Systems │\\n│ │ │ │ │ │\\n│ - ENV config │ │ - Compatibility │ │ - TOML config │\\n│ - Direct calls │ │ - Translation │ │ - Orchestrator │\\n│ - File-based │ │ - Monitoring │ │ - Workflows │\\n│ - Simple logging│ │ - Validation │ │ - REST APIs │\\n└─────────────────┘ └─────────────────┘ └─────────────────┘","breadcrumbs":"Integration » Overview","id":"2139","title":"Overview"},"214":{"body":"# Encrypt configuration file\\nprovisioning kms encrypt secure.yaml # Decrypt configuration file\\nprovisioning kms decrypt secure.yaml.enc # Encrypt entire config directory\\nprovisioning config encrypt workspace/infra/production/ # Decrypt config directory\\nprovisioning config decrypt workspace/infra/production/","breadcrumbs":"Quick Start Cheatsheet » KMS Operations (via CLI)","id":"214","title":"KMS Operations (via CLI)"},"2140":{"body":"","breadcrumbs":"Integration » Existing System Integration","id":"2140","title":"Existing System Integration"},"2141":{"body":"Seamless CLI Compatibility : # All existing commands continue to work unchanged\\n./core/nulib/provisioning server create web-01 2xCPU-4 GB\\n./core/nulib/provisioning taskserv install kubernetes\\n./core/nulib/provisioning cluster create buildkit # New commands available alongside existing ones\\n./src/core/nulib/provisioning server create web-01 2xCPU-4 GB --orchestrated\\nnu workspace/tools/workspace.nu health --detailed Path Resolution Integration : # Automatic path resolution between systems\\nuse workspace/lib/path-resolver.nu # Resolves to workspace path if available, falls back to core\\nlet config_path = (path-resolver resolve_path \\"config\\" \\"user\\" --fallback-to-core) # Seamless extension discovery\\nlet provider_path = (path-resolver resolve_extension \\"providers\\" \\"upcloud\\")","breadcrumbs":"Integration » Command-Line Interface Integration","id":"2141","title":"Command-Line Interface Integration"},"2142":{"body":"Dual Configuration Support : # Configuration bridge supports both ENV and TOML\\ndef get-config-value-bridge [key: string, default: string = \\"\\"] -> string { # Try new TOML configuration first let toml_value = try { get-config-value $key } catch { null } if $toml_value != null { return $toml_value } # Fall back to ENV variable (legacy support) let env_key = ($key | str replace \\".\\" \\"_\\" | str upcase | $\\"PROVISIONING_($in)\\") let env_value = ($env | get $env_key | default null) if $env_value != null { return $env_value } # Use default if provided if $default != \\"\\" { return $default } # Error with helpful migration message error make { msg: $\\"Configuration not found: ($key)\\", help: $\\"Migrate from ($env_key) environment variable to ($key) in config file\\" }\\n}","breadcrumbs":"Integration » Configuration System Bridge","id":"2142","title":"Configuration System Bridge"},"2143":{"body":"Shared Data Access : # Unified data access across old and new systems\\ndef get-server-info [server_name: string] -> record { # Try new orchestrator data store first let orchestrator_data = try { get-orchestrator-server-data $server_name } catch { null } if $orchestrator_data != null { return $orchestrator_data } # Fall back to legacy file-based storage let legacy_data = try { get-legacy-server-data $server_name } catch { null } if $legacy_data != null { return ($legacy_data | migrate-to-new-format) } error make {msg: $\\"Server not found: ($server_name)\\"}\\n}","breadcrumbs":"Integration » Data Integration","id":"2143","title":"Data Integration"},"2144":{"body":"Hybrid Process Management : # Orchestrator-aware process management\\ndef create-server-integrated [ name: string, plan: string, --orchestrated: bool = false\\n] -> record { if $orchestrated and (check-orchestrator-available) { # Use new orchestrator workflow return (create-server-workflow $name $plan) } else { # Use legacy direct creation return (create-server-direct $name $plan) }\\n} def check-orchestrator-available [] -> bool { try { http get \\"http://localhost:9090/health\\" | get status == \\"ok\\" } catch { false }\\n}","breadcrumbs":"Integration » Process Integration","id":"2144","title":"Process Integration"},"2145":{"body":"","breadcrumbs":"Integration » API Compatibility and Versioning","id":"2145","title":"API Compatibility and Versioning"},"2146":{"body":"API Version Strategy : v1 : Legacy compatibility API (existing functionality) v2 : Enhanced API with orchestrator features v3 : Full workflow and batch operation support Version Header Support : # API calls with version specification\\ncurl -H \\"API-Version: v1\\" http://localhost:9090/servers\\ncurl -H \\"API-Version: v2\\" http://localhost:9090/workflows/servers/create\\ncurl -H \\"API-Version: v3\\" http://localhost:9090/workflows/batch/submit","breadcrumbs":"Integration » REST API Versioning","id":"2146","title":"REST API Versioning"},"2147":{"body":"Backward Compatible Endpoints : // Rust API compatibility layer\\n#[derive(Debug, Serialize, Deserialize)]\\nstruct ApiRequest { version: Option, #[serde(flatten)] payload: serde_json::Value,\\n} async fn handle_versioned_request( headers: HeaderMap, req: ApiRequest,\\n) -> Result { let api_version = headers .get(\\"API-Version\\") .and_then(|v| v.to_str().ok()) .unwrap_or(\\"v1\\"); match api_version { \\"v1\\" => handle_v1_request(req.payload).await, \\"v2\\" => handle_v2_request(req.payload).await, \\"v3\\" => handle_v3_request(req.payload).await, _ => Err(ApiError::UnsupportedVersion(api_version.to_string())), }\\n} // V1 compatibility endpoint\\nasync fn handle_v1_request(payload: serde_json::Value) -> Result { // Transform request to legacy format let legacy_request = transform_to_legacy_format(payload)?; // Execute using legacy system let result = execute_legacy_operation(legacy_request).await?; // Transform response to v1 format Ok(transform_to_v1_response(result))\\n}","breadcrumbs":"Integration » API Compatibility Layer","id":"2147","title":"API Compatibility Layer"},"2148":{"body":"Backward Compatible Schema Changes : # API schema with version support\\nlet ServerCreateRequest = { # V1 fields (always supported) name | string, plan | string, zone | string | default = \\"auto\\", # V2 additions (optional for backward compatibility) orchestrated | bool | default = false, workflow_options | { } | optional, # V3 additions batch_options | { } | optional, dependencies | array | default = [], # Version constraints api_version | string | default = \\"v1\\",\\n} in\\nServerCreateRequest # Conditional validation based on API version\\nlet WorkflowOptions = { wait_for_completion | bool | default = true, timeout_seconds | number | default = 300, retry_count | number | default = 3,\\n} in\\nWorkflowOptions","breadcrumbs":"Integration » Schema Evolution","id":"2148","title":"Schema Evolution"},"2149":{"body":"Multi-Version Client Support : # Nushell client with version support\\ndef \\"client create-server\\" [ name: string, plan: string, --api-version: string = \\"v1\\", --orchestrated: bool = false\\n] -> record { let endpoint = match $api_version { \\"v1\\" => \\"/servers\\", \\"v2\\" => \\"/workflows/servers/create\\", \\"v3\\" => \\"/workflows/batch/submit\\", _ => (error make {msg: $\\"Unsupported API version: ($api_version)\\"}) } let request_body = match $api_version { \\"v1\\" => {name: $name, plan: $plan}, \\"v2\\" => {name: $name, plan: $plan, orchestrated: $orchestrated}, \\"v3\\" => { operations: [{ id: \\"create_server\\", type: \\"server_create\\", config: {name: $name, plan: $plan} }] }, _ => (error make {msg: $\\"Unsupported API version: ($api_version)\\"}) } http post $\\"http://localhost:9090($endpoint)\\" $request_body --headers { \\"Content-Type\\": \\"application/json\\", \\"API-Version\\": $api_version }\\n}","breadcrumbs":"Integration » Client SDK Compatibility","id":"2149","title":"Client SDK Compatibility"},"215":{"body":"# Request emergency access\\nprovisioning break-glass request \\"Production database outage\\" # Approve emergency request (requires admin)\\nprovisioning break-glass approve --reason \\"Approved by CTO\\" # List break-glass sessions\\nprovisioning break-glass list # Revoke break-glass session\\nprovisioning break-glass revoke ","breadcrumbs":"Quick Start Cheatsheet » Break-Glass Emergency Access","id":"215","title":"Break-Glass Emergency Access"},"2150":{"body":"","breadcrumbs":"Integration » Database Migration Strategies","id":"2150","title":"Database Migration Strategies"},"2151":{"body":"Migration Strategy : Database Evolution Path\\n┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐\\n│ File-based │ → │ SQLite │ → │ SurrealDB │\\n│ Storage │ │ Migration │ │ Full Schema │\\n│ │ │ │ │ │\\n│ - JSON files │ │ - Structured │ │ - Graph DB │\\n│ - Text logs │ │ - Transactions │ │ - Real-time │\\n│ - Simple state │ │ - Backup/restore│ │ - Clustering │\\n└─────────────────┘ └─────────────────┘ └─────────────────┘","breadcrumbs":"Integration » Database Architecture Evolution","id":"2151","title":"Database Architecture Evolution"},"2152":{"body":"Automated Database Migration : # Database migration orchestration\\ndef migrate-database [ --from: string = \\"filesystem\\", --to: string = \\"surrealdb\\", --backup-first: bool = true, --verify: bool = true\\n] -> record { if $backup_first { print \\"Creating backup before migration...\\" let backup_result = (create-database-backup $from) print $\\"Backup created: ($backup_result.path)\\" } print $\\"Migrating from ($from) to ($to)...\\" match [$from, $to] { [\\"filesystem\\", \\"sqlite\\"] => migrate_filesystem_to_sqlite, [\\"filesystem\\", \\"surrealdb\\"] => migrate_filesystem_to_surrealdb, [\\"sqlite\\", \\"surrealdb\\"] => migrate_sqlite_to_surrealdb, _ => (error make {msg: $\\"Unsupported migration path: ($from) → ($to)\\"}) } if $verify { print \\"Verifying migration integrity...\\" let verification = (verify-migration $from $to) if not $verification.success { error make { msg: $\\"Migration verification failed: ($verification.errors)\\", help: \\"Restore from backup and retry migration\\" } } } print $\\"Migration from ($from) to ($to) completed successfully\\" {from: $from, to: $to, status: \\"completed\\", migrated_at: (date now)}\\n} File System to SurrealDB Migration : def migrate_filesystem_to_surrealdb [] -> record { # Initialize SurrealDB connection let db = (connect-surrealdb) # Migrate server data let server_files = (ls data/servers/*.json) let migrated_servers = [] for server_file in $server_files { let server_data = (open $server_file.name | from json) # Transform to new schema let server_record = { id: $server_data.id, name: $server_data.name, plan: $server_data.plan, zone: ($server_data.zone? | default \\"unknown\\"), status: $server_data.status, ip_address: $server_data.ip_address?, created_at: $server_data.created_at, updated_at: (date now), metadata: ($server_data.metadata? | default {}), tags: ($server_data.tags? | default []) } # Insert into SurrealDB let insert_result = try { query-surrealdb $\\"CREATE servers:($server_record.id) CONTENT ($server_record | to json)\\" } catch { |e| print $\\"Warning: Failed to migrate server ($server_data.name): ($e.msg)\\" } $migrated_servers = ($migrated_servers | append $server_record.id) } # Migrate workflow data migrate_workflows_to_surrealdb $db # Migrate state data migrate_state_to_surrealdb $db { migrated_servers: ($migrated_servers | length), migrated_workflows: (migrate_workflows_to_surrealdb $db).count, status: \\"completed\\" }\\n}","breadcrumbs":"Integration » Migration Scripts","id":"2152","title":"Migration Scripts"},"2153":{"body":"Migration Verification : def verify-migration [from: string, to: string] -> record { print \\"Verifying data integrity...\\" let source_data = (read-source-data $from) let target_data = (read-target-data $to) let errors = [] # Verify record counts if $source_data.servers.count != $target_data.servers.count { $errors = ($errors | append \\"Server count mismatch\\") } # Verify key records for server in $source_data.servers { let target_server = ($target_data.servers | where id == $server.id | first) if ($target_server | is-empty) { $errors = ($errors | append $\\"Missing server: ($server.id)\\") } else { # Verify critical fields if $target_server.name != $server.name { $errors = ($errors | append $\\"Name mismatch for server ($server.id)\\") } if $target_server.status != $server.status { $errors = ($errors | append $\\"Status mismatch for server ($server.id)\\") } } } { success: ($errors | length) == 0, errors: $errors, verified_at: (date now) }\\n}","breadcrumbs":"Integration » Data Integrity Verification","id":"2153","title":"Data Integrity Verification"},"2154":{"body":"","breadcrumbs":"Integration » Deployment Considerations","id":"2154","title":"Deployment Considerations"},"2155":{"body":"Hybrid Deployment Model : Deployment Architecture\\n┌─────────────────────────────────────────────────────────────────┐\\n│ Load Balancer / Reverse Proxy │\\n└─────────────────────┬───────────────────────────────────────────┘ │ ┌─────────────────┼─────────────────┐ │ │ │\\n┌───▼────┐ ┌─────▼─────┐ ┌───▼────┐\\n│Legacy │ │Orchestrator│ │New │\\n│System │ ←→ │Bridge │ ←→ │Systems │\\n│ │ │ │ │ │\\n│- CLI │ │- API Gate │ │- REST │\\n│- Files │ │- Compat │ │- DB │\\n│- Logs │ │- Monitor │ │- Queue │\\n└────────┘ └────────────┘ └────────┘","breadcrumbs":"Integration » Deployment Architecture","id":"2155","title":"Deployment Architecture"},"2156":{"body":"Blue-Green Deployment : # Blue-Green deployment with integration bridge\\n# Phase 1: Deploy new system alongside existing (Green environment)\\ncd src/tools\\nmake all\\nmake create-installers # Install new system without disrupting existing\\n./packages/installers/install-provisioning-2.0.0.sh \\\\ --install-path /opt/provisioning-v2 \\\\ --no-replace-existing \\\\ --enable-bridge-mode # Phase 2: Start orchestrator and validate integration\\n/opt/provisioning-v2/bin/orchestrator start --bridge-mode --legacy-path /opt/provisioning-v1 # Phase 3: Gradual traffic shift\\n# Route 10% traffic to new system\\nnginx-traffic-split --new-backend 10% # Validate metrics and gradually increase\\nnginx-traffic-split --new-backend 50%\\nnginx-traffic-split --new-backend 90% # Phase 4: Complete cutover\\nnginx-traffic-split --new-backend 100%\\n/opt/provisioning-v1/bin/orchestrator stop Rolling Update : def rolling-deployment [ --target-version: string, --batch-size: int = 3, --health-check-interval: duration = 30sec\\n] -> record { let nodes = (get-deployment-nodes) let batches = ($nodes | group_by --chunk-size $batch_size) let deployment_results = [] for batch in $batches { print $\\"Deploying to batch: ($batch | get name | str join \', \')\\" # Deploy to batch for node in $batch { deploy-to-node $node $target_version } # Wait for health checks sleep $health_check_interval # Verify batch health let batch_health = ($batch | each { |node| check-node-health $node }) let healthy_nodes = ($batch_health | where healthy == true | length) if $healthy_nodes != ($batch | length) { # Rollback batch on failure print $\\"Health check failed, rolling back batch\\" for node in $batch { rollback-node $node } error make {msg: \\"Rolling deployment failed at batch\\"} } print $\\"Batch deployed successfully\\" $deployment_results = ($deployment_results | append { batch: $batch, status: \\"success\\", deployed_at: (date now) }) } { strategy: \\"rolling\\", target_version: $target_version, batches: ($deployment_results | length), status: \\"completed\\", completed_at: (date now) }\\n}","breadcrumbs":"Integration » Deployment Strategies","id":"2156","title":"Deployment Strategies"},"2157":{"body":"Environment-Specific Deployment : # Development deployment\\nPROVISIONING_ENV=dev ./deploy.sh \\\\ --config-source config.dev.toml \\\\ --enable-debug \\\\ --enable-hot-reload # Staging deployment\\nPROVISIONING_ENV=staging ./deploy.sh \\\\ --config-source config.staging.toml \\\\ --enable-monitoring \\\\ --backup-before-deploy # Production deployment\\nPROVISIONING_ENV=prod ./deploy.sh \\\\ --config-source config.prod.toml \\\\ --zero-downtime \\\\ --enable-all-monitoring \\\\ --backup-before-deploy \\\\ --health-check-timeout 5m","breadcrumbs":"Integration » Configuration Deployment","id":"2157","title":"Configuration Deployment"},"2158":{"body":"Docker Deployment with Bridge : # Multi-stage Docker build supporting both systems\\nFROM rust:1.70 as builder\\nWORKDIR /app\\nCOPY . .\\nRUN cargo build --release FROM ubuntu:22.04 as runtime\\nWORKDIR /app # Install both legacy and new systems\\nCOPY --from=builder /app/target/release/orchestrator /app/bin/\\nCOPY legacy-provisioning/ /app/legacy/\\nCOPY config/ /app/config/ # Bridge script for dual operation\\nCOPY bridge-start.sh /app/bin/ ENV PROVISIONING_BRIDGE_MODE=true\\nENV PROVISIONING_LEGACY_PATH=/app/legacy\\nENV PROVISIONING_NEW_PATH=/app/bin EXPOSE 8080\\nCMD [\\"/app/bin/bridge-start.sh\\"] Kubernetes Integration : # Kubernetes deployment with bridge sidecar\\napiVersion: apps/v1\\nkind: Deployment\\nmetadata: name: provisioning-system\\nspec: replicas: 3 template: spec: containers: - name: orchestrator image: provisioning-system:2.0.0 ports: - containerPort: 8080 env: - name: PROVISIONING_BRIDGE_MODE value: \\"true\\" volumeMounts: - name: config mountPath: /app/config - name: legacy-data mountPath: /app/legacy/data - name: legacy-bridge image: provisioning-legacy:1.0.0 env: - name: BRIDGE_ORCHESTRATOR_URL value: \\"http://localhost:9090\\" volumeMounts: - name: legacy-data mountPath: /data volumes: - name: config configMap: name: provisioning-config - name: legacy-data persistentVolumeClaim: claimName: provisioning-data","breadcrumbs":"Integration » Container Integration","id":"2158","title":"Container Integration"},"2159":{"body":"","breadcrumbs":"Integration » Monitoring and Observability","id":"2159","title":"Monitoring and Observability"},"216":{"body":"# Generate compliance report\\nprovisioning compliance report\\nprovisioning compliance report --standard gdpr\\nprovisioning compliance report --standard soc2\\nprovisioning compliance report --standard iso27001 # GDPR operations\\nprovisioning compliance gdpr export \\nprovisioning compliance gdpr delete \\nprovisioning compliance gdpr rectify # Incident management\\nprovisioning compliance incident create \\"Security breach detected\\"\\nprovisioning compliance incident list\\nprovisioning compliance incident update --status investigating # Audit log queries\\nprovisioning audit query --user alice --action deploy --from 24h\\nprovisioning audit export --format json --output audit-logs.json","breadcrumbs":"Quick Start Cheatsheet » Compliance and Audit","id":"216","title":"Compliance and Audit"},"2160":{"body":"Monitoring Stack Integration : Observability Architecture\\n┌─────────────────────────────────────────────────────────────────┐\\n│ Monitoring Dashboard │\\n│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │\\n│ │ Grafana │ │ Jaeger │ │ AlertMgr │ │\\n│ └─────────────┘ └─────────────┘ └─────────────┘ │\\n└─────────────┬───────────────┬───────────────┬─────────────────┘ │ │ │ ┌──────────▼──────────┐ │ ┌───────────▼───────────┐ │ Prometheus │ │ │ Jaeger │ │ (Metrics) │ │ │ (Tracing) │ └──────────┬──────────┘ │ └───────────┬───────────┘ │ │ │\\n┌─────────────▼─────────────┐ │ ┌─────────────▼─────────────┐\\n│ Legacy │ │ │ New System │\\n│ Monitoring │ │ │ Monitoring │\\n│ │ │ │ │\\n│ - File-based logs │ │ │ - Structured logs │\\n│ - Simple metrics │ │ │ - Prometheus metrics │\\n│ - Basic health checks │ │ │ - Distributed tracing │\\n└───────────────────────────┘ │ └───────────────────────────┘ │ ┌─────────▼─────────┐ │ Bridge Monitor │ │ │ │ - Integration │ │ - Compatibility │ │ - Migration │ └───────────────────┘","breadcrumbs":"Integration » Integrated Monitoring Architecture","id":"2160","title":"Integrated Monitoring Architecture"},"2161":{"body":"Unified Metrics Collection : # Metrics bridge for legacy and new systems\\ndef collect-system-metrics [] -> record { let legacy_metrics = collect-legacy-metrics let new_metrics = collect-new-metrics let bridge_metrics = collect-bridge-metrics { timestamp: (date now), legacy: $legacy_metrics, new: $new_metrics, bridge: $bridge_metrics, integration: { compatibility_rate: (calculate-compatibility-rate $bridge_metrics), migration_progress: (calculate-migration-progress), system_health: (assess-overall-health $legacy_metrics $new_metrics) } }\\n} def collect-legacy-metrics [] -> record { let log_files = (ls logs/*.log) let process_stats = (get-process-stats \\"legacy-provisioning\\") { active_processes: $process_stats.count, log_file_sizes: ($log_files | get size | math sum), last_activity: (get-last-log-timestamp), error_count: (count-log-errors \\"last 1h\\"), performance: { avg_response_time: (calculate-avg-response-time), throughput: (calculate-throughput) } }\\n} def collect-new-metrics [] -> record { let orchestrator_stats = try { http get \\"http://localhost:9090/metrics\\" } catch { {status: \\"unavailable\\"} } { orchestrator: $orchestrator_stats, workflow_stats: (get-workflow-metrics), api_stats: (get-api-metrics), database_stats: (get-database-metrics) }\\n}","breadcrumbs":"Integration » Metrics Integration","id":"2161","title":"Metrics Integration"},"2162":{"body":"Unified Logging Strategy : # Structured logging bridge\\ndef log-integrated [ level: string, message: string, --component: string = \\"bridge\\", --legacy-compat: bool = true\\n] { let log_entry = { timestamp: (date now | format date \\"%Y-%m-%d %H:%M:%S%.3f\\"), level: $level, component: $component, message: $message, system: \\"integrated\\", correlation_id: (generate-correlation-id) } # Write to structured log (new system) $log_entry | to json | save --append logs/integrated.jsonl if $legacy_compat { # Write to legacy log format let legacy_entry = $\\"[($log_entry.timestamp)] [($level)] ($component): ($message)\\" $legacy_entry | save --append logs/legacy.log } # Send to monitoring system send-to-monitoring $log_entry\\n}","breadcrumbs":"Integration » Logging Integration","id":"2162","title":"Logging Integration"},"2163":{"body":"Comprehensive Health Monitoring : def health-check-integrated [] -> record { let health_checks = [ {name: \\"legacy-system\\", check: (check-legacy-health)}, {name: \\"orchestrator\\", check: (check-orchestrator-health)}, {name: \\"database\\", check: (check-database-health)}, {name: \\"bridge-compatibility\\", check: (check-bridge-health)}, {name: \\"configuration\\", check: (check-config-health)} ] let results = ($health_checks | each { |check| let result = try { do $check.check } catch { |e| {status: \\"unhealthy\\", error: $e.msg} } {name: $check.name, result: $result} }) let healthy_count = ($results | where result.status == \\"healthy\\" | length) let total_count = ($results | length) { overall_status: (if $healthy_count == $total_count { \\"healthy\\" } else { \\"degraded\\" }), healthy_services: $healthy_count, total_services: $total_count, services: $results, checked_at: (date now) }\\n}","breadcrumbs":"Integration » Health Check Integration","id":"2163","title":"Health Check Integration"},"2164":{"body":"","breadcrumbs":"Integration » Legacy System Bridge","id":"2164","title":"Legacy System Bridge"},"2165":{"body":"Bridge Component Design : # Legacy system bridge module\\nexport module bridge { # Bridge state management export def init-bridge [] -> record { let bridge_config = get-config-section \\"bridge\\" { legacy_path: ($bridge_config.legacy_path? | default \\"/opt/provisioning-v1\\"), new_path: ($bridge_config.new_path? | default \\"/opt/provisioning-v2\\"), mode: ($bridge_config.mode? | default \\"compatibility\\"), monitoring_enabled: ($bridge_config.monitoring? | default true), initialized_at: (date now) } } # Command translation layer export def translate-command [ legacy_command: list ] -> list { match $legacy_command { [\\"provisioning\\", \\"server\\", \\"create\\", $name, $plan, ...$args] => { let new_args = ($args | each { |arg| match $arg { \\"--dry-run\\" => \\"--dry-run\\", \\"--wait\\" => \\"--wait\\", $zone if ($zone | str starts-with \\"--zone=\\") => $zone, _ => $arg } }) [\\"provisioning\\", \\"server\\", \\"create\\", $name, $plan] ++ $new_args ++ [\\"--orchestrated\\"] }, _ => $legacy_command # Pass through unchanged } } # Data format translation export def translate-response [ legacy_response: record, target_format: string = \\"v2\\" ] -> record { match $target_format { \\"v2\\" => { id: ($legacy_response.id? | default (generate-uuid)), name: $legacy_response.name, status: $legacy_response.status, created_at: ($legacy_response.created_at? | default (date now)), metadata: ($legacy_response | reject name status created_at), version: \\"v2-compat\\" }, _ => $legacy_response } }\\n}","breadcrumbs":"Integration » Bridge Architecture","id":"2165","title":"Bridge Architecture"},"2166":{"body":"Compatibility Mode : # Full compatibility with legacy system\\ndef run-compatibility-mode [] { print \\"Starting bridge in compatibility mode...\\" # Intercept legacy commands let legacy_commands = monitor-legacy-commands for command in $legacy_commands { let translated = (bridge translate-command $command) try { let result = (execute-new-system $translated) let legacy_result = (bridge translate-response $result \\"v1\\") respond-to-legacy $legacy_result } catch { |e| # Fall back to legacy system on error let fallback_result = (execute-legacy-system $command) respond-to-legacy $fallback_result } }\\n} Migration Mode : # Gradual migration with traffic splitting\\ndef run-migration-mode [ --new-system-percentage: int = 50\\n] { print $\\"Starting bridge in migration mode (($new_system_percentage)% new system)\\" let commands = monitor-all-commands for command in $commands { let route_to_new = ((random integer 1..100) <= $new_system_percentage) if $route_to_new { try { execute-new-system $command } catch { # Fall back to legacy on failure execute-legacy-system $command } } else { execute-legacy-system $command } }\\n}","breadcrumbs":"Integration » Bridge Operation Modes","id":"2166","title":"Bridge Operation Modes"},"2167":{"body":"","breadcrumbs":"Integration » Migration Pathways","id":"2167","title":"Migration Pathways"},"2168":{"body":"Phase 1: Parallel Deployment Deploy new system alongside existing Enable bridge for compatibility Begin data synchronization Monitor integration health Phase 2: Gradual Migration Route increasing traffic to new system Migrate data in background Validate consistency Address integration issues Phase 3: Full Migration Complete traffic cutover Decommission legacy system Clean up bridge components Finalize data migration","breadcrumbs":"Integration » Migration Phases","id":"2168","title":"Migration Phases"},"2169":{"body":"Automated Migration Orchestration : def execute-migration-plan [ migration_plan: string, --dry-run: bool = false, --skip-backup: bool = false\\n] -> record { let plan = (open $migration_plan | from yaml) if not $skip_backup { create-pre-migration-backup } let migration_results = [] for phase in $plan.phases { print $\\"Executing migration phase: ($phase.name)\\" if $dry_run { print $\\"[DRY RUN] Would execute phase: ($phase)\\" continue } let phase_result = try { execute-migration-phase $phase } catch { |e| print $\\"Migration phase failed: ($e.msg)\\" if $phase.rollback_on_failure? | default false { print \\"Rolling back migration phase...\\" rollback-migration-phase $phase } error make {msg: $\\"Migration failed at phase ($phase.name): ($e.msg)\\"} } $migration_results = ($migration_results | append $phase_result) # Wait between phases if specified if \\"wait_seconds\\" in $phase { sleep ($phase.wait_seconds * 1sec) } } { migration_plan: $migration_plan, phases_completed: ($migration_results | length), status: \\"completed\\", completed_at: (date now), results: $migration_results }\\n} Migration Validation : def validate-migration-readiness [] -> record { let checks = [ {name: \\"backup-available\\", check: (check-backup-exists)}, {name: \\"new-system-healthy\\", check: (check-new-system-health)}, {name: \\"database-accessible\\", check: (check-database-connectivity)}, {name: \\"configuration-valid\\", check: (validate-migration-config)}, {name: \\"resources-available\\", check: (check-system-resources)}, {name: \\"network-connectivity\\", check: (check-network-health)} ] let results = ($checks | each { |check| { name: $check.name, result: (do $check.check), timestamp: (date now) } }) let failed_checks = ($results | where result.status != \\"ready\\") { ready_for_migration: ($failed_checks | length) == 0, checks: $results, failed_checks: $failed_checks, validated_at: (date now) }\\n}","breadcrumbs":"Integration » Migration Automation","id":"2169","title":"Migration Automation"},"217":{"body":"","breadcrumbs":"Quick Start Cheatsheet » Common Workflows","id":"217","title":"Common Workflows"},"2170":{"body":"","breadcrumbs":"Integration » Troubleshooting Integration Issues","id":"2170","title":"Troubleshooting Integration Issues"},"2171":{"body":"API Compatibility Issues Problem : Version mismatch between client and server # Diagnosis\\ncurl -H \\"API-Version: v1\\" http://localhost:9090/health\\ncurl -H \\"API-Version: v2\\" http://localhost:9090/health # Solution: Check supported versions\\ncurl http://localhost:9090/api/versions # Update client API version\\nexport PROVISIONING_API_VERSION=v2 Configuration Bridge Issues Problem : Configuration not found in either system # Diagnosis\\ndef diagnose-config-issue [key: string] -> record { let toml_result = try { get-config-value $key } catch { |e| {status: \\"failed\\", error: $e.msg} } let env_key = ($key | str replace \\".\\" \\"_\\" | str upcase | $\\"PROVISIONING_($in)\\") let env_result = try { $env | get $env_key } catch { |e| {status: \\"failed\\", error: $e.msg} } { key: $key, toml_config: $toml_result, env_config: $env_result, migration_needed: ($toml_result.status == \\"failed\\" and $env_result.status != \\"failed\\") }\\n} # Solution: Migrate configuration\\ndef migrate-single-config [key: string] { let diagnosis = (diagnose-config-issue $key) if $diagnosis.migration_needed { let env_value = $diagnosis.env_config set-config-value $key $env_value print $\\"Migrated ($key) from environment variable\\" }\\n} Database Integration Issues Problem : Data inconsistency between systems # Diagnosis and repair\\ndef repair-data-consistency [] -> record { let legacy_data = (read-legacy-data) let new_data = (read-new-data) let inconsistencies = [] # Check server records for server in $legacy_data.servers { let new_server = ($new_data.servers | where id == $server.id | first) if ($new_server | is-empty) { print $\\"Missing server in new system: ($server.id)\\" create-server-record $server $inconsistencies = ($inconsistencies | append {type: \\"missing\\", id: $server.id}) } else if $new_server != $server { print $\\"Inconsistent server data: ($server.id)\\" update-server-record $server $inconsistencies = ($inconsistencies | append {type: \\"inconsistent\\", id: $server.id}) } } { inconsistencies_found: ($inconsistencies | length), repairs_applied: ($inconsistencies | length), repaired_at: (date now) }\\n}","breadcrumbs":"Integration » Common Integration Problems","id":"2171","title":"Common Integration Problems"},"2172":{"body":"Integration Debug Mode : # Enable comprehensive debugging\\nexport PROVISIONING_DEBUG=true\\nexport PROVISIONING_LOG_LEVEL=debug\\nexport PROVISIONING_BRIDGE_DEBUG=true\\nexport PROVISIONING_INTEGRATION_TRACE=true # Run with integration debugging\\nprovisioning server create test-server 2xCPU-4 GB --debug-integration Health Check Debugging : def debug-integration-health [] -> record { print \\"=== Integration Health Debug ===\\" # Check all integration points let legacy_health = try { check-legacy-system } catch { |e| {status: \\"error\\", error: $e.msg} } let orchestrator_health = try { http get \\"http://localhost:9090/health\\" } catch { |e| {status: \\"error\\", error: $e.msg} } let bridge_health = try { check-bridge-status } catch { |e| {status: \\"error\\", error: $e.msg} } let config_health = try { validate-config-integration } catch { |e| {status: \\"error\\", error: $e.msg} } print $\\"Legacy System: ($legacy_health.status)\\" print $\\"Orchestrator: ($orchestrator_health.status)\\" print $\\"Bridge: ($bridge_health.status)\\" print $\\"Configuration: ($config_health.status)\\" { legacy: $legacy_health, orchestrator: $orchestrator_health, bridge: $bridge_health, configuration: $config_health, debug_timestamp: (date now) }\\n} This integration guide provides a comprehensive framework for seamlessly integrating new development components with existing production systems while maintaining reliability, compatibility, and clear migration pathways.","breadcrumbs":"Integration » Debug Tools","id":"2172","title":"Debug Tools"},"2173":{"body":"This document provides comprehensive documentation for the provisioning project\'s build system, including the complete Makefile reference with 40+ targets, build tools, compilation instructions, and troubleshooting.","breadcrumbs":"Build System » Build System Documentation","id":"2173","title":"Build System Documentation"},"2174":{"body":"Overview Quick Start Makefile Reference Build Tools Cross-Platform Compilation Dependency Management Troubleshooting CI/CD Integration","breadcrumbs":"Build System » Table of Contents","id":"2174","title":"Table of Contents"},"2175":{"body":"The build system is a comprehensive, Makefile-based solution that orchestrates: Rust compilation : Platform binaries (orchestrator, control-center, etc.) Nushell bundling : Core libraries and CLI tools Nickel validation : Configuration schema validation Distribution generation : Multi-platform packages Release management : Automated release pipelines Documentation generation : API and user documentation Location : /src/tools/ Main entry point : /src/tools/Makefile","breadcrumbs":"Build System » Overview","id":"2175","title":"Overview"},"2176":{"body":"# Navigate to build system\\ncd src/tools # View all available targets\\nmake help # Complete build and package\\nmake all # Development build (quick)\\nmake dev-build # Build for specific platform\\nmake linux\\nmake macos\\nmake windows # Clean everything\\nmake clean # Check build system status\\nmake status","breadcrumbs":"Build System » Quick Start","id":"2176","title":"Quick Start"},"2177":{"body":"","breadcrumbs":"Build System » Makefile Reference","id":"2177","title":"Makefile Reference"},"2178":{"body":"Variables : # Project metadata\\nPROJECT_NAME := provisioning\\nVERSION := $(git describe --tags --always --dirty)\\nBUILD_TIME := $(date -u +\\"%Y-%m-%dT%H:%M:%SZ\\") # Build configuration\\nRUST_TARGET := x86_64-unknown-linux-gnu\\nBUILD_MODE := release\\nPLATFORMS := linux-amd64,macos-amd64,windows-amd64\\nVARIANTS := complete,minimal # Flags\\nVERBOSE := false\\nDRY_RUN := false\\nPARALLEL := true","breadcrumbs":"Build System » Build Configuration","id":"2178","title":"Build Configuration"},"2179":{"body":"Primary Build Targets make all - Complete build, package, and test Runs: clean build-all package-all test-dist Use for: Production releases, complete validation make build-all - Build all components Runs: build-platform build-core validate-nickel Use for: Complete system compilation make build-platform - Build platform binaries for all targets make build-platform\\n# Equivalent to:\\nnu tools/build/compile-platform.nu \\\\ --target x86_64-unknown-linux-gnu \\\\ --release \\\\ --output-dir dist/platform \\\\ --verbose=false make build-core - Bundle core Nushell libraries make build-core\\n# Equivalent to:\\nnu tools/build/bundle-core.nu \\\\ --output-dir dist/core \\\\ --config-dir dist/config \\\\ --validate \\\\ --exclude-dev make validate-nickel - Validate and compile Nickel schemas make validate-nickel\\n# Equivalent to:\\nnu tools/build/validate-nickel.nu \\\\ --output-dir dist/schemas \\\\ --format-code \\\\ --check-dependencies make build-cross - Cross-compile for multiple platforms Builds for all platforms in PLATFORMS variable Parallel execution support Failure handling for each platform Package Targets make package-all - Create all distribution packages Runs: dist-generate package-binaries package-containers make dist-generate - Generate complete distributions make dist-generate\\n# Advanced usage:\\nmake dist-generate PLATFORMS=linux-amd64,macos-amd64 VARIANTS=complete make package-binaries - Package binaries for distribution Creates platform-specific archives Strips debug symbols Generates checksums make package-containers - Build container images Multi-platform container builds Optimized layers and caching Version tagging make create-archives - Create distribution archives TAR and ZIP formats Platform-specific and universal archives Compression and checksums make create-installers - Create installation packages Shell script installers Platform-specific packages (DEB, RPM, MSI) Uninstaller creation Release Targets make release - Create a complete release (requires VERSION) make release VERSION=2.1.0 Features: Automated changelog generation Git tag creation and push Artifact upload Comprehensive validation make release-draft - Create a draft release Create without publishing Review artifacts before release Manual approval workflow make upload-artifacts - Upload release artifacts GitHub Releases Container registries Package repositories Verification and validation make notify-release - Send release notifications Slack notifications Discord announcements Email notifications Custom webhook support make update-registry - Update package manager registries Homebrew formula updates APT repository updates Custom registry support Development and Testing Targets make dev-build - Quick development build make dev-build\\n# Fast build with minimal validation make test-build - Test build system Validates build process Runs with test configuration Comprehensive logging make test-dist - Test generated distributions Validates distribution integrity Tests installation process Platform compatibility checks make validate-all - Validate all components Nickel schema validation Package validation Configuration validation make benchmark - Run build benchmarks Times build process Performance analysis Resource usage monitoring Documentation Targets make docs - Generate documentation make docs\\n# Generates API docs, user guides, and examples make docs-serve - Generate and serve documentation locally Starts local HTTP server on port 8000 Live documentation browsing Development documentation workflow Utility Targets make clean - Clean all build artifacts make clean\\n# Removes all build, distribution, and package directories make clean-dist - Clean only distribution artifacts Preserves build cache Removes distribution packages Faster cleanup option make install - Install the built system locally Requires distribution to be built Installs to system directories Creates uninstaller make uninstall - Uninstall the system Removes system installation Cleans configuration Removes service files make status - Show build system status make status\\n# Output:\\n# Build System Status\\n# ===================\\n# Project: provisioning\\n# Version: v2.1.0-5-g1234567\\n# Git Commit: 1234567890abcdef\\n# Build Time: 2025-09-25T14:30:22Z\\n#\\n# Directories:\\n# Source: /Users/user/repo-cnz/src\\n# Tools: /Users/user/repo-cnz/src/tools\\n# Build: /Users/user/repo-cnz/src/target\\n# Distribution: /Users/user/repo-cnz/src/dist\\n# Packages: /Users/user/repo-cnz/src/packages make info - Show detailed system information OS and architecture details Tool versions (Nushell, Rust, Docker, Git) Environment information Build prerequisites CI/CD Integration Targets make ci-build - CI build pipeline Complete validation build Suitable for automated CI systems Comprehensive testing make ci-test - CI test pipeline Validation and testing only Fast feedback for pull requests Quality assurance make ci-release - CI release pipeline Build and packaging for releases Artifact preparation Release candidate creation make cd-deploy - CD deployment pipeline Complete release and deployment Artifact upload and distribution User notifications Platform-Specific Targets make linux - Build for Linux only make linux\\n# Sets PLATFORMS=linux-amd64 make macos - Build for macOS only make macos\\n# Sets PLATFORMS=macos-amd64 make windows - Build for Windows only make windows\\n# Sets PLATFORMS=windows-amd64 Debugging Targets make debug - Build with debug information make debug\\n# Sets BUILD_MODE=debug VERBOSE=true make debug-info - Show debug information Make variables and environment Build system diagnostics Troubleshooting information","breadcrumbs":"Build System » Build Targets","id":"2179","title":"Build Targets"},"218":{"body":"# 1. Initialize workspace\\nprovisioning workspace init --name production # 2. Validate configuration\\nprovisioning validate config # 3. Create infrastructure definition\\nprovisioning generate infra --new production # 4. Create servers (check mode first)\\nprovisioning server create --infra production --check # 5. Create servers (actual deployment)\\nprovisioning server create --infra production --yes # 6. Install Kubernetes\\nprovisioning taskserv create kubernetes --infra production --check\\nprovisioning taskserv create kubernetes --infra production # 7. Deploy cluster services\\nprovisioning cluster create production --check\\nprovisioning cluster create production # 8. Verify deployment\\nprovisioning server list --infra production\\nprovisioning taskserv list --infra production # 9. SSH to servers\\nprovisioning server ssh k8s-master-01","breadcrumbs":"Quick Start Cheatsheet » Complete Deployment from Scratch","id":"218","title":"Complete Deployment from Scratch"},"2180":{"body":"","breadcrumbs":"Build System » Build Tools","id":"2180","title":"Build Tools"},"2181":{"body":"All build tools are implemented as Nushell scripts with comprehensive parameter validation and error handling. /src/tools/build/compile-platform.nu Purpose : Compiles all Rust components for distribution Components Compiled : orchestrator → provisioning-orchestrator binary control-center → control-center binary control-center-ui → Web UI assets mcp-server-rust → MCP integration binary Usage : nu compile-platform.nu [options] Options: --target STRING Target platform (default: x86_64-unknown-linux-gnu) --release Build in release mode --features STRING Comma-separated features to enable --output-dir STRING Output directory (default: dist/platform) --verbose Enable verbose logging --clean Clean before building Example : nu compile-platform.nu \\\\ --target x86_64-apple-darwin \\\\ --release \\\\ --features \\"surrealdb,telemetry\\" \\\\ --output-dir dist/macos \\\\ --verbose /src/tools/build/bundle-core.nu Purpose : Bundles Nushell core libraries and CLI for distribution Components Bundled : Nushell provisioning CLI wrapper Core Nushell libraries (lib_provisioning) Configuration system Template system Extensions and plugins Usage : nu bundle-core.nu [options] Options: --output-dir STRING Output directory (default: dist/core) --config-dir STRING Configuration directory (default: dist/config) --validate Validate Nushell syntax --compress Compress bundle with gzip --exclude-dev Exclude development files (default: true) --verbose Enable verbose logging Validation Features : Syntax validation of all Nushell files Import dependency checking Function signature validation Test execution (if tests present) /src/tools/build/validate-nickel.nu Purpose : Validates and compiles Nickel schemas Validation Process : Syntax validation of all .ncl files Schema dependency checking Type constraint validation Example validation against schemas Documentation generation Usage : nu validate-nickel.nu [options] Options: --output-dir STRING Output directory (default: dist/schemas) --format-code Format Nickel code during validation --check-dependencies Validate schema dependencies --verbose Enable verbose logging /src/tools/build/test-distribution.nu Purpose : Tests generated distributions for correctness Test Types : Basic : Installation test, CLI help, version check Integration : Server creation, configuration validation Complete : Full workflow testing including cluster operations Usage : nu test-distribution.nu [options] Options: --dist-dir STRING Distribution directory (default: dist) --test-types STRING Test types: basic,integration,complete --platform STRING Target platform for testing --cleanup Remove test files after completion --verbose Enable verbose logging /src/tools/build/clean-build.nu Purpose : Intelligent build artifact cleanup Cleanup Scopes : all : Complete cleanup (build, dist, packages, cache) dist : Distribution artifacts only cache : Build cache and temporary files old : Files older than specified age Usage : nu clean-build.nu [options] Options: --scope STRING Cleanup scope: all,dist,cache,old --age DURATION Age threshold for \'old\' scope (default: 7d) --force Force cleanup without confirmation --dry-run Show what would be cleaned without doing it --verbose Enable verbose logging","breadcrumbs":"Build System » Core Build Scripts","id":"2181","title":"Core Build Scripts"},"2182":{"body":"/src/tools/distribution/generate-distribution.nu Purpose : Main distribution generator orchestrating the complete process Generation Process : Platform binary compilation Core library bundling Nickel schema validation and packaging Configuration system preparation Documentation generation Archive creation and compression Installer generation Validation and testing Usage : nu generate-distribution.nu [command] [options] Commands: Generate complete distribution quick Quick development distribution status Show generation status Options: --version STRING Version to build (default: auto-detect) --platforms STRING Comma-separated platforms --variants STRING Variants: complete,minimal --output-dir STRING Output directory (default: dist) --compress Enable compression --generate-docs Generate documentation --parallel-builds Enable parallel builds --validate-output Validate generated output --verbose Enable verbose logging Advanced Examples : # Complete multi-platform release\\nnu generate-distribution.nu \\\\ --version 2.1.0 \\\\ --platforms linux-amd64,macos-amd64,windows-amd64 \\\\ --variants complete,minimal \\\\ --compress \\\\ --generate-docs \\\\ --parallel-builds \\\\ --validate-output # Quick development build\\nnu generate-distribution.nu quick \\\\ --platform linux \\\\ --variant minimal # Status check\\nnu generate-distribution.nu status /src/tools/distribution/create-installer.nu Purpose : Creates platform-specific installers Installer Types : shell : Shell script installer (cross-platform) package : Platform packages (DEB, RPM, MSI, PKG) container : Container image with provisioning source : Source distribution with build instructions Usage : nu create-installer.nu DISTRIBUTION_DIR [options] Options: --output-dir STRING Installer output directory --installer-types STRING Installer types: shell,package,container,source --platforms STRING Target platforms --include-services Include systemd/launchd service files --create-uninstaller Generate uninstaller --validate-installer Test installer functionality --verbose Enable verbose logging","breadcrumbs":"Build System » Distribution Tools","id":"2182","title":"Distribution Tools"},"2183":{"body":"/src/tools/package/package-binaries.nu Purpose : Packages compiled binaries for distribution Package Formats : archive : TAR.GZ and ZIP archives standalone : Single binary with embedded resources installer : Platform-specific installer packages Features : Binary stripping for size reduction Compression optimization Checksum generation (SHA256, MD5) Digital signing (if configured) /src/tools/package/build-containers.nu Purpose : Builds optimized container images Container Features : Multi-stage builds for minimal image size Security scanning integration Multi-platform image generation Layer caching optimization Runtime environment configuration","breadcrumbs":"Build System » Package Tools","id":"2183","title":"Package Tools"},"2184":{"body":"/src/tools/release/create-release.nu Purpose : Automated release creation and management Release Process : Version validation and tagging Changelog generation from git history Asset building and validation Release creation (GitHub, GitLab, etc.) Asset upload and verification Release announcement preparation Usage : nu create-release.nu [options] Options: --version STRING Release version (required) --asset-dir STRING Directory containing release assets --draft Create draft release --prerelease Mark as pre-release --generate-changelog Auto-generate changelog --push-tag Push git tag --auto-upload Upload assets automatically --verbose Enable verbose logging","breadcrumbs":"Build System » Release Tools","id":"2184","title":"Release Tools"},"2185":{"body":"","breadcrumbs":"Build System » Cross-Platform Compilation","id":"2185","title":"Cross-Platform Compilation"},"2186":{"body":"Primary Platforms : linux-amd64 (x86_64-unknown-linux-gnu) macos-amd64 (x86_64-apple-darwin) windows-amd64 (x86_64-pc-windows-gnu) Additional Platforms : linux-arm64 (aarch64-unknown-linux-gnu) macos-arm64 (aarch64-apple-darwin) freebsd-amd64 (x86_64-unknown-freebsd)","breadcrumbs":"Build System » Supported Platforms","id":"2186","title":"Supported Platforms"},"2187":{"body":"Install Rust Targets : # Install additional targets\\nrustup target add x86_64-apple-darwin\\nrustup target add x86_64-pc-windows-gnu\\nrustup target add aarch64-unknown-linux-gnu\\nrustup target add aarch64-apple-darwin Platform-Specific Dependencies : macOS Cross-Compilation : # Install osxcross toolchain\\nbrew install FiloSottile/musl-cross/musl-cross\\nbrew install mingw-w64 Windows Cross-Compilation : # Install Windows dependencies\\nbrew install mingw-w64\\n# or on Linux:\\nsudo apt-get install gcc-mingw-w64","breadcrumbs":"Build System » Cross-Compilation Setup","id":"2187","title":"Cross-Compilation Setup"},"2188":{"body":"Single Platform : # Build for macOS from Linux\\nmake build-platform RUST_TARGET=x86_64-apple-darwin # Build for Windows\\nmake build-platform RUST_TARGET=x86_64-pc-windows-gnu Multiple Platforms : # Build for all configured platforms\\nmake build-cross # Specify platforms\\nmake build-cross PLATFORMS=linux-amd64,macos-amd64,windows-amd64 Platform-Specific Targets : # Quick platform builds\\nmake linux # Linux AMD64\\nmake macos # macOS AMD64\\nmake windows # Windows AMD64","breadcrumbs":"Build System » Cross-Compilation Usage","id":"2188","title":"Cross-Compilation Usage"},"2189":{"body":"","breadcrumbs":"Build System » Dependency Management","id":"2189","title":"Dependency Management"},"219":{"body":"# Deploy to dev\\nprovisioning server create --infra dev --check\\nprovisioning server create --infra dev\\nprovisioning taskserv create kubernetes --infra dev # Deploy to staging\\nprovisioning server create --infra staging --check\\nprovisioning server create --infra staging\\nprovisioning taskserv create kubernetes --infra staging # Deploy to production (with confirmation)\\nprovisioning server create --infra production --check\\nprovisioning server create --infra production\\nprovisioning taskserv create kubernetes --infra production","breadcrumbs":"Quick Start Cheatsheet » Multi-Environment Deployment","id":"219","title":"Multi-Environment Deployment"},"2190":{"body":"Required Tools : Nushell 0.107.1+ : Core shell and scripting Rust 1.70+ : Platform binary compilation Cargo : Rust package management KCL 0.11.2+ : Configuration language Git : Version control and tagging Optional Tools : Docker : Container image building Cross : Simplified cross-compilation SOPS : Secrets management Age : Encryption for secrets","breadcrumbs":"Build System » Build Dependencies","id":"2190","title":"Build Dependencies"},"2191":{"body":"Check Dependencies : make info\\n# Shows versions of all required tools # Output example:\\n# Tool Versions:\\n# Nushell: 0.107.1\\n# Rust: rustc 1.75.0\\n# Docker: Docker version 24.0.6\\n# Git: git version 2.42.0 Install Missing Dependencies : # Install Nushell\\ncargo install nu # Install Nickel\\ncargo install nickel # Install Cross (for cross-compilation)\\ncargo install cross","breadcrumbs":"Build System » Dependency Validation","id":"2191","title":"Dependency Validation"},"2192":{"body":"Rust Dependencies : Cargo cache: ~/.cargo/registry Target cache: target/ directory Cross-compilation cache: ~/.cache/cross Build Cache Management : # Clean Cargo cache\\ncargo clean # Clean cross-compilation cache\\ncross clean # Clean all caches\\nmake clean SCOPE=cache","breadcrumbs":"Build System » Dependency Caching","id":"2192","title":"Dependency Caching"},"2193":{"body":"","breadcrumbs":"Build System » Troubleshooting","id":"2193","title":"Troubleshooting"},"2194":{"body":"Rust Compilation Errors Error : linker \'cc\' not found # Solution: Install build essentials\\nsudo apt-get install build-essential # Linux\\nxcode-select --install # macOS Error : target not found # Solution: Install target\\nrustup target add x86_64-unknown-linux-gnu Error : Cross-compilation linking errors # Solution: Use cross instead of cargo\\ncargo install cross\\nmake build-platform CROSS=true Nushell Script Errors Error : command not found # Solution: Ensure Nushell is in PATH\\nwhich nu\\nexport PATH=\\"$HOME/.cargo/bin:$PATH\\" Error : Permission denied # Solution: Make scripts executable\\nchmod +x src/tools/build/*.nu Error : Module not found # Solution: Check working directory\\ncd src/tools\\nnu build/compile-platform.nu --help Nickel Validation Errors Error : nickel command not found # Solution: Install Nickel\\ncargo install nickel\\n# or\\nbrew install nickel Error : Schema validation failed # Solution: Check Nickel syntax\\nnickel fmt schemas/\\nnickel check schemas/","breadcrumbs":"Build System » Common Build Issues","id":"2194","title":"Common Build Issues"},"2195":{"body":"Slow Compilation Optimizations : # Enable parallel builds\\nmake build-all PARALLEL=true # Use faster linker\\nexport RUSTFLAGS=\\"-C link-arg=-fuse-ld=lld\\" # Increase build jobs\\nexport CARGO_BUILD_JOBS=8 Cargo Configuration (~/.cargo/config.toml): [build]\\njobs = 8 [target.x86_64-unknown-linux-gnu]\\nlinker = \\"lld\\" Memory Issues Solutions : # Reduce parallel jobs\\nexport CARGO_BUILD_JOBS=2 # Use debug build for development\\nmake dev-build BUILD_MODE=debug # Clean up between builds\\nmake clean-dist","breadcrumbs":"Build System » Build Performance Issues","id":"2195","title":"Build Performance Issues"},"2196":{"body":"Missing Assets Validation : # Test distribution\\nmake test-dist # Detailed validation\\nnu src/tools/package/validate-package.nu dist/ Size Optimization Optimizations : # Strip binaries\\nmake package-binaries STRIP=true # Enable compression\\nmake dist-generate COMPRESS=true # Use minimal variant\\nmake dist-generate VARIANTS=minimal","breadcrumbs":"Build System » Distribution Issues","id":"2196","title":"Distribution Issues"},"2197":{"body":"Enable Debug Logging : # Set environment\\nexport PROVISIONING_DEBUG=true\\nexport RUST_LOG=debug # Run with debug\\nmake debug # Verbose make output\\nmake build-all VERBOSE=true Debug Information : # Show debug information\\nmake debug-info # Build system status\\nmake status # Tool information\\nmake info","breadcrumbs":"Build System » Debug Mode","id":"2197","title":"Debug Mode"},"2198":{"body":"","breadcrumbs":"Build System » CI/CD Integration","id":"2198","title":"CI/CD Integration"},"2199":{"body":"Example Workflow (.github/workflows/build.yml): name: Build and Test\\non: [push, pull_request] jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Setup Nushell uses: hustcer/setup-nu@v3.5 - name: Setup Rust uses: actions-rs/toolchain@v1 with: toolchain: stable - name: CI Build run: | cd src/tools make ci-build - name: Upload Artifacts uses: actions/upload-artifact@v4 with: name: build-artifacts path: src/dist/","breadcrumbs":"Build System » GitHub Actions","id":"2199","title":"GitHub Actions"},"22":{"body":"Read System Overview Study all ADRs Review Integration Patterns Understand Multi-Repo Architecture","breadcrumbs":"Home » For Architects","id":"22","title":"For Architects"},"220":{"body":"# 1. Check for updates\\nprovisioning taskserv check-updates # 2. Update specific taskserv (check mode)\\nprovisioning taskserv update kubernetes --check # 3. Apply update\\nprovisioning taskserv update kubernetes # 4. Verify update\\nprovisioning taskserv list --infra production | where name == kubernetes","breadcrumbs":"Quick Start Cheatsheet » Update Infrastructure","id":"220","title":"Update Infrastructure"},"2200":{"body":"Release Workflow : name: Release\\non: push: tags: [\'v*\'] jobs: release: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Build Release run: | cd src/tools make ci-release VERSION=${{ github.ref_name }} - name: Create Release run: | cd src/tools make release VERSION=${{ github.ref_name }}","breadcrumbs":"Build System » Release Automation","id":"2200","title":"Release Automation"},"2201":{"body":"Test CI Pipeline Locally : # Run CI build pipeline\\nmake ci-build # Run CI test pipeline\\nmake ci-test # Full CI/CD pipeline\\nmake ci-release This build system provides a comprehensive, maintainable foundation for the provisioning project\'s development lifecycle, from local development to production releases.","breadcrumbs":"Build System » Local CI Testing","id":"2201","title":"Local CI Testing"},"2202":{"body":"This document provides comprehensive documentation for the provisioning project\'s distribution process, covering release workflows, package generation, multi-platform distribution, and rollback procedures.","breadcrumbs":"Distribution Process » Distribution Process Documentation","id":"2202","title":"Distribution Process Documentation"},"2203":{"body":"Overview Distribution Architecture Release Process Package Generation Multi-Platform Distribution Validation and Testing Release Management Rollback Procedures CI/CD Integration Troubleshooting","breadcrumbs":"Distribution Process » Table of Contents","id":"2203","title":"Table of Contents"},"2204":{"body":"The distribution system provides a comprehensive solution for creating, packaging, and distributing provisioning across multiple platforms with automated release management. Key Features : Multi-Platform Support : Linux, macOS, Windows with multiple architectures Multiple Distribution Variants : Complete and minimal distributions Automated Release Pipeline : From development to production deployment Package Management : Binary packages, container images, and installers Validation Framework : Comprehensive testing and validation Rollback Capabilities : Safe rollback and recovery procedures Location : /src/tools/ Main Tool : /src/tools/Makefile and associated Nushell scripts","breadcrumbs":"Distribution Process » Overview","id":"2204","title":"Overview"},"2205":{"body":"","breadcrumbs":"Distribution Process » Distribution Architecture","id":"2205","title":"Distribution Architecture"},"2206":{"body":"Distribution Ecosystem\\n├── Core Components\\n│ ├── Platform Binaries # Rust-compiled binaries\\n│ ├── Core Libraries # Nushell libraries and CLI\\n│ ├── Configuration System # TOML configuration files\\n│ └── Documentation # User and API documentation\\n├── Platform Packages\\n│ ├── Archives # TAR.GZ and ZIP files\\n│ ├── Installers # Platform-specific installers\\n│ └── Container Images # Docker/OCI images\\n├── Distribution Variants\\n│ ├── Complete # Full-featured distribution\\n│ └── Minimal # Lightweight distribution\\n└── Release Artifacts ├── Checksums # SHA256/MD5 verification ├── Signatures # Digital signatures └── Metadata # Release information","breadcrumbs":"Distribution Process » Distribution Components","id":"2206","title":"Distribution Components"},"2207":{"body":"Build Pipeline Flow\\n┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐\\n│ Source Code │ -> │ Build Stage │ -> │ Package Stage │\\n│ │ │ │ │ │\\n│ - Rust code │ │ - compile- │ │ - create- │\\n│ - Nushell libs │ │ platform │ │ archives │\\n│ - Nickel schemas│ │ - bundle-core │ │ - build- │\\n│ - Config files │ │ - validate-nickel│ │ containers │\\n└─────────────────┘ └─────────────────┘ └─────────────────┘ | v\\n┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐\\n│ Release Stage │ <- │ Validate Stage │ <- │ Distribute Stage│\\n│ │ │ │ │ │\\n│ - create- │ │ - test-dist │ │ - generate- │\\n│ release │ │ - validate- │ │ distribution │\\n│ - upload- │ │ package │ │ - create- │\\n│ artifacts │ │ - integration │ │ installers │\\n└─────────────────┘ └─────────────────┘ └─────────────────┘","breadcrumbs":"Distribution Process » Build Pipeline","id":"2207","title":"Build Pipeline"},"2208":{"body":"Complete Distribution : All Rust binaries (orchestrator, control-center, MCP server) Full Nushell library suite All providers, taskservs, and clusters Complete documentation and examples Development tools and templates Minimal Distribution : Essential binaries only Core Nushell libraries Basic provider support Essential task services Minimal documentation","breadcrumbs":"Distribution Process » Distribution Variants","id":"2208","title":"Distribution Variants"},"2209":{"body":"","breadcrumbs":"Distribution Process » Release Process","id":"2209","title":"Release Process"},"221":{"body":"# 1. Authenticate\\nauth login admin\\nauth mfa verify --code 123456 # 2. Encrypt secrets\\nkms encrypt (open secrets/production.yaml) --backend rustyvault | save secrets/production.enc # 3. Deploy with encrypted secrets\\nprovisioning cluster create production --secrets secrets/production.enc # 4. Verify deployment\\norch tasks --status completed","breadcrumbs":"Quick Start Cheatsheet » Encrypted Secrets Deployment","id":"221","title":"Encrypted Secrets Deployment"},"2210":{"body":"Release Classifications : Major Release (x.0.0): Breaking changes, new major features Minor Release (x.y.0): New features, backward compatible Patch Release (x.y.z): Bug fixes, security updates Pre-Release (x.y.z-alpha/beta/rc): Development/testing releases","breadcrumbs":"Distribution Process » Release Types","id":"2210","title":"Release Types"},"2211":{"body":"1. Preparation Phase Pre-Release Checklist : # Update dependencies and security\\ncargo update\\ncargo audit # Run comprehensive tests\\nmake ci-test # Update documentation\\nmake docs # Validate all configurations\\nmake validate-all Version Planning : # Check current version\\ngit describe --tags --always # Plan next version\\nmake status | grep Version # Validate version bump\\nnu src/tools/release/create-release.nu --dry-run --version 2.1.0 2. Build Phase Complete Build : # Clean build environment\\nmake clean # Build all platforms and variants\\nmake all # Validate build output\\nmake test-dist Build with Specific Parameters : # Build for specific platforms\\nmake all PLATFORMS=linux-amd64,macos-amd64 VARIANTS=complete # Build with custom version\\nmake all VERSION=2.1.0-rc1 # Parallel build for speed\\nmake all PARALLEL=true 3. Package Generation Create Distribution Packages : # Generate complete distributions\\nmake dist-generate # Create binary packages\\nmake package-binaries # Build container images\\nmake package-containers # Create installers\\nmake create-installers Package Validation : # Validate packages\\nmake test-dist # Check package contents\\nnu src/tools/package/validate-package.nu packages/ # Test installation\\nmake install\\nmake uninstall 4. Release Creation Automated Release : # Create complete release\\nmake release VERSION=2.1.0 # Create draft release for review\\nmake release-draft VERSION=2.1.0 # Manual release creation\\nnu src/tools/release/create-release.nu \\\\ --version 2.1.0 \\\\ --generate-changelog \\\\ --push-tag \\\\ --auto-upload Release Options : --pre-release: Mark as pre-release --draft: Create draft release --generate-changelog: Auto-generate changelog from commits --push-tag: Push git tag to remote --auto-upload: Upload assets automatically 5. Distribution and Notification Upload Artifacts : # Upload to GitHub Releases\\nmake upload-artifacts # Update package registries\\nmake update-registry # Send notifications\\nmake notify-release Registry Updates : # Update Homebrew formula\\nnu src/tools/release/update-registry.nu \\\\ --registries homebrew \\\\ --version 2.1.0 \\\\ --auto-commit # Custom registry updates\\nnu src/tools/release/update-registry.nu \\\\ --registries custom \\\\ --registry-url https://packages.company.com \\\\ --credentials-file ~/.registry-creds","breadcrumbs":"Distribution Process » Step-by-Step Release Process","id":"2211","title":"Step-by-Step Release Process"},"2212":{"body":"Complete Automated Release : # Full release pipeline\\nmake cd-deploy VERSION=2.1.0 # Equivalent manual steps:\\nmake clean\\nmake all VERSION=2.1.0\\nmake create-archives\\nmake create-installers\\nmake release VERSION=2.1.0\\nmake upload-artifacts\\nmake update-registry\\nmake notify-release","breadcrumbs":"Distribution Process » Release Automation","id":"2212","title":"Release Automation"},"2213":{"body":"","breadcrumbs":"Distribution Process » Package Generation","id":"2213","title":"Package Generation"},"2214":{"body":"Package Types : Standalone Archives : TAR.GZ and ZIP with all dependencies Platform Packages : DEB, RPM, MSI, PKG with system integration Portable Packages : Single-directory distributions Source Packages : Source code with build instructions Create Binary Packages : # Standard binary packages\\nmake package-binaries # Custom package creation\\nnu src/tools/package/package-binaries.nu \\\\ --source-dir dist/platform \\\\ --output-dir packages/binaries \\\\ --platforms linux-amd64,macos-amd64 \\\\ --format archive \\\\ --compress \\\\ --strip \\\\ --checksum Package Features : Binary Stripping : Removes debug symbols for smaller size Compression : GZIP, LZMA, and Brotli compression Checksums : SHA256 and MD5 verification Signatures : GPG and code signing support","breadcrumbs":"Distribution Process » Binary Packages","id":"2214","title":"Binary Packages"},"2215":{"body":"Container Build Process : # Build container images\\nmake package-containers # Advanced container build\\nnu src/tools/package/build-containers.nu \\\\ --dist-dir dist \\\\ --tag-prefix provisioning \\\\ --version 2.1.0 \\\\ --platforms \\"linux/amd64,linux/arm64\\" \\\\ --optimize-size \\\\ --security-scan \\\\ --multi-stage Container Features : Multi-Stage Builds : Minimal runtime images Security Scanning : Vulnerability detection Multi-Platform : AMD64, ARM64 support Layer Optimization : Efficient layer caching Runtime Configuration : Environment-based configuration Container Registry Support : Docker Hub GitHub Container Registry Amazon ECR Google Container Registry Azure Container Registry Private registries","breadcrumbs":"Distribution Process » Container Images","id":"2215","title":"Container Images"},"2216":{"body":"Installer Types : Shell Script Installer : Universal Unix/Linux installer Package Installers : DEB, RPM, MSI, PKG Container Installer : Docker/Podman setup Source Installer : Build-from-source installer Create Installers : # Generate all installer types\\nmake create-installers # Custom installer creation\\nnu src/tools/distribution/create-installer.nu \\\\ dist/provisioning-2.1.0-linux-amd64-complete \\\\ --output-dir packages/installers \\\\ --installer-types shell,package \\\\ --platforms linux,macos \\\\ --include-services \\\\ --create-uninstaller \\\\ --validate-installer Installer Features : System Integration : Systemd/Launchd service files Path Configuration : Automatic PATH updates User/System Install : Support for both user and system-wide installation Uninstaller : Clean removal capability Dependency Management : Automatic dependency resolution Configuration Setup : Initial configuration creation","breadcrumbs":"Distribution Process » Installers","id":"2216","title":"Installers"},"2217":{"body":"","breadcrumbs":"Distribution Process » Multi-Platform Distribution","id":"2217","title":"Multi-Platform Distribution"},"2218":{"body":"Primary Platforms : Linux AMD64 (x86_64-unknown-linux-gnu) Linux ARM64 (aarch64-unknown-linux-gnu) macOS AMD64 (x86_64-apple-darwin) macOS ARM64 (aarch64-apple-darwin) Windows AMD64 (x86_64-pc-windows-gnu) FreeBSD AMD64 (x86_64-unknown-freebsd) Platform-Specific Features : Linux : SystemD integration, package manager support macOS : LaunchAgent services, Homebrew packages Windows : Windows Service support, MSI installers FreeBSD : RC scripts, pkg packages","breadcrumbs":"Distribution Process » Supported Platforms","id":"2218","title":"Supported Platforms"},"2219":{"body":"Cross-Compilation Setup : # Install cross-compilation targets\\nrustup target add aarch64-unknown-linux-gnu\\nrustup target add x86_64-apple-darwin\\nrustup target add aarch64-apple-darwin\\nrustup target add x86_64-pc-windows-gnu # Install cross-compilation tools\\ncargo install cross Platform-Specific Builds : # Build for specific platform\\nmake build-platform RUST_TARGET=aarch64-apple-darwin # Build for multiple platforms\\nmake build-cross PLATFORMS=linux-amd64,macos-arm64,windows-amd64 # Platform-specific distributions\\nmake linux\\nmake macos\\nmake windows","breadcrumbs":"Distribution Process » Cross-Platform Build","id":"2219","title":"Cross-Platform Build"},"222":{"body":"","breadcrumbs":"Quick Start Cheatsheet » Debug and Check Mode","id":"222","title":"Debug and Check Mode"},"2220":{"body":"Generated Distributions : Distribution Matrix:\\nprovisioning-{version}-{platform}-{variant}.{format} Examples:\\n- provisioning-2.1.0-linux-amd64-complete.tar.gz\\n- provisioning-2.1.0-macos-arm64-minimal.tar.gz\\n- provisioning-2.1.0-windows-amd64-complete.zip\\n- provisioning-2.1.0-freebsd-amd64-minimal.tar.xz Platform Considerations : File Permissions : Executable permissions on Unix systems Path Separators : Platform-specific path handling Service Integration : Platform-specific service management Package Formats : TAR.GZ for Unix, ZIP for Windows Line Endings : CRLF for Windows, LF for Unix","breadcrumbs":"Distribution Process » Distribution Matrix","id":"2220","title":"Distribution Matrix"},"2221":{"body":"","breadcrumbs":"Distribution Process » Validation and Testing","id":"2221","title":"Validation and Testing"},"2222":{"body":"Validation Pipeline : # Complete validation\\nmake test-dist # Custom validation\\nnu src/tools/build/test-distribution.nu \\\\ --dist-dir dist \\\\ --test-types basic,integration,complete \\\\ --platform linux \\\\ --cleanup \\\\ --verbose Validation Types : Basic : Installation test, CLI help, version check Integration : Server creation, configuration validation Complete : Full workflow testing including cluster operations","breadcrumbs":"Distribution Process » Distribution Validation","id":"2222","title":"Distribution Validation"},"2223":{"body":"Test Categories : Unit Tests : Component-specific testing Integration Tests : Cross-component testing End-to-End Tests : Complete workflow testing Performance Tests : Load and performance validation Security Tests : Security scanning and validation Test Execution : # Run all tests\\nmake ci-test # Specific test types\\nnu src/tools/build/test-distribution.nu --test-types basic\\nnu src/tools/build/test-distribution.nu --test-types integration\\nnu src/tools/build/test-distribution.nu --test-types complete","breadcrumbs":"Distribution Process » Testing Framework","id":"2223","title":"Testing Framework"},"2224":{"body":"Package Integrity : # Validate package structure\\nnu src/tools/package/validate-package.nu dist/ # Check checksums\\nsha256sum -c packages/checksums.sha256 # Verify signatures\\ngpg --verify packages/provisioning-2.1.0.tar.gz.sig Installation Testing : # Test installation process\\n./packages/installers/install-provisioning-2.1.0.sh --dry-run # Test uninstallation\\n./packages/installers/uninstall-provisioning.sh --dry-run # Container testing\\ndocker run --rm provisioning:2.1.0 provisioning --version","breadcrumbs":"Distribution Process » Package Validation","id":"2224","title":"Package Validation"},"2225":{"body":"","breadcrumbs":"Distribution Process » Release Management","id":"2225","title":"Release Management"},"2226":{"body":"GitHub Release Integration : # Create GitHub release\\nnu src/tools/release/create-release.nu \\\\ --version 2.1.0 \\\\ --asset-dir packages \\\\ --generate-changelog \\\\ --push-tag \\\\ --auto-upload Release Features : Automated Changelog : Generated from git commit history Asset Management : Automatic upload of all distribution artifacts Tag Management : Semantic version tagging Release Notes : Formatted release notes with change summaries","breadcrumbs":"Distribution Process » Release Workflow","id":"2226","title":"Release Workflow"},"2227":{"body":"Semantic Versioning : MAJOR.MINOR.PATCH format (for example, 2.1.0) Pre-release suffixes (for example, 2.1.0-alpha.1, 2.1.0-rc.2) Build metadata (for example, 2.1.0+20250925.abcdef) Version Detection : # Auto-detect next version\\nnu src/tools/release/create-release.nu --release-type minor # Manual version specification\\nnu src/tools/release/create-release.nu --version 2.1.0 # Pre-release versioning\\nnu src/tools/release/create-release.nu --version 2.1.0-rc.1 --pre-release","breadcrumbs":"Distribution Process » Versioning Strategy","id":"2227","title":"Versioning Strategy"},"2228":{"body":"Artifact Types : Source Archives : Complete source code distributions Binary Archives : Compiled binary distributions Container Images : OCI-compliant container images Installers : Platform-specific installation packages Documentation : Generated documentation packages Upload and Distribution : # Upload to GitHub Releases\\nmake upload-artifacts # Upload to container registries\\ndocker push provisioning:2.1.0 # Update package repositories\\nmake update-registry","breadcrumbs":"Distribution Process » Artifact Management","id":"2228","title":"Artifact Management"},"2229":{"body":"","breadcrumbs":"Distribution Process » Rollback Procedures","id":"2229","title":"Rollback Procedures"},"223":{"body":"Enable verbose logging with --debug or -x flag: # Server creation with debug output\\nprovisioning server create --debug\\nprovisioning server create -x # Taskserv creation with debug\\nprovisioning taskserv create kubernetes --debug # Show detailed error traces\\nprovisioning --debug taskserv create kubernetes","breadcrumbs":"Quick Start Cheatsheet » Debug Mode","id":"223","title":"Debug Mode"},"2230":{"body":"Common Rollback Triggers : Critical bugs discovered post-release Security vulnerabilities identified Performance regression Compatibility issues Infrastructure failures","breadcrumbs":"Distribution Process » Rollback Scenarios","id":"2230","title":"Rollback Scenarios"},"2231":{"body":"Automated Rollback : # Rollback latest release\\nnu src/tools/release/rollback-release.nu --version 2.1.0 # Rollback with specific target\\nnu src/tools/release/rollback-release.nu \\\\ --from-version 2.1.0 \\\\ --to-version 2.0.5 \\\\ --update-registries \\\\ --notify-users Manual Rollback Steps : # 1. Identify target version\\ngit tag -l | grep -v 2.1.0 | tail -5 # 2. Create rollback release\\nnu src/tools/release/create-release.nu \\\\ --version 2.0.6 \\\\ --rollback-from 2.1.0 \\\\ --urgent # 3. Update package managers\\nnu src/tools/release/update-registry.nu \\\\ --version 2.0.6 \\\\ --rollback-notice \\"Critical fix for 2.1.0 issues\\" # 4. Notify users\\nnu src/tools/release/notify-users.nu \\\\ --channels slack,discord,email \\\\ --message-type rollback \\\\ --urgent","breadcrumbs":"Distribution Process » Rollback Process","id":"2231","title":"Rollback Process"},"2232":{"body":"Pre-Rollback Validation : Validate target version integrity Check compatibility matrix Verify rollback procedure testing Confirm communication plan Rollback Testing : # Test rollback in staging\\nnu src/tools/release/rollback-release.nu \\\\ --version 2.1.0 \\\\ --target-version 2.0.5 \\\\ --dry-run \\\\ --staging-environment # Validate rollback success\\nmake test-dist DIST_VERSION=2.0.5","breadcrumbs":"Distribution Process » Rollback Safety","id":"2232","title":"Rollback Safety"},"2233":{"body":"Critical Security Rollback : # Emergency rollback (bypasses normal procedures)\\nnu src/tools/release/rollback-release.nu \\\\ --version 2.1.0 \\\\ --emergency \\\\ --security-issue \\\\ --immediate-notify Infrastructure Failure Recovery : # Failover to backup infrastructure\\nnu src/tools/release/rollback-release.nu \\\\ --infrastructure-failover \\\\ --backup-registry \\\\ --mirror-sync","breadcrumbs":"Distribution Process » Emergency Procedures","id":"2233","title":"Emergency Procedures"},"2234":{"body":"","breadcrumbs":"Distribution Process » CI/CD Integration","id":"2234","title":"CI/CD Integration"},"2235":{"body":"Build Workflow (.github/workflows/build.yml): name: Build and Distribute\\non: push: branches: [main] pull_request: branches: [main] jobs: build: runs-on: ubuntu-latest strategy: matrix: platform: [linux, macos, windows] steps: - uses: actions/checkout@v4 - name: Setup Nushell uses: hustcer/setup-nu@v3.5 - name: Setup Rust uses: actions-rs/toolchain@v1 with: toolchain: stable - name: CI Build run: | cd src/tools make ci-build - name: Upload Build Artifacts uses: actions/upload-artifact@v4 with: name: build-${{ matrix.platform }} path: src/dist/ Release Workflow (.github/workflows/release.yml): name: Release\\non: push: tags: [\'v*\'] jobs: release: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Build Release run: | cd src/tools make ci-release VERSION=${{ github.ref_name }} - name: Create Release run: | cd src/tools make release VERSION=${{ github.ref_name }} - name: Update Registries run: | cd src/tools make update-registry VERSION=${{ github.ref_name }}","breadcrumbs":"Distribution Process » GitHub Actions Integration","id":"2235","title":"GitHub Actions Integration"},"2236":{"body":"GitLab CI Configuration (.gitlab-ci.yml): stages: - build - package - test - release build: stage: build script: - cd src/tools - make ci-build artifacts: paths: - src/dist/ expire_in: 1 hour package: stage: package script: - cd src/tools - make package-all artifacts: paths: - src/packages/ expire_in: 1 day release: stage: release script: - cd src/tools - make cd-deploy VERSION=${CI_COMMIT_TAG} only: - tags","breadcrumbs":"Distribution Process » GitLab CI Integration","id":"2236","title":"GitLab CI Integration"},"2237":{"body":"Jenkinsfile : pipeline { agent any stages { stage(\'Build\') { steps { dir(\'src/tools\') { sh \'make ci-build\' } } } stage(\'Package\') { steps { dir(\'src/tools\') { sh \'make package-all\' } } } stage(\'Release\') { when { tag \'*\' } steps { dir(\'src/tools\') { sh \\"make cd-deploy VERSION=${env.TAG_NAME}\\" } } } }\\n}","breadcrumbs":"Distribution Process » Jenkins Integration","id":"2237","title":"Jenkins Integration"},"2238":{"body":"","breadcrumbs":"Distribution Process » Troubleshooting","id":"2238","title":"Troubleshooting"},"2239":{"body":"Build Failures Rust Compilation Errors : # Solution: Clean and rebuild\\nmake clean\\ncargo clean\\nmake build-platform # Check Rust toolchain\\nrustup show\\nrustup update Cross-Compilation Issues : # Solution: Install missing targets\\nrustup target list --installed\\nrustup target add x86_64-apple-darwin # Use cross for problematic targets\\ncargo install cross\\nmake build-platform CROSS=true Package Generation Issues Missing Dependencies : # Solution: Install build tools\\nsudo apt-get install build-essential\\nbrew install gnu-tar # Check tool availability\\nmake info Permission Errors : # Solution: Fix permissions\\nchmod +x src/tools/build/*.nu\\nchmod +x src/tools/distribution/*.nu\\nchmod +x src/tools/package/*.nu Distribution Validation Failures Package Integrity Issues : # Solution: Regenerate packages\\nmake clean-dist\\nmake package-all # Verify manually\\nsha256sum packages/*.tar.gz Installation Test Failures : # Solution: Test in clean environment\\ndocker run --rm -v $(pwd):/work ubuntu:latest /work/packages/installers/install.sh # Debug installation\\n./packages/installers/install.sh --dry-run --verbose","breadcrumbs":"Distribution Process » Common Issues","id":"2239","title":"Common Issues"},"224":{"body":"Preview changes without applying them with --check or -c flag: # Check what servers would be created\\nprovisioning server create --check\\nprovisioning server create -c # Check taskserv installation\\nprovisioning taskserv create kubernetes --check # Check cluster creation\\nprovisioning cluster create buildkit --check # Combine with debug for detailed preview\\nprovisioning server create --check --debug","breadcrumbs":"Quick Start Cheatsheet » Check Mode (Dry Run)","id":"224","title":"Check Mode (Dry Run)"},"2240":{"body":"Upload Failures Network Issues : # Solution: Retry with backoff\\nnu src/tools/release/upload-artifacts.nu \\\\ --retry-count 5 \\\\ --backoff-delay 30 # Manual upload\\ngh release upload v2.1.0 packages/*.tar.gz Authentication Failures : # Solution: Refresh tokens\\ngh auth refresh\\ndocker login ghcr.io # Check credentials\\ngh auth status\\ndocker system info Registry Update Issues Homebrew Formula Issues : # Solution: Manual PR creation\\ngit clone https://github.com/Homebrew/homebrew-core\\ncd homebrew-core\\n# Edit formula\\ngit add Formula/provisioning.rb\\ngit commit -m \\"provisioning 2.1.0\\"","breadcrumbs":"Distribution Process » Release Issues","id":"2240","title":"Release Issues"},"2241":{"body":"Debug Mode : # Enable debug logging\\nexport PROVISIONING_DEBUG=true\\nexport RUST_LOG=debug # Run with verbose output\\nmake all VERBOSE=true # Debug specific components\\nnu src/tools/distribution/generate-distribution.nu \\\\ --verbose \\\\ --dry-run Monitoring Build Progress : # Monitor build logs\\ntail -f src/tools/build.log # Check build status\\nmake status # Resource monitoring\\ntop\\ndf -h This distribution process provides a robust, automated pipeline for creating, validating, and distributing provisioning across multiple platforms while maintaining high quality and reliability standards.","breadcrumbs":"Distribution Process » Debug and Monitoring","id":"2241","title":"Debug and Monitoring"},"2242":{"body":"Status: Ready for Implementation Estimated Time: 12-16 days Priority: High Related: Architecture Analysis","breadcrumbs":"Implementation Guide » Repository Restructuring - Implementation Guide","id":"2242","title":"Repository Restructuring - Implementation Guide"},"2243":{"body":"This guide provides step-by-step instructions for implementing the repository restructuring and distribution system improvements. Each phase includes specific commands, validation steps, and rollback procedures.","breadcrumbs":"Implementation Guide » Overview","id":"2243","title":"Overview"},"2244":{"body":"","breadcrumbs":"Implementation Guide » Prerequisites","id":"2244","title":"Prerequisites"},"2245":{"body":"Nushell 0.107.1+ Rust toolchain (for platform builds) Git tar/gzip curl or wget","breadcrumbs":"Implementation Guide » Required Tools","id":"2245","title":"Required Tools"},"2246":{"body":"Just (task runner) ripgrep (for code searches) fd (for file finding)","breadcrumbs":"Implementation Guide » Recommended Tools","id":"2246","title":"Recommended Tools"},"2247":{"body":"Create full backup Notify team members Create implementation branch Set aside dedicated time","breadcrumbs":"Implementation Guide » Before Starting","id":"2247","title":"Before Starting"},"2248":{"body":"","breadcrumbs":"Implementation Guide » Phase 1: Repository Restructuring (Days 1-4)","id":"2248","title":"Phase 1: Repository Restructuring (Days 1-4)"},"2249":{"body":"Step 1.1: Create Complete Backup # Create timestamped backup\\nBACKUP_DIR=\\"/Users/Akasha/project-provisioning-backup-$(date +%Y%m%d)\\"\\ncp -r /Users/Akasha/project-provisioning \\"$BACKUP_DIR\\" # Verify backup\\nls -lh \\"$BACKUP_DIR\\"\\ndu -sh \\"$BACKUP_DIR\\" # Create backup manifest\\nfind \\"$BACKUP_DIR\\" -type f > \\"$BACKUP_DIR/manifest.txt\\"\\necho \\"✅ Backup created: $BACKUP_DIR\\" Step 1.2: Analyze Current State cd /Users/Akasha/project-provisioning # Count workspace directories\\necho \\"=== Workspace Directories ===\\"\\nfd workspace -t d # Analyze workspace contents\\necho \\"=== Active Workspace ===\\"\\ndu -sh workspace/ echo \\"=== Backup Workspaces ===\\"\\ndu -sh _workspace/ backup-workspace/ workspace-librecloud/ # Find obsolete directories\\necho \\"=== Build Artifacts ===\\"\\ndu -sh target/ wrks/ NO/ # Save analysis\\n{ echo \\"# Current State Analysis - $(date)\\" echo \\"\\" echo \\"## Workspace Directories\\" fd workspace -t d echo \\"\\" echo \\"## Directory Sizes\\" du -sh workspace/ _workspace/ backup-workspace/ workspace-librecloud/ 2>/dev/null echo \\"\\" echo \\"## Build Artifacts\\" du -sh target/ wrks/ NO/ 2>/dev/null\\n} > docs/development/current-state-analysis.txt echo \\"✅ Analysis complete: docs/development/current-state-analysis.txt\\" Step 1.3: Identify Dependencies # Find all hardcoded paths\\necho \\"=== Hardcoded Paths in Nushell Scripts ===\\"\\nrg -t nu \\"workspace/|_workspace/|backup-workspace/\\" provisioning/core/nulib/ | tee hardcoded-paths.txt # Find ENV references (legacy)\\necho \\"=== ENV References ===\\"\\nrg \\"PROVISIONING_\\" provisioning/core/nulib/ | wc -l # Find workspace references in configs\\necho \\"=== Config References ===\\"\\nrg \\"workspace\\" provisioning/config/ echo \\"✅ Dependencies mapped\\" Step 1.4: Create Implementation Branch # Create and switch to implementation branch\\ngit checkout -b feat/repo-restructure # Commit analysis\\ngit add docs/development/current-state-analysis.txt\\ngit commit -m \\"docs: add current state analysis for restructuring\\" echo \\"✅ Implementation branch created: feat/repo-restructure\\" Validation: ✅ Backup exists and is complete ✅ Analysis document created ✅ Dependencies mapped ✅ Implementation branch ready","breadcrumbs":"Implementation Guide » Day 1: Backup and Analysis","id":"2249","title":"Day 1: Backup and Analysis"},"225":{"body":"Skip confirmation prompts with --yes or -y flag: # Auto-confirm server creation\\nprovisioning server create --yes\\nprovisioning server create -y # Auto-confirm deletion\\nprovisioning server delete --yes","breadcrumbs":"Quick Start Cheatsheet » Auto-Confirm Mode","id":"225","title":"Auto-Confirm Mode"},"2250":{"body":"Step 2.1: Create New Directory Structure cd /Users/Akasha/project-provisioning # Create distribution directory structure\\nmkdir -p distribution/{packages,installers,registry}\\necho \\"✅ Created distribution/\\" # Create workspace structure (keep tracked templates)\\nmkdir -p workspace/{infra,config,extensions,runtime}/{.gitkeep}\\nmkdir -p workspace/templates/{minimal,kubernetes,multi-cloud}\\necho \\"✅ Created workspace/\\" # Verify\\ntree -L 2 distribution/ workspace/ Step 2.2: Move Build Artifacts # Move Rust build artifacts\\nif [ -d \\"target\\" ]; then mv target distribution/target echo \\"✅ Moved target/ to distribution/\\"\\nfi # Move KCL packages\\nif [ -d \\"provisioning/tools/dist\\" ]; then mv provisioning/tools/dist/* distribution/packages/ 2>/dev/null || true echo \\"✅ Moved packages to distribution/\\"\\nfi # Move any existing packages\\nfind . -name \\"*.tar.gz\\" -o -name \\"*.zip\\" | grep -v node_modules | while read pkg; do mv \\"$pkg\\" distribution/packages/ echo \\" Moved: $pkg\\"\\ndone Step 2.3: Consolidate Workspaces # Identify active workspace\\necho \\"=== Current Workspace Status ===\\"\\nls -la workspace/ _workspace/ backup-workspace/ 2>/dev/null # Interactive workspace consolidation\\nread -p \\"Which workspace is currently active? (workspace/_workspace/backup-workspace): \\" ACTIVE_WS if [ \\"$ACTIVE_WS\\" != \\"workspace\\" ]; then echo \\"Consolidating $ACTIVE_WS to workspace/\\" # Merge infra configs if [ -d \\"$ACTIVE_WS/infra\\" ]; then cp -r \\"$ACTIVE_WS/infra/\\"* workspace/infra/ fi # Merge configs if [ -d \\"$ACTIVE_WS/config\\" ]; then cp -r \\"$ACTIVE_WS/config/\\"* workspace/config/ fi # Merge extensions if [ -d \\"$ACTIVE_WS/extensions\\" ]; then cp -r \\"$ACTIVE_WS/extensions/\\"* workspace/extensions/ fi echo \\"✅ Consolidated workspace\\"\\nfi # Archive old workspace directories\\nmkdir -p .archived-workspaces\\nfor ws in _workspace backup-workspace workspace-librecloud; do if [ -d \\"$ws\\" ] && [ \\"$ws\\" != \\"$ACTIVE_WS\\" ]; then mv \\"$ws\\" \\".archived-workspaces/$(basename $ws)-$(date +%Y%m%d)\\" echo \\" Archived: $ws\\" fi\\ndone echo \\"✅ Workspaces consolidated\\" Step 2.4: Remove Obsolete Directories # Remove build artifacts (already moved)\\nrm -rf wrks/\\necho \\"✅ Removed wrks/\\" # Remove test/scratch directories\\nrm -rf NO/\\necho \\"✅ Removed NO/\\" # Archive presentations (optional)\\nif [ -d \\"presentations\\" ]; then read -p \\"Archive presentations directory? (y/N): \\" ARCHIVE_PRES if [ \\"$ARCHIVE_PRES\\" = \\"y\\" ]; then tar czf presentations-archive-$(date +%Y%m%d).tar.gz presentations/ rm -rf presentations/ echo \\"✅ Archived and removed presentations/\\" fi\\nfi # Remove empty directories\\nfind . -type d -empty -delete 2>/dev/null || true echo \\"✅ Cleanup complete\\" Step 2.5: Update .gitignore # Backup existing .gitignore\\ncp .gitignore .gitignore.backup # Update .gitignore\\ncat >> .gitignore << \'EOF\' # ============================================================================\\n# Repository Restructure (2025-10-01)\\n# ============================================================================ # Workspace runtime data (user-specific)\\n/workspace/infra/\\n/workspace/config/\\n/workspace/extensions/\\n/workspace/runtime/ # Distribution artifacts\\n/distribution/packages/\\n/distribution/target/ # Build artifacts\\n/target/\\n/provisioning/platform/target/\\n/provisioning/platform/*/target/ # Rust artifacts\\n**/*.rs.bk\\nCargo.lock # Archived directories\\n/.archived-workspaces/ # Temporary files\\n*.tmp\\n*.temp\\n/tmp/\\n/wrks/\\n/NO/ # Logs\\n*.log\\n/workspace/runtime/logs/ # Cache\\n.cache/\\n/workspace/runtime/cache/ # IDE\\n.vscode/\\n.idea/\\n*.swp\\n*.swo\\n*~ # OS\\n.DS_Store\\nThumbs.db # Backup files\\n*.backup\\n*.bak EOF echo \\"✅ Updated .gitignore\\" Step 2.6: Commit Restructuring # Stage changes\\ngit add -A # Show what\'s being committed\\ngit status # Commit\\ngit commit -m \\"refactor: restructure repository for clean distribution - Consolidate workspace directories to single workspace/\\n- Move build artifacts to distribution/\\n- Remove obsolete directories (wrks/, NO/)\\n- Update .gitignore for new structure\\n- Archive old workspace variants This is part of Phase 1 of the repository restructuring plan. Related: docs/architecture/repo-dist-analysis.md\\" echo \\"✅ Restructuring committed\\" Validation: ✅ Single workspace/ directory exists ✅ Build artifacts in distribution/ ✅ No wrks/, NO/ directories ✅ .gitignore updated ✅ Changes committed","breadcrumbs":"Implementation Guide » Day 2: Directory Restructuring","id":"2250","title":"Day 2: Directory Restructuring"},"2251":{"body":"Step 3.1: Create Path Update Script # Create migration script\\ncat > provisioning/tools/migration/update-paths.nu << \'EOF\'\\n#!/usr/bin/env nu\\n# Path update script for repository restructuring # Find and replace path references\\nexport def main [] { print \\"🔧 Updating path references...\\" let replacements = [ [\\"_workspace/\\" \\"workspace/\\"] [\\"backup-workspace/\\" \\"workspace/\\"] [\\"workspace-librecloud/\\" \\"workspace/\\"] [\\"wrks/\\" \\"distribution/\\"] [\\"NO/\\" \\"distribution/\\"] ] let files = (fd -e nu -e toml -e md . provisioning/) mut updated_count = 0 for file in $files { mut content = (open $file) mut modified = false for replacement in $replacements { let old = $replacement.0 let new = $replacement.1 if ($content | str contains $old) { $content = ($content | str replace -a $old $new) $modified = true } } if $modified { $content | save -f $file $updated_count = $updated_count + 1 print $\\" ✓ Updated: ($file)\\" } } print $\\"✅ Updated ($updated_count) files\\"\\n}\\nEOF chmod +x provisioning/tools/migration/update-paths.nu Step 3.2: Run Path Updates # Create backup before updates\\ngit stash\\ngit checkout -b feat/path-updates # Run update script\\nnu provisioning/tools/migration/update-paths.nu # Review changes\\ngit diff # Test a sample file\\nnu -c \\"use provisioning/core/nulib/servers/create.nu; print \'OK\'\\" Step 3.3: Update CLAUDE.md # Update CLAUDE.md with new paths\\ncat > CLAUDE.md.new << \'EOF\'\\n# CLAUDE.md [Keep existing content, update paths section...] ## Updated Path Structure (2025-10-01) ### Core System\\n- **Main CLI**: `provisioning/core/cli/provisioning`\\n- **Libraries**: `provisioning/core/nulib/`\\n- **Extensions**: `provisioning/extensions/`\\n- **Platform**: `provisioning/platform/` ### User Workspace\\n- **Active Workspace**: `workspace/` (gitignored runtime data)\\n- **Templates**: `workspace/templates/` (tracked)\\n- **Infrastructure**: `workspace/infra/` (user configs, gitignored) ### Build System\\n- **Distribution**: `distribution/` (gitignored artifacts)\\n- **Packages**: `distribution/packages/`\\n- **Installers**: `distribution/installers/` [Continue with rest of content...]\\nEOF # Review changes\\ndiff CLAUDE.md CLAUDE.md.new # Apply if satisfied\\nmv CLAUDE.md.new CLAUDE.md Step 3.4: Update Documentation # Find all documentation files\\nfd -e md . docs/ # Update each doc with new paths\\n# This is semi-automated - review each file # Create list of docs to update\\nfd -e md . docs/ > docs-to-update.txt # Manual review and update\\necho \\"Review and update each documentation file with new paths\\"\\necho \\"Files listed in: docs-to-update.txt\\" Step 3.5: Commit Path Updates git add -A\\ngit commit -m \\"refactor: update all path references for new structure - Update Nushell scripts to use workspace/ instead of variants\\n- Update CLAUDE.md with new path structure\\n- Update documentation references\\n- Add migration script for future path changes Phase 1.3 of repository restructuring.\\" echo \\"✅ Path updates committed\\" Validation: ✅ All Nushell scripts reference correct paths ✅ CLAUDE.md updated ✅ Documentation updated ✅ No references to old paths remain","breadcrumbs":"Implementation Guide » Day 3: Update Path References","id":"2251","title":"Day 3: Update Path References"},"2252":{"body":"Step 4.1: Automated Validation # Create validation script\\ncat > provisioning/tools/validation/validate-structure.nu << \'EOF\'\\n#!/usr/bin/env nu\\n# Repository structure validation export def main [] { print \\"🔍 Validating repository structure...\\" mut passed = 0 mut failed = 0 # Check required directories exist let required_dirs = [ \\"provisioning/core\\" \\"provisioning/extensions\\" \\"provisioning/platform\\" \\"provisioning/schemas\\" \\"workspace\\" \\"workspace/templates\\" \\"distribution\\" \\"docs\\" \\"tests\\" ] for dir in $required_dirs { if ($dir | path exists) { print $\\" ✓ ($dir)\\" $passed = $passed + 1 } else { print $\\" ✗ ($dir) MISSING\\" $failed = $failed + 1 } } # Check obsolete directories don\'t exist let obsolete_dirs = [ \\"_workspace\\" \\"backup-workspace\\" \\"workspace-librecloud\\" \\"wrks\\" \\"NO\\" ] for dir in $obsolete_dirs { if not ($dir | path exists) { print $\\" ✓ ($dir) removed\\" $passed = $passed + 1 } else { print $\\" ✗ ($dir) still exists\\" $failed = $failed + 1 } } # Check no old path references let old_paths = [\\"_workspace/\\" \\"backup-workspace/\\" \\"wrks/\\"] for path in $old_paths { let results = (rg -l $path provisioning/ --iglob \\"!*.md\\" 2>/dev/null | lines) if ($results | is-empty) { print $\\" ✓ No references to ($path)\\" $passed = $passed + 1 } else { print $\\" ✗ Found references to ($path):\\" $results | each { |f| print $\\" - ($f)\\" } $failed = $failed + 1 } } print \\"\\" print $\\"Results: ($passed) passed, ($failed) failed\\" if $failed > 0 { error make { msg: \\"Validation failed\\" } } print \\"✅ Validation passed\\"\\n}\\nEOF chmod +x provisioning/tools/validation/validate-structure.nu # Run validation\\nnu provisioning/tools/validation/validate-structure.nu Step 4.2: Functional Testing # Test core commands\\necho \\"=== Testing Core Commands ===\\" # Version\\nprovisioning/core/cli/provisioning version\\necho \\"✓ version command\\" # Help\\nprovisioning/core/cli/provisioning help\\necho \\"✓ help command\\" # List\\nprovisioning/core/cli/provisioning list servers\\necho \\"✓ list command\\" # Environment\\nprovisioning/core/cli/provisioning env\\necho \\"✓ env command\\" # Validate config\\nprovisioning/core/cli/provisioning validate config\\necho \\"✓ validate command\\" echo \\"✅ Functional tests passed\\" Step 4.3: Integration Testing # Test workflow system\\necho \\"=== Testing Workflow System ===\\" # List workflows\\nnu -c \\"use provisioning/core/nulib/workflows/management.nu *; workflow list\\"\\necho \\"✓ workflow list\\" # Test workspace commands\\necho \\"=== Testing Workspace Commands ===\\" # Workspace info\\nprovisioning/core/cli/provisioning workspace info\\necho \\"✓ workspace info\\" echo \\"✅ Integration tests passed\\" Step 4.4: Create Test Report { echo \\"# Repository Restructuring - Validation Report\\" echo \\"Date: $(date)\\" echo \\"\\" echo \\"## Structure Validation\\" nu provisioning/tools/validation/validate-structure.nu 2>&1 echo \\"\\" echo \\"## Functional Tests\\" echo \\"✓ version command\\" echo \\"✓ help command\\" echo \\"✓ list command\\" echo \\"✓ env command\\" echo \\"✓ validate command\\" echo \\"\\" echo \\"## Integration Tests\\" echo \\"✓ workflow list\\" echo \\"✓ workspace info\\" echo \\"\\" echo \\"## Conclusion\\" echo \\"✅ Phase 1 validation complete\\"\\n} > docs/development/phase1-validation-report.md echo \\"✅ Test report created: docs/development/phase1-validation-report.md\\" Step 4.5: Update README # Update main README with new structure\\n# This is manual - review and update README.md echo \\"📝 Please review and update README.md with new structure\\"\\necho \\" - Update directory structure diagram\\"\\necho \\" - Update installation instructions\\"\\necho \\" - Update quick start guide\\" Step 4.6: Finalize Phase 1 # Commit validation and reports\\ngit add -A\\ngit commit -m \\"test: add validation for repository restructuring - Add structure validation script\\n- Add functional tests\\n- Add integration tests\\n- Create validation report\\n- Document Phase 1 completion Phase 1 complete: Repository restructuring validated.\\" # Merge to implementation branch\\ngit checkout feat/repo-restructure\\ngit merge feat/path-updates echo \\"✅ Phase 1 complete and merged\\" Validation: ✅ All validation tests pass ✅ Functional tests pass ✅ Integration tests pass ✅ Validation report created ✅ README updated ✅ Phase 1 changes merged","breadcrumbs":"Implementation Guide » Day 4: Validation and Testing","id":"2252","title":"Day 4: Validation and Testing"},"2253":{"body":"","breadcrumbs":"Implementation Guide » Phase 2: Build System Implementation (Days 5-8)","id":"2253","title":"Phase 2: Build System Implementation (Days 5-8)"},"2254":{"body":"Step 5.1: Create Build Tools Directory mkdir -p provisioning/tools/build\\ncd provisioning/tools/build # Create directory structure\\nmkdir -p {core,platform,extensions,validation,distribution} echo \\"✅ Build tools directory created\\" Step 5.2: Implement Core Build System # Create main build orchestrator\\n# See full implementation in repo-dist-analysis.md\\n# Copy build-system.nu from the analysis document # Test build system\\nnu build-system.nu status Step 5.3: Implement Core Packaging # Create package-core.nu\\n# This packages Nushell libraries, KCL schemas, templates # Test core packaging\\nnu build-system.nu build-core --version dev Step 5.4: Create Justfile # Create Justfile in project root\\n# See full Justfile in repo-dist-analysis.md # Test Justfile\\njust --list\\njust status Validation: ✅ Build system structure exists ✅ Core build orchestrator works ✅ Core packaging works ✅ Justfile functional","breadcrumbs":"Implementation Guide » Day 5: Build System Core","id":"2254","title":"Day 5: Build System Core"},"2255":{"body":"[Follow similar pattern for remaining build system components]","breadcrumbs":"Implementation Guide » Day 6-8: Continue with Platform, Extensions, and Validation","id":"2255","title":"Day 6-8: Continue with Platform, Extensions, and Validation"},"2256":{"body":"","breadcrumbs":"Implementation Guide » Phase 3: Installation System (Days 9-11)","id":"2256","title":"Phase 3: Installation System (Days 9-11)"},"2257":{"body":"Step 9.1: Create install.nu mkdir -p distribution/installers # Create install.nu\\n# See full implementation in repo-dist-analysis.md Step 9.2: Test Installation # Test installation to /tmp\\nnu distribution/installers/install.nu --prefix /tmp/provisioning-test # Verify\\nls -lh /tmp/provisioning-test/ # Test uninstallation\\nnu distribution/installers/install.nu uninstall --prefix /tmp/provisioning-test Validation: ✅ Installer works ✅ Files installed to correct locations ✅ Uninstaller works ✅ No files left after uninstall","breadcrumbs":"Implementation Guide » Day 9: Nushell Installer","id":"2257","title":"Day 9: Nushell Installer"},"2258":{"body":"","breadcrumbs":"Implementation Guide » Rollback Procedures","id":"2258","title":"Rollback Procedures"},"2259":{"body":"# Restore from backup\\nrm -rf /Users/Akasha/project-provisioning\\ncp -r \\"$BACKUP_DIR\\" /Users/Akasha/project-provisioning # Return to main branch\\ncd /Users/Akasha/project-provisioning\\ngit checkout main\\ngit branch -D feat/repo-restructure","breadcrumbs":"Implementation Guide » If Phase 1 Fails","id":"2259","title":"If Phase 1 Fails"},"226":{"body":"Wait for operations to complete with --wait or -w flag: # Wait for server creation to complete\\nprovisioning server create --wait # Wait for taskserv installation\\nprovisioning taskserv create kubernetes --wait","breadcrumbs":"Quick Start Cheatsheet » Wait Mode","id":"226","title":"Wait Mode"},"2260":{"body":"# Revert build system commits\\ngit checkout feat/repo-restructure\\ngit revert ","breadcrumbs":"Implementation Guide » If Build System Fails","id":"2260","title":"If Build System Fails"},"2261":{"body":"# Clean up test installation\\nrm -rf /tmp/provisioning-test\\nsudo rm -rf /usr/local/lib/provisioning\\nsudo rm -rf /usr/local/share/provisioning","breadcrumbs":"Implementation Guide » If Installation Fails","id":"2261","title":"If Installation Fails"},"2262":{"body":"","breadcrumbs":"Implementation Guide » Checklist","id":"2262","title":"Checklist"},"2263":{"body":"Day 1: Backup and analysis complete Day 2: Directory restructuring complete Day 3: Path references updated Day 4: Validation passed","breadcrumbs":"Implementation Guide » Phase 1: Repository Restructuring","id":"2263","title":"Phase 1: Repository Restructuring"},"2264":{"body":"Day 5: Core build system implemented Day 6: Platform/extensions packaging Day 7: Package validation Day 8: Build system tested","breadcrumbs":"Implementation Guide » Phase 2: Build System","id":"2264","title":"Phase 2: Build System"},"2265":{"body":"Day 9: Nushell installer created Day 10: Bash installer and CLI Day 11: Multi-OS testing","breadcrumbs":"Implementation Guide » Phase 3: Installation","id":"2265","title":"Phase 3: Installation"},"2266":{"body":"Day 12: Registry system Day 13: Registry commands Day 14: Registry hosting","breadcrumbs":"Implementation Guide » Phase 4: Registry (Optional)","id":"2266","title":"Phase 4: Registry (Optional)"},"2267":{"body":"Day 15: Documentation updated Day 16: Release prepared","breadcrumbs":"Implementation Guide » Phase 5: Documentation","id":"2267","title":"Phase 5: Documentation"},"2268":{"body":"Take breaks between phases - Don\'t rush Test thoroughly - Each phase builds on previous Commit frequently - Small, atomic commits Document issues - Track any problems encountered Ask for review - Get feedback at phase boundaries","breadcrumbs":"Implementation Guide » Notes","id":"2268","title":"Notes"},"2269":{"body":"If you encounter issues: Check the validation reports Review the rollback procedures Consult the architecture analysis Create an issue in the tracker","breadcrumbs":"Implementation Guide » Support","id":"2269","title":"Support"},"227":{"body":"Specify target infrastructure with --infra or -i flag: # Create servers in specific infrastructure\\nprovisioning server create --infra production\\nprovisioning server create -i production # List servers in specific infrastructure\\nprovisioning server list --infra production","breadcrumbs":"Quick Start Cheatsheet » Infrastructure Selection","id":"227","title":"Infrastructure Selection"},"2270":{"body":"This document provides a comprehensive overview of the provisioning project\'s structure after the major reorganization, explaining both the new development-focused organization and the preserved existing functionality.","breadcrumbs":"Project Structure » Project Structure Guide","id":"2270","title":"Project Structure Guide"},"2271":{"body":"Overview New Structure vs Legacy Core Directories Development Workspace File Naming Conventions Navigation Guide Migration Path","breadcrumbs":"Project Structure » Table of Contents","id":"2271","title":"Table of Contents"},"2272":{"body":"The provisioning project has been restructured to support a dual-organization approach: src/ : Development-focused structure with build tools, distribution system, and core components Legacy directories : Preserved in their original locations for backward compatibility workspace/ : Development workspace with tools and runtime management This reorganization enables efficient development workflows while maintaining full backward compatibility with existing deployments.","breadcrumbs":"Project Structure » Overview","id":"2272","title":"Overview"},"2273":{"body":"","breadcrumbs":"Project Structure » New Structure vs Legacy","id":"2273","title":"New Structure vs Legacy"},"2274":{"body":"src/\\n├── config/ # System configuration\\n├── control-center/ # Control center application\\n├── control-center-ui/ # Web UI for control center\\n├── core/ # Core system libraries\\n├── docs/ # Documentation (new)\\n├── extensions/ # Extension framework\\n├── generators/ # Code generation tools\\n├── schemas/ # Nickel configuration schemas (migrated from kcl/)\\n├── orchestrator/ # Hybrid Rust/Nushell orchestrator\\n├── platform/ # Platform-specific code\\n├── provisioning/ # Main provisioning\\n├── templates/ # Template files\\n├── tools/ # Build and development tools\\n└── utils/ # Utility scripts","breadcrumbs":"Project Structure » New Development Structure (/src/)","id":"2274","title":"New Development Structure (/src/)"},"2275":{"body":"repo-cnz/\\n├── cluster/ # Cluster configurations (preserved)\\n├── core/ # Core system (preserved)\\n├── generate/ # Generation scripts (preserved)\\n├── schemas/ # Nickel schemas (migrated from kcl/)\\n├── klab/ # Development lab (preserved)\\n├── nushell-plugins/ # Plugin development (preserved)\\n├── providers/ # Cloud providers (preserved)\\n├── taskservs/ # Task services (preserved)\\n└── templates/ # Template files (preserved)","breadcrumbs":"Project Structure » Legacy Structure (Preserved)","id":"2275","title":"Legacy Structure (Preserved)"},"2276":{"body":"workspace/\\n├── config/ # Development configuration\\n├── extensions/ # Extension development\\n├── infra/ # Development infrastructure\\n├── lib/ # Workspace libraries\\n├── runtime/ # Runtime data\\n└── tools/ # Workspace management tools","breadcrumbs":"Project Structure » Development Workspace (/workspace/)","id":"2276","title":"Development Workspace (/workspace/)"},"2277":{"body":"","breadcrumbs":"Project Structure » Core Directories","id":"2277","title":"Core Directories"},"2278":{"body":"Purpose : Development-focused core libraries and entry points Key Files : nulib/provisioning - Main CLI entry point (symlinks to legacy location) nulib/lib_provisioning/ - Core provisioning libraries nulib/workflows/ - Workflow management (orchestrator integration) Relationship to Legacy : Preserves original core/ functionality while adding development enhancements","breadcrumbs":"Project Structure » /src/core/ - Core Development Libraries","id":"2278","title":"/src/core/ - Core Development Libraries"},"2279":{"body":"Purpose : Complete build system for the provisioning project Key Components : tools/\\n├── build/ # Build tools\\n│ ├── compile-platform.nu # Platform-specific compilation\\n│ ├── bundle-core.nu # Core library bundling\\n│ ├── validate-nickel.nu # Nickel schema validation\\n│ ├── clean-build.nu # Build cleanup\\n│ └── test-distribution.nu # Distribution testing\\n├── distribution/ # Distribution tools\\n│ ├── generate-distribution.nu # Main distribution generator\\n│ ├── prepare-platform-dist.nu # Platform-specific distribution\\n│ ├── prepare-core-dist.nu # Core distribution\\n│ ├── create-installer.nu # Installer creation\\n│ └── generate-docs.nu # Documentation generation\\n├── package/ # Packaging tools\\n│ ├── package-binaries.nu # Binary packaging\\n│ ├── build-containers.nu # Container image building\\n│ ├── create-tarball.nu # Archive creation\\n│ └── validate-package.nu # Package validation\\n├── release/ # Release management\\n│ ├── create-release.nu # Release creation\\n│ ├── upload-artifacts.nu # Artifact upload\\n│ ├── rollback-release.nu # Release rollback\\n│ ├── notify-users.nu # Release notifications\\n│ └── update-registry.nu # Package registry updates\\n└── Makefile # Main build system (40+ targets)","breadcrumbs":"Project Structure » /src/tools/ - Build and Development Tools","id":"2279","title":"/src/tools/ - Build and Development Tools"},"228":{"body":"","breadcrumbs":"Quick Start Cheatsheet » Output Formats","id":"228","title":"Output Formats"},"2280":{"body":"Purpose : Rust/Nushell hybrid orchestrator for solving deep call stack limitations Key Components : src/ - Rust orchestrator implementation scripts/ - Orchestrator management scripts data/ - File-based task queue and persistence Integration : Provides REST API and workflow management while preserving all Nushell business logic","breadcrumbs":"Project Structure » /src/orchestrator/ - Hybrid Orchestrator","id":"2280","title":"/src/orchestrator/ - Hybrid Orchestrator"},"2281":{"body":"Purpose : Enhanced version of the main provisioning with additional features Key Features : Batch workflow system (v3.1.0) Provider-agnostic design Configuration-driven architecture (v2.0.0)","breadcrumbs":"Project Structure » /src/provisioning/ - Enhanced Provisioning","id":"2281","title":"/src/provisioning/ - Enhanced Provisioning"},"2282":{"body":"Purpose : Complete development environment with tools and runtime management Key Components : tools/workspace.nu - Unified workspace management interface lib/path-resolver.nu - Smart path resolution system config/ - Environment-specific development configurations extensions/ - Extension development templates and examples infra/ - Development infrastructure examples runtime/ - Isolated runtime data per user","breadcrumbs":"Project Structure » /workspace/ - Development Workspace","id":"2282","title":"/workspace/ - Development Workspace"},"2283":{"body":"","breadcrumbs":"Project Structure » Development Workspace","id":"2283","title":"Development Workspace"},"2284":{"body":"The workspace provides a sophisticated development environment: Initialization : cd workspace/tools\\nnu workspace.nu init --user-name developer --infra-name my-infra Health Monitoring : nu workspace.nu health --detailed --fix-issues Path Resolution : use lib/path-resolver.nu\\nlet config = (path-resolver resolve_config \\"user\\" --workspace-user \\"john\\")","breadcrumbs":"Project Structure » Workspace Management","id":"2284","title":"Workspace Management"},"2285":{"body":"The workspace provides templates for developing: Providers : Custom cloud provider implementations Task Services : Infrastructure service components Clusters : Complete deployment solutions Templates are available in workspace/extensions/{type}/template/","breadcrumbs":"Project Structure » Extension Development","id":"2285","title":"Extension Development"},"2286":{"body":"The workspace implements a sophisticated configuration cascade: Workspace user configuration (workspace/config/{user}.toml) Environment-specific defaults (workspace/config/{env}-defaults.toml) Workspace defaults (workspace/config/dev-defaults.toml) Core system defaults (config.defaults.toml)","breadcrumbs":"Project Structure » Configuration Hierarchy","id":"2286","title":"Configuration Hierarchy"},"2287":{"body":"","breadcrumbs":"Project Structure » File Naming Conventions","id":"2287","title":"File Naming Conventions"},"2288":{"body":"Commands : kebab-case - create-server.nu, validate-config.nu Modules : snake_case - lib_provisioning, path_resolver Scripts : kebab-case - workspace-health.nu, runtime-manager.nu","breadcrumbs":"Project Structure » Nushell Files (.nu)","id":"2288","title":"Nushell Files (.nu)"},"2289":{"body":"TOML : kebab-case.toml - config-defaults.toml, user-settings.toml Environment : {env}-defaults.toml - dev-defaults.toml, prod-defaults.toml Examples : *.toml.example - local-overrides.toml.example","breadcrumbs":"Project Structure » Configuration Files","id":"2289","title":"Configuration Files"},"229":{"body":"# Output as JSON\\nprovisioning server list --out json\\nprovisioning taskserv list --out json # Pipeline JSON output\\nprovisioning server list --out json | jq \'.[] | select(.status == \\"running\\")\'","breadcrumbs":"Quick Start Cheatsheet » JSON Output","id":"229","title":"JSON Output"},"2290":{"body":"Schemas : kebab-case.ncl - server-config.ncl, workflow-schema.ncl Configuration : manifest.toml - Package metadata Structure : Organized in schemas/ directories per extension","breadcrumbs":"Project Structure » Nickel Files (.ncl)","id":"2290","title":"Nickel Files (.ncl)"},"2291":{"body":"Scripts : kebab-case.nu - compile-platform.nu, generate-distribution.nu Makefiles : Makefile - Standard naming Archives : {project}-{version}-{platform}-{variant}.{ext}","breadcrumbs":"Project Structure » Build and Distribution","id":"2291","title":"Build and Distribution"},"2292":{"body":"","breadcrumbs":"Project Structure » Navigation Guide","id":"2292","title":"Navigation Guide"},"2293":{"body":"Core System Entry Points : # Main CLI (development version)\\n/src/core/nulib/provisioning # Legacy CLI (production version)\\n/core/nulib/provisioning # Workspace management\\n/workspace/tools/workspace.nu Build System : # Main build system\\ncd /src/tools && make help # Quick development build\\nmake dev-build # Complete distribution\\nmake all Configuration Files : # System defaults\\n/config.defaults.toml # User configuration (workspace)\\n/workspace/config/{user}.toml # Environment-specific\\n/workspace/config/{env}-defaults.toml Extension Development : # Provider template\\n/workspace/extensions/providers/template/ # Task service template\\n/workspace/extensions/taskservs/template/ # Cluster template\\n/workspace/extensions/clusters/template/","breadcrumbs":"Project Structure » Finding Components","id":"2293","title":"Finding Components"},"2294":{"body":"1. Development Setup : # Initialize workspace\\ncd workspace/tools\\nnu workspace.nu init --user-name $USER # Check health\\nnu workspace.nu health --detailed 2. Building Distribution : # Complete build\\ncd src/tools\\nmake all # Platform-specific build\\nmake linux\\nmake macos\\nmake windows 3. Extension Development : # Create new provider\\ncp -r workspace/extensions/providers/template workspace/extensions/providers/my-provider # Test extension\\nnu workspace/extensions/providers/my-provider/nulib/provider.nu test","breadcrumbs":"Project Structure » Common Workflows","id":"2294","title":"Common Workflows"},"2295":{"body":"Existing Commands Still Work : # All existing commands preserved\\n./core/nulib/provisioning server create\\n./core/nulib/provisioning taskserv install kubernetes\\n./core/nulib/provisioning cluster create buildkit Configuration Migration : ENV variables still supported as fallbacks New configuration system provides better defaults Migration tools available in src/tools/migration/","breadcrumbs":"Project Structure » Legacy Compatibility","id":"2295","title":"Legacy Compatibility"},"2296":{"body":"","breadcrumbs":"Project Structure » Migration Path","id":"2296","title":"Migration Path"},"2297":{"body":"No Changes Required : All existing commands continue to work Configuration files remain compatible Existing infrastructure deployments unaffected Optional Enhancements : Migrate to new configuration system for better defaults Use workspace for development environments Leverage new build system for custom distributions","breadcrumbs":"Project Structure » For Users","id":"2297","title":"For Users"},"2298":{"body":"Development Environment : Initialize development workspace: nu workspace/tools/workspace.nu init Use new build system: cd src/tools && make dev-build Leverage extension templates for custom development Build System : Use new Makefile for comprehensive build management Leverage distribution tools for packaging Use release management for version control Orchestrator Integration : Start orchestrator for workflow management: cd src/orchestrator && ./scripts/start-orchestrator.nu Use workflow APIs for complex operations Leverage batch operations for efficiency","breadcrumbs":"Project Structure » For Developers","id":"2298","title":"For Developers"},"2299":{"body":"Available Migration Scripts : src/tools/migration/config-migration.nu - Configuration migration src/tools/migration/workspace-setup.nu - Workspace initialization src/tools/migration/path-resolver.nu - Path resolution migration Validation Tools : src/tools/validation/system-health.nu - System health validation src/tools/validation/compatibility-check.nu - Compatibility verification src/tools/validation/migration-status.nu - Migration status tracking","breadcrumbs":"Project Structure » Migration Tools","id":"2299","title":"Migration Tools"},"23":{"body":"","breadcrumbs":"Home » System Capabilities","id":"23","title":"System Capabilities"},"230":{"body":"# Output as YAML\\nprovisioning server list --out yaml\\nprovisioning taskserv list --out yaml # Pipeline YAML output\\nprovisioning server list --out yaml | yq \'.[] | select(.status == \\"running\\")\'","breadcrumbs":"Quick Start Cheatsheet » YAML Output","id":"230","title":"YAML Output"},"2300":{"body":"","breadcrumbs":"Project Structure » Architecture Benefits","id":"2300","title":"Architecture Benefits"},"2301":{"body":"Build System : Comprehensive 40+ target Makefile system Workspace Isolation : Per-user development environments Extension Framework : Template-based extension development","breadcrumbs":"Project Structure » Development Efficiency","id":"2301","title":"Development Efficiency"},"2302":{"body":"Backward Compatibility : All existing functionality preserved Configuration Migration : Gradual migration from ENV to config-driven Orchestrator Architecture : Hybrid Rust/Nushell for performance and flexibility Workflow Management : Batch operations with rollback capabilities","breadcrumbs":"Project Structure » Production Reliability","id":"2302","title":"Production Reliability"},"2303":{"body":"Clean Separation : Development tools separate from production code Organized Structure : Logical grouping of related functionality Documentation : Comprehensive documentation and examples Testing Framework : Built-in testing and validation tools This structure represents a significant evolution in the project\'s organization while maintaining complete backward compatibility and providing powerful new development capabilities.","breadcrumbs":"Project Structure » Maintenance Benefits","id":"2303","title":"Maintenance Benefits"},"2304":{"body":"","breadcrumbs":"Ctrl-C Implementation Notes » CTRL-C Handling Implementation Notes","id":"2304","title":"CTRL-C Handling Implementation Notes"},"2305":{"body":"Implemented graceful CTRL-C handling for sudo password prompts during server creation/generation operations.","breadcrumbs":"Ctrl-C Implementation Notes » Overview","id":"2305","title":"Overview"},"2306":{"body":"When fix_local_hosts: true is set, the provisioning tool requires sudo access to modify /etc/hosts and SSH config. When a user cancels the sudo password prompt (no password, wrong password, timeout), the system would: Exit with code 1 (sudo failed) Propagate null values up the call stack Show cryptic Nushell errors about pipeline failures Leave the operation in an inconsistent state Important Unix Limitation : Pressing CTRL-C at the sudo password prompt sends SIGINT to the entire process group, interrupting Nushell before exit code handling can occur. This cannot be caught and is expected Unix behavior.","breadcrumbs":"Ctrl-C Implementation Notes » Problem Statement","id":"2306","title":"Problem Statement"},"2307":{"body":"","breadcrumbs":"Ctrl-C Implementation Notes » Solution Architecture","id":"2307","title":"Solution Architecture"},"2308":{"body":"Instead of using exit 130 which kills the entire process, we use return values to signal cancellation and let each layer of the call stack handle it gracefully.","breadcrumbs":"Ctrl-C Implementation Notes » Key Principle: Return Values, Not Exit Codes","id":"2308","title":"Key Principle: Return Values, Not Exit Codes"},"2309":{"body":"Detection Layer (ssh.nu helper functions) Detects sudo cancellation via exit code + stderr Returns false instead of calling exit Propagation Layer (ssh.nu core functions) on_server_ssh(): Returns false on cancellation server_ssh(): Uses reduce to propagate failures Handling Layer (create.nu, generate.nu) Checks return values Displays user-friendly messages Returns false to caller","breadcrumbs":"Ctrl-C Implementation Notes » Three-Layer Approach","id":"2309","title":"Three-Layer Approach"},"231":{"body":"# Output as table (default)\\nprovisioning server list\\nprovisioning server list --out table # Pretty-printed table\\nprovisioning server list | table","breadcrumbs":"Quick Start Cheatsheet » Table Output (Default)","id":"231","title":"Table Output (Default)"},"2310":{"body":"","breadcrumbs":"Ctrl-C Implementation Notes » Implementation Details","id":"2310","title":"Implementation Details"},"2311":{"body":"def check_sudo_cached []: nothing -> bool { let result = (do --ignore-errors { ^sudo -n true } | complete) $result.exit_code == 0\\n} def run_sudo_with_interrupt_check [ command: closure operation_name: string\\n]: nothing -> bool { let result = (do --ignore-errors { do $command } | complete) if $result.exit_code == 1 and ($result.stderr | str contains \\"password is required\\") { print \\"\\\\n⚠ Operation cancelled - sudo password required but not provided\\" print \\"ℹ Run \'sudo -v\' first to cache credentials, or run without --fix-local-hosts\\" return false # Signal cancellation } else if $result.exit_code != 0 and $result.exit_code != 1 { error make {msg: $\\"($operation_name) failed: ($result.stderr)\\"} } true\\n} Design Decision : Return bool instead of throwing error or calling exit. This allows the caller to decide how to handle cancellation.","breadcrumbs":"Ctrl-C Implementation Notes » 1. Helper Functions (ssh.nu:11-32)","id":"2311","title":"1. Helper Functions (ssh.nu:11-32)"},"2312":{"body":"if $server.fix_local_hosts and not (check_sudo_cached) { print \\"\\\\n⚠ Sudo access required for --fix-local-hosts\\" print \\"ℹ You will be prompted for your password, or press CTRL-C to cancel\\" print \\" Tip: Run \'sudo -v\' beforehand to cache credentials\\\\n\\"\\n} Design Decision : Warn users upfront so they\'re not surprised by the password prompt.","breadcrumbs":"Ctrl-C Implementation Notes » 2. Pre-emptive Warning (ssh.nu:155-160)","id":"2312","title":"2. Pre-emptive Warning (ssh.nu:155-160)"},"2313":{"body":"All sudo commands wrapped with detection: let result = (do --ignore-errors { ^sudo } | complete)\\nif $result.exit_code == 1 and ($result.stderr | str contains \\"password is required\\") { print \\"\\\\n⚠ Operation cancelled\\" return false\\n} Design Decision : Use do --ignore-errors + complete to capture both exit code and stderr without throwing exceptions.","breadcrumbs":"Ctrl-C Implementation Notes » 3. CTRL-C Detection (ssh.nu:171-199)","id":"2313","title":"3. CTRL-C Detection (ssh.nu:171-199)"},"2314":{"body":"Using Nushell\'s reduce instead of mutable variables: let all_succeeded = ($settings.data.servers | reduce -f true { |server, acc| if $text_match == null or $server.hostname == $text_match { let result = (on_server_ssh $settings $server $ip_type $request_from $run) $acc and $result } else { $acc }\\n}) Design Decision : Nushell doesn\'t allow mutable variable capture in closures. Use reduce for accumulating boolean state across iterations.","breadcrumbs":"Ctrl-C Implementation Notes » 4. State Accumulation Pattern (ssh.nu:122-129)","id":"2314","title":"4. State Accumulation Pattern (ssh.nu:122-129)"},"2315":{"body":"let ssh_result = (on_server_ssh $settings $server \\"pub\\" \\"create\\" false)\\nif not $ssh_result { _print \\"\\\\n✗ Server creation cancelled\\" return false\\n} Design Decision : Check return value and provide context-specific message before returning.","breadcrumbs":"Ctrl-C Implementation Notes » 5. Caller Handling (create.nu:262-266, generate.nu:269-273)","id":"2315","title":"5. Caller Handling (create.nu:262-266, generate.nu:269-273)"},"2316":{"body":"User presses CTRL-C during password prompt ↓\\nsudo exits with code 1, stderr: \\"password is required\\" ↓\\ndo --ignore-errors captures exit code & stderr ↓\\nDetection logic identifies cancellation ↓\\nPrint user-friendly message ↓\\nReturn false (not exit!) ↓\\non_server_ssh returns false ↓\\nCaller (create.nu/generate.nu) checks return value ↓\\nPrint \\"✗ Server creation cancelled\\" ↓\\nReturn false to settings.nu ↓\\nsettings.nu handles false gracefully (no append) ↓\\nClean exit, no cryptic errors","breadcrumbs":"Ctrl-C Implementation Notes » Error Flow Diagram","id":"2316","title":"Error Flow Diagram"},"2317":{"body":"","breadcrumbs":"Ctrl-C Implementation Notes » Nushell Idioms Used","id":"2317","title":"Nushell Idioms Used"},"2318":{"body":"Captures both stdout, stderr, and exit code without throwing: let result = (do --ignore-errors { ^sudo command } | complete)\\n# result = { stdout: \\"...\\", stderr: \\"...\\", exit_code: 1 }","breadcrumbs":"Ctrl-C Implementation Notes » 1. do --ignore-errors + complete","id":"2318","title":"1. do --ignore-errors + complete"},"2319":{"body":"Instead of mutable variables in loops: # ❌ BAD - mutable capture in closure\\nmut all_succeeded = true\\n$servers | each { |s| $all_succeeded = false # Error: capture of mutable variable\\n} # ✅ GOOD - reduce with accumulator\\nlet all_succeeded = ($servers | reduce -f true { |s, acc| $acc and (check_server $s)\\n})","breadcrumbs":"Ctrl-C Implementation Notes » 2. reduce for Accumulation","id":"2319","title":"2. reduce for Accumulation"},"232":{"body":"# Output as plain text\\nprovisioning server list --out text","breadcrumbs":"Quick Start Cheatsheet » Text Output","id":"232","title":"Text Output"},"2320":{"body":"if not $condition { print \\"Error message\\" return false\\n}\\n# Continue with happy path","breadcrumbs":"Ctrl-C Implementation Notes » 3. Early Returns for Error Handling","id":"2320","title":"3. Early Returns for Error Handling"},"2321":{"body":"","breadcrumbs":"Ctrl-C Implementation Notes » Testing Scenarios","id":"2321","title":"Testing Scenarios"},"2322":{"body":"provisioning -c server create\\n# Password: [CTRL-C] # Expected Output:\\n# ⚠ Operation cancelled - sudo password required but not provided\\n# ℹ Run \'sudo -v\' first to cache credentials\\n# ✗ Server creation cancelled","breadcrumbs":"Ctrl-C Implementation Notes » Scenario 1: CTRL-C During First Sudo Command","id":"2322","title":"Scenario 1: CTRL-C During First Sudo Command"},"2323":{"body":"sudo -v\\nprovisioning -c server create # Expected: No password prompt, smooth operation","breadcrumbs":"Ctrl-C Implementation Notes » Scenario 2: Pre-cached Credentials","id":"2323","title":"Scenario 2: Pre-cached Credentials"},"2324":{"body":"provisioning -c server create\\n# Password: [wrong]\\n# Password: [wrong]\\n# Password: [wrong] # Expected: Same as CTRL-C (treated as cancellation)","breadcrumbs":"Ctrl-C Implementation Notes » Scenario 3: Wrong Password 3 Times","id":"2324","title":"Scenario 3: Wrong Password 3 Times"},"2325":{"body":"# If creating multiple servers and CTRL-C on second:\\n# - First server completes successfully\\n# - Second server shows cancellation message\\n# - Operation stops, doesn\'t proceed to third","breadcrumbs":"Ctrl-C Implementation Notes » Scenario 4: Multiple Servers, Cancel on Second","id":"2325","title":"Scenario 4: Multiple Servers, Cancel on Second"},"2326":{"body":"","breadcrumbs":"Ctrl-C Implementation Notes » Maintenance Notes","id":"2326","title":"Maintenance Notes"},"2327":{"body":"When adding new sudo commands to the codebase: Wrap with do --ignore-errors + complete Check for exit code 1 + \\"password is required\\" Return false on cancellation Let caller handle the false return value Example template: let result = (do --ignore-errors { ^sudo new-command } | complete)\\nif $result.exit_code == 1 and ($result.stderr | str contains \\"password is required\\") { print \\"\\\\n⚠ Operation cancelled - sudo password required\\" return false\\n}","breadcrumbs":"Ctrl-C Implementation Notes » Adding New Sudo Commands","id":"2327","title":"Adding New Sudo Commands"},"2328":{"body":"Don\'t use exit : It kills the entire process Don\'t use mutable variables in closures : Use reduce instead Don\'t ignore return values : Always check and propagate Don\'t forget the pre-check warning : Users should know sudo is needed","breadcrumbs":"Ctrl-C Implementation Notes » Common Pitfalls","id":"2328","title":"Common Pitfalls"},"2329":{"body":"Sudo Credential Manager : Optionally use a credential manager (keychain, etc.) Sudo-less Mode : Alternative implementation that doesn\'t require root Timeout Handling : Detect when sudo times out waiting for password Multiple Password Attempts : Distinguish between CTRL-C and wrong password","breadcrumbs":"Ctrl-C Implementation Notes » Future Improvements","id":"2329","title":"Future Improvements"},"233":{"body":"","breadcrumbs":"Quick Start Cheatsheet » Performance Tips","id":"233","title":"Performance Tips"},"2330":{"body":"Nushell complete command: https://www.nushell.sh/commands/docs/complete.html Nushell reduce command: https://www.nushell.sh/commands/docs/reduce.html Sudo exit codes: man sudo (exit code 1 = authentication failure) POSIX signal conventions: SIGINT (CTRL-C) = 130","breadcrumbs":"Ctrl-C Implementation Notes » References","id":"2330","title":"References"},"2331":{"body":"provisioning/core/nulib/servers/ssh.nu - Core implementation provisioning/core/nulib/servers/create.nu - Calls on_server_ssh provisioning/core/nulib/servers/generate.nu - Calls on_server_ssh docs/troubleshooting/CTRL-C_SUDO_HANDLING.md - User-facing docs docs/quick-reference/SUDO_PASSWORD_HANDLING.md - Quick reference","breadcrumbs":"Ctrl-C Implementation Notes » Related Files","id":"2331","title":"Related Files"},"2332":{"body":"2025-01-XX : Initial implementation with return values (v2) 2025-01-XX : Fixed mutable variable capture with reduce pattern 2025-01-XX : First attempt with exit 130 (reverted, caused process termination)","breadcrumbs":"Ctrl-C Implementation Notes » Changelog","id":"2332","title":"Changelog"},"2333":{"body":"Status : ✅ Complete and Production-Ready Version : 1.0.0 Last Updated : 2025-12-10","breadcrumbs":"Auth Metadata Guide » Metadata-Driven Authentication System - Implementation Guide","id":"2333","title":"Metadata-Driven Authentication System - Implementation Guide"},"2334":{"body":"Overview Architecture Installation Usage Guide Migration Path Developer Guide Testing Troubleshooting","breadcrumbs":"Auth Metadata Guide » Table of Contents","id":"2334","title":"Table of Contents"},"2335":{"body":"This guide describes the metadata-driven authentication system implemented over 5 weeks across 14 command handlers and 12 major systems. The system provides: Centralized Metadata : All command definitions in Nickel with runtime validation Automatic Auth Checks : Pre-execution validation before handler logic Performance Optimization : 40-100x faster through metadata caching Flexible Deployment : Works with orchestrator, batch workflows, and direct CLI","breadcrumbs":"Auth Metadata Guide » Overview","id":"2335","title":"Overview"},"2336":{"body":"","breadcrumbs":"Auth Metadata Guide » Architecture","id":"2336","title":"Architecture"},"2337":{"body":"┌─────────────────────────────────────────────────────────────┐\\n│ User Command │\\n└────────────────────────────────┬──────────────────────────────┘ │ ┌────────────▼─────────────┐ │ CLI Dispatcher │ │ (main_provisioning) │ └────────────┬─────────────┘ │ ┌────────────▼─────────────┐ │ Metadata Loading │ │ (cached via traits.nu) │ └────────────┬─────────────┘ │ ┌────────────▼─────────────────────┐ │ Pre-Execution Validation │ │ - Auth checks │ │ - Permission validation │ │ - Operation type mapping │ └────────────┬─────────────────────┘ │ ┌────────────▼─────────────────────┐ │ Command Handler Execution │ │ - infrastructure.nu │ │ - orchestration.nu │ │ - workspace.nu │ └────────────┬─────────────────────┘ │ ┌────────────▼─────────────┐ │ Result/Response │ └─────────────────────────┘","breadcrumbs":"Auth Metadata Guide » System Components","id":"2337","title":"System Components"},"2338":{"body":"User Command → CLI Dispatcher Dispatcher → Load cached metadata (or parse Nickel) Validate → Check auth, operation type, permissions Execute → Call appropriate handler Return → Result to user","breadcrumbs":"Auth Metadata Guide » Data Flow","id":"2338","title":"Data Flow"},"2339":{"body":"Location : ~/.cache/provisioning/command_metadata.json Format : Serialized JSON (pre-parsed for speed) TTL : 1 hour (configurable via PROVISIONING_METADATA_TTL) Invalidation : Automatic on main.ncl modification Performance : 40-100x faster than Nickel parsing","breadcrumbs":"Auth Metadata Guide » Metadata Caching","id":"2339","title":"Metadata Caching"},"234":{"body":"# ❌ Slow: HTTP API (50 ms per call)\\nfor i in 1..100 { http post http://localhost:9998/encrypt { data: \\"secret\\" } } # ✅ Fast: Plugin (5 ms per call, 10x faster)\\nfor i in 1..100 { kms encrypt \\"secret\\" }","breadcrumbs":"Quick Start Cheatsheet » Use Plugins for Frequent Operations","id":"234","title":"Use Plugins for Frequent Operations"},"2340":{"body":"","breadcrumbs":"Auth Metadata Guide » Installation","id":"2340","title":"Installation"},"2341":{"body":"Nushell 0.109.0+ Nickel 1.15.0+ SOPS 3.10.2 (for encrypted configs) Age 1.2.1 (for encryption)","breadcrumbs":"Auth Metadata Guide » Prerequisites","id":"2341","title":"Prerequisites"},"2342":{"body":"# 1. Clone or update repository\\ngit clone https://github.com/your-org/project-provisioning.git\\ncd project-provisioning # 2. Initialize workspace\\n./provisioning/core/cli/provisioning workspace init # 3. Validate system\\n./provisioning/core/cli/provisioning validate config # 4. Run system checks\\n./provisioning/core/cli/provisioning health # 5. Run test suites\\nnu tests/test-fase5-e2e.nu\\nnu tests/test-security-audit-day20.nu\\nnu tests/test-metadata-cache-benchmark.nu","breadcrumbs":"Auth Metadata Guide » Installation Steps","id":"2342","title":"Installation Steps"},"2343":{"body":"","breadcrumbs":"Auth Metadata Guide » Usage Guide","id":"2343","title":"Usage Guide"},"2344":{"body":"# Initialize authentication\\nprovisioning login # Enroll in MFA\\nprovisioning mfa totp enroll # Create infrastructure\\nprovisioning server create --name web-01 --plan 1xCPU-2 GB # Deploy with orchestrator\\nprovisioning workflow submit workflows/deployment.ncl --orchestrated # Batch operations\\nprovisioning batch submit workflows/batch-deploy.ncl # Check without executing\\nprovisioning server create --name test --check","breadcrumbs":"Auth Metadata Guide » Basic Commands","id":"2344","title":"Basic Commands"},"2345":{"body":"# 1. Login (required for production operations)\\n$ provisioning login\\nUsername: alice@example.com\\nPassword: **** # 2. Optional: Setup MFA\\n$ provisioning mfa totp enroll\\nScan QR code with authenticator app\\nVerify code: 123456 # 3. Use commands (auth checks happen automatically)\\n$ provisioning server delete --name old-server --infra production\\nAuth check: Check auth for production (delete operation)\\nAre you sure? [yes/no] yes\\n✓ Server deleted # 4. All destructive operations require auth\\n$ provisioning taskserv delete postgres web-01\\nAuth check: Check auth for destructive operation\\n✓ Taskserv deleted","breadcrumbs":"Auth Metadata Guide » Authentication Flow","id":"2345","title":"Authentication Flow"},"2346":{"body":"# Dry-run without auth checks\\nprovisioning server create --name test --check # Output: Shows what would happen, no auth checks\\nDry-run mode - no changes will be made\\n✓ Would create server: test\\n✓ Would deploy taskservs: []","breadcrumbs":"Auth Metadata Guide » Check Mode (Bypass Auth for Testing)","id":"2346","title":"Check Mode (Bypass Auth for Testing)"},"2347":{"body":"# Automated mode - skip confirmations\\nprovisioning server create --name web-01 --yes # Batch operations\\nprovisioning batch submit workflows/batch.ncl --yes --check # With environment variable\\nPROVISIONING_NON_INTERACTIVE=1 provisioning server create --name web-02 --yes","breadcrumbs":"Auth Metadata Guide » Non-Interactive CI/CD Mode","id":"2347","title":"Non-Interactive CI/CD Mode"},"2348":{"body":"","breadcrumbs":"Auth Metadata Guide » Migration Path","id":"2348","title":"Migration Path"},"2349":{"body":"Old Pattern (Before Fase 5): # Hardcoded auth check\\nlet response = (input \\"Delete server? (yes/no): \\")\\nif $response != \\"yes\\" { exit 1 } # No metadata - auth unknown\\nexport def delete-server [name: string, --yes] { if not $yes { ... manual confirmation ... } # ... deletion logic ...\\n} New Pattern (After Fase 5): # Metadata header\\n# [command]\\n# name = \\"server delete\\"\\n# group = \\"infrastructure\\"\\n# tags = [\\"server\\", \\"delete\\", \\"destructive\\"]\\n# version = \\"1.0.0\\" # Automatic auth check from metadata\\nexport def delete-server [name: string, --yes] { # Pre-execution check happens in dispatcher # Auth enforcement via metadata # Operation type: \\"delete\\" automatically detected # ... deletion logic ...\\n}","breadcrumbs":"Auth Metadata Guide » Phase 1: From Old input to Metadata","id":"2349","title":"Phase 1: From Old input to Metadata"},"235":{"body":"# Use batch workflows for multiple operations\\nprovisioning batch submit workflows/multi-cloud-deploy.ncl","breadcrumbs":"Quick Start Cheatsheet » Batch Operations","id":"235","title":"Batch Operations"},"2350":{"body":"For each script that was migrated: Add metadata header after shebang: #!/usr/bin/env nu\\n# [command]\\n# name = \\"server create\\"\\n# group = \\"infrastructure\\"\\n# tags = [\\"server\\", \\"create\\", \\"interactive\\"]\\n# version = \\"1.0.0\\" export def create-server [name: string] { # Logic here\\n} Register in provisioning/schemas/main.ncl: let server_create = { name = \\"server create\\", domain = \\"infrastructure\\", description = \\"Create a new server\\", requirements = { interactive = false, requires_auth = true, auth_type = \\"jwt\\", side_effect_type = \\"create\\", min_permission = \\"write\\", },\\n} in\\nserver_create Handler integration (happens in dispatcher): # Dispatcher automatically:\\n# 1. Loads metadata for \\"server create\\"\\n# 2. Validates auth based on requirements\\n# 3. Checks permission levels\\n# 4. Calls handler if validation passes","breadcrumbs":"Auth Metadata Guide » Phase 2: Adding Metadata Headers","id":"2350","title":"Phase 2: Adding Metadata Headers"},"2351":{"body":"# Validate metadata headers\\nnu utils/validate-metadata-headers.nu # Find scripts by tag\\nnu utils/search-scripts.nu by-tag destructive # Find all scripts in group\\nnu utils/search-scripts.nu by-group infrastructure # Find scripts with multiple tags\\nnu utils/search-scripts.nu by-tags server delete # List all migrated scripts\\nnu utils/search-scripts.nu list","breadcrumbs":"Auth Metadata Guide » Phase 3: Validating Migration","id":"2351","title":"Phase 3: Validating Migration"},"2352":{"body":"","breadcrumbs":"Auth Metadata Guide » Developer Guide","id":"2352","title":"Developer Guide"},"2353":{"body":"Step 1: Create metadata in main.ncl let new_feature_command = { name = \\"feature command\\", domain = \\"infrastructure\\", description = \\"My new feature\\", requirements = { interactive = false, requires_auth = true, auth_type = \\"jwt\\", side_effect_type = \\"create\\", min_permission = \\"write\\", },\\n} in\\nnew_feature_command Step 2: Add metadata header to script #!/usr/bin/env nu\\n# [command]\\n# name = \\"feature command\\"\\n# group = \\"infrastructure\\"\\n# tags = [\\"feature\\", \\"create\\"]\\n# version = \\"1.0.0\\" export def feature-command [param: string] { # Implementation\\n} Step 3: Implement handler function # Handler registered in dispatcher\\nexport def handle-feature-command [ action: string --flags\\n]: nothing -> nothing { # Dispatcher handles: # 1. Metadata validation # 2. Auth checks # 3. Permission validation # Your logic here\\n} Step 4: Test with check mode # Dry-run without auth\\nprovisioning feature command --check # Full execution\\nprovisioning feature command --yes","breadcrumbs":"Auth Metadata Guide » Adding New Commands with Metadata","id":"2353","title":"Adding New Commands with Metadata"},"2354":{"body":"Field Type Required Description name string Yes Command canonical name domain string Yes Command category (infrastructure, orchestration, etc.) description string Yes Human-readable description requires_auth bool Yes Whether auth is required auth_type enum Yes \\"none\\", \\"jwt\\", \\"mfa\\", \\"cedar\\" side_effect_type enum Yes \\"none\\", \\"create\\", \\"update\\", \\"delete\\", \\"deploy\\" min_permission enum Yes \\"read\\", \\"write\\", \\"admin\\", \\"superadmin\\" interactive bool No Whether command requires user input slow_operation bool No Whether operation takes >60 seconds","breadcrumbs":"Auth Metadata Guide » Metadata Field Reference","id":"2354","title":"Metadata Field Reference"},"2355":{"body":"Groups : infrastructure - Server, taskserv, cluster operations orchestration - Workflow, batch operations workspace - Workspace management authentication - Auth, MFA, tokens utilities - Helper commands Operations : create, read, update, delete - CRUD operations destructive - Irreversible operations interactive - Requires user input Performance : slow - Operation >60 seconds optimizable - Candidate for optimization","breadcrumbs":"Auth Metadata Guide » Standard Tags","id":"2355","title":"Standard Tags"},"2356":{"body":"Pattern 1: For Long Operations # Use orchestrator for operations >2 seconds\\nif (get-operation-duration \\"my-operation\\") > 2000 { submit-to-orchestrator $operation return \\"Operation submitted in background\\"\\n} Pattern 2: For Batch Operations # Use batch workflows for multiple operations\\nnu -c \\"\\nuse core/nulib/workflows/batch.nu *\\nbatch submit workflows/batch-deploy.ncl --parallel-limit 5\\n\\" Pattern 3: For Metadata Overhead # Cache hit rate optimization\\n# Current: 40-100x faster with warm cache\\n# Target: >95% cache hit rate\\n# Achieved: Metadata stays in cache for 1 hour (TTL)","breadcrumbs":"Auth Metadata Guide » Performance Optimization Patterns","id":"2356","title":"Performance Optimization Patterns"},"2357":{"body":"","breadcrumbs":"Auth Metadata Guide » Testing","id":"2357","title":"Testing"},"2358":{"body":"# End-to-End Integration Tests\\nnu tests/test-fase5-e2e.nu # Security Audit\\nnu tests/test-security-audit-day20.nu # Performance Benchmarks\\nnu tests/test-metadata-cache-benchmark.nu # Run all tests\\nfor test in tests/test-*.nu { nu $test }","breadcrumbs":"Auth Metadata Guide » Running Tests","id":"2358","title":"Running Tests"},"2359":{"body":"Test Suite Category Coverage E2E Tests Integration 7 test groups, 40+ checks Security Audit Auth 5 audit categories, 100% pass Benchmarks Performance 6 benchmark categories","breadcrumbs":"Auth Metadata Guide » Test Coverage","id":"2359","title":"Test Coverage"},"236":{"body":"# Always test with --check first\\nprovisioning server create --check\\nprovisioning server create # Only after verification","breadcrumbs":"Quick Start Cheatsheet » Check Mode for Testing","id":"236","title":"Check Mode for Testing"},"2360":{"body":"✅ All tests pass ✅ No Nushell syntax violations ✅ Cache hit rate >95% ✅ Auth enforcement 100% ✅ Performance baselines met","breadcrumbs":"Auth Metadata Guide » Expected Results","id":"2360","title":"Expected Results"},"2361":{"body":"","breadcrumbs":"Auth Metadata Guide » Troubleshooting","id":"2361","title":"Troubleshooting"},"2362":{"body":"Solution : Ensure metadata is registered in main.ncl # Check if command is in metadata\\ngrep \\"command_name\\" provisioning/schemas/main.ncl","breadcrumbs":"Auth Metadata Guide » Issue: Command not found","id":"2362","title":"Issue: Command not found"},"2363":{"body":"Solution : Verify user has required permission level # Check current user permissions\\nprovisioning auth whoami # Check command requirements\\nnu -c \\"\\nuse core/nulib/lib_provisioning/commands/traits.nu *\\nget-command-metadata \'server create\'\\n\\"","breadcrumbs":"Auth Metadata Guide » Issue: Auth check failing","id":"2363","title":"Issue: Auth check failing"},"2364":{"body":"Solution : Check cache status # Force cache reload\\nrm ~/.cache/provisioning/command_metadata.json # Check cache hit rate\\nnu tests/test-metadata-cache-benchmark.nu","breadcrumbs":"Auth Metadata Guide » Issue: Slow command execution","id":"2364","title":"Issue: Slow command execution"},"2365":{"body":"Solution : Run compliance check # Validate Nushell compliance\\nnu --ide-check 100 # Check for common issues\\ngrep \\"try {\\" # Should be empty\\ngrep \\"let mut\\" # Should be empty","breadcrumbs":"Auth Metadata Guide » Issue: Nushell syntax error","id":"2365","title":"Issue: Nushell syntax error"},"2366":{"body":"","breadcrumbs":"Auth Metadata Guide » Performance Characteristics","id":"2366","title":"Performance Characteristics"},"2367":{"body":"Operation Cold Warm Improvement Metadata Load 200 ms 2-5 ms 40-100x Auth Check <5 ms <5 ms Same Command Dispatch <10 ms <10 ms Same Total Command ~210 ms ~10 ms 21x","breadcrumbs":"Auth Metadata Guide » Baseline Metrics","id":"2367","title":"Baseline Metrics"},"2368":{"body":"Scenario: 20 sequential commands Without cache: 20 × 200 ms = 4 seconds With cache: 1 × 200 ms + 19 × 5 ms = 295 ms Speedup: ~13.5x faster","breadcrumbs":"Auth Metadata Guide » Real-World Impact","id":"2368","title":"Real-World Impact"},"2369":{"body":"Deploy : Use installer to deploy to production Monitor : Watch cache hit rates (target >95%) Extend : Add new commands following migration pattern Optimize : Use profiling to identify slow operations Maintain : Run validation scripts regularly For Support : See docs/troubleshooting-guide.md For Architecture : See docs/architecture/ For User Guide : See docs/user/AUTHENTICATION_LAYER_GUIDE.md","breadcrumbs":"Auth Metadata Guide » Next Steps","id":"2369","title":"Next Steps"},"237":{"body":"","breadcrumbs":"Quick Start Cheatsheet » Help System","id":"237","title":"Help System"},"2370":{"body":"Version : 0.2.0 Date : 2025-10-08 Status : Active","breadcrumbs":"KMS Simplification » KMS Simplification Migration Guide","id":"2370","title":"KMS Simplification Migration Guide"},"2371":{"body":"The KMS service has been simplified from supporting 4 backends (Vault, AWS KMS, Age, Cosmian) to supporting only 2 backends: Age : Development and local testing Cosmian KMS : Production deployments This simplification reduces complexity, removes unnecessary cloud provider dependencies, and provides a clearer separation between development and production use cases.","breadcrumbs":"KMS Simplification » Overview","id":"2371","title":"Overview"},"2372":{"body":"","breadcrumbs":"KMS Simplification » What Changed","id":"2372","title":"What Changed"},"2373":{"body":"❌ HashiCorp Vault backend (src/vault/) ❌ AWS KMS backend (src/aws/) ❌ AWS SDK dependencies (aws-sdk-kms, aws-config, aws-credential-types) ❌ Envelope encryption helpers (AWS-specific) ❌ Complex multi-backend configuration","breadcrumbs":"KMS Simplification » Removed","id":"2373","title":"Removed"},"2374":{"body":"✅ Age backend for development (src/age/) ✅ Cosmian KMS backend for production (src/cosmian/) ✅ Simplified configuration (provisioning/config/kms.toml) ✅ Clear dev/prod separation ✅ Better error messages","breadcrumbs":"KMS Simplification » Added","id":"2374","title":"Added"},"2375":{"body":"🔄 KmsBackendConfig enum (now only Age and Cosmian) 🔄 KmsError enum (removed Vault/AWS-specific errors) 🔄 Service initialization logic 🔄 README and documentation 🔄 Cargo.toml dependencies","breadcrumbs":"KMS Simplification » Modified","id":"2375","title":"Modified"},"2376":{"body":"","breadcrumbs":"KMS Simplification » Why This Change","id":"2376","title":"Why This Change"},"2377":{"body":"Unnecessary Complexity : 4 backends for simple use cases Cloud Lock-in : AWS KMS dependency limited flexibility Operational Overhead : Vault requires server setup even for dev Dependency Bloat : AWS SDK adds significant compile time Unclear Use Cases : When to use which backend?","breadcrumbs":"KMS Simplification » Problems with Previous Approach","id":"2377","title":"Problems with Previous Approach"},"2378":{"body":"Clear Separation : Age = dev, Cosmian = prod Faster Compilation : Removed AWS SDK (saves ~30 s) Offline Development : Age works without network Enterprise Security : Cosmian provides confidential computing Easier Maintenance : 2 backends instead of 4","breadcrumbs":"KMS Simplification » Benefits of Simplified Approach","id":"2378","title":"Benefits of Simplified Approach"},"2379":{"body":"","breadcrumbs":"KMS Simplification » Migration Steps","id":"2379","title":"Migration Steps"},"238":{"body":"# Show help for specific command\\nprovisioning help server\\nprovisioning help taskserv\\nprovisioning help cluster\\nprovisioning help workflow\\nprovisioning help batch # Show help for command category\\nprovisioning help infra\\nprovisioning help orch\\nprovisioning help dev\\nprovisioning help ws\\nprovisioning help config","breadcrumbs":"Quick Start Cheatsheet » Command-Specific Help","id":"238","title":"Command-Specific Help"},"2380":{"body":"If you were using Vault or AWS KMS for development: Step 1: Install Age # macOS\\nbrew install age # Ubuntu/Debian\\napt install age # From source\\ngo install filippo.io/age/cmd/...@latest Step 2: Generate Age Keys mkdir -p ~/.config/provisioning/age\\nage-keygen -o ~/.config/provisioning/age/private_key.txt\\nage-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisioning/age/public_key.txt Step 3: Update Configuration Replace your old Vault/AWS config: Old (Vault) : [kms]\\ntype = \\"vault\\"\\naddress = \\"http://localhost:8200\\"\\ntoken = \\"${VAULT_TOKEN}\\"\\nmount_point = \\"transit\\" New (Age) : [kms]\\nenvironment = \\"dev\\" [kms.age]\\npublic_key_path = \\"~/.config/provisioning/age/public_key.txt\\"\\nprivate_key_path = \\"~/.config/provisioning/age/private_key.txt\\" Step 4: Re-encrypt Development Secrets # Export old secrets (if using Vault)\\nvault kv get -format=json secret/dev > dev-secrets.json # Encrypt with Age\\ncat dev-secrets.json | age -r $(cat ~/.config/provisioning/age/public_key.txt) > dev-secrets.age # Test decryption\\nage -d -i ~/.config/provisioning/age/private_key.txt dev-secrets.age","breadcrumbs":"KMS Simplification » For Development Environments","id":"2380","title":"For Development Environments"},"2381":{"body":"If you were using Vault or AWS KMS for production: Step 1: Set Up Cosmian KMS Choose one of these options: Option A: Cosmian Cloud (Managed) # Sign up at https://cosmian.com\\n# Get API credentials\\nexport COSMIAN_KMS_URL=https://kms.cosmian.cloud\\nexport COSMIAN_API_KEY=your-api-key Option B: Self-Hosted Cosmian KMS # Deploy Cosmian KMS server\\n# See: https://docs.cosmian.com/kms/deployment/ # Configure endpoint\\nexport COSMIAN_KMS_URL=https://kms.example.com\\nexport COSMIAN_API_KEY=your-api-key Step 2: Create Master Key in Cosmian # Using Cosmian CLI\\ncosmian-kms create-key \\\\ --algorithm AES \\\\ --key-length 256 \\\\ --key-id provisioning-master-key # Or via API\\ncurl -X POST $COSMIAN_KMS_URL/api/v1/keys \\\\ -H \\"X-API-Key: $COSMIAN_API_KEY\\" \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"algorithm\\": \\"AES\\", \\"keyLength\\": 256, \\"keyId\\": \\"provisioning-master-key\\" }\' Step 3: Migrate Production Secrets From Vault to Cosmian : # Export secrets from Vault\\nvault kv get -format=json secret/prod > prod-secrets.json # Import to Cosmian\\n# (Use temporary Age encryption for transfer)\\ncat prod-secrets.json | \\\\ age -r $(cat ~/.config/provisioning/age/public_key.txt) | \\\\ base64 > prod-secrets.enc # On production server with Cosmian\\ncat prod-secrets.enc | \\\\ base64 -d | \\\\ age -d -i ~/.config/provisioning/age/private_key.txt | \\\\ # Re-encrypt with Cosmian curl -X POST $COSMIAN_KMS_URL/api/v1/encrypt \\\\ -H \\"X-API-Key: $COSMIAN_API_KEY\\" \\\\ -d @- From AWS KMS to Cosmian : # Decrypt with AWS KMS\\naws kms decrypt \\\\ --ciphertext-blob fileb://encrypted-data \\\\ --output text \\\\ --query Plaintext | \\\\ base64 -d > plaintext-data # Encrypt with Cosmian\\ncurl -X POST $COSMIAN_KMS_URL/api/v1/encrypt \\\\ -H \\"X-API-Key: $COSMIAN_API_KEY\\" \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \\"{\\\\\\"keyId\\\\\\":\\\\\\"provisioning-master-key\\\\\\",\\\\\\"data\\\\\\":\\\\\\"$(base64 plaintext-data)\\\\\\"}\\" Step 4: Update Production Configuration Old (AWS KMS) : [kms]\\ntype = \\"aws-kms\\"\\nregion = \\"us-east-1\\"\\nkey_id = \\"arn:aws:kms:us-east-1:123456789012:key/...\\" New (Cosmian) : [kms]\\nenvironment = \\"prod\\" [kms.cosmian]\\nserver_url = \\"${COSMIAN_KMS_URL}\\"\\napi_key = \\"${COSMIAN_API_KEY}\\"\\ndefault_key_id = \\"provisioning-master-key\\"\\ntls_verify = true\\nuse_confidential_computing = false # Enable if using SGX/SEV Step 5: Test Production Setup # Set environment\\nexport PROVISIONING_ENV=prod\\nexport COSMIAN_KMS_URL=https://kms.example.com\\nexport COSMIAN_API_KEY=your-api-key # Start KMS service\\ncargo run --bin kms-service # Test encryption\\ncurl -X POST http://localhost:8082/api/v1/kms/encrypt \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{\\"plaintext\\":\\"SGVsbG8=\\",\\"context\\":\\"env=prod\\"}\' # Test decryption\\ncurl -X POST http://localhost:8082/api/v1/kms/decrypt \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{\\"ciphertext\\":\\"...\\",\\"context\\":\\"env=prod\\"}\'","breadcrumbs":"KMS Simplification » For Production Environments","id":"2381","title":"For Production Environments"},"2382":{"body":"","breadcrumbs":"KMS Simplification » Configuration Comparison","id":"2382","title":"Configuration Comparison"},"2383":{"body":"# Development could use any backend\\n[kms]\\ntype = \\"vault\\" # or \\"aws-kms\\"\\naddress = \\"http://localhost:8200\\"\\ntoken = \\"${VAULT_TOKEN}\\" # Production used Vault or AWS\\n[kms]\\ntype = \\"aws-kms\\"\\nregion = \\"us-east-1\\"\\nkey_id = \\"arn:aws:kms:...\\"","breadcrumbs":"KMS Simplification » Before (4 Backends)","id":"2383","title":"Before (4 Backends)"},"2384":{"body":"# Clear environment-based selection\\n[kms]\\ndev_backend = \\"age\\"\\nprod_backend = \\"cosmian\\"\\nenvironment = \\"${PROVISIONING_ENV:-dev}\\" # Age for development\\n[kms.age]\\npublic_key_path = \\"~/.config/provisioning/age/public_key.txt\\"\\nprivate_key_path = \\"~/.config/provisioning/age/private_key.txt\\" # Cosmian for production\\n[kms.cosmian]\\nserver_url = \\"${COSMIAN_KMS_URL}\\"\\napi_key = \\"${COSMIAN_API_KEY}\\"\\ndefault_key_id = \\"provisioning-master-key\\"\\ntls_verify = true","breadcrumbs":"KMS Simplification » After (2 Backends)","id":"2384","title":"After (2 Backends)"},"2385":{"body":"","breadcrumbs":"KMS Simplification » Breaking Changes","id":"2385","title":"Breaking Changes"},"2386":{"body":"Removed Functions generate_data_key() - Now only available with Cosmian backend envelope_encrypt() - AWS-specific, removed envelope_decrypt() - AWS-specific, removed rotate_key() - Now handled server-side by Cosmian Changed Error Types Before : KmsError::VaultError(String)\\nKmsError::AwsKmsError(String) After : KmsError::AgeError(String)\\nKmsError::CosmianError(String) Updated Configuration Enum Before : enum KmsBackendConfig { Vault { address, token, mount_point, ... }, AwsKms { region, key_id, assume_role },\\n} After : enum KmsBackendConfig { Age { public_key_path, private_key_path }, Cosmian { server_url, api_key, default_key_id, tls_verify },\\n}","breadcrumbs":"KMS Simplification » API Changes","id":"2386","title":"API Changes"},"2387":{"body":"","breadcrumbs":"KMS Simplification » Code Migration","id":"2387","title":"Code Migration"},"2388":{"body":"Before (AWS KMS) : use kms_service::{KmsService, KmsBackendConfig}; let config = KmsBackendConfig::AwsKms { region: \\"us-east-1\\".to_string(), key_id: \\"arn:aws:kms:...\\".to_string(), assume_role: None,\\n}; let kms = KmsService::new(config).await?; After (Cosmian) : use kms_service::{KmsService, KmsBackendConfig}; let config = KmsBackendConfig::Cosmian { server_url: env::var(\\"COSMIAN_KMS_URL\\")?, api_key: env::var(\\"COSMIAN_API_KEY\\")?, default_key_id: \\"provisioning-master-key\\".to_string(), tls_verify: true,\\n}; let kms = KmsService::new(config).await?;","breadcrumbs":"KMS Simplification » Rust Code","id":"2388","title":"Rust Code"},"2389":{"body":"Before (Vault) : # Set Vault environment\\n$env.VAULT_ADDR = \\"http://localhost:8200\\"\\n$env.VAULT_TOKEN = \\"root\\" # Use KMS\\nkms encrypt \\"secret-data\\" After (Age for dev) : # Set environment\\n$env.PROVISIONING_ENV = \\"dev\\" # Age keys automatically loaded from config\\nkms encrypt \\"secret-data\\"","breadcrumbs":"KMS Simplification » Nushell Code","id":"2389","title":"Nushell Code"},"239":{"body":"# All these work identically:\\nprovisioning help workspace\\nprovisioning workspace help\\nprovisioning ws help\\nprovisioning help ws","breadcrumbs":"Quick Start Cheatsheet » Bi-Directional Help","id":"239","title":"Bi-Directional Help"},"2390":{"body":"If you need to rollback to Vault/AWS KMS: # Checkout previous version\\ngit checkout tags/v0.1.0 # Rebuild with old dependencies\\ncd provisioning/platform/kms-service\\ncargo clean\\ncargo build --release # Restore old configuration\\ncp provisioning/config/kms.toml.backup provisioning/config/kms.toml","breadcrumbs":"KMS Simplification » Rollback Plan","id":"2390","title":"Rollback Plan"},"2391":{"body":"","breadcrumbs":"KMS Simplification » Testing the Migration","id":"2391","title":"Testing the Migration"},"2392":{"body":"# 1. Generate Age keys\\nage-keygen -o /tmp/test_private.txt\\nage-keygen -y /tmp/test_private.txt > /tmp/test_public.txt # 2. Test encryption\\necho \\"test-data\\" | age -r $(cat /tmp/test_public.txt) > /tmp/encrypted # 3. Test decryption\\nage -d -i /tmp/test_private.txt /tmp/encrypted # 4. Start KMS service with test keys\\nexport PROVISIONING_ENV=dev\\n# Update config to point to /tmp keys\\ncargo run --bin kms-service","breadcrumbs":"KMS Simplification » Development Testing","id":"2392","title":"Development Testing"},"2393":{"body":"# 1. Set up test Cosmian instance\\nexport COSMIAN_KMS_URL=https://kms-staging.example.com\\nexport COSMIAN_API_KEY=test-api-key # 2. Create test key\\ncosmian-kms create-key --key-id test-key --algorithm AES --key-length 256 # 3. Test encryption\\ncurl -X POST $COSMIAN_KMS_URL/api/v1/encrypt \\\\ -H \\"X-API-Key: $COSMIAN_API_KEY\\" \\\\ -d \'{\\"keyId\\":\\"test-key\\",\\"data\\":\\"dGVzdA==\\"}\' # 4. Start KMS service\\nexport PROVISIONING_ENV=prod\\ncargo run --bin kms-service","breadcrumbs":"KMS Simplification » Production Testing","id":"2393","title":"Production Testing"},"2394":{"body":"","breadcrumbs":"KMS Simplification » Troubleshooting","id":"2394","title":"Troubleshooting"},"2395":{"body":"# Check keys exist\\nls -la ~/.config/provisioning/age/ # Regenerate if missing\\nage-keygen -o ~/.config/provisioning/age/private_key.txt\\nage-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisioning/age/public_key.txt","breadcrumbs":"KMS Simplification » Age Keys Not Found","id":"2395","title":"Age Keys Not Found"},"2396":{"body":"# Check network connectivity\\ncurl -v $COSMIAN_KMS_URL/api/v1/health # Verify API key\\ncurl $COSMIAN_KMS_URL/api/v1/version \\\\ -H \\"X-API-Key: $COSMIAN_API_KEY\\" # Check TLS certificate\\nopenssl s_client -connect kms.example.com:443","breadcrumbs":"KMS Simplification » Cosmian Connection Failed","id":"2396","title":"Cosmian Connection Failed"},"2397":{"body":"# Clean and rebuild\\ncd provisioning/platform/kms-service\\ncargo clean\\ncargo update\\ncargo build --release","breadcrumbs":"KMS Simplification » Compilation Errors","id":"2397","title":"Compilation Errors"},"2398":{"body":"Documentation : See README.md Issues : Report on project issue tracker Cosmian Support : https://docs.cosmian.com/support/","breadcrumbs":"KMS Simplification » Support","id":"2398","title":"Support"},"2399":{"body":"2025-10-08 : Migration guide published 2025-10-15 : Deprecation notices for Vault/AWS 2025-11-01 : Old backends removed from codebase 2025-11-15 : Migration complete, old configs unsupported","breadcrumbs":"KMS Simplification » Timeline","id":"2399","title":"Timeline"},"24":{"body":"Multi-cloud support (AWS, UpCloud, Local) Declarative configuration with Nickel Automated dependency resolution Batch operations with rollback","breadcrumbs":"Home » ✅ Infrastructure Automation","id":"24","title":"✅ Infrastructure Automation"},"240":{"body":"# Show all commands\\nprovisioning help\\nprovisioning --help # Show version\\nprovisioning version\\nprovisioning --version","breadcrumbs":"Quick Start Cheatsheet » General Help","id":"240","title":"General Help"},"2400":{"body":"Q: Can I still use Vault if I really need to? A: No, Vault support has been removed. Use Age for dev or Cosmian for prod. Q: What about AWS KMS for existing deployments? A: Migrate to Cosmian KMS. The API is similar, and migration tools are provided. Q: Is Age secure enough for production? A: No. Age is designed for development only. Use Cosmian KMS for production. Q: Does Cosmian support confidential computing? A: Yes, Cosmian KMS supports SGX and SEV for confidential computing workloads. Q: How much does Cosmian cost? A: Cosmian offers both cloud and self-hosted options. Contact Cosmian for pricing. Q: Can I use my own KMS backend? A: Not currently supported. Only Age and Cosmian are available.","breadcrumbs":"KMS Simplification » FAQs","id":"2400","title":"FAQs"},"2401":{"body":"Use this checklist to track your migration:","breadcrumbs":"KMS Simplification » Checklist","id":"2401","title":"Checklist"},"2402":{"body":"Install Age (brew install age or equivalent) Generate Age keys (age-keygen) Update provisioning/config/kms.toml to use Age backend Export secrets from Vault/AWS (if applicable) Re-encrypt secrets with Age Test KMS service startup Test encrypt/decrypt operations Update CI/CD pipelines (if applicable) Update documentation","breadcrumbs":"KMS Simplification » Development Migration","id":"2402","title":"Development Migration"},"2403":{"body":"Set up Cosmian KMS server (cloud or self-hosted) Create master key in Cosmian Export production secrets from Vault/AWS Re-encrypt secrets with Cosmian Update provisioning/config/kms.toml to use Cosmian backend Set environment variables (COSMIAN_KMS_URL, COSMIAN_API_KEY) Test KMS service startup in staging Test encrypt/decrypt operations in staging Load test Cosmian integration Update production deployment configs Deploy to production Verify all secrets accessible Decommission old KMS infrastructure","breadcrumbs":"KMS Simplification » Production Migration","id":"2403","title":"Production Migration"},"2404":{"body":"The KMS simplification reduces complexity while providing better separation between development and production use cases. Age offers a fast, offline solution for development, while Cosmian KMS provides enterprise-grade security for production deployments. For questions or issues, please refer to the documentation or open an issue.","breadcrumbs":"KMS Simplification » Conclusion","id":"2404","title":"Conclusion"},"2405":{"body":"Last Updated : 2025-10-10 Version : 1.0.0 This glossary defines key terminology used throughout the Provisioning Platform documentation. Terms are listed alphabetically with definitions, usage context, and cross-references to related documentation.","breadcrumbs":"Glossary » Provisioning Platform Glossary","id":"2405","title":"Provisioning Platform Glossary"},"2406":{"body":"","breadcrumbs":"Glossary » A","id":"2406","title":"A"},"2407":{"body":"Definition : Documentation of significant architectural decisions, including context, decision, and consequences. Where Used : Architecture planning and review Technical decision-making process System design documentation Related Concepts : Architecture, Design Patterns, Technical Debt Examples : ADR-001: Project Structure ADR-006: CLI Refactoring ADR-009: Complete Security System See Also : Architecture Documentation","breadcrumbs":"Glossary » ADR (Architecture Decision Record)","id":"2407","title":"ADR (Architecture Decision Record)"},"2408":{"body":"Definition : A specialized component that performs a specific task in the system orchestration (for example, autonomous execution units in the orchestrator). Where Used : Task orchestration Workflow management Parallel execution patterns Related Concepts : Orchestrator, Workflow, Task See Also : Orchestrator Architecture","breadcrumbs":"Glossary » Agent","id":"2408","title":"Agent"},"2409":{"body":"Definition : An internal document link to a specific section within the same or different markdown file using the # symbol. Where Used : Cross-referencing documentation sections Table of contents generation Navigation within long documents Related Concepts : Internal Link, Cross-Reference, Documentation Examples : [See Installation](#installation) - Same document [Configuration Guide](config.md#setup) - Different document","breadcrumbs":"Glossary » Anchor Link","id":"2409","title":"Anchor Link"},"241":{"body":"Flag Short Description Example --debug -x Enable debug mode provisioning server create --debug --check -c Check mode (dry run) provisioning server create --check --yes -y Auto-confirm provisioning server delete --yes --wait -w Wait for completion provisioning server create --wait --infra -i Specify infrastructure provisioning server list --infra prod --out - Output format provisioning server list --out json","breadcrumbs":"Quick Start Cheatsheet » Quick Reference: Common Flags","id":"241","title":"Quick Reference: Common Flags"},"2410":{"body":"Definition : Platform service that provides unified REST API access to provisioning operations. Where Used : External system integration Web Control Center backend MCP server communication Related Concepts : REST API, Platform Service, Orchestrator Location : provisioning/platform/api-gateway/ See Also : REST API Documentation","breadcrumbs":"Glossary » API Gateway","id":"2410","title":"API Gateway"},"2411":{"body":"Definition : The process of verifying user identity using JWT tokens, MFA, and secure session management. Where Used : User login flows API access control CLI session management Related Concepts : Authorization, JWT, MFA, Security See Also : Authentication Layer Guide Auth Quick Reference","breadcrumbs":"Glossary » Auth (Authentication)","id":"2411","title":"Auth (Authentication)"},"2412":{"body":"Definition : The process of determining user permissions using Cedar policy language. Where Used : Access control decisions Resource permission checks Multi-tenant security Related Concepts : Auth, Cedar, Policies, RBAC See Also : Cedar Authorization Implementation","breadcrumbs":"Glossary » Authorization","id":"2412","title":"Authorization"},"2413":{"body":"","breadcrumbs":"Glossary » B","id":"2413","title":"B"},"2414":{"body":"Definition : A collection of related infrastructure operations executed as a single workflow unit. Where Used : Multi-server deployments Cluster creation Bulk taskserv installation Related Concepts : Workflow, Operation, Orchestrator Commands : provisioning batch submit workflow.ncl\\nprovisioning batch list\\nprovisioning batch status See Also : Batch Workflow System","breadcrumbs":"Glossary » Batch Operation","id":"2414","title":"Batch Operation"},"2415":{"body":"Definition : Emergency access mechanism requiring multi-party approval for critical operations. Where Used : Emergency system access Incident response Security override scenarios Related Concepts : Security, Compliance, Audit Commands : provisioning break-glass request \\"reason\\"\\nprovisioning break-glass approve See Also : Break-Glass Training Guide","breadcrumbs":"Glossary » Break-Glass","id":"2415","title":"Break-Glass"},"2416":{"body":"","breadcrumbs":"Glossary » C","id":"2416","title":"C"},"2417":{"body":"Definition : Amazon\'s policy language used for fine-grained authorization decisions. Where Used : Authorization policies Access control rules Resource permissions Related Concepts : Authorization, Policies, Security See Also : Cedar Authorization Implementation","breadcrumbs":"Glossary » Cedar","id":"2417","title":"Cedar"},"2418":{"body":"Definition : A saved state of a workflow allowing resume from point of failure. Where Used : Workflow recovery Long-running operations Batch processing Related Concepts : Workflow, State Management, Recovery See Also : Batch Workflow System","breadcrumbs":"Glossary » Checkpoint","id":"2418","title":"Checkpoint"},"2419":{"body":"Definition : The provisioning command-line tool providing access to all platform operations. Where Used : Daily operations Script automation CI/CD pipelines Related Concepts : Command, Shortcut, Module Location : provisioning/core/cli/provisioning Examples : provisioning server create\\nprovisioning taskserv install kubernetes\\nprovisioning workspace switch prod See Also : CLI Reference CLI Reference","breadcrumbs":"Glossary » CLI (Command-Line Interface)","id":"2419","title":"CLI (Command-Line Interface)"},"242":{"body":"# Build all plugins (one-time setup)\\ncd provisioning/core/plugins/nushell-plugins\\ncargo build --release --all # Register plugins\\nplugin add target/release/nu_plugin_auth\\nplugin add target/release/nu_plugin_kms\\nplugin add target/release/nu_plugin_orchestrator # Verify installation\\nplugin list | where name =~ \\"auth|kms|orch\\"\\nauth --help\\nkms --help\\norch --help # Set environment\\nexport RUSTYVAULT_ADDR=\\"http://localhost:8200\\"\\nexport RUSTYVAULT_TOKEN=\\"hvs.xxxxx\\"\\nexport CONTROL_CENTER_URL=\\"http://localhost:3000\\"","breadcrumbs":"Quick Start Cheatsheet » Plugin Installation Quick Reference","id":"242","title":"Plugin Installation Quick Reference"},"2420":{"body":"Definition : A complete, pre-configured deployment of multiple servers and taskservs working together. Where Used : Kubernetes deployments Database clusters Complete infrastructure stacks Related Concepts : Infrastructure, Server, Taskserv Location : provisioning/extensions/clusters/{name}/ Commands : provisioning cluster create \\nprovisioning cluster list\\nprovisioning cluster delete See Also : Infrastructure Management","breadcrumbs":"Glossary » Cluster","id":"2420","title":"Cluster"},"2421":{"body":"Definition : System capabilities ensuring adherence to regulatory requirements (GDPR, SOC2, ISO 27001). Where Used : Audit logging Data retention policies Incident response Related Concepts : Audit, Security, GDPR See Also : Compliance Implementation Summary","breadcrumbs":"Glossary » Compliance","id":"2421","title":"Compliance"},"2422":{"body":"Definition : System settings stored in TOML files with hierarchical loading and variable interpolation. Where Used : System initialization User preferences Environment-specific settings Related Concepts : Settings, Environment, Workspace Files : provisioning/config/config.defaults.toml - System defaults workspace/config/local-overrides.toml - User settings See Also : Configuration Guide","breadcrumbs":"Glossary » Config (Configuration)","id":"2422","title":"Config (Configuration)"},"2423":{"body":"Definition : Web-based UI for managing provisioning operations built with Ratatui/Crossterm. Where Used : Visual infrastructure management Real-time monitoring Guided workflows Related Concepts : UI, Platform Service, Orchestrator Location : provisioning/platform/control-center/ See Also : Platform Services","breadcrumbs":"Glossary » Control Center","id":"2423","title":"Control Center"},"2424":{"body":"Definition : DNS server taskserv providing service discovery and DNS management. Where Used : Kubernetes DNS Service discovery Internal DNS resolution Related Concepts : Taskserv, Kubernetes, Networking See Also : CoreDNS Guide CoreDNS Quick Reference","breadcrumbs":"Glossary » CoreDNS","id":"2424","title":"CoreDNS"},"2425":{"body":"Definition : Links between related documentation sections or concepts. Where Used : Documentation navigation Related topic discovery Learning path guidance Related Concepts : Documentation, Navigation, See Also Examples : \\"See Also\\" sections at the end of documentation pages","breadcrumbs":"Glossary » Cross-Reference","id":"2425","title":"Cross-Reference"},"2426":{"body":"","breadcrumbs":"Glossary » D","id":"2426","title":"D"},"2427":{"body":"Definition : A requirement that must be satisfied before installing or running a component. Where Used : Taskserv installation order Version compatibility checks Cluster deployment sequencing Related Concepts : Version, Taskserv, Workflow Schema : provisioning/schemas/dependencies.ncl See Also : Nickel Dependency Patterns","breadcrumbs":"Glossary » Dependency","id":"2427","title":"Dependency"},"2428":{"body":"Definition : System health checking and troubleshooting assistance. Where Used : System status verification Problem identification Guided troubleshooting Related Concepts : Health Check, Monitoring, Troubleshooting Commands : provisioning status\\nprovisioning diagnostics run","breadcrumbs":"Glossary » Diagnostics","id":"2428","title":"Diagnostics"},"2429":{"body":"Definition : Temporary credentials generated on-demand with automatic expiration. Where Used : AWS STS tokens SSH temporary keys Database credentials Related Concepts : Security, KMS, Secrets Management See Also : Dynamic Secrets Implementation Dynamic Secrets Quick Reference","breadcrumbs":"Glossary » Dynamic Secrets","id":"2429","title":"Dynamic Secrets"},"243":{"body":"Complete Plugin Guide : docs/user/PLUGIN_INTEGRATION_GUIDE.md Plugin Reference : docs/user/NUSHELL_PLUGINS_GUIDE.md From Scratch Guide : docs/guides/from-scratch.md Update Infrastructure : Update Guide Customize Infrastructure : Customize Guide CLI Architecture : CLI Reference Security System : Security Architecture For fastest access to this guide : provisioning sc Last Updated : 2025-10-09 Maintained By : Platform Team","breadcrumbs":"Quick Start Cheatsheet » Related Documentation","id":"243","title":"Related Documentation"},"2430":{"body":"","breadcrumbs":"Glossary » E","id":"2430","title":"E"},"2431":{"body":"Definition : A deployment context (dev, test, prod) with specific configuration overrides. Where Used : Configuration loading Resource isolation Deployment targeting Related Concepts : Config, Workspace, Infrastructure Config Files : config.{dev,test,prod}.toml Usage : PROVISIONING_ENV=prod provisioning server list","breadcrumbs":"Glossary » Environment","id":"2431","title":"Environment"},"2432":{"body":"Definition : A pluggable component adding functionality (provider, taskserv, cluster, or workflow). Where Used : Custom cloud providers Third-party taskservs Custom deployment patterns Related Concepts : Provider, Taskserv, Cluster, Workflow Location : provisioning/extensions/{type}/{name}/ See Also : Extension Development","breadcrumbs":"Glossary » Extension","id":"2432","title":"Extension"},"2433":{"body":"","breadcrumbs":"Glossary » F","id":"2433","title":"F"},"2434":{"body":"Definition : A major system capability providing key platform functionality. Where Used : Architecture documentation Feature planning System capabilities Related Concepts : ADR, Architecture, System Examples : Batch Workflow System Orchestrator Architecture CLI Architecture Configuration System See Also : Architecture Overview","breadcrumbs":"Glossary » Feature","id":"2434","title":"Feature"},"2435":{"body":"","breadcrumbs":"Glossary » G","id":"2435","title":"G"},"2436":{"body":"Definition : EU data protection regulation compliance features in the platform. Where Used : Data export requests Right to erasure Audit compliance Related Concepts : Compliance, Audit, Security Commands : provisioning compliance gdpr export \\nprovisioning compliance gdpr delete See Also : Compliance Implementation","breadcrumbs":"Glossary » GDPR (General Data Protection Regulation)","id":"2436","title":"GDPR (General Data Protection Regulation)"},"2437":{"body":"Definition : This document - a comprehensive terminology reference for the platform. Where Used : Learning the platform Understanding documentation Resolving terminology questions Related Concepts : Documentation, Reference, Cross-Reference","breadcrumbs":"Glossary » Glossary","id":"2437","title":"Glossary"},"2438":{"body":"Definition : Step-by-step walkthrough documentation for common workflows. Where Used : Onboarding new users Learning workflows Reference implementation Related Concepts : Documentation, Workflow, Tutorial Commands : provisioning guide from-scratch\\nprovisioning guide update\\nprovisioning guide customize See Also : Guides","breadcrumbs":"Glossary » Guide","id":"2438","title":"Guide"},"2439":{"body":"","breadcrumbs":"Glossary » H","id":"2439","title":"H"},"244":{"body":"Goal : Get provisioning running in 5 minutes with a working example","breadcrumbs":"Setup Quick Start » Setup Quick Start - 5 Minutes to Deployment","id":"244","title":"Setup Quick Start - 5 Minutes to Deployment"},"2440":{"body":"Definition : Automated verification that a component is running correctly. Where Used : Taskserv validation System monitoring Dependency verification Related Concepts : Diagnostics, Monitoring, Status Example : health_check = { endpoint = \\"http://localhost:6443/healthz\\" timeout = 30 interval = 10\\n}","breadcrumbs":"Glossary » Health Check","id":"2440","title":"Health Check"},"2441":{"body":"Definition : System design combining Rust orchestrator with Nushell business logic. Where Used : Core platform architecture Performance optimization Call stack management Related Concepts : Orchestrator, Architecture, Design See Also : Orchestrator Architecture ADR-004: Hybrid Architecture","breadcrumbs":"Glossary » Hybrid Architecture","id":"2441","title":"Hybrid Architecture"},"2442":{"body":"","breadcrumbs":"Glossary » I","id":"2442","title":"I"},"2443":{"body":"Definition : A named collection of servers, configurations, and deployments managed as a unit. Where Used : Environment isolation Resource organization Deployment targeting Related Concepts : Workspace, Server, Environment Location : workspace/infra/{name}/ Commands : provisioning infra list\\nprovisioning generate infra --new See Also : Infrastructure Management","breadcrumbs":"Glossary » Infrastructure","id":"2443","title":"Infrastructure"},"2444":{"body":"Definition : Connection between platform components or external systems. Where Used : API integration CI/CD pipelines External tool connectivity Related Concepts : API, Extension, Platform See Also : Integration Patterns Integration Examples","breadcrumbs":"Glossary » Integration","id":"2444","title":"Integration"},"2445":{"body":"Definition : A markdown link to another documentation file or section within the platform docs. Where Used : Cross-referencing documentation Navigation between topics Related content discovery Related Concepts : Anchor Link, Cross-Reference, Documentation Examples : [See Configuration](configuration.md) [Architecture Overview](../architecture/README.md)","breadcrumbs":"Glossary » Internal Link","id":"2445","title":"Internal Link"},"2446":{"body":"","breadcrumbs":"Glossary » J","id":"2446","title":"J"},"2447":{"body":"Definition : Token-based authentication mechanism using RS256 signatures. Where Used : User authentication API authorization Session management Related Concepts : Auth, Security, Token See Also : JWT Auth Implementation","breadcrumbs":"Glossary » JWT (JSON Web Token)","id":"2447","title":"JWT (JSON Web Token)"},"2448":{"body":"","breadcrumbs":"Glossary » K","id":"2448","title":"K"},"2449":{"body":"Definition : Declarative configuration language with type safety and lazy evaluation for infrastructure definitions. Where Used : Infrastructure schemas Workflow definitions Configuration validation Related Concepts : Schema, Configuration, Validation Version : 1.15.0+ Location : provisioning/schemas/*.ncl See Also : Nickel Quick Reference","breadcrumbs":"Glossary » Nickel (Nickel Configuration Language)","id":"2449","title":"Nickel (Nickel Configuration Language)"},"245":{"body":"# Check Nushell\\nnu --version # Should be 0.109.0+ # Check deployment tool\\ndocker --version # OR\\nkubectl version # OR\\nssh -V # OR\\nsystemctl --version","breadcrumbs":"Setup Quick Start » Step 1: Check Prerequisites (30 seconds)","id":"245","title":"Step 1: Check Prerequisites (30 seconds)"},"2450":{"body":"Definition : Encryption key management system supporting multiple backends (RustyVault, Age, AWS, Vault). Where Used : Configuration encryption Secret management Data protection Related Concepts : Security, Encryption, Secrets See Also : RustyVault KMS Guide","breadcrumbs":"Glossary » KMS (Key Management Service)","id":"2450","title":"KMS (Key Management Service)"},"2451":{"body":"Definition : Container orchestration platform available as a taskserv. Where Used : Container deployments Cluster management Production workloads Related Concepts : Taskserv, Cluster, Container Commands : provisioning taskserv create kubernetes\\nprovisioning test quick kubernetes","breadcrumbs":"Glossary » Kubernetes","id":"2451","title":"Kubernetes"},"2452":{"body":"","breadcrumbs":"Glossary » L","id":"2452","title":"L"},"2453":{"body":"Definition : A level in the configuration hierarchy (Core → Workspace → Infrastructure). Where Used : Configuration inheritance Customization patterns Settings override Related Concepts : Config, Workspace, Infrastructure See Also : Configuration Guide","breadcrumbs":"Glossary » Layer","id":"2453","title":"Layer"},"2454":{"body":"","breadcrumbs":"Glossary » M","id":"2454","title":"M"},"2455":{"body":"Definition : AI-powered server providing intelligent configuration assistance. Where Used : Configuration validation Troubleshooting guidance Documentation search Related Concepts : Platform Service, AI, Guidance Location : provisioning/platform/mcp-server/ See Also : Platform Services","breadcrumbs":"Glossary » MCP (Model Context Protocol)","id":"2455","title":"MCP (Model Context Protocol)"},"2456":{"body":"Definition : Additional authentication layer using TOTP or WebAuthn/FIDO2. Where Used : Enhanced security Compliance requirements Production access Related Concepts : Auth, Security, TOTP, WebAuthn Commands : provisioning mfa totp enroll\\nprovisioning mfa webauthn enroll\\nprovisioning mfa verify See Also : MFA Implementation Summary","breadcrumbs":"Glossary » MFA (Multi-Factor Authentication)","id":"2456","title":"MFA (Multi-Factor Authentication)"},"2457":{"body":"Definition : Process of updating existing infrastructure or moving between system versions. Where Used : System upgrades Configuration changes Infrastructure evolution Related Concepts : Update, Upgrade, Version See Also : Migration Guide","breadcrumbs":"Glossary » Migration","id":"2457","title":"Migration"},"2458":{"body":"Definition : A reusable component (provider, taskserv, cluster) loaded into a workspace. Where Used : Extension management Workspace customization Component distribution Related Concepts : Extension, Workspace, Package Commands : provisioning module discover provider\\nprovisioning module load provider \\nprovisioning module list taskserv See Also : Module System","breadcrumbs":"Glossary » Module","id":"2458","title":"Module"},"2459":{"body":"","breadcrumbs":"Glossary » N","id":"2459","title":"N"},"246":{"body":"# Option A: Using installer script\\ncurl -sSL https://install.provisioning.dev | bash # Option B: From source\\ngit clone https://github.com/project-provisioning/provisioning\\ncd provisioning\\n./scripts/install.sh","breadcrumbs":"Setup Quick Start » Step 2: Install Provisioning (1 minute)","id":"246","title":"Step 2: Install Provisioning (1 minute)"},"2460":{"body":"Definition : Primary shell and scripting language (v0.107.1) used throughout the platform. Where Used : CLI implementation Automation scripts Business logic Related Concepts : CLI, Script, Automation Version : 0.107.1 See Also : Nushell Guidelines","breadcrumbs":"Glossary » Nushell","id":"2460","title":"Nushell"},"2461":{"body":"","breadcrumbs":"Glossary » O","id":"2461","title":"O"},"2462":{"body":"Definition : Standard format for packaging and distributing extensions. Where Used : Extension distribution Package registry Version management Related Concepts : Registry, Package, Distribution See Also : OCI Registry Guide","breadcrumbs":"Glossary » OCI (Open Container Initiative)","id":"2462","title":"OCI (Open Container Initiative)"},"2463":{"body":"Definition : A single infrastructure action (create server, install taskserv, etc.). Where Used : Workflow steps Batch processing Orchestrator tasks Related Concepts : Workflow, Task, Action","breadcrumbs":"Glossary » Operation","id":"2463","title":"Operation"},"2464":{"body":"Definition : Hybrid Rust/Nushell service coordinating complex infrastructure operations. Where Used : Workflow execution Task coordination State management Related Concepts : Hybrid Architecture, Workflow, Platform Service Location : provisioning/platform/orchestrator/ Commands : cd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background See Also : Orchestrator Architecture","breadcrumbs":"Glossary » Orchestrator","id":"2464","title":"Orchestrator"},"2465":{"body":"","breadcrumbs":"Glossary » P","id":"2465","title":"P"},"2466":{"body":"Definition : Core architectural rules and patterns that must be followed. Where Used : Code review Architecture decisions Design validation Related Concepts : Architecture, ADR, Best Practices See Also : Architecture Overview","breadcrumbs":"Glossary » PAP (Project Architecture Principles)","id":"2466","title":"PAP (Project Architecture Principles)"},"2467":{"body":"Definition : A core service providing platform-level functionality (Orchestrator, Control Center, MCP, API Gateway). Where Used : System infrastructure Core capabilities Service integration Related Concepts : Service, Architecture, Infrastructure Location : provisioning/platform/{service}/","breadcrumbs":"Glossary » Platform Service","id":"2467","title":"Platform Service"},"2468":{"body":"Definition : Native Nushell plugin providing performance-optimized operations. Where Used : Auth operations (10-50x faster) KMS encryption Orchestrator queries Related Concepts : Nushell, Performance, Native Commands : provisioning plugin list\\nprovisioning plugin install See Also : Nushell Plugins Guide","breadcrumbs":"Glossary » Plugin","id":"2468","title":"Plugin"},"2469":{"body":"Definition : Cloud platform integration (AWS, UpCloud, local) handling infrastructure provisioning. Where Used : Server creation Resource management Cloud operations Related Concepts : Extension, Infrastructure, Cloud Location : provisioning/extensions/providers/{name}/ Examples : aws, upcloud, local Commands : provisioning module discover provider\\nprovisioning providers list See Also : Quick Provider Guide","breadcrumbs":"Glossary » Provider","id":"2469","title":"Provider"},"247":{"body":"# Run interactive setup\\nprovisioning setup system --interactive # Follow the prompts:\\n# - Press Enter for defaults\\n# - Select your deployment tool\\n# - Enter provider credentials (if using cloud)","breadcrumbs":"Setup Quick Start » Step 3: Initialize System (2 minutes)","id":"247","title":"Step 3: Initialize System (2 minutes)"},"2470":{"body":"","breadcrumbs":"Glossary » Q","id":"2470","title":"Q"},"2471":{"body":"Definition : Condensed command and configuration reference for rapid lookup. Where Used : Daily operations Quick reminders Command syntax Related Concepts : Guide, Documentation, Cheatsheet Commands : provisioning sc # Fastest\\nprovisioning guide quickstart See Also : Quickstart Cheatsheet","breadcrumbs":"Glossary » Quick Reference","id":"2471","title":"Quick Reference"},"2472":{"body":"","breadcrumbs":"Glossary » R","id":"2472","title":"R"},"2473":{"body":"Definition : Permission system with 5 roles (admin, operator, developer, viewer, auditor). Where Used : User permissions Access control Security policies Related Concepts : Authorization, Cedar, Security Roles : Admin, Operator, Developer, Viewer, Auditor","breadcrumbs":"Glossary » RBAC (Role-Based Access Control)","id":"2473","title":"RBAC (Role-Based Access Control)"},"2474":{"body":"Definition : OCI-compliant repository for storing and distributing extensions. Where Used : Extension publishing Version management Package distribution Related Concepts : OCI, Package, Distribution See Also : OCI Registry Guide","breadcrumbs":"Glossary » Registry","id":"2474","title":"Registry"},"2475":{"body":"Definition : HTTP endpoints exposing platform operations to external systems. Where Used : External integration Web UI backend Programmatic access Related Concepts : API, Integration, HTTP Endpoint : http://localhost:9090 See Also : REST API Documentation","breadcrumbs":"Glossary » REST API","id":"2475","title":"REST API"},"2476":{"body":"Definition : Reverting a failed workflow or operation to previous stable state. Where Used : Failure recovery Deployment safety State restoration Related Concepts : Workflow, Checkpoint, Recovery Commands : provisioning batch rollback ","breadcrumbs":"Glossary » Rollback","id":"2476","title":"Rollback"},"2477":{"body":"Definition : Rust-based secrets management backend for KMS. Where Used : Key storage Secret encryption Configuration protection Related Concepts : KMS, Security, Encryption See Also : RustyVault KMS Guide","breadcrumbs":"Glossary » RustyVault","id":"2477","title":"RustyVault"},"2478":{"body":"","breadcrumbs":"Glossary » S","id":"2478","title":"S"},"2479":{"body":"Definition : Nickel type definition specifying structure and validation rules. Where Used : Configuration validation Type safety Documentation Related Concepts : Nickel, Validation, Type Example : let ServerConfig = { hostname | string, cores | number, memory | number,\\n} in\\nServerConfig See Also : Nickel Development","breadcrumbs":"Glossary » Schema","id":"2479","title":"Schema"},"248":{"body":"# Create workspace\\nprovisioning setup workspace myapp # Verify it was created\\nprovisioning workspace list","breadcrumbs":"Setup Quick Start » Step 4: Create Your First Workspace (1 minute)","id":"248","title":"Step 4: Create Your First Workspace (1 minute)"},"2480":{"body":"Definition : System for secure storage and retrieval of sensitive data. Where Used : Password storage API keys Certificates Related Concepts : KMS, Security, Encryption See Also : Dynamic Secrets Implementation","breadcrumbs":"Glossary » Secrets Management","id":"2480","title":"Secrets Management"},"2481":{"body":"Definition : Comprehensive enterprise-grade security with 12 components (Auth, Cedar, MFA, KMS, Secrets, Compliance, etc.). Where Used : User authentication Access control Data protection Related Concepts : Auth, Authorization, MFA, KMS, Audit See Also : Security System Implementation","breadcrumbs":"Glossary » Security System","id":"2481","title":"Security System"},"2482":{"body":"Definition : Virtual machine or physical host managed by the platform. Where Used : Infrastructure provisioning Compute resources Deployment targets Related Concepts : Infrastructure, Provider, Taskserv Commands : provisioning server create\\nprovisioning server list\\nprovisioning server ssh See Also : Infrastructure Management","breadcrumbs":"Glossary » Server","id":"2482","title":"Server"},"2483":{"body":"Definition : A running application or daemon (interchangeable with Taskserv in many contexts). Where Used : Service management Application deployment System administration Related Concepts : Taskserv, Daemon, Application See Also : Service Management Guide","breadcrumbs":"Glossary » Service","id":"2483","title":"Service"},"2484":{"body":"Definition : Abbreviated command alias for faster CLI operations. Where Used : Daily operations Quick commands Productivity enhancement Related Concepts : CLI, Command, Alias Examples : provisioning s create → provisioning server create provisioning ws list → provisioning workspace list provisioning sc → Quick reference See Also : CLI Reference","breadcrumbs":"Glossary » Shortcut","id":"2484","title":"Shortcut"},"2485":{"body":"Definition : Encryption tool for managing secrets in version control. Where Used : Configuration encryption Secret management Secure storage Related Concepts : Encryption, Security, Age Version : 3.10.2 Commands : provisioning sops edit ","breadcrumbs":"Glossary » SOPS (Secrets OPerationS)","id":"2485","title":"SOPS (Secrets OPerationS)"},"2486":{"body":"Definition : Encrypted remote access protocol with temporal key support. Where Used : Server administration Remote commands Secure file transfer Related Concepts : Security, Server, Remote Access Commands : provisioning server ssh \\nprovisioning ssh connect See Also : SSH Temporal Keys User Guide","breadcrumbs":"Glossary » SSH (Secure Shell)","id":"2486","title":"SSH (Secure Shell)"},"2487":{"body":"Definition : Tracking and persisting workflow execution state. Where Used : Workflow recovery Progress tracking Failure handling Related Concepts : Workflow, Checkpoint, Orchestrator","breadcrumbs":"Glossary » State Management","id":"2487","title":"State Management"},"2488":{"body":"","breadcrumbs":"Glossary » T","id":"2488","title":"T"},"2489":{"body":"Definition : A unit of work submitted to the orchestrator for execution. Where Used : Workflow execution Job processing Operation tracking Related Concepts : Operation, Workflow, Orchestrator","breadcrumbs":"Glossary » Task","id":"2489","title":"Task"},"249":{"body":"# Activate workspace\\nprovisioning workspace activate myapp # Check configuration\\nprovisioning setup validate # Deploy server (dry-run first)\\nprovisioning server create --check # Deploy for real\\nprovisioning server create --yes","breadcrumbs":"Setup Quick Start » Step 5: Deploy Your First Server (1 minute)","id":"249","title":"Step 5: Deploy Your First Server (1 minute)"},"2490":{"body":"Definition : An installable infrastructure service (Kubernetes, PostgreSQL, Redis, etc.). Where Used : Service installation Application deployment Infrastructure components Related Concepts : Service, Extension, Package Location : provisioning/extensions/taskservs/{category}/{name}/ Commands : provisioning taskserv create \\nprovisioning taskserv list\\nprovisioning test quick See Also : Taskserv Developer Guide","breadcrumbs":"Glossary » Taskserv","id":"2490","title":"Taskserv"},"2491":{"body":"Definition : Parameterized configuration file supporting variable substitution. Where Used : Configuration generation Infrastructure customization Deployment automation Related Concepts : Config, Generation, Customization Location : provisioning/templates/","breadcrumbs":"Glossary » Template","id":"2491","title":"Template"},"2492":{"body":"Definition : Containerized isolated environment for testing taskservs and clusters. Where Used : Development testing CI/CD integration Pre-deployment validation Related Concepts : Container, Testing, Validation Commands : provisioning test quick \\nprovisioning test env single \\nprovisioning test env cluster See Also : Test Environment Guide","breadcrumbs":"Glossary » Test Environment","id":"2492","title":"Test Environment"},"2493":{"body":"Definition : Multi-node cluster configuration template (Kubernetes HA, etcd cluster, etc.). Where Used : Cluster testing Multi-node deployments Production simulation Related Concepts : Test Environment, Cluster, Configuration Examples : kubernetes_3node, etcd_cluster, kubernetes_single","breadcrumbs":"Glossary » Topology","id":"2493","title":"Topology"},"2494":{"body":"Definition : MFA method generating time-sensitive codes. Where Used : Two-factor authentication MFA enrollment Security enhancement Related Concepts : MFA, Security, Auth Commands : provisioning mfa totp enroll\\nprovisioning mfa totp verify ","breadcrumbs":"Glossary » TOTP (Time-based One-Time Password)","id":"2494","title":"TOTP (Time-based One-Time Password)"},"2495":{"body":"Definition : System problem diagnosis and resolution guidance. Where Used : Problem solving Error resolution System debugging Related Concepts : Diagnostics, Guide, Support See Also : Troubleshooting Guide","breadcrumbs":"Glossary » Troubleshooting","id":"2495","title":"Troubleshooting"},"2496":{"body":"","breadcrumbs":"Glossary » U","id":"2496","title":"U"},"2497":{"body":"Definition : Visual interface for platform operations (Control Center, Web UI). Where Used : Visual management Guided workflows Monitoring dashboards Related Concepts : Control Center, Platform Service, GUI","breadcrumbs":"Glossary » UI (User Interface)","id":"2497","title":"UI (User Interface)"},"2498":{"body":"Definition : Process of upgrading infrastructure components to newer versions. Where Used : Version management Security patches Feature updates Related Concepts : Version, Migration, Upgrade Commands : provisioning version check\\nprovisioning version apply See Also : Update Infrastructure Guide","breadcrumbs":"Glossary » Update","id":"2498","title":"Update"},"2499":{"body":"","breadcrumbs":"Glossary » V","id":"2499","title":"V"},"25":{"body":"Hybrid Rust/Nushell orchestration Checkpoint-based recovery Parallel execution with limits Real-time monitoring","breadcrumbs":"Home » ✅ Workflow Orchestration","id":"25","title":"✅ Workflow Orchestration"},"250":{"body":"# Check health\\nprovisioning platform health # Check servers\\nprovisioning server list # SSH into server (if applicable)\\nprovisioning server ssh ","breadcrumbs":"Setup Quick Start » Verify Everything Works","id":"250","title":"Verify Everything Works"},"2500":{"body":"Definition : Verification that configuration or infrastructure meets requirements. Where Used : Configuration checks Schema validation Pre-deployment verification Related Concepts : Schema, Nickel, Check Commands : provisioning validate config\\nprovisioning validate infrastructure See Also : Config Validation","breadcrumbs":"Glossary » Validation","id":"2500","title":"Validation"},"2501":{"body":"Definition : Semantic version identifier for components and compatibility. Where Used : Component versioning Compatibility checking Update management Related Concepts : Update, Dependency, Compatibility Commands : provisioning version\\nprovisioning version check\\nprovisioning taskserv check-updates","breadcrumbs":"Glossary » Version","id":"2501","title":"Version"},"2502":{"body":"","breadcrumbs":"Glossary » W","id":"2502","title":"W"},"2503":{"body":"Definition : FIDO2-based passwordless authentication standard. Where Used : Hardware key authentication Passwordless login Enhanced MFA Related Concepts : MFA, Security, FIDO2 Commands : provisioning mfa webauthn enroll\\nprovisioning mfa webauthn verify","breadcrumbs":"Glossary » WebAuthn","id":"2503","title":"WebAuthn"},"2504":{"body":"Definition : A sequence of related operations with dependency management and state tracking. Where Used : Complex deployments Multi-step operations Automated processes Related Concepts : Batch Operation, Orchestrator, Task Commands : provisioning workflow list\\nprovisioning workflow status \\nprovisioning workflow monitor See Also : Batch Workflow System","breadcrumbs":"Glossary » Workflow","id":"2504","title":"Workflow"},"2505":{"body":"Definition : An isolated environment containing infrastructure definitions and configuration. Where Used : Project isolation Environment separation Team workspaces Related Concepts : Infrastructure, Config, Environment Location : workspace/{name}/ Commands : provisioning workspace list\\nprovisioning workspace switch \\nprovisioning workspace create See Also : Workspace Switching Guide","breadcrumbs":"Glossary » Workspace","id":"2505","title":"Workspace"},"2506":{"body":"","breadcrumbs":"Glossary » X-Z","id":"2506","title":"X-Z"},"2507":{"body":"Definition : Data serialization format used for Kubernetes manifests and configuration. Where Used : Kubernetes deployments Configuration files Data interchange Related Concepts : Config, Kubernetes, Data Format","breadcrumbs":"Glossary » YAML","id":"2507","title":"YAML"},"2508":{"body":"Symbol/Acronym Full Term Category ADR Architecture Decision Record Architecture API Application Programming Interface Integration CLI Command-Line Interface User Interface GDPR General Data Protection Regulation Compliance JWT JSON Web Token Security Nickel Nickel Configuration Language Configuration KMS Key Management Service Security MCP Model Context Protocol Platform MFA Multi-Factor Authentication Security OCI Open Container Initiative Packaging PAP Project Architecture Principles Architecture RBAC Role-Based Access Control Security REST Representational State Transfer API SOC2 Service Organization Control 2 Compliance SOPS Secrets OPerationS Security SSH Secure Shell Remote Access TOTP Time-based One-Time Password Security UI User Interface User Interface","breadcrumbs":"Glossary » Symbol and Acronym Index","id":"2508","title":"Symbol and Acronym Index"},"2509":{"body":"","breadcrumbs":"Glossary » Cross-Reference Map","id":"2509","title":"Cross-Reference Map"},"251":{"body":"# Workspace management\\nprovisioning workspace list # List all workspaces\\nprovisioning workspace activate prod # Switch workspace\\nprovisioning workspace create dev # Create new workspace # Server management\\nprovisioning server list # List servers\\nprovisioning server create # Create server\\nprovisioning server delete # Delete server\\nprovisioning server ssh # SSH into server # Configuration\\nprovisioning setup validate # Validate configuration\\nprovisioning setup update platform # Update platform settings # System info\\nprovisioning info # System information\\nprovisioning capability check # Check capabilities\\nprovisioning platform health # Check platform health","breadcrumbs":"Setup Quick Start » Common Commands Cheat Sheet","id":"251","title":"Common Commands Cheat Sheet"},"2510":{"body":"Infrastructure : Infrastructure, Server, Cluster, Provider, Taskserv, Module Security : Auth, Authorization, JWT, MFA, TOTP, WebAuthn, Cedar, KMS, Secrets Management, RBAC, Break-Glass Configuration : Config, Nickel, Schema, Validation, Environment, Layer, Workspace Workflow & Operations : Workflow, Batch Operation, Operation, Task, Orchestrator, Checkpoint, Rollback Platform Services : Orchestrator, Control Center, MCP, API Gateway, Platform Service Documentation : Glossary, Guide, ADR, Cross-Reference, Internal Link, Anchor Link Development : Extension, Plugin, Template, Module, Integration Testing : Test Environment, Topology, Validation, Health Check Compliance : Compliance, GDPR, Audit, Security System","breadcrumbs":"Glossary » By Topic Area","id":"2510","title":"By Topic Area"},"2511":{"body":"New User : Glossary (this document) Guide Quick Reference Workspace Infrastructure Server Taskserv Developer : Extension Provider Taskserv Nickel Schema Template Plugin Operations : Workflow Orchestrator Monitoring Troubleshooting Security Compliance","breadcrumbs":"Glossary » By User Journey","id":"2511","title":"By User Journey"},"2512":{"body":"","breadcrumbs":"Glossary » Terminology Guidelines","id":"2512","title":"Terminology Guidelines"},"2513":{"body":"Consistency : Use the same term throughout documentation (for example, \\"Taskserv\\" not \\"task service\\" or \\"task-serv\\") Capitalization : Proper nouns and acronyms: CAPITALIZE (Nickel, JWT, MFA) Generic terms: lowercase (server, cluster, workflow) Platform-specific terms: Title Case (Taskserv, Workspace, Orchestrator) Pluralization : Taskservs (not taskservices) Workspaces (standard plural) Topologies (not topologys)","breadcrumbs":"Glossary » Writing Style","id":"2513","title":"Writing Style"},"2514":{"body":"Don\'t Say Say Instead Reason \\"Task service\\" \\"Taskserv\\" Standard platform term \\"Configuration file\\" \\"Config\\" or \\"Settings\\" Context-dependent \\"Worker\\" \\"Agent\\" or \\"Task\\" Clarify context \\"Kubernetes service\\" \\"K8s taskserv\\" or \\"K8s Service resource\\" Disambiguate","breadcrumbs":"Glossary » Avoiding Confusion","id":"2514","title":"Avoiding Confusion"},"2515":{"body":"","breadcrumbs":"Glossary » Contributing to the Glossary","id":"2515","title":"Contributing to the Glossary"},"2516":{"body":"Alphabetical placement in appropriate section Include all standard sections: Definition Where Used Related Concepts Examples (if applicable) Commands (if applicable) See Also (links to docs) Cross-reference in related terms Update Symbol and Acronym Index if applicable Update Cross-Reference Map","breadcrumbs":"Glossary » Adding New Terms","id":"2516","title":"Adding New Terms"},"2517":{"body":"Verify changes don\'t break cross-references Update \\"Last Updated\\" date at top Increment version if major changes Review related terms for consistency","breadcrumbs":"Glossary » Updating Existing Terms","id":"2517","title":"Updating Existing Terms"},"2518":{"body":"Version Date Changes 1.0.0 2025-10-10 Initial comprehensive glossary Maintained By : Documentation Team Review Cycle : Quarterly or when major features are added Feedback : Please report missing or unclear terms via issues","breadcrumbs":"Glossary » Version History","id":"2518","title":"Version History"},"2519":{"body":"A Rust-native Model Context Protocol (MCP) server for infrastructure automation and AI-assisted DevOps operations. Source : provisioning/platform/mcp-server/ Status : Proof of Concept Complete","breadcrumbs":"MCP Server » MCP Server - Model Context Protocol","id":"2519","title":"MCP Server - Model Context Protocol"},"252":{"body":"Setup wizard won\'t start # Check Nushell\\nnu --version # Check permissions\\nchmod +x $(which provisioning) Configuration error # Validate configuration\\nprovisioning setup validate --verbose # Check paths\\nprovisioning info paths Deployment fails # Dry-run to see what would happen\\nprovisioning server create --check # Check platform status\\nprovisioning platform status","breadcrumbs":"Setup Quick Start » Troubleshooting Quick Fixes","id":"252","title":"Troubleshooting Quick Fixes"},"2520":{"body":"Replaces the Python implementation with significant performance improvements while maintaining philosophical consistency with the Rust ecosystem approach.","breadcrumbs":"MCP Server » Overview","id":"2520","title":"Overview"},"2521":{"body":"🚀 Rust MCP Server Performance Analysis\\n================================================== 📋 Server Parsing Performance: • Sub-millisecond latency across all operations • 0μs average for configuration access 🤖 AI Status Performance: • AI Status: 0μs avg (10000 iterations) 💾 Memory Footprint: • ServerConfig size: 80 bytes • Config size: 272 bytes ✅ Performance Summary: • Server parsing: Sub-millisecond latency • Configuration access: Microsecond latency • Memory efficient: Small struct footprint • Zero-copy string operations where possible","breadcrumbs":"MCP Server » Performance Results","id":"2521","title":"Performance Results"},"2522":{"body":"src/\\n├── simple_main.rs # Lightweight MCP server entry point\\n├── main.rs # Full MCP server (with SDK integration)\\n├── lib.rs # Library interface\\n├── config.rs # Configuration management\\n├── provisioning.rs # Core provisioning engine\\n├── tools.rs # AI-powered parsing tools\\n├── errors.rs # Error handling\\n└── performance_test.rs # Performance benchmarking","breadcrumbs":"MCP Server » Architecture","id":"2522","title":"Architecture"},"2523":{"body":"AI-Powered Server Parsing : Natural language to infrastructure config Multi-Provider Support : AWS, UpCloud, Local Configuration Management : TOML-based with environment overrides Error Handling : Comprehensive error types with recovery hints Performance Monitoring : Built-in benchmarking capabilities","breadcrumbs":"MCP Server » Key Features","id":"2523","title":"Key Features"},"2524":{"body":"Metric Python MCP Server Rust MCP Server Improvement Startup Time ~500 ms ~50 ms 10x faster Memory Usage ~50 MB ~5 MB 10x less Parsing Latency ~1 ms ~0.001 ms 1000x faster Binary Size Python + deps ~15 MB static Portable Type Safety Runtime errors Compile-time Zero runtime errors","breadcrumbs":"MCP Server » Rust vs Python Comparison","id":"2524","title":"Rust vs Python Comparison"},"2525":{"body":"# Build and run\\ncargo run --bin provisioning-mcp-server --release # Run with custom config\\nPROVISIONING_PATH=/path/to/provisioning cargo run --bin provisioning-mcp-server -- --debug # Run tests\\ncargo test # Run benchmarks\\ncargo run --bin provisioning-mcp-server --release","breadcrumbs":"MCP Server » Usage","id":"2525","title":"Usage"},"2526":{"body":"Set via environment variables: export PROVISIONING_PATH=/path/to/provisioning\\nexport PROVISIONING_AI_PROVIDER=openai\\nexport OPENAI_API_KEY=your-key\\nexport PROVISIONING_DEBUG=true","breadcrumbs":"MCP Server » Configuration","id":"2526","title":"Configuration"},"2527":{"body":"Philosophical Consistency : Rust throughout the stack Performance : Sub-millisecond response times Memory Safety : No segfaults, no memory leaks Concurrency : Native async/await support Distribution : Single static binary Cross-compilation : ARM64/x86_64 support","breadcrumbs":"MCP Server » Integration Benefits","id":"2527","title":"Integration Benefits"},"2528":{"body":"Full MCP SDK integration (schema definitions) WebSocket/TCP transport layer Plugin system for extensibility Metrics collection and monitoring Documentation and examples","breadcrumbs":"MCP Server » Next Steps","id":"2528","title":"Next Steps"},"2529":{"body":"Architecture : MCP Integration","breadcrumbs":"MCP Server » Related Documentation","id":"2529","title":"Related Documentation"},"253":{"body":"After basic setup: Configure Provider : Add cloud provider credentials Create More Workspaces : Dev, staging, production Deploy Services : Web servers, databases, etc. Set Up Monitoring : Health checks, logging Automate Deployments : CI/CD integration","breadcrumbs":"Setup Quick Start » What\'s Next","id":"253","title":"What\'s Next"},"2530":{"body":"Version : 2.0.0 Last Updated : 2026-01-05 Status : Production Ready Target Audience : DevOps Engineers, Infrastructure Administrators Services Covered : 8 platform services (orchestrator, control-center, mcp-server, vault-service, extension-registry, rag, ai-service, provisioning-daemon) Interactive configuration for cloud-native infrastructure platform services using TypeDialog forms and Nickel.","breadcrumbs":"TypeDialog Platform Config Guide » TypeDialog Platform Configuration Guide","id":"2530","title":"TypeDialog Platform Configuration Guide"},"2531":{"body":"TypeDialog is an interactive form system that generates Nickel configurations for platform services. Instead of manually editing TOML or KCL files, you answer questions in an interactive form, and TypeDialog generates validated Nickel configuration. Benefits : ✅ No manual TOML editing required ✅ Interactive guidance for each setting ✅ Automatic validation of inputs ✅ Type-safe configuration (Nickel contracts) ✅ Generated configurations ready for deployment","breadcrumbs":"TypeDialog Platform Config Guide » Overview","id":"2531","title":"Overview"},"2532":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Quick Start","id":"2532","title":"Quick Start"},"2533":{"body":"# Launch interactive form for orchestrator\\nprovisioning config platform orchestrator # Or use TypeDialog directly\\ntypedialog form .typedialog/provisioning/platform/orchestrator/form.toml This opens an interactive form with sections for: Workspace configuration Server settings (host, port, workers) Storage backend (filesystem or SurrealDB) Task queue and batch settings Monitoring and health checks Rollback and recovery Logging configuration Extensions and integrations Advanced settings","breadcrumbs":"TypeDialog Platform Config Guide » 1. Configure a Platform Service (5 minutes)","id":"2533","title":"1. Configure a Platform Service (5 minutes)"},"2534":{"body":"After completing the form, TypeDialog generates config.ncl: # View what was generated\\ncat workspace_librecloud/config/config.ncl","breadcrumbs":"TypeDialog Platform Config Guide » 2. Review Generated Configuration","id":"2534","title":"2. Review Generated Configuration"},"2535":{"body":"# Check Nickel syntax is valid\\nnickel typecheck workspace_librecloud/config/config.ncl # Export to TOML for services\\nprovisioning config export","breadcrumbs":"TypeDialog Platform Config Guide » 3. Validate Configuration","id":"2535","title":"3. Validate Configuration"},"2536":{"body":"Platform services automatically load the exported TOML: # Orchestrator reads config/generated/platform/orchestrator.toml\\nprovisioning start orchestrator # Check it\'s using the right config\\ncat workspace_librecloud/config/generated/platform/orchestrator.toml","breadcrumbs":"TypeDialog Platform Config Guide » 4. Services Use Generated Config","id":"2536","title":"4. Services Use Generated Config"},"2537":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Interactive Configuration Workflow","id":"2537","title":"Interactive Configuration Workflow"},"2538":{"body":"Best for : Most users, no Nickel knowledge needed Workflow : Launch form for a service: provisioning config platform orchestrator Answer questions in interactive prompts about workspace, server, storage, queue Review what was generated: cat workspace_librecloud/config/config.ncl Update running services: provisioning config export && provisioning restart orchestrator","breadcrumbs":"TypeDialog Platform Config Guide » Recommended Approach: Use TypeDialog Forms","id":"2538","title":"Recommended Approach: Use TypeDialog Forms"},"2539":{"body":"Best for : Users comfortable with Nickel, want full control Workflow : Create file: touch workspace_librecloud/config/config.ncl Edit directly: vim workspace_librecloud/config/config.ncl Validate syntax: nickel typecheck workspace_librecloud/config/config.ncl Export and deploy: provisioning config export && provisioning restart orchestrator","breadcrumbs":"TypeDialog Platform Config Guide » Advanced Approach: Manual Nickel Editing","id":"2539","title":"Advanced Approach: Manual Nickel Editing"},"254":{"body":"# Get help\\nprovisioning help # Setup help\\nprovisioning help setup # Specific command help\\nprovisioning --help # View documentation\\nprovisioning guide system-setup","breadcrumbs":"Setup Quick Start » Need Help","id":"254","title":"Need Help"},"2540":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Configuration Structure","id":"2540","title":"Configuration Structure"},"2541":{"body":"All configuration lives in one Nickel file with three sections: # workspace_librecloud/config/config.ncl\\n{ # SECTION 1: Workspace metadata workspace = { name = \\"librecloud\\", path = \\"/Users/Akasha/project-provisioning/workspace_librecloud\\", description = \\"Production workspace\\" }, # SECTION 2: Cloud providers providers = { upcloud = { enabled = true, api_user = \\"{{env.UPCLOUD_USER}}\\", api_password = \\"{{kms.decrypt(\'upcloud_pass\')}}\\" }, aws = { enabled = false }, local = { enabled = true } }, # SECTION 3: Platform services platform = { orchestrator = { enabled = true, server = { host = \\"127.0.0.1\\", port = 9090 }, storage = { type = \\"filesystem\\" } }, kms = { enabled = true, backend = \\"rustyvault\\", url = \\"http://localhost:8200\\" } }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » Single File, Three Sections","id":"2541","title":"Single File, Three Sections"},"2542":{"body":"Section Purpose Used By workspace Workspace metadata and paths Config loader, providers providers.upcloud UpCloud provider settings UpCloud provisioning providers.aws AWS provider settings AWS provisioning providers.local Local VM provider settings Local VM provisioning Core Platform Services platform.orchestrator Orchestrator service config Orchestrator REST API platform.control_center Control center service config Control center REST API platform.mcp_server MCP server service config Model Context Protocol integration platform.installer Installer service config Infrastructure provisioning Security & Secrets platform.vault_service Vault service config Secrets management and encryption Extensions & Registry platform.extension_registry Extension registry config Extension distribution via Gitea/OCI AI & Intelligence platform.rag RAG system config Retrieval-Augmented Generation platform.ai_service AI service config AI model integration and DAG workflows Operations & Daemon platform.provisioning_daemon Provisioning daemon config Background provisioning operations","breadcrumbs":"TypeDialog Platform Config Guide » Available Configuration Sections","id":"2542","title":"Available Configuration Sections"},"2543":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Service-Specific Configuration","id":"2543","title":"Service-Specific Configuration"},"2544":{"body":"Purpose : Coordinate infrastructure operations, manage workflows, handle batch operations Key Settings : server : HTTP server configuration (host, port, workers) storage : Task queue storage (filesystem or SurrealDB) queue : Task processing (concurrency, retries, timeouts) batch : Batch operation settings (parallelism, timeouts) monitoring : Health checks and metrics collection rollback : Checkpoint and recovery strategy logging : Log level and format Example : platform = { orchestrator = { enabled = true, server = { host = \\"127.0.0.1\\", port = 9090, workers = 4, keep_alive = 75, max_connections = 1000 }, storage = { type = \\"filesystem\\", backend_path = \\"{{workspace.path}}/.orchestrator/data/queue.rkvs\\" }, queue = { max_concurrent_tasks = 5, retry_attempts = 3, retry_delay_seconds = 5, task_timeout_minutes = 60 } }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » Orchestrator Service","id":"2544","title":"Orchestrator Service"},"2545":{"body":"Purpose : Cryptographic key management, secret encryption/decryption Key Settings : backend : KMS backend (rustyvault, age, aws, vault, cosmian) url : Backend URL or connection string credentials : Authentication if required Example : platform = { kms = { enabled = true, backend = \\"rustyvault\\", url = \\"http://localhost:8200\\" }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » KMS Service","id":"2545","title":"KMS Service"},"2546":{"body":"Purpose : Centralized monitoring and control interface Key Settings : server : HTTP server configuration database : Backend database connection jwt : JWT authentication settings security : CORS and security policies Example : platform = { control_center = { enabled = true, server = { host = \\"127.0.0.1\\", port = 8080 } }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » Control Center Service","id":"2546","title":"Control Center Service"},"2547":{"body":"All platform services support four deployment modes, each with different resource allocation and feature sets: Mode Resources Use Case Storage TLS solo Minimal (2 workers) Development, testing Embedded/filesystem No multiuser Moderate (4 workers) Team environments Shared databases Optional cicd High throughput (8+ workers) CI/CD pipelines Ephemeral/memory No enterprise High availability (16+ workers) Production Clustered/distributed Yes Mode-based Configuration Loading : # Load a specific mode\'s configuration\\nexport VAULT_MODE=enterprise\\nexport REGISTRY_MODE=multiuser\\nexport RAG_MODE=cicd # Services automatically resolve to correct TOML files:\\n# Generated from: provisioning/schemas/platform/\\n# - vault-service.enterprise.toml (generated from vault-service.ncl)\\n# - extension-registry.multiuser.toml (generated from extension-registry.ncl)\\n# - rag.cicd.toml (generated from rag.ncl)","breadcrumbs":"TypeDialog Platform Config Guide » Deployment Modes","id":"2547","title":"Deployment Modes"},"2548":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » New Platform Services (Phase 13-19)","id":"2548","title":"New Platform Services (Phase 13-19)"},"2549":{"body":"Purpose : Secrets management, encryption, and cryptographic key storage Key Settings : server : HTTP server configuration (host, port, workers) storage : Backend storage (filesystem, memory, surrealdb, etcd, postgresql) vault : Vault mounting and key management ha : High availability clustering security : TLS, certificate validation logging : Log level and audit trails Mode Characteristics : solo : Filesystem storage, no TLS, embedded mode multiuser : SurrealDB backend, shared storage, TLS optional cicd : In-memory ephemeral storage, no persistence enterprise : Etcd HA, TLS required, audit logging enabled Environment Variable Overrides : VAULT_CONFIG=/path/to/vault.toml # Explicit config path\\nVAULT_MODE=enterprise # Mode-specific config\\nVAULT_SERVER_URL=http://localhost:8200 # Server URL\\nVAULT_STORAGE_BACKEND=etcd # Storage backend\\nVAULT_AUTH_TOKEN=s.xxxxxxxx # Authentication token\\nVAULT_TLS_VERIFY=true # TLS verification Example Configuration : platform = { vault_service = { enabled = true, server = { host = \\"0.0.0.0\\", port = 8200, workers = 8 }, storage = { backend = \\"surrealdb\\", url = \\"http://surrealdb:8000\\", namespace = \\"vault\\", database = \\"secrets\\" }, vault = { mount_point = \\"transit\\", key_name = \\"provisioning-master\\" }, ha = { enabled = true } }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » Vault Service","id":"2549","title":"Vault Service"},"255":{"body":"Your configuration is in: macOS : ~/Library/Application Support/provisioning/ Linux : ~/.config/provisioning/ Important files: system.toml - System configuration user_preferences.toml - User settings workspaces/*/ - Workspace definitions Ready to dive deeper? Check out the Full Setup Guide","breadcrumbs":"Setup Quick Start » Key Files","id":"255","title":"Key Files"},"2550":{"body":"Purpose : Extension distribution and management via Gitea and OCI registries Key Settings : server : HTTP server configuration (host, port, workers) gitea : Gitea integration for extension source repository oci : OCI registry for artifact distribution cache : Metadata and list caching auth : Registry authentication Mode Characteristics : solo : Gitea only, minimal cache, CORS disabled multiuser : Gitea + OCI, both enabled, CORS enabled cicd : OCI only (high-throughput mode), ephemeral cache enterprise : Both Gitea + OCI, TLS verification, large cache Environment Variable Overrides : REGISTRY_CONFIG=/path/to/registry.toml # Explicit config path\\nREGISTRY_MODE=multiuser # Mode-specific config\\nREGISTRY_SERVER_HOST=0.0.0.0 # Server host\\nREGISTRY_SERVER_PORT=8081 # Server port\\nREGISTRY_SERVER_WORKERS=4 # Worker count\\nREGISTRY_GITEA_URL=http://gitea:3000 # Gitea URL\\nREGISTRY_GITEA_ORG=provisioning # Gitea organization\\nREGISTRY_OCI_REGISTRY=registry.local:5000 # OCI registry\\nREGISTRY_OCI_NAMESPACE=provisioning # OCI namespace Example Configuration : platform = { extension_registry = { enabled = true, server = { host = \\"0.0.0.0\\", port = 8081, workers = 4 }, gitea = { enabled = true, url = \\"http://gitea:3000\\", org = \\"provisioning\\" }, oci = { enabled = true, registry = \\"registry.local:5000\\", namespace = \\"provisioning\\" }, cache = { capacity = 1000, ttl = 300 } }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » Extension Registry Service","id":"2550","title":"Extension Registry Service"},"2551":{"body":"Purpose : Document retrieval, semantic search, and AI-augmented responses Key Settings : embeddings : Embedding model provider (openai, local, anthropic) vector_db : Vector database backend (memory, surrealdb, qdrant, milvus) llm : Language model provider (anthropic, openai, ollama) retrieval : Search strategy and parameters ingestion : Document processing and indexing Mode Characteristics : solo : Local embeddings, in-memory vector DB, Ollama LLM multiuser : OpenAI embeddings, SurrealDB vector DB, Anthropic LLM cicd : RAG completely disabled (not applicable for ephemeral pipelines) enterprise : Large embeddings (3072-dim), distributed vector DB, Claude Opus Environment Variable Overrides : RAG_CONFIG=/path/to/rag.toml # Explicit config path\\nRAG_MODE=multiuser # Mode-specific config\\nRAG_ENABLED=true # Enable/disable RAG\\nRAG_EMBEDDINGS_PROVIDER=openai # Embedding provider\\nRAG_EMBEDDINGS_API_KEY=sk-xxx # Embedding API key\\nRAG_VECTOR_DB_URL=http://surrealdb:8000 # Vector DB URL\\nRAG_LLM_PROVIDER=anthropic # LLM provider\\nRAG_LLM_API_KEY=sk-ant-xxx # LLM API key\\nRAG_VECTOR_DB_TYPE=surrealdb # Vector DB type Example Configuration : platform = { rag = { enabled = true, embeddings = { provider = \\"openai\\", model = \\"text-embedding-3-small\\", api_key = \\"{{env.OPENAI_API_KEY}}\\" }, vector_db = { db_type = \\"surrealdb\\", url = \\"http://surrealdb:8000\\", namespace = \\"rag_prod\\" }, llm = { provider = \\"anthropic\\", model = \\"claude-opus-4-5-20251101\\", api_key = \\"{{env.ANTHROPIC_API_KEY}}\\" }, retrieval = { top_k = 10, similarity_threshold = 0.75 } }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » RAG (Retrieval-Augmented Generation) Service","id":"2551","title":"RAG (Retrieval-Augmented Generation) Service"},"2552":{"body":"Purpose : AI model integration with RAG and MCP support for multi-step workflows Key Settings : server : HTTP server configuration rag : RAG system integration mcp : Model Context Protocol integration dag : Directed acyclic graph task orchestration Mode Characteristics : solo : RAG enabled, no MCP, minimal concurrency (3 tasks) multiuser : Both RAG and MCP enabled, moderate concurrency (10 tasks) cicd : RAG disabled, MCP enabled, high concurrency (20 tasks) enterprise : Both enabled, max concurrency (50 tasks), full monitoring Environment Variable Overrides : AI_SERVICE_CONFIG=/path/to/ai.toml # Explicit config path\\nAI_SERVICE_MODE=enterprise # Mode-specific config\\nAI_SERVICE_SERVER_PORT=8082 # Server port\\nAI_SERVICE_SERVER_WORKERS=16 # Worker count\\nAI_SERVICE_RAG_ENABLED=true # Enable RAG integration\\nAI_SERVICE_MCP_ENABLED=true # Enable MCP integration\\nAI_SERVICE_DAG_MAX_CONCURRENT_TASKS=50 # Max concurrent tasks Example Configuration : platform = { ai_service = { enabled = true, server = { host = \\"0.0.0.0\\", port = 8082, workers = 8 }, rag = { enabled = true, rag_service_url = \\"http://rag:8083\\", timeout = 60000 }, mcp = { enabled = true, mcp_service_url = \\"http://mcp-server:8084\\", timeout = 60000 }, dag = { max_concurrent_tasks = 20, task_timeout = 600000, retry_attempts = 5 } }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » AI Service","id":"2552","title":"AI Service"},"2553":{"body":"Purpose : Background service for provisioning operations, workspace management, and health monitoring Key Settings : daemon : Daemon control (poll interval, max workers) logging : Log level and output configuration actions : Automated actions (cleanup, updates, sync) workers : Worker pool configuration health : Health check settings Mode Characteristics : solo : Minimal polling, no auto-cleanup, debug logging multiuser : Standard polling, workspace sync enabled, info logging cicd : Frequent polling, ephemeral cleanup, warning logging enterprise : Standard polling, full automation, all features enabled Environment Variable Overrides : DAEMON_CONFIG=/path/to/daemon.toml # Explicit config path\\nDAEMON_MODE=enterprise # Mode-specific config\\nDAEMON_POLL_INTERVAL=30 # Polling interval (seconds)\\nDAEMON_MAX_WORKERS=16 # Maximum worker threads\\nDAEMON_LOGGING_LEVEL=info # Log level (debug/info/warn/error)\\nDAEMON_AUTO_CLEANUP=true # Enable auto cleanup\\nDAEMON_AUTO_UPDATE=true # Enable auto updates Example Configuration : platform = { provisioning_daemon = { enabled = true, daemon = { poll_interval = 30, max_workers = 8 }, logging = { level = \\"info\\", file = \\"/var/log/provisioning/daemon.log\\" }, actions = { auto_cleanup = true, auto_update = false, workspace_sync = true } }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » Provisioning Daemon","id":"2553","title":"Provisioning Daemon"},"2554":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Using TypeDialog Forms","id":"2554","title":"Using TypeDialog Forms"},"2555":{"body":"Interactive Prompts : Answer questions one at a time Validation : Inputs are validated as you type Defaults : Each field shows a sensible default Skip Optional : Press Enter to use default or skip optional fields Review : Preview generated Nickel before saving","breadcrumbs":"TypeDialog Platform Config Guide » Form Navigation","id":"2555","title":"Form Navigation"},"2556":{"body":"Type Example Notes text \\"127.0.0.1\\" Free-form text input confirm true/false Yes/no answer select \\"filesystem\\" Choose from list custom(u16) 9090 Number input custom(u32) 1000 Larger number","breadcrumbs":"TypeDialog Platform Config Guide » Field Types","id":"2556","title":"Field Types"},"2557":{"body":"Environment Variables : api_user = \\"{{env.UPCLOUD_USER}}\\"\\napi_password = \\"{{env.UPCLOUD_PASSWORD}}\\" Workspace Paths : data_dir = \\"{{workspace.path}}/.orchestrator/data\\"\\nlogs_dir = \\"{{workspace.path}}/.orchestrator/logs\\" KMS Decryption : api_password = \\"{{kms.decrypt(\'upcloud_pass\')}}\\"","breadcrumbs":"TypeDialog Platform Config Guide » Special Values","id":"2557","title":"Special Values"},"2558":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Validation & Export","id":"2558","title":"Validation & Export"},"2559":{"body":"# Check Nickel syntax\\nnickel typecheck workspace_librecloud/config/config.ncl # Detailed validation with error messages\\nnickel typecheck workspace_librecloud/config/config.ncl 2>&1 # Schema validation happens during export\\nprovisioning config export","breadcrumbs":"TypeDialog Platform Config Guide » Validating Configuration","id":"2559","title":"Validating Configuration"},"256":{"body":"Version : 1.0.0 Last Updated : 2025-12-09 Status : Production Ready","breadcrumbs":"Setup System Guide » Provisioning Setup System Guide","id":"256","title":"Provisioning Setup System Guide"},"2560":{"body":"# One-time export\\nprovisioning config export # Export creates (pre-configured TOML for all services):\\nworkspace_librecloud/config/generated/\\n├── workspace.toml # Workspace metadata\\n├── providers/\\n│ ├── upcloud.toml # UpCloud provider\\n│ └── local.toml # Local provider\\n└── platform/ ├── orchestrator.toml # Orchestrator service ├── control_center.toml # Control center service ├── mcp_server.toml # MCP server service ├── installer.toml # Installer service ├── kms.toml # KMS service ├── vault_service.toml # Vault service (new) ├── extension_registry.toml # Extension registry (new) ├── rag.toml # RAG service (new) ├── ai_service.toml # AI service (new) └── provisioning_daemon.toml # Daemon service (new) # Public Nickel Schemas (20 total for 5 new services):\\nprovisioning/schemas/platform/\\n├── schemas/\\n│ ├── vault-service.ncl\\n│ ├── extension-registry.ncl\\n│ ├── rag.ncl\\n│ ├── ai-service.ncl\\n│ └── provisioning-daemon.ncl\\n├── defaults/\\n│ ├── vault-service-defaults.ncl\\n│ ├── extension-registry-defaults.ncl\\n│ ├── rag-defaults.ncl\\n│ ├── ai-service-defaults.ncl\\n│ ├── provisioning-daemon-defaults.ncl\\n│ └── deployment/\\n│ ├── solo-defaults.ncl\\n│ ├── multiuser-defaults.ncl\\n│ ├── cicd-defaults.ncl\\n│ └── enterprise-defaults.ncl\\n├── validators/\\n├── templates/\\n├── constraints/\\n└── values/ Using Pre-Generated Configurations : All 5 new services come with pre-built TOML configs for each deployment mode: # View available schemas for vault service\\nls -la provisioning/schemas/platform/schemas/vault-service.ncl\\nls -la provisioning/schemas/platform/defaults/vault-service-defaults.ncl # Load enterprise mode\\nexport VAULT_MODE=enterprise\\ncargo run -p vault-service # Or load multiuser mode\\nexport REGISTRY_MODE=multiuser\\ncargo run -p extension-registry # All 5 services support mode-based loading\\nexport RAG_MODE=cicd\\nexport AI_SERVICE_MODE=enterprise\\nexport DAEMON_MODE=multiuser","breadcrumbs":"TypeDialog Platform Config Guide » Exporting to Service Formats","id":"2560","title":"Exporting to Service Formats"},"2561":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Updating Configuration","id":"2561","title":"Updating Configuration"},"2562":{"body":"Edit source config : vim workspace_librecloud/config/config.ncl Validate changes : nickel typecheck workspace_librecloud/config/config.ncl Re-export to TOML : provisioning config export Restart affected service (if needed): provisioning restart orchestrator","breadcrumbs":"TypeDialog Platform Config Guide » Change a Setting","id":"2562","title":"Change a Setting"},"2563":{"body":"If you prefer interactive updating: # Re-run TypeDialog form (overwrites config.ncl)\\nprovisioning config platform orchestrator # Or edit via TypeDialog with existing values\\ntypedialog form .typedialog/provisioning/platform/orchestrator/form.toml","breadcrumbs":"TypeDialog Platform Config Guide » Using TypeDialog to Update","id":"2563","title":"Using TypeDialog to Update"},"2564":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Troubleshooting","id":"2564","title":"Troubleshooting"},"2565":{"body":"Problem : Failed to parse config file Solution : Check form.toml syntax and verify required fields are present (name, description, locales_path, templates_path) head -10 .typedialog/provisioning/platform/orchestrator/form.toml","breadcrumbs":"TypeDialog Platform Config Guide » Form Won\'t Load","id":"2565","title":"Form Won\'t Load"},"2566":{"body":"Problem : Nickel configuration validation failed Solution : Check for syntax errors and correct field names nickel typecheck workspace_librecloud/config/config.ncl 2>&1 | less Common issues: Missing closing braces, incorrect field names, wrong data types","breadcrumbs":"TypeDialog Platform Config Guide » Validation Fails","id":"2566","title":"Validation Fails"},"2567":{"body":"Problem : Generated TOML files are empty Solution : Verify config.ncl exports to JSON and check all required sections exist nickel export --format json workspace_librecloud/config/config.ncl | head -20","breadcrumbs":"TypeDialog Platform Config Guide » Export Creates Empty Files","id":"2567","title":"Export Creates Empty Files"},"2568":{"body":"Problem : Changes don\'t take effect Solution : Verify export succeeded: ls -lah workspace_librecloud/config/generated/platform/ Check service path: provisioning start orchestrator --check Restart service: provisioning restart orchestrator","breadcrumbs":"TypeDialog Platform Config Guide » Services Don\'t Use New Config","id":"2568","title":"Services Don\'t Use New Config"},"2569":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Configuration Examples","id":"2569","title":"Configuration Examples"},"257":{"body":"","breadcrumbs":"Setup System Guide » Quick Start","id":"257","title":"Quick Start"},"2570":{"body":"{ workspace = { name = \\"dev\\", path = \\"/Users/dev/workspace\\", description = \\"Development workspace\\" }, providers = { local = { enabled = true, base_path = \\"/opt/vms\\" }, upcloud = { enabled = false }, aws = { enabled = false } }, platform = { orchestrator = { enabled = true, server = { host = \\"127.0.0.1\\", port = 9090 }, storage = { type = \\"filesystem\\" }, logging = { level = \\"debug\\", format = \\"json\\" } }, kms = { enabled = true, backend = \\"age\\" } }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » Development Setup","id":"2570","title":"Development Setup"},"2571":{"body":"{ workspace = { name = \\"prod\\", path = \\"/opt/provisioning/prod\\", description = \\"Production workspace\\" }, providers = { upcloud = { enabled = true, api_user = \\"{{env.UPCLOUD_USER}}\\", api_password = \\"{{kms.decrypt(\'upcloud_prod\')}}\\", default_zone = \\"de-fra1\\" }, aws = { enabled = false }, local = { enabled = false } }, platform = { orchestrator = { enabled = true, server = { host = \\"0.0.0.0\\", port = 9090, workers = 8 }, storage = { type = \\"surrealdb-server\\", url = \\"ws://surreal.internal:8000\\" }, monitoring = { enabled = true, metrics_interval_seconds = 30 }, logging = { level = \\"info\\", format = \\"json\\" } }, kms = { enabled = true, backend = \\"vault\\", url = \\"https://vault.internal:8200\\" } }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » Production Setup","id":"2571","title":"Production Setup"},"2572":{"body":"{ workspace = { name = \\"multi\\", path = \\"/opt/multi\\", description = \\"Multi-cloud workspace\\" }, providers = { upcloud = { enabled = true, api_user = \\"{{env.UPCLOUD_USER}}\\", default_zone = \\"de-fra1\\", zones = [\\"de-fra1\\", \\"us-nyc1\\", \\"nl-ams1\\"] }, aws = { enabled = true, access_key = \\"{{env.AWS_ACCESS_KEY_ID}}\\" }, local = { enabled = true, base_path = \\"/opt/local-vms\\" } }, platform = { orchestrator = { enabled = true, multi_workspace = false, storage = { type = \\"filesystem\\" } }, kms = { enabled = true, backend = \\"rustyvault\\" } }\\n}","breadcrumbs":"TypeDialog Platform Config Guide » Multi-Provider Setup","id":"2572","title":"Multi-Provider Setup"},"2573":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Best Practices","id":"2573","title":"Best Practices"},"2574":{"body":"Start with TypeDialog forms for the best experience: provisioning config platform orchestrator","breadcrumbs":"TypeDialog Platform Config Guide » 1. Use TypeDialog for Initial Setup","id":"2574","title":"1. Use TypeDialog for Initial Setup"},"2575":{"body":"Only edit the source .ncl file, not the generated TOML files. Correct : vim workspace_librecloud/config/config.ncl Wrong : vim workspace_librecloud/config/generated/platform/orchestrator.toml","breadcrumbs":"TypeDialog Platform Config Guide » 2. Never Edit Generated Files","id":"2575","title":"2. Never Edit Generated Files"},"2576":{"body":"Always validate before deploying changes: nickel typecheck workspace_librecloud/config/config.ncl\\nprovisioning config export","breadcrumbs":"TypeDialog Platform Config Guide » 3. Validate Before Deploy","id":"2576","title":"3. Validate Before Deploy"},"2577":{"body":"Never hardcode credentials in config. Reference environment variables or KMS: Wrong : api_password = \\"my-password\\" Correct : api_password = \\"{{env.UPCLOUD_PASSWORD}}\\" Better : api_password = \\"{{kms.decrypt(\'upcloud_key\')}}\\"","breadcrumbs":"TypeDialog Platform Config Guide » 4. Use Environment Variables for Secrets","id":"2577","title":"4. Use Environment Variables for Secrets"},"2578":{"body":"Add comments explaining custom settings in the Nickel file.","breadcrumbs":"TypeDialog Platform Config Guide » 5. Document Changes","id":"2578","title":"5. Document Changes"},"2579":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Related Documentation","id":"2579","title":"Related Documentation"},"258":{"body":"Nushell 0.109.0+ bash One deployment tool: Docker, Kubernetes, SSH, or systemd Optional: KCL, SOPS, Age","breadcrumbs":"Setup System Guide » Prerequisites","id":"258","title":"Prerequisites"},"2580":{"body":"Configuration System : See CLAUDE.md#configuration-file-format-selection Migration Guide : See provisioning/config/README.md#migration-strategy Schema Reference : See provisioning/schemas/ Nickel Language : See ADR-011 in docs/architecture/adr/","breadcrumbs":"TypeDialog Platform Config Guide » Core Resources","id":"2580","title":"Core Resources"},"2581":{"body":"Platform Services Overview : See provisioning/platform/*/README.md Core Services (Phases 8-12): orchestrator, control-center, mcp-server New Services (Phases 13-19): vault-service: Secrets management and encryption extension-registry: Extension distribution via Gitea/OCI rag: Retrieval-Augmented Generation system ai-service: AI model integration with DAG workflows provisioning-daemon: Background provisioning operations Note : Installer is a distribution tool (provisioning/tools/distribution/create-installer.nu), not a platform service configurable via TypeDialog.","breadcrumbs":"TypeDialog Platform Config Guide » Platform Services","id":"2581","title":"Platform Services"},"2582":{"body":"TypeDialog Forms (Interactive UI): provisioning/.typedialog/platform/forms/ Nickel Schemas (Type Definitions): provisioning/schemas/platform/schemas/ Default Values (Base Configuration): provisioning/schemas/platform/defaults/ Validators (Business Logic): provisioning/schemas/platform/validators/ Deployment Modes (Presets): provisioning/schemas/platform/defaults/deployment/ Rust Integration : provisioning/platform/crates/*/src/config.rs","breadcrumbs":"TypeDialog Platform Config Guide » Public Definition Locations","id":"2582","title":"Public Definition Locations"},"2583":{"body":"","breadcrumbs":"TypeDialog Platform Config Guide » Getting Help","id":"2583","title":"Getting Help"},"2584":{"body":"Get detailed error messages and check available fields: nickel typecheck workspace_librecloud/config/config.ncl 2>&1 | less\\ngrep \\"prompt =\\" .typedialog/provisioning/platform/orchestrator/form.toml","breadcrumbs":"TypeDialog Platform Config Guide » Validation Errors","id":"2584","title":"Validation Errors"},"2585":{"body":"# Show all available config commands\\nprovisioning config --help # Show help for specific service\\nprovisioning config platform --help # List providers and services\\nprovisioning config providers list\\nprovisioning config services list","breadcrumbs":"TypeDialog Platform Config Guide » Configuration Questions","id":"2585","title":"Configuration Questions"},"2586":{"body":"# Validate without deploying\\nnickel typecheck workspace_librecloud/config/config.ncl # Export to see generated config\\nprovisioning config export # Check generated files\\nls -la workspace_librecloud/config/generated/","breadcrumbs":"TypeDialog Platform Config Guide » Test Configuration","id":"2586","title":"Test Configuration"},"2587":{"body":"This document provides comprehensive guidance on creating providers, task services, and clusters for provisioning, including templates, testing frameworks, publishing, and best practices.","breadcrumbs":"Overview » Extension Development Guide","id":"2587","title":"Extension Development Guide"},"2588":{"body":"Overview Extension Types Provider Development Task Service Development Cluster Development Testing and Validation Publishing and Distribution Best Practices Troubleshooting","breadcrumbs":"Overview » Table of Contents","id":"2588","title":"Table of Contents"},"2589":{"body":"Provisioning supports three types of extensions that enable customization and expansion of functionality: Providers : Cloud provider implementations for resource management Task Services : Infrastructure service components (databases, monitoring, etc.) Clusters : Complete deployment solutions combining multiple services Key Features : Template-Based Development : Comprehensive templates for all extension types Workspace Integration : Extensions developed in isolated workspace environments Configuration-Driven : KCL schemas for type-safe configuration Version Management : GitHub integration for version tracking Testing Framework : Comprehensive testing and validation tools Hot Reloading : Development-time hot reloading support Location : workspace/extensions/","breadcrumbs":"Overview » Overview","id":"2589","title":"Overview"},"259":{"body":"# Install provisioning\\ncurl -sSL https://install.provisioning.dev | bash # Run setup wizard\\nprovisioning setup system --interactive # Create workspace\\nprovisioning setup workspace myproject # Start deploying\\nprovisioning server create","breadcrumbs":"Setup System Guide » 30-Second Setup","id":"259","title":"30-Second Setup"},"2590":{"body":"","breadcrumbs":"Overview » Extension Types","id":"2590","title":"Extension Types"},"2591":{"body":"Extension Ecosystem\\n├── Providers # Cloud resource management\\n│ ├── AWS # Amazon Web Services\\n│ ├── UpCloud # UpCloud platform\\n│ ├── Local # Local development\\n│ └── Custom # User-defined providers\\n├── Task Services # Infrastructure components\\n│ ├── Kubernetes # Container orchestration\\n│ ├── Database Services # PostgreSQL, MongoDB, etc.\\n│ ├── Monitoring # Prometheus, Grafana, etc.\\n│ ├── Networking # Cilium, CoreDNS, etc.\\n│ └── Custom Services # User-defined services\\n└── Clusters # Complete solutions ├── Web Stack # Web application deployment ├── CI/CD Pipeline # Continuous integration/deployment ├── Data Platform # Data processing and analytics └── Custom Clusters # User-defined clusters","breadcrumbs":"Overview » Extension Architecture","id":"2591","title":"Extension Architecture"},"2592":{"body":"Discovery Order : workspace/extensions/{type}/{user}/{name} - User-specific extensions workspace/extensions/{type}/{name} - Workspace shared extensions workspace/extensions/{type}/template - Templates Core system paths (fallback) Path Resolution : # Automatic extension discovery\\nuse workspace/lib/path-resolver.nu # Find provider extension\\nlet provider_path = (path-resolver resolve_extension \\"providers\\" \\"my-aws-provider\\") # List all available task services\\nlet taskservs = (path-resolver list_extensions \\"taskservs\\" --include-core) # Resolve cluster definition\\nlet cluster_path = (path-resolver resolve_extension \\"clusters\\" \\"web-stack\\")","breadcrumbs":"Overview » Extension Discovery","id":"2592","title":"Extension Discovery"},"2593":{"body":"","breadcrumbs":"Overview » Provider Development","id":"2593","title":"Provider Development"},"2594":{"body":"Providers implement cloud resource management through a standardized interface that supports multiple cloud platforms while maintaining consistent APIs. Core Responsibilities : Authentication : Secure API authentication and credential management Resource Management : Server creation, deletion, and lifecycle management Configuration : Provider-specific settings and validation Error Handling : Comprehensive error handling and recovery Rate Limiting : API rate limiting and retry logic","breadcrumbs":"Overview » Provider Architecture","id":"2594","title":"Provider Architecture"},"2595":{"body":"1. Initialize from Template : # Copy provider template\\ncp -r workspace/extensions/providers/template workspace/extensions/providers/my-cloud # Navigate to new provider\\ncd workspace/extensions/providers/my-cloud 2. Update Configuration : # Initialize provider metadata\\nnu init-provider.nu \\\\ --name \\"my-cloud\\" \\\\ --display-name \\"MyCloud Provider\\" \\\\ --author \\"$USER\\" \\\\ --description \\"MyCloud platform integration\\"","breadcrumbs":"Overview » Creating a New Provider","id":"2595","title":"Creating a New Provider"},"2596":{"body":"my-cloud/\\n├── README.md # Provider documentation\\n├── schemas/ # Nickel configuration schemas\\n│ ├── settings.ncl # Provider settings schema\\n│ ├── servers.ncl # Server configuration schema\\n│ ├── networks.ncl # Network configuration schema\\n│ └── manifest.toml # Nickel module dependencies\\n├── nulib/ # Nushell implementation\\n│ ├── provider.nu # Main provider interface\\n│ ├── servers/ # Server management\\n│ │ ├── create.nu # Server creation logic\\n│ │ ├── delete.nu # Server deletion logic\\n│ │ ├── list.nu # Server listing\\n│ │ ├── status.nu # Server status checking\\n│ │ └── utils.nu # Server utilities\\n│ ├── auth/ # Authentication\\n│ │ ├── client.nu # API client setup\\n│ │ ├── tokens.nu # Token management\\n│ │ └── validation.nu # Credential validation\\n│ └── utils/ # Provider utilities\\n│ ├── api.nu # API interaction helpers\\n│ ├── config.nu # Configuration helpers\\n│ └── validation.nu # Input validation\\n├── templates/ # Jinja2 templates\\n│ ├── server-config.j2 # Server configuration\\n│ ├── cloud-init.j2 # Cloud initialization\\n│ └── network-config.j2 # Network configuration\\n├── generate/ # Code generation\\n│ ├── server-configs.nu # Generate server configurations\\n│ └── infrastructure.nu # Generate infrastructure\\n└── tests/ # Testing framework ├── unit/ # Unit tests │ ├── test-auth.nu # Authentication tests │ ├── test-servers.nu # Server management tests │ └── test-validation.nu # Validation tests ├── integration/ # Integration tests │ ├── test-lifecycle.nu # Complete lifecycle tests │ └── test-api.nu # API integration tests └── mock/ # Mock data and services ├── api-responses.json # Mock API responses └── test-configs.toml # Test configurations","breadcrumbs":"Overview » Provider Structure","id":"2596","title":"Provider Structure"},"2597":{"body":"Main Provider Interface (nulib/provider.nu): #!/usr/bin/env nu\\n# MyCloud Provider Implementation # Provider metadata\\nexport const PROVIDER_NAME = \\"my-cloud\\"\\nexport const PROVIDER_VERSION = \\"1.0.0\\"\\nexport const API_VERSION = \\"v1\\" # Main provider initialization\\nexport def \\"provider init\\" [ --config-path: string = \\"\\" # Path to provider configuration --validate: bool = true # Validate configuration on init\\n] -> record { let config = if $config_path == \\"\\" { load_provider_config } else { open $config_path | from toml } if $validate { validate_provider_config $config } # Initialize API client let client = (setup_api_client $config) # Return provider instance { name: $PROVIDER_NAME, version: $PROVIDER_VERSION, config: $config, client: $client, initialized: true }\\n} # Server management interface\\nexport def \\"provider create-server\\" [ name: string # Server name plan: string # Server plan/size --zone: string = \\"auto\\" # Deployment zone --template: string = \\"ubuntu22\\" # OS template --dry-run: bool = false # Show what would be created\\n] -> record { let provider = (provider init) # Validate inputs if ($name | str length) == 0 { error make {msg: \\"Server name cannot be empty\\"} } if not (is_valid_plan $plan) { error make {msg: $\\"Invalid server plan: ($plan)\\"} } # Build server configuration let server_config = { name: $name, plan: $plan, zone: (resolve_zone $zone), template: $template, provider: $PROVIDER_NAME } if $dry_run { return {action: \\"create\\", config: $server_config, status: \\"dry-run\\"} } # Create server via API let result = try { create_server_api $server_config $provider.client } catch { |e| error make { msg: $\\"Server creation failed: ($e.msg)\\", help: \\"Check provider credentials and quota limits\\" } } { server: $name, status: \\"created\\", id: $result.id, ip_address: $result.ip_address, created_at: (date now) }\\n} export def \\"provider delete-server\\" [ name: string # Server name or ID --force: bool = false # Force deletion without confirmation\\n] -> record { let provider = (provider init) # Find server let server = try { find_server $name $provider.client } catch { error make {msg: $\\"Server not found: ($name)\\"} } if not $force { let confirm = (input $\\"Delete server \'($name)\' (y/N)? \\") if $confirm != \\"y\\" and $confirm != \\"yes\\" { return {action: \\"delete\\", server: $name, status: \\"cancelled\\"} } } # Delete server let result = try { delete_server_api $server.id $provider.client } catch { |e| error make {msg: $\\"Server deletion failed: ($e.msg)\\"} } { server: $name, status: \\"deleted\\", deleted_at: (date now) }\\n} export def \\"provider list-servers\\" [ --zone: string = \\"\\" # Filter by zone --status: string = \\"\\" # Filter by status --format: string = \\"table\\" # Output format: table, json, yaml\\n] -> list { let provider = (provider init) let servers = try { list_servers_api $provider.client } catch { |e| error make {msg: $\\"Failed to list servers: ($e.msg)\\"} } # Apply filters let filtered = $servers | if $zone != \\"\\" { filter {|s| $s.zone == $zone} } else { $in } | if $status != \\"\\" { filter {|s| $s.status == $status} } else { $in } match $format { \\"json\\" => ($filtered | to json), \\"yaml\\" => ($filtered | to yaml), _ => $filtered }\\n} # Provider testing interface\\nexport def \\"provider test\\" [ --test-type: string = \\"basic\\" # Test type: basic, full, integration\\n] -> record { match $test_type { \\"basic\\" => test_basic_functionality, \\"full\\" => test_full_functionality, \\"integration\\" => test_integration, _ => (error make {msg: $\\"Unknown test type: ($test_type)\\"}) }\\n} Authentication Module (nulib/auth/client.nu): # API client setup and authentication export def setup_api_client [config: record] -> record { # Validate credentials if not (\\"api_key\\" in $config) { error make {msg: \\"API key not found in configuration\\"} } if not (\\"api_secret\\" in $config) { error make {msg: \\"API secret not found in configuration\\"} } # Setup HTTP client with authentication let client = { base_url: ($config.api_url? | default \\"https://api.my-cloud.com\\"), api_key: $config.api_key, api_secret: $config.api_secret, timeout: ($config.timeout? | default 30), retries: ($config.retries? | default 3) } # Test authentication try { test_auth_api $client } catch { |e| error make { msg: $\\"Authentication failed: ($e.msg)\\", help: \\"Check your API credentials and network connectivity\\" } } $client\\n} def test_auth_api [client: record] -> bool { let response = http get $\\"($client.base_url)/auth/test\\" --headers { \\"Authorization\\": $\\"Bearer ($client.api_key)\\", \\"Content-Type\\": \\"application/json\\" } $response.status == \\"success\\"\\n} Nickel Configuration Schema (schemas/settings.ncl): # MyCloud Provider Configuration Schema let MyCloudConfig = { # MyCloud provider configuration api_url | string | default = \\"https://api.my-cloud.com\\", api_key | string, api_secret | string, timeout | number | default = 30, retries | number | default = 3, # Rate limiting rate_limit | { requests_per_minute | number | default = 60, burst_size | number | default = 10, } | default = {}, # Default settings defaults | { zone | string | default = \\"us-east-1\\", template | string | default = \\"ubuntu-22.04\\", network | string | default = \\"default\\", } | default = {},\\n} in\\nMyCloudConfig let MyCloudServerConfig = { # MyCloud server configuration name | string, plan | string, zone | string | optional, template | string | default = \\"ubuntu-22.04\\", storage | number | default = 25, tags | { } | default = {}, # Network configuration network | { vpc_id | string | optional, subnet_id | string | optional, public_ip | bool | default = true, firewall_rules | array | default = [], } | optional,\\n} in\\nMyCloudServerConfig let FirewallRule = { # Firewall rule configuration port | (number | string), protocol | string | default = \\"tcp\\", source | string | default = \\"0.0.0.0/0\\", description | string | optional,\\n} in\\nFirewallRule","breadcrumbs":"Overview » Provider Implementation","id":"2597","title":"Provider Implementation"},"2598":{"body":"Unit Testing (tests/unit/test-servers.nu): # Unit tests for server management use ../../../nulib/provider.nu def test_server_creation [] { # Test valid server creation let result = (provider create-server \\"test-server\\" \\"small\\" --dry-run) assert ($result.action == \\"create\\") assert ($result.config.name == \\"test-server\\") assert ($result.config.plan == \\"small\\") assert ($result.status == \\"dry-run\\") print \\"✅ Server creation test passed\\"\\n} def test_invalid_server_name [] { # Test invalid server name try { provider create-server \\"\\" \\"small\\" --dry-run assert false \\"Should have failed with empty name\\" } catch { |e| assert ($e.msg | str contains \\"Server name cannot be empty\\") } print \\"✅ Invalid server name test passed\\"\\n} def test_invalid_plan [] { # Test invalid server plan try { provider create-server \\"test\\" \\"invalid-plan\\" --dry-run assert false \\"Should have failed with invalid plan\\" } catch { |e| assert ($e.msg | str contains \\"Invalid server plan\\") } print \\"✅ Invalid plan test passed\\"\\n} def main [] { print \\"Running server management unit tests...\\" test_server_creation test_invalid_server_name test_invalid_plan print \\"✅ All server management tests passed\\"\\n} Integration Testing (tests/integration/test-lifecycle.nu): # Integration tests for complete server lifecycle use ../../../nulib/provider.nu def test_complete_lifecycle [] { let test_server = $\\"test-server-(date now | format date \'%Y%m%d%H%M%S\')\\" try { # Test server creation (dry run) let create_result = (provider create-server $test_server \\"small\\" --dry-run) assert ($create_result.status == \\"dry-run\\") # Test server listing let servers = (provider list-servers --format json) assert ($servers | length) >= 0 # Test provider info let provider_info = (provider init) assert ($provider_info.name == \\"my-cloud\\") assert $provider_info.initialized print $\\"✅ Complete lifecycle test passed for ($test_server)\\" } catch { |e| print $\\"❌ Integration test failed: ($e.msg)\\" exit 1 }\\n} def main [] { print \\"Running provider integration tests...\\" test_complete_lifecycle print \\"✅ All integration tests passed\\"\\n}","breadcrumbs":"Overview » Provider Testing","id":"2598","title":"Provider Testing"},"2599":{"body":"","breadcrumbs":"Overview » Task Service Development","id":"2599","title":"Task Service Development"},"26":{"body":"Containerized testing Multi-node cluster simulation Topology templates Automated cleanup","breadcrumbs":"Home » ✅ Test Environments","id":"26","title":"✅ Test Environments"},"260":{"body":"macOS : ~/Library/Application Support/provisioning/ Linux : ~/.config/provisioning/ Windows : %APPDATA%/provisioning/","breadcrumbs":"Setup System Guide » Configuration Paths","id":"260","title":"Configuration Paths"},"2600":{"body":"Task services are infrastructure components that can be deployed and managed across different environments. They provide standardized interfaces for installation, configuration, and lifecycle management. Core Responsibilities : Installation : Service deployment and setup Configuration : Dynamic configuration management Health Checking : Service status monitoring Version Management : Automatic version updates from GitHub Integration : Integration with other services and clusters","breadcrumbs":"Overview » Task Service Architecture","id":"2600","title":"Task Service Architecture"},"2601":{"body":"1. Initialize from Template : # Copy task service template\\ncp -r workspace/extensions/taskservs/template workspace/extensions/taskservs/my-service # Navigate to new service\\ncd workspace/extensions/taskservs/my-service 2. Initialize Service : # Initialize service metadata\\nnu init-service.nu \\\\ --name \\"my-service\\" \\\\ --display-name \\"My Custom Service\\" \\\\ --type \\"database\\" \\\\ --github-repo \\"myorg/my-service\\"","breadcrumbs":"Overview » Creating a New Task Service","id":"2601","title":"Creating a New Task Service"},"2602":{"body":"my-service/\\n├── README.md # Service documentation\\n├── schemas/ # Nickel schemas\\n│ ├── version.ncl # Version and GitHub integration\\n│ ├── config.ncl # Service configuration schema\\n│ └── manifest.toml # Module dependencies\\n├── nushell/ # Nushell implementation\\n│ ├── taskserv.nu # Main service interface\\n│ ├── install.nu # Installation logic\\n│ ├── uninstall.nu # Removal logic\\n│ ├── config.nu # Configuration management\\n│ ├── status.nu # Status and health checking\\n│ ├── versions.nu # Version management\\n│ └── utils.nu # Service utilities\\n├── templates/ # Jinja2 templates\\n│ ├── deployment.yaml.j2 # Kubernetes deployment\\n│ ├── service.yaml.j2 # Kubernetes service\\n│ ├── configmap.yaml.j2 # Configuration\\n│ ├── install.sh.j2 # Installation script\\n│ └── systemd.service.j2 # Systemd service\\n├── manifests/ # Static manifests\\n│ ├── rbac.yaml # RBAC definitions\\n│ ├── pvc.yaml # Persistent volume claims\\n│ └── ingress.yaml # Ingress configuration\\n├── generate/ # Code generation\\n│ ├── manifests.nu # Generate Kubernetes manifests\\n│ ├── configs.nu # Generate configurations\\n│ └── docs.nu # Generate documentation\\n└── tests/ # Testing framework ├── unit/ # Unit tests ├── integration/ # Integration tests └── fixtures/ # Test fixtures and data","breadcrumbs":"Overview » Task Service Structure","id":"2602","title":"Task Service Structure"},"2603":{"body":"Main Service Interface (nushell/taskserv.nu): #!/usr/bin/env nu\\n# My Custom Service Task Service Implementation export const SERVICE_NAME = \\"my-service\\"\\nexport const SERVICE_TYPE = \\"database\\"\\nexport const SERVICE_VERSION = \\"1.0.0\\" # Service installation\\nexport def \\"taskserv install\\" [ target: string # Target server or cluster --config: string = \\"\\" # Custom configuration file --dry-run: bool = false # Show what would be installed --wait: bool = true # Wait for installation to complete\\n] -> record { # Load service configuration let service_config = if $config != \\"\\" { open $config | from toml } else { load_default_config } # Validate target environment let target_info = validate_target $target if not $target_info.valid { error make {msg: $\\"Invalid target: ($target_info.reason)\\"} } if $dry_run { let install_plan = generate_install_plan $target $service_config return { action: \\"install\\", service: $SERVICE_NAME, target: $target, plan: $install_plan, status: \\"dry-run\\" } } # Perform installation print $\\"Installing ($SERVICE_NAME) on ($target)...\\" let install_result = try { install_service $target $service_config $wait } catch { |e| error make { msg: $\\"Installation failed: ($e.msg)\\", help: \\"Check target connectivity and permissions\\" } } { service: $SERVICE_NAME, target: $target, status: \\"installed\\", version: $install_result.version, endpoint: $install_result.endpoint?, installed_at: (date now) }\\n} # Service removal\\nexport def \\"taskserv uninstall\\" [ target: string # Target server or cluster --force: bool = false # Force removal without confirmation --cleanup-data: bool = false # Remove persistent data\\n] -> record { let target_info = validate_target $target if not $target_info.valid { error make {msg: $\\"Invalid target: ($target_info.reason)\\"} } # Check if service is installed let status = get_service_status $target if $status.status != \\"installed\\" { error make {msg: $\\"Service ($SERVICE_NAME) is not installed on ($target)\\"} } if not $force { let confirm = (input $\\"Remove ($SERVICE_NAME) from ($target)? (y/N) \\") if $confirm != \\"y\\" and $confirm != \\"yes\\" { return {action: \\"uninstall\\", service: $SERVICE_NAME, status: \\"cancelled\\"} } } print $\\"Removing ($SERVICE_NAME) from ($target)...\\" let removal_result = try { uninstall_service $target $cleanup_data } catch { |e| error make {msg: $\\"Removal failed: ($e.msg)\\"} } { service: $SERVICE_NAME, target: $target, status: \\"uninstalled\\", data_removed: $cleanup_data, uninstalled_at: (date now) }\\n} # Service status checking\\nexport def \\"taskserv status\\" [ target: string # Target server or cluster --detailed: bool = false # Show detailed status information\\n] -> record { let target_info = validate_target $target if not $target_info.valid { error make {msg: $\\"Invalid target: ($target_info.reason)\\"} } let status = get_service_status $target if $detailed { let health = check_service_health $target let metrics = get_service_metrics $target $status | merge { health: $health, metrics: $metrics, checked_at: (date now) } } else { $status }\\n} # Version management\\nexport def \\"taskserv check-updates\\" [ --target: string = \\"\\" # Check updates for specific target\\n] -> record { let current_version = get_current_version let latest_version = get_latest_version_from_github let update_available = $latest_version != $current_version { service: $SERVICE_NAME, current_version: $current_version, latest_version: $latest_version, update_available: $update_available, target: $target, checked_at: (date now) }\\n} export def \\"taskserv update\\" [ target: string # Target to update --version: string = \\"latest\\" # Specific version to update to --dry-run: bool = false # Show what would be updated\\n] -> record { let current_status = (taskserv status $target) if $current_status.status != \\"installed\\" { error make {msg: $\\"Service not installed on ($target)\\"} } let target_version = if $version == \\"latest\\" { get_latest_version_from_github } else { $version } if $dry_run { return { action: \\"update\\", service: $SERVICE_NAME, target: $target, from_version: $current_status.version, to_version: $target_version, status: \\"dry-run\\" } } print $\\"Updating ($SERVICE_NAME) on ($target) to version ($target_version)...\\" let update_result = try { update_service $target $target_version } catch { |e| error make {msg: $\\"Update failed: ($e.msg)\\"} } { service: $SERVICE_NAME, target: $target, status: \\"updated\\", from_version: $current_status.version, to_version: $target_version, updated_at: (date now) }\\n} # Service testing\\nexport def \\"taskserv test\\" [ target: string = \\"local\\" # Target for testing --test-type: string = \\"basic\\" # Test type: basic, integration, full\\n] -> record { match $test_type { \\"basic\\" => test_basic_functionality $target, \\"integration\\" => test_integration $target, \\"full\\" => test_full_functionality $target, _ => (error make {msg: $\\"Unknown test type: ($test_type)\\"}) }\\n} Version Configuration (schemas/version.ncl): # Version management with GitHub integration let version_config = { service_name = \\"my-service\\", # GitHub repository for version checking github = { owner = \\"myorg\\", repo = \\"my-service\\", # Release configuration release = { tag_prefix = \\"v\\", prerelease = false, draft = false, }, # Asset patterns for different platforms assets = { linux_amd64 = \\"my-service-{version}-linux-amd64.tar.gz\\", darwin_amd64 = \\"my-service-{version}-darwin-amd64.tar.gz\\", windows_amd64 = \\"my-service-{version}-windows-amd64.zip\\", }, }, # Version constraints and compatibility compatibility = { min_kubernetes_version = \\"1.20.0\\", max_kubernetes_version = \\"1.28.*\\", # Dependencies requires = { \\"cert-manager\\" = \\">=1.8.0\\", \\"ingress-nginx\\" = \\">=1.0.0\\", }, # Conflicts conflicts = { \\"old-my-service\\" = \\"*\\", }, }, # Installation configuration installation = { default_namespace = \\"my-service\\", create_namespace = true, # Resource requirements resources = { requests = { cpu = \\"100m\\", memory = \\"128Mi\\", }, limits = { cpu = \\"500m\\", memory = \\"512Mi\\", }, }, # Persistence persistence = { enabled = true, storage_class = \\"default\\", size = \\"10Gi\\", }, }, # Health check configuration health_check = { initial_delay_seconds = 30, period_seconds = 10, timeout_seconds = 5, failure_threshold = 3, # Health endpoints endpoints = { liveness = \\"/health/live\\", readiness = \\"/health/ready\\", }, },\\n} in\\nversion_config","breadcrumbs":"Overview » Task Service Implementation","id":"2603","title":"Task Service Implementation"},"2604":{"body":"","breadcrumbs":"Overview » Cluster Development","id":"2604","title":"Cluster Development"},"2605":{"body":"Clusters represent complete deployment solutions that combine multiple task services, providers, and configurations to create functional environments. Core Responsibilities : Service Orchestration : Coordinate multiple task service deployments Dependency Management : Handle service dependencies and startup order Configuration Management : Manage cross-service configuration Health Monitoring : Monitor overall cluster health Scaling : Handle cluster scaling operations","breadcrumbs":"Overview » Cluster Architecture","id":"2605","title":"Cluster Architecture"},"2606":{"body":"1. Initialize from Template : # Copy cluster template\\ncp -r workspace/extensions/clusters/template workspace/extensions/clusters/my-stack # Navigate to new cluster\\ncd workspace/extensions/clusters/my-stack 2. Initialize Cluster : # Initialize cluster metadata\\nnu init-cluster.nu \\\\ --name \\"my-stack\\" \\\\ --display-name \\"My Application Stack\\" \\\\ --type \\"web-application\\"","breadcrumbs":"Overview » Creating a New Cluster","id":"2606","title":"Creating a New Cluster"},"2607":{"body":"Main Cluster Interface (nushell/cluster.nu): #!/usr/bin/env nu\\n# My Application Stack Cluster Implementation export const CLUSTER_NAME = \\"my-stack\\"\\nexport const CLUSTER_TYPE = \\"web-application\\"\\nexport const CLUSTER_VERSION = \\"1.0.0\\" # Cluster creation\\nexport def \\"cluster create\\" [ target: string # Target infrastructure --config: string = \\"\\" # Custom configuration file --dry-run: bool = false # Show what would be created --wait: bool = true # Wait for cluster to be ready\\n] -> record { let cluster_config = if $config != \\"\\" { open $config | from toml } else { load_default_cluster_config } if $dry_run { let deployment_plan = generate_deployment_plan $target $cluster_config return { action: \\"create\\", cluster: $CLUSTER_NAME, target: $target, plan: $deployment_plan, status: \\"dry-run\\" } } print $\\"Creating cluster ($CLUSTER_NAME) on ($target)...\\" # Deploy services in dependency order let services = get_service_deployment_order $cluster_config.services let deployment_results = [] for service in $services { print $\\"Deploying service: ($service.name)\\" let result = try { deploy_service $service $target $wait } catch { |e| # Rollback on failure rollback_cluster $target $deployment_results error make {msg: $\\"Service deployment failed: ($e.msg)\\"} } $deployment_results = ($deployment_results | append $result) } # Configure inter-service communication configure_service_mesh $target $deployment_results { cluster: $CLUSTER_NAME, target: $target, status: \\"created\\", services: $deployment_results, created_at: (date now) }\\n} # Cluster deletion\\nexport def \\"cluster delete\\" [ target: string # Target infrastructure --force: bool = false # Force deletion without confirmation --cleanup-data: bool = false # Remove persistent data\\n] -> record { let cluster_status = get_cluster_status $target if $cluster_status.status != \\"running\\" { error make {msg: $\\"Cluster ($CLUSTER_NAME) is not running on ($target)\\"} } if not $force { let confirm = (input $\\"Delete cluster ($CLUSTER_NAME) from ($target)? (y/N) \\") if $confirm != \\"y\\" and $confirm != \\"yes\\" { return {action: \\"delete\\", cluster: $CLUSTER_NAME, status: \\"cancelled\\"} } } print $\\"Deleting cluster ($CLUSTER_NAME) from ($target)...\\" # Delete services in reverse dependency order let services = get_service_deletion_order $cluster_status.services let deletion_results = [] for service in $services { print $\\"Removing service: ($service.name)\\" let result = try { remove_service $service $target $cleanup_data } catch { |e| print $\\"Warning: Failed to remove service ($service.name): ($e.msg)\\" } $deletion_results = ($deletion_results | append $result) } { cluster: $CLUSTER_NAME, target: $target, status: \\"deleted\\", services_removed: $deletion_results, data_removed: $cleanup_data, deleted_at: (date now) }\\n}","breadcrumbs":"Overview » Cluster Implementation","id":"2607","title":"Cluster Implementation"},"2608":{"body":"","breadcrumbs":"Overview » Testing and Validation","id":"2608","title":"Testing and Validation"},"2609":{"body":"Test Types : Unit Tests : Individual function and module testing Integration Tests : Cross-component interaction testing End-to-End Tests : Complete workflow testing Performance Tests : Load and performance validation Security Tests : Security and vulnerability testing","breadcrumbs":"Overview » Testing Framework","id":"2609","title":"Testing Framework"},"261":{"body":"provisioning/\\n├── system.toml # System info (immutable)\\n├── user_preferences.toml # User settings (editable)\\n├── platform/ # Platform services\\n├── providers/ # Provider configs\\n└── workspaces/ # Workspace definitions └── myproject/ ├── config/ ├── infra/ └── auth.token","breadcrumbs":"Setup System Guide » Directory Structure","id":"261","title":"Directory Structure"},"2610":{"body":"Workspace Testing Tools : # Validate extension syntax and structure\\nnu workspace.nu tools validate-extension providers/my-cloud # Run extension unit tests\\nnu workspace.nu tools test-extension taskservs/my-service --test-type unit # Integration testing with real infrastructure\\nnu workspace.nu tools test-extension clusters/my-stack --test-type integration --target test-env # Performance testing\\nnu workspace.nu tools test-extension providers/my-cloud --test-type performance --duration 5m","breadcrumbs":"Overview » Extension Testing Commands","id":"2610","title":"Extension Testing Commands"},"2611":{"body":"Test Runner (tests/run-tests.nu): #!/usr/bin/env nu\\n# Automated test runner for extensions def main [ extension_type: string # Extension type: providers, taskservs, clusters extension_name: string # Extension name --test-types: string = \\"all\\" # Test types to run: unit, integration, e2e, all --target: string = \\"local\\" # Test target environment --verbose: bool = false # Verbose test output --parallel: bool = true # Run tests in parallel\\n] -> record { let extension_path = $\\"workspace/extensions/($extension_type)/($extension_name)\\" if not ($extension_path | path exists) { error make {msg: $\\"Extension not found: ($extension_path)\\"} } let test_types = if $test_types == \\"all\\" { [\\"unit\\", \\"integration\\", \\"e2e\\"] } else { $test_types | split row \\",\\" } print $\\"Running tests for ($extension_type)/($extension_name)...\\" let test_results = [] for test_type in $test_types { print $\\"Running ($test_type) tests...\\" let result = try { run_test_suite $extension_path $test_type $target $verbose } catch { |e| { test_type: $test_type, status: \\"failed\\", error: $e.msg, duration: 0 } } $test_results = ($test_results | append $result) } let total_tests = ($test_results | length) let passed_tests = ($test_results | where status == \\"passed\\" | length) let failed_tests = ($test_results | where status == \\"failed\\" | length) { extension: $\\"($extension_type)/($extension_name)\\", test_results: $test_results, summary: { total: $total_tests, passed: $passed_tests, failed: $failed_tests, success_rate: ($passed_tests / $total_tests * 100) }, completed_at: (date now) }\\n}","breadcrumbs":"Overview » Automated Testing","id":"2611","title":"Automated Testing"},"2612":{"body":"","breadcrumbs":"Overview » Publishing and Distribution","id":"2612","title":"Publishing and Distribution"},"2613":{"body":"Publishing Process : Validation : Comprehensive testing and validation Documentation : Complete documentation and examples Packaging : Create distribution packages Registry : Publish to extension registry Versioning : Semantic version tagging","breadcrumbs":"Overview » Extension Publishing","id":"2613","title":"Extension Publishing"},"2614":{"body":"# Validate extension for publishing\\nnu workspace.nu tools validate-for-publish providers/my-cloud # Create distribution package\\nnu workspace.nu tools package-extension providers/my-cloud --version 1.0.0 # Publish to registry\\nnu workspace.nu tools publish-extension providers/my-cloud --registry official # Tag version\\nnu workspace.nu tools tag-extension providers/my-cloud --version 1.0.0 --push","breadcrumbs":"Overview » Publishing Commands","id":"2614","title":"Publishing Commands"},"2615":{"body":"Registry Structure : Extension Registry\\n├── providers/\\n│ ├── aws/ # Official AWS provider\\n│ ├── upcloud/ # Official UpCloud provider\\n│ └── community/ # Community providers\\n├── taskservs/\\n│ ├── kubernetes/ # Official Kubernetes service\\n│ ├── databases/ # Database services\\n│ └── monitoring/ # Monitoring services\\n└── clusters/ ├── web-stacks/ # Web application stacks ├── data-platforms/ # Data processing platforms └── ci-cd/ # CI/CD pipelines","breadcrumbs":"Overview » Extension Registry","id":"2615","title":"Extension Registry"},"2616":{"body":"","breadcrumbs":"Overview » Best Practices","id":"2616","title":"Best Practices"},"2617":{"body":"Function Design : # Good: Single responsibility, clear parameters, comprehensive error handling\\nexport def \\"provider create-server\\" [ name: string # Server name (must be unique in region) plan: string # Server plan (see list-plans for options) --zone: string = \\"auto\\" # Deployment zone (auto-selects optimal zone) --dry-run: bool = false # Preview changes without creating resources\\n] -> record { # Returns creation result with server details # Validate inputs first if ($name | str length) == 0 { error make { msg: \\"Server name cannot be empty\\" help: \\"Provide a unique name for the server\\" } } # Implementation with comprehensive error handling # ...\\n} # Bad: Unclear parameters, no error handling\\ndef create [n, p] { # Missing validation and error handling api_call $n $p\\n} Configuration Management : # Good: Configuration-driven with validation\\ndef get_api_endpoint [provider: string] -> string { let config = get-config-value $\\"providers.($provider).api_url\\" if ($config | is-empty) { error make { msg: $\\"API URL not configured for provider ($provider)\\", help: $\\"Add \'api_url\' to providers.($provider) configuration\\" } } $config\\n} # Bad: Hardcoded values\\ndef get_api_endpoint [] { \\"https://api.provider.com\\" # Never hardcode!\\n}","breadcrumbs":"Overview » Code Quality","id":"2617","title":"Code Quality"},"2618":{"body":"Comprehensive Error Context : def create_server_with_context [name: string, config: record] -> record { try { # Validate configuration validate_server_config $config } catch { |e| error make { msg: $\\"Invalid server configuration: ($e.msg)\\", label: {text: \\"configuration error\\", span: $e.span?}, help: \\"Check configuration syntax and required fields\\" } } try { # Create server via API let result = api_create_server $name $config return $result } catch { |e| match $e.msg { $msg if ($msg | str contains \\"quota\\") => { error make { msg: $\\"Server creation failed: quota limit exceeded\\", help: \\"Contact support to increase quota or delete unused servers\\" } }, $msg if ($msg | str contains \\"auth\\") => { error make { msg: \\"Server creation failed: authentication error\\", help: \\"Check API credentials and permissions\\" } }, _ => { error make { msg: $\\"Server creation failed: ($e.msg)\\", help: \\"Check network connectivity and try again\\" } } } }\\n}","breadcrumbs":"Overview » Error Handling","id":"2618","title":"Error Handling"},"2619":{"body":"Test Organization : # Organize tests by functionality\\n# tests/unit/server-creation-test.nu def test_valid_server_creation [] { # Test valid cases with various inputs let valid_configs = [ {name: \\"test-1\\", plan: \\"small\\"}, {name: \\"test-2\\", plan: \\"medium\\"}, {name: \\"test-3\\", plan: \\"large\\"} ] for config in $valid_configs { let result = create_server $config.name $config.plan --dry-run assert ($result.status == \\"dry-run\\") assert ($result.config.name == $config.name) }\\n} def test_invalid_inputs [] { # Test error conditions let invalid_cases = [ {name: \\"\\", plan: \\"small\\", error: \\"empty name\\"}, {name: \\"test\\", plan: \\"invalid\\", error: \\"invalid plan\\"}, {name: \\"test with spaces\\", plan: \\"small\\", error: \\"invalid characters\\"} ] for case in $invalid_cases { try { create_server $case.name $case.plan --dry-run assert false $\\"Should have failed: ($case.error)\\" } catch { |e| # Verify specific error message assert ($e.msg | str contains $case.error) } }\\n}","breadcrumbs":"Overview » Testing Practices","id":"2619","title":"Testing Practices"},"262":{"body":"Run the interactive setup wizard: provisioning setup system --interactive The wizard guides you through: Welcome & Prerequisites Check Operating System Detection Configuration Path Selection Platform Services Setup Provider Selection Security Configuration Review & Confirmation","breadcrumbs":"Setup System Guide » Setup Wizard","id":"262","title":"Setup Wizard"},"2620":{"body":"Function Documentation : # Comprehensive function documentation\\ndef \\"provider create-server\\" [ name: string # Server name - must be unique within the provider plan: string # Server size plan (run \'provider list-plans\' for options) --zone: string = \\"auto\\" # Target zone - \'auto\' selects optimal zone based on load --template: string = \\"ubuntu22\\" # OS template - see \'provider list-templates\' for options --storage: int = 25 # Storage size in GB (minimum 10, maximum 2048) --dry-run: bool = false # Preview mode - shows what would be created without creating\\n] -> record { # Returns server creation details including ID and IP \\"\\"\\" Creates a new server instance with the specified configuration. This function provisions a new server using the provider\'s API, configures basic security settings, and returns the server details upon successful creation. Examples: # Create a small server with default settings provider create-server \\"web-01\\" \\"small\\" # Create with specific zone and storage provider create-server \\"db-01\\" \\"large\\" --zone \\"us-west-2\\" --storage 100 # Preview what would be created provider create-server \\"test\\" \\"medium\\" --dry-run Error conditions: - Invalid server name (empty, invalid characters) - Invalid plan (not in supported plans list) - Insufficient quota or permissions - Network connectivity issues Returns: Record with keys: server, status, id, ip_address, created_at \\"\\"\\" # Implementation...\\n}","breadcrumbs":"Overview » Documentation Standards","id":"2620","title":"Documentation Standards"},"2621":{"body":"","breadcrumbs":"Overview » Troubleshooting","id":"2621","title":"Troubleshooting"},"2622":{"body":"Extension Not Found Error : Extension \'my-provider\' not found # Solution: Check extension location and structure\\nls -la workspace/extensions/providers/my-provider\\nnu workspace/lib/path-resolver.nu resolve_extension \\"providers\\" \\"my-provider\\" # Validate extension structure\\nnu workspace.nu tools validate-extension providers/my-provider Configuration Errors Error : Invalid Nickel configuration # Solution: Validate Nickel syntax\\nnickel check workspace/extensions/providers/my-provider/schemas/ # Format Nickel files\\nnickel fmt workspace/extensions/providers/my-provider/schemas/ # Test with example data\\nnickel eval workspace/extensions/providers/my-provider/schemas/settings.ncl API Integration Issues Error : Authentication failed # Solution: Test credentials and connectivity\\ncurl -H \\"Authorization: Bearer $API_KEY\\" https://api.provider.com/auth/test # Debug API calls\\nexport PROVISIONING_DEBUG=true\\nexport PROVISIONING_LOG_LEVEL=debug\\nnu workspace/extensions/providers/my-provider/nulib/provider.nu test --test-type basic","breadcrumbs":"Overview » Common Development Issues","id":"2622","title":"Common Development Issues"},"2623":{"body":"Enable Extension Debugging : # Set debug environment\\nexport PROVISIONING_DEBUG=true\\nexport PROVISIONING_LOG_LEVEL=debug\\nexport PROVISIONING_WORKSPACE_USER=$USER # Run extension with debug\\nnu workspace/extensions/providers/my-provider/nulib/provider.nu create-server test-server small --dry-run","breadcrumbs":"Overview » Debug Mode","id":"2623","title":"Debug Mode"},"2624":{"body":"Extension Performance : # Profile extension performance\\ntime nu workspace/extensions/providers/my-provider/nulib/provider.nu list-servers # Monitor resource usage\\nnu workspace/tools/runtime-manager.nu monitor --duration 1m --interval 5s # Optimize API calls (use caching)\\nexport PROVISIONING_CACHE_ENABLED=true\\nexport PROVISIONING_CACHE_TTL=300 # 5 minutes This extension development guide provides a comprehensive framework for creating high-quality, maintainable extensions that integrate seamlessly with provisioning\'s architecture and workflows.","breadcrumbs":"Overview » Performance Optimization","id":"2624","title":"Performance Optimization"},"2625":{"body":"This guide will help you create custom providers, task services, and cluster configurations to extend provisioning for your specific needs.","breadcrumbs":"Extension Development » Extension Development Guide","id":"2625","title":"Extension Development Guide"},"2626":{"body":"Extension architecture and concepts Creating custom cloud providers Developing task services Building cluster configurations Publishing and sharing extensions Best practices and patterns Testing and validation","breadcrumbs":"Extension Development » What You\'ll Learn","id":"2626","title":"What You\'ll Learn"},"2627":{"body":"","breadcrumbs":"Extension Development » Extension Architecture","id":"2627","title":"Extension Architecture"},"2628":{"body":"Extension Type Purpose Examples Providers Cloud platform integrations Custom cloud, on-premises Task Services Software components Custom databases, monitoring Clusters Service orchestration Application stacks, platforms Templates Reusable configurations Standard deployments","breadcrumbs":"Extension Development » Extension Types","id":"2628","title":"Extension Types"},"2629":{"body":"my-extension/\\n├── schemas/ # Nickel schemas and models\\n│ ├── contracts.ncl # Type contracts\\n│ ├── providers/ # Provider definitions\\n│ ├── taskservs/ # Task service definitions\\n│ └── clusters/ # Cluster definitions\\n├── nulib/ # Nushell implementation\\n│ ├── providers/ # Provider logic\\n│ ├── taskservs/ # Task service logic\\n│ └── utils/ # Utility functions\\n├── templates/ # Configuration templates\\n├── tests/ # Test files\\n├── docs/ # Documentation\\n├── extension.toml # Extension metadata\\n└── README.md # Extension documentation","breadcrumbs":"Extension Development » Extension Structure","id":"2629","title":"Extension Structure"},"263":{"body":"","breadcrumbs":"Setup System Guide » Configuration Management","id":"263","title":"Configuration Management"},"2630":{"body":"extension.toml: [extension]\\nname = \\"my-custom-provider\\"\\nversion = \\"1.0.0\\"\\ndescription = \\"Custom cloud provider integration\\"\\nauthor = \\"Your Name \\"\\nlicense = \\"MIT\\" [compatibility]\\nprovisioning_version = \\">=1.0.0\\"\\nnickel_version = \\">=1.15.0\\" [provides]\\nproviders = [\\"custom-cloud\\"]\\ntaskservs = [\\"custom-database\\"]\\nclusters = [\\"custom-stack\\"] [dependencies]\\nextensions = []\\nsystem_packages = [\\"curl\\", \\"jq\\"] [configuration]\\nrequired_env = [\\"CUSTOM_CLOUD_API_KEY\\"]\\noptional_env = [\\"CUSTOM_CLOUD_REGION\\"]","breadcrumbs":"Extension Development » Extension Metadata","id":"2630","title":"Extension Metadata"},"2631":{"body":"","breadcrumbs":"Extension Development » Creating Custom Providers","id":"2631","title":"Creating Custom Providers"},"2632":{"body":"A provider handles: Authentication with cloud APIs Resource lifecycle management (create, read, update, delete) Provider-specific configurations Cost estimation and billing integration","breadcrumbs":"Extension Development » Provider Architecture","id":"2632","title":"Provider Architecture"},"2633":{"body":"schemas/providers/custom_cloud.ncl: # Custom cloud provider schema\\n{ CustomCloudConfig = { # Configuration for Custom Cloud provider # Authentication api_key | String, api_secret | String = \\"\\", region | String = \\"us-west-1\\", # Provider-specific settings project_id | String = \\"\\", organization | String = \\"\\", # API configuration api_url | String = \\"https://api.custom-cloud.com/v1\\", timeout | Number = 30, # Cost configuration billing_account | String = \\"\\", cost_center | String = \\"\\", }, CustomCloudServer = { # Server configuration for Custom Cloud # Instance configuration machine_type | String, zone | String, disk_size | Number = 20, disk_type | String = \\"ssd\\", # Network configuration vpc | String = \\"\\", subnet | String = \\"\\", external_ip | Bool = true, # Custom Cloud specific preemptible | Bool = false, labels | {String: String} = {}, }, # Provider capabilities provider_capabilities = { name = \\"custom-cloud\\", supports_auto_scaling = true, supports_load_balancing = true, supports_managed_databases = true, regions = [ \\"us-west-1\\", \\"us-west-2\\", \\"us-east-1\\", \\"eu-west-1\\" ], machine_types = [ \\"micro\\", \\"small\\", \\"medium\\", \\"large\\", \\"xlarge\\" ], },\\n}","breadcrumbs":"Extension Development » Step 1: Define Provider Schema","id":"2633","title":"Step 1: Define Provider Schema"},"2634":{"body":"nulib/providers/custom_cloud.nu: # Custom Cloud provider implementation # Provider initialization\\nexport def custom_cloud_init [] { # Validate environment variables if ($env.CUSTOM_CLOUD_API_KEY | is-empty) { error make { msg: \\"CUSTOM_CLOUD_API_KEY environment variable is required\\" } } # Set up provider context $env.CUSTOM_CLOUD_INITIALIZED = true\\n} # Create server instance\\nexport def custom_cloud_create_server [ server_config: record --check: bool = false # Dry run mode\\n] -> record { custom_cloud_init print $\\"Creating server: ($server_config.name)\\" if $check { return { action: \\"create\\" resource: \\"server\\" name: $server_config.name status: \\"planned\\" estimated_cost: (calculate_server_cost $server_config) } } # Make API call to create server let api_response = (custom_cloud_api_call \\"POST\\" \\"instances\\" $server_config) if ($api_response.status | str contains \\"error\\") { error make { msg: $\\"Failed to create server: ($api_response.message)\\" } } # Wait for server to be ready let server_id = $api_response.instance_id custom_cloud_wait_for_server $server_id \\"running\\" return { id: $server_id name: $server_config.name status: \\"running\\" ip_address: $api_response.ip_address created_at: (date now | format date \\"%Y-%m-%d %H:%M:%S\\") }\\n} # Delete server instance\\nexport def custom_cloud_delete_server [ server_name: string --keep_storage: bool = false\\n] -> record { custom_cloud_init let server = (custom_cloud_get_server $server_name) if ($server | is-empty) { error make { msg: $\\"Server not found: ($server_name)\\" } } print $\\"Deleting server: ($server_name)\\" # Delete the instance let delete_response = (custom_cloud_api_call \\"DELETE\\" $\\"instances/($server.id)\\" { keep_storage: $keep_storage }) return { action: \\"delete\\" resource: \\"server\\" name: $server_name status: \\"deleted\\" }\\n} # List servers\\nexport def custom_cloud_list_servers [] -> list { custom_cloud_init let response = (custom_cloud_api_call \\"GET\\" \\"instances\\" {}) return ($response.instances | each {|instance| { id: $instance.id name: $instance.name status: $instance.status machine_type: $instance.machine_type zone: $instance.zone ip_address: $instance.ip_address created_at: $instance.created_at } })\\n} # Get server details\\nexport def custom_cloud_get_server [server_name: string] -> record { let servers = (custom_cloud_list_servers) return ($servers | where name == $server_name | first)\\n} # Calculate estimated costs\\nexport def calculate_server_cost [server_config: record] -> float { # Cost calculation logic based on machine type let base_costs = { micro: 0.01 small: 0.05 medium: 0.10 large: 0.20 xlarge: 0.40 } let machine_cost = ($base_costs | get $server_config.machine_type) let storage_cost = ($server_config.disk_size | default 20) * 0.001 return ($machine_cost + $storage_cost)\\n} # Make API call to Custom Cloud\\ndef custom_cloud_api_call [ method: string endpoint: string data: record\\n] -> record { let api_url = ($env.CUSTOM_CLOUD_API_URL | default \\"https://api.custom-cloud.com/v1\\") let api_key = $env.CUSTOM_CLOUD_API_KEY let headers = { \\"Authorization\\": $\\"Bearer ($api_key)\\" \\"Content-Type\\": \\"application/json\\" } let url = $\\"($api_url)/($endpoint)\\" match $method { \\"GET\\" => { http get $url --headers $headers } \\"POST\\" => { http post $url --headers $headers ($data | to json) } \\"PUT\\" => { http put $url --headers $headers ($data | to json) } \\"DELETE\\" => { http delete $url --headers $headers } _ => { error make { msg: $\\"Unsupported HTTP method: ($method)\\" } } }\\n} # Wait for server to reach desired state\\ndef custom_cloud_wait_for_server [ server_id: string target_status: string --timeout: int = 300\\n] { let start_time = (date now) loop { let response = (custom_cloud_api_call \\"GET\\" $\\"instances/($server_id)\\" {}) let current_status = $response.status if $current_status == $target_status { print $\\"Server ($server_id) reached status: ($target_status)\\" break } let elapsed = ((date now) - $start_time) / 1000000000 # Convert to seconds if $elapsed > $timeout { error make { msg: $\\"Timeout waiting for server ($server_id) to reach ($target_status)\\" } } sleep 10sec print $\\"Waiting for server status: ($current_status) -> ($target_status)\\" }\\n}","breadcrumbs":"Extension Development » Step 2: Implement Provider Logic","id":"2634","title":"Step 2: Implement Provider Logic"},"2635":{"body":"nulib/providers/mod.nu: # Provider module exports\\nexport use custom_cloud.nu * # Provider registry\\nexport def get_provider_info [] -> record { { name: \\"custom-cloud\\" version: \\"1.0.0\\" capabilities: { servers: true load_balancers: true databases: false storage: true } regions: [\\"us-west-1\\", \\"us-west-2\\", \\"us-east-1\\", \\"eu-west-1\\"] auth_methods: [\\"api_key\\", \\"oauth\\"] }\\n}","breadcrumbs":"Extension Development » Step 3: Provider Registration","id":"2635","title":"Step 3: Provider Registration"},"2636":{"body":"","breadcrumbs":"Extension Development » Creating Custom Task Services","id":"2636","title":"Creating Custom Task Services"},"2637":{"body":"Task services handle: Software installation and configuration Service lifecycle management Health checking and monitoring Version management and updates","breadcrumbs":"Extension Development » Task Service Architecture","id":"2637","title":"Task Service Architecture"},"2638":{"body":"schemas/taskservs/custom_database.ncl: # Custom database task service\\n{ CustomDatabaseConfig = { # Configuration for Custom Database service # Database configuration version | String = \\"14.0\\", port | Number = 5432, max_connections | Number = 100, memory_limit | String = \\"512 MB\\", # Data configuration data_directory | String = \\"/var/lib/customdb\\", log_directory | String = \\"/var/log/customdb\\", # Replication replication | { enabled | Bool = false, mode | String = \\"async\\", replicas | Number = 1, } = {}, # Backup configuration backup | { enabled | Bool = true, schedule | String = \\"0 2 * * *\\", retention_days | Number = 7, storage_location | String = \\"local\\", } = {}, # Security ssl | { enabled | Bool = true, cert_file | String = \\"/etc/ssl/certs/customdb.crt\\", key_file | String = \\"/etc/ssl/private/customdb.key\\", } = {}, # Monitoring monitoring | { enabled | Bool = true, metrics_port | Number = 9187, log_level | String = \\"info\\", } = {}, }, # Service metadata service_metadata = { name = \\"custom-database\\", description = \\"Custom Database Server\\", version = \\"14.0\\", category = \\"database\\", dependencies = [\\"systemd\\"], supported_os = [\\"ubuntu\\", \\"debian\\", \\"centos\\", \\"rhel\\"], ports = [5432, 9187], data_directories = [\\"/var/lib/customdb\\"], },\\n}","breadcrumbs":"Extension Development » Step 1: Define Service Schema","id":"2638","title":"Step 1: Define Service Schema"},"2639":{"body":"nulib/taskservs/custom_database.nu: # Custom Database task service implementation # Install custom database\\nexport def install_custom_database [ config: record --check: bool = false\\n] -> record { print \\"Installing Custom Database...\\" if $check { return { action: \\"install\\" service: \\"custom-database\\" version: ($config.version | default \\"14.0\\") status: \\"planned\\" changes: [ \\"Install Custom Database packages\\" \\"Configure database server\\" \\"Start database service\\" \\"Set up monitoring\\" ] } } # Check prerequisites validate_prerequisites $config # Install packages install_packages $config # Configure service configure_service $config # Initialize database initialize_database $config # Set up monitoring if ($config.monitoring?.enabled | default true) { setup_monitoring $config } # Set up backups if ($config.backup?.enabled | default true) { setup_backups $config } # Start service start_service # Verify installation let status = (verify_installation $config) return { action: \\"install\\" service: \\"custom-database\\" version: ($config.version | default \\"14.0\\") status: $status.status endpoint: $\\"localhost:($config.port | default 5432)\\" data_directory: ($config.data_directory | default \\"/var/lib/customdb\\") }\\n} # Configure custom database\\nexport def configure_custom_database [ config: record\\n] { print \\"Configuring Custom Database...\\" # Generate configuration file let db_config = generate_config $config $db_config | save \\"/etc/customdb/customdb.conf\\" # Set up SSL if enabled if ($config.ssl?.enabled | default true) { setup_ssl $config } # Configure replication if enabled if ($config.replication?.enabled | default false) { setup_replication $config } # Restart service to apply configuration restart_service\\n} # Start service\\nexport def start_custom_database [] { print \\"Starting Custom Database service...\\" ^systemctl start customdb ^systemctl enable customdb\\n} # Stop service\\nexport def stop_custom_database [] { print \\"Stopping Custom Database service...\\" ^systemctl stop customdb\\n} # Check service status\\nexport def status_custom_database [] -> record { let systemd_status = (^systemctl is-active customdb | str trim) let port_check = (check_port 5432) let version = (get_database_version) return { service: \\"custom-database\\" status: $systemd_status port_accessible: $port_check version: $version uptime: (get_service_uptime) connections: (get_active_connections) }\\n} # Health check\\nexport def health_custom_database [] -> record { let status = (status_custom_database) let health_checks = [ { name: \\"Service Running\\" status: ($status.status == \\"active\\") message: $\\"Systemd status: ($status.status)\\" } { name: \\"Port Accessible\\" status: $status.port_accessible message: \\"Database port 5432 is accessible\\" } { name: \\"Database Responsive\\" status: (test_database_connection) message: \\"Database responds to queries\\" } ] let healthy = ($health_checks | all {|check| $check.status}) return { service: \\"custom-database\\" healthy: $healthy checks: $health_checks last_check: (date now | format date \\"%Y-%m-%d %H:%M:%S\\") }\\n} # Update service\\nexport def update_custom_database [ target_version: string\\n] -> record { print $\\"Updating Custom Database to version ($target_version)...\\" # Create backup before update backup_database \\"pre-update\\" # Stop service stop_custom_database # Update packages update_packages $target_version # Migrate database if needed migrate_database $target_version # Start service start_custom_database # Verify update let new_version = (get_database_version) return { action: \\"update\\" service: \\"custom-database\\" old_version: (get_previous_version) new_version: $new_version status: \\"completed\\" }\\n} # Remove service\\nexport def remove_custom_database [ --keep_data: bool = false\\n] -> record { print \\"Removing Custom Database...\\" # Stop service stop_custom_database # Remove packages ^apt remove --purge -y customdb-server customdb-client # Remove configuration rm -rf \\"/etc/customdb\\" # Remove data (optional) if not $keep_data { print \\"Removing database data...\\" rm -rf \\"/var/lib/customdb\\" rm -rf \\"/var/log/customdb\\" } return { action: \\"remove\\" service: \\"custom-database\\" data_preserved: $keep_data status: \\"completed\\" }\\n} # Helper functions def validate_prerequisites [config: record] { # Check operating system let os_info = (^lsb_release -is | str trim | str downcase) let supported_os = [\\"ubuntu\\", \\"debian\\"] if not ($os_info in $supported_os) { error make { msg: $\\"Unsupported OS: ($os_info). Supported: ($supported_os | str join \', \')\\" } } # Check system resources let memory_mb = (^free -m | lines | get 1 | split row \' \' | get 1 | into int) if $memory_mb < 512 { error make { msg: $\\"Insufficient memory: ($memory_mb)MB. Minimum 512 MB required.\\" } }\\n} def install_packages [config: record] { let version = ($config.version | default \\"14.0\\") # Update package list ^apt update # Install packages ^apt install -y $\\"customdb-server-($version)\\" $\\"customdb-client-($version)\\"\\n} def configure_service [config: record] { let config_content = generate_config $config $config_content | save \\"/etc/customdb/customdb.conf\\" # Set permissions ^chown -R customdb:customdb \\"/etc/customdb\\" ^chmod 600 \\"/etc/customdb/customdb.conf\\"\\n} def generate_config [config: record] -> string { let port = ($config.port | default 5432) let max_connections = ($config.max_connections | default 100) let memory_limit = ($config.memory_limit | default \\"512 MB\\") return $\\"\\n# Custom Database Configuration\\nport = ($port)\\nmax_connections = ($max_connections)\\nshared_buffers = ($memory_limit)\\ndata_directory = \'($config.data_directory | default \\"/var/lib/customdb\\")\'\\nlog_directory = \'($config.log_directory | default \\"/var/log/customdb\\")\' # Logging\\nlog_level = \'($config.monitoring?.log_level | default \\"info\\")\' # SSL Configuration\\nssl = ($config.ssl?.enabled | default true)\\nssl_cert_file = \'($config.ssl?.cert_file | default \\"/etc/ssl/certs/customdb.crt\\")\'\\nssl_key_file = \'($config.ssl?.key_file | default \\"/etc/ssl/private/customdb.key\\")\'\\n\\"\\n} def initialize_database [config: record] { print \\"Initializing database...\\" # Create data directory let data_dir = ($config.data_directory | default \\"/var/lib/customdb\\") mkdir $data_dir ^chown -R customdb:customdb $data_dir # Initialize database ^su - customdb -c $\\"customdb-initdb -D ($data_dir)\\"\\n} def setup_monitoring [config: record] { if ($config.monitoring?.enabled | default true) { print \\"Setting up monitoring...\\" # Install monitoring exporter ^apt install -y customdb-exporter # Configure exporter let exporter_config = $\\"\\nport: ($config.monitoring?.metrics_port | default 9187)\\ndatabase_url: postgresql://localhost:($config.port | default 5432)/postgres\\n\\" $exporter_config | save \\"/etc/customdb-exporter/config.yaml\\" # Start exporter ^systemctl enable customdb-exporter ^systemctl start customdb-exporter }\\n} def setup_backups [config: record] { if ($config.backup?.enabled | default true) { print \\"Setting up backups...\\" let schedule = ($config.backup?.schedule | default \\"0 2 * * *\\") let retention = ($config.backup?.retention_days | default 7) # Create backup script let backup_script = $\\"#!/bin/bash\\ncustomdb-dump --all-databases > /var/backups/customdb-$(date +%Y%m%d_%H%M%S).sql\\nfind /var/backups -name \'customdb-*.sql\' -mtime +($retention) -delete\\n\\" $backup_script | save \\"/usr/local/bin/customdb-backup.sh\\" ^chmod +x \\"/usr/local/bin/customdb-backup.sh\\" # Add to crontab $\\"($schedule) /usr/local/bin/customdb-backup.sh\\" | ^crontab -u customdb - }\\n} def test_database_connection [] -> bool { let result = (^customdb-cli -h localhost -c \\"SELECT 1;\\" | complete) return ($result.exit_code == 0)\\n} def get_database_version [] -> string { let result = (^customdb-cli -h localhost -c \\"SELECT version();\\" | complete) if ($result.exit_code == 0) { return ($result.stdout | lines | first | parse \\"Custom Database {version}\\" | get version.0) } else { return \\"unknown\\" }\\n} def check_port [port: int] -> bool { let result = (^nc -z localhost $port | complete) return ($result.exit_code == 0)\\n}","breadcrumbs":"Extension Development » Step 2: Implement Service Logic","id":"2639","title":"Step 2: Implement Service Logic"},"264":{"body":"Runtime Arguments (--flag value) Environment Variables (PROVISIONING_*) Workspace Configuration Workspace Authentication Token User Preferences (user_preferences.toml) Platform Configurations (platform/*.toml) Provider Configurations (providers/*.toml) System Configuration (system.toml) Built-in Defaults","breadcrumbs":"Setup System Guide » Hierarchy (highest to lowest priority)","id":"264","title":"Hierarchy (highest to lowest priority)"},"2640":{"body":"","breadcrumbs":"Extension Development » Creating Custom Clusters","id":"2640","title":"Creating Custom Clusters"},"2641":{"body":"Clusters orchestrate multiple services to work together as a cohesive application stack.","breadcrumbs":"Extension Development » Cluster Architecture","id":"2641","title":"Cluster Architecture"},"2642":{"body":"schemas/clusters/custom_web_stack.ncl: # Custom web application stack\\n{ CustomWebStackConfig = { # Configuration for Custom Web Application Stack # Application configuration app_name | String, app_version | String = \\"latest\\", environment | String = \\"production\\", # Web tier configuration web_tier | { replicas | Number = 3, instance_type | String = \\"t3.medium\\", load_balancer | { enabled | Bool = true, ssl | Bool = true, health_check_path | String = \\"/health\\", } = {}, }, # Application tier configuration app_tier | { replicas | Number = 5, instance_type | String = \\"t3.large\\", auto_scaling | { enabled | Bool = true, min_replicas | Number = 2, max_replicas | Number = 10, cpu_threshold | Number = 70, } = {}, }, # Database tier configuration database_tier | { type | String = \\"postgresql\\", instance_type | String = \\"t3.xlarge\\", high_availability | Bool = true, backup_enabled | Bool = true, } = {}, # Monitoring configuration monitoring | { enabled | Bool = true, metrics_retention | String = \\"30d\\", alerting | Bool = true, } = {}, # Networking network | { vpc_cidr | String = \\"10.0.0.0/16\\", public_subnets | [String] = [\\"10.0.1.0/24\\", \\"10.0.2.0/24\\"], private_subnets | [String] = [\\"10.0.10.0/24\\", \\"10.0.20.0/24\\"], database_subnets | [String] = [\\"10.0.100.0/24\\", \\"10.0.200.0/24\\"], } = {}, }, # Cluster blueprint cluster_blueprint = { name = \\"custom-web-stack\\", description = \\"Custom web application stack with load balancer, app servers, and database\\", version = \\"1.0.0\\", components = [ { name = \\"load-balancer\\", type = \\"taskserv\\", service = \\"haproxy\\", tier = \\"web\\", }, { name = \\"web-servers\\", type = \\"server\\", tier = \\"web\\", scaling = \\"horizontal\\", }, { name = \\"app-servers\\", type = \\"server\\", tier = \\"app\\", scaling = \\"horizontal\\", }, { name = \\"database\\", type = \\"taskserv\\", service = \\"postgresql\\", tier = \\"database\\", }, { name = \\"monitoring\\", type = \\"taskserv\\", service = \\"prometheus\\", tier = \\"monitoring\\", }, ], },\\n}","breadcrumbs":"Extension Development » Step 1: Define Cluster Schema","id":"2642","title":"Step 1: Define Cluster Schema"},"2643":{"body":"nulib/clusters/custom_web_stack.nu: # Custom Web Stack cluster implementation # Deploy web stack cluster\\nexport def deploy_custom_web_stack [ config: record --check: bool = false\\n] -> record { print $\\"Deploying Custom Web Stack: ($config.app_name)\\" if $check { return { action: \\"deploy\\" cluster: \\"custom-web-stack\\" app_name: $config.app_name status: \\"planned\\" components: [ \\"Network infrastructure\\" \\"Load balancer\\" \\"Web servers\\" \\"Application servers\\" \\"Database\\" \\"Monitoring\\" ] estimated_cost: (calculate_cluster_cost $config) } } # Deploy in order let network = (deploy_network $config) let database = (deploy_database $config) let app_servers = (deploy_app_tier $config) let web_servers = (deploy_web_tier $config) let load_balancer = (deploy_load_balancer $config) let monitoring = (deploy_monitoring $config) # Configure service discovery configure_service_discovery $config # Set up health checks setup_health_checks $config return { action: \\"deploy\\" cluster: \\"custom-web-stack\\" app_name: $config.app_name status: \\"deployed\\" components: { network: $network database: $database app_servers: $app_servers web_servers: $web_servers load_balancer: $load_balancer monitoring: $monitoring } endpoints: { web: $load_balancer.public_ip monitoring: $monitoring.grafana_url } }\\n} # Scale cluster\\nexport def scale_custom_web_stack [ app_name: string tier: string replicas: int\\n] -> record { print $\\"Scaling ($tier) tier to ($replicas) replicas for ($app_name)\\" match $tier { \\"web\\" => { scale_web_tier $app_name $replicas } \\"app\\" => { scale_app_tier $app_name $replicas } _ => { error make { msg: $\\"Invalid tier: ($tier). Valid options: web, app\\" } } } return { action: \\"scale\\" cluster: \\"custom-web-stack\\" app_name: $app_name tier: $tier new_replicas: $replicas status: \\"completed\\" }\\n} # Update cluster\\nexport def update_custom_web_stack [ app_name: string config: record\\n] -> record { print $\\"Updating Custom Web Stack: ($app_name)\\" # Rolling update strategy update_app_tier $app_name $config update_web_tier $app_name $config update_load_balancer $app_name $config return { action: \\"update\\" cluster: \\"custom-web-stack\\" app_name: $app_name status: \\"completed\\" }\\n} # Delete cluster\\nexport def delete_custom_web_stack [ app_name: string --keep_data: bool = false\\n] -> record { print $\\"Deleting Custom Web Stack: ($app_name)\\" # Delete in reverse order delete_load_balancer $app_name delete_web_tier $app_name delete_app_tier $app_name if not $keep_data { delete_database $app_name } delete_monitoring $app_name delete_network $app_name return { action: \\"delete\\" cluster: \\"custom-web-stack\\" app_name: $app_name data_preserved: $keep_data status: \\"completed\\" }\\n} # Cluster status\\nexport def status_custom_web_stack [ app_name: string\\n] -> record { let web_status = (get_web_tier_status $app_name) let app_status = (get_app_tier_status $app_name) let db_status = (get_database_status $app_name) let lb_status = (get_load_balancer_status $app_name) let monitoring_status = (get_monitoring_status $app_name) let overall_healthy = ( $web_status.healthy and $app_status.healthy and $db_status.healthy and $lb_status.healthy and $monitoring_status.healthy ) return { cluster: \\"custom-web-stack\\" app_name: $app_name healthy: $overall_healthy components: { web_tier: $web_status app_tier: $app_status database: $db_status load_balancer: $lb_status monitoring: $monitoring_status } last_check: (date now | format date \\"%Y-%m-%d %H:%M:%S\\") }\\n} # Helper functions for deployment def deploy_network [config: record] -> record { print \\"Deploying network infrastructure...\\" # Create VPC let vpc_config = { cidr: ($config.network.vpc_cidr | default \\"10.0.0.0/16\\") name: $\\"($config.app_name)-vpc\\" } # Create subnets let subnets = [ {name: \\"public-1\\", cidr: ($config.network.public_subnets | get 0)} {name: \\"public-2\\", cidr: ($config.network.public_subnets | get 1)} {name: \\"private-1\\", cidr: ($config.network.private_subnets | get 0)} {name: \\"private-2\\", cidr: ($config.network.private_subnets | get 1)} {name: \\"database-1\\", cidr: ($config.network.database_subnets | get 0)} {name: \\"database-2\\", cidr: ($config.network.database_subnets | get 1)} ] return { vpc: $vpc_config subnets: $subnets status: \\"deployed\\" }\\n} def deploy_database [config: record] -> record { print \\"Deploying database tier...\\" let db_config = { name: $\\"($config.app_name)-db\\" type: ($config.database_tier.type | default \\"postgresql\\") instance_type: ($config.database_tier.instance_type | default \\"t3.xlarge\\") high_availability: ($config.database_tier.high_availability | default true) backup_enabled: ($config.database_tier.backup_enabled | default true) } # Deploy database servers if $db_config.high_availability { deploy_ha_database $db_config } else { deploy_single_database $db_config } return { name: $db_config.name type: $db_config.type high_availability: $db_config.high_availability status: \\"deployed\\" endpoint: $\\"($config.app_name)-db.local:5432\\" }\\n} def deploy_app_tier [config: record] -> record { print \\"Deploying application tier...\\" let replicas = ($config.app_tier.replicas | default 5) # Deploy app servers mut servers = [] for i in 1..$replicas { let server_config = { name: $\\"($config.app_name)-app-($i | fill --width 2 --char \'0\')\\" instance_type: ($config.app_tier.instance_type | default \\"t3.large\\") subnet: \\"private\\" } let server = (deploy_app_server $server_config) $servers = ($servers | append $server) } return { tier: \\"application\\" servers: $servers replicas: $replicas status: \\"deployed\\" }\\n} def calculate_cluster_cost [config: record] -> float { let web_cost = ($config.web_tier.replicas | default 3) * 0.10 let app_cost = ($config.app_tier.replicas | default 5) * 0.20 let db_cost = if ($config.database_tier.high_availability | default true) { 0.80 } else { 0.40 } let lb_cost = 0.05 return ($web_cost + $app_cost + $db_cost + $lb_cost)\\n}","breadcrumbs":"Extension Development » Step 2: Implement Cluster Logic","id":"2643","title":"Step 2: Implement Cluster Logic"},"2644":{"body":"","breadcrumbs":"Extension Development » Extension Testing","id":"2644","title":"Extension Testing"},"2645":{"body":"tests/\\n├── unit/ # Unit tests\\n│ ├── provider_test.nu # Provider unit tests\\n│ ├── taskserv_test.nu # Task service unit tests\\n│ └── cluster_test.nu # Cluster unit tests\\n├── integration/ # Integration tests\\n│ ├── provider_integration_test.nu\\n│ ├── taskserv_integration_test.nu\\n│ └── cluster_integration_test.nu\\n├── e2e/ # End-to-end tests\\n│ └── full_stack_test.nu\\n└── fixtures/ # Test data ├── configs/ └── mocks/","breadcrumbs":"Extension Development » Test Structure","id":"2645","title":"Test Structure"},"2646":{"body":"tests/unit/provider_test.nu: # Unit tests for custom cloud provider use std testing export def test_provider_validation [] { # Test valid configuration let valid_config = { api_key: \\"test-key\\" region: \\"us-west-1\\" project_id: \\"test-project\\" } let result = (validate_custom_cloud_config $valid_config) assert equal $result.valid true # Test invalid configuration let invalid_config = { region: \\"us-west-1\\" # Missing api_key } let result2 = (validate_custom_cloud_config $invalid_config) assert equal $result2.valid false assert str contains $result2.error \\"api_key\\"\\n} export def test_cost_calculation [] { let server_config = { machine_type: \\"medium\\" disk_size: 50 } let cost = (calculate_server_cost $server_config) assert equal $cost 0.15 # 0.10 (medium) + 0.05 (50 GB storage)\\n} export def test_api_call_formatting [] { let config = { name: \\"test-server\\" machine_type: \\"small\\" zone: \\"us-west-1a\\" } let api_payload = (format_create_server_request $config) assert str contains ($api_payload | to json) \\"test-server\\" assert equal $api_payload.machine_type \\"small\\" assert equal $api_payload.zone \\"us-west-1a\\"\\n}","breadcrumbs":"Extension Development » Example Unit Test","id":"2646","title":"Example Unit Test"},"2647":{"body":"tests/integration/provider_integration_test.nu: # Integration tests for custom cloud provider use std testing export def test_server_lifecycle [] { # Set up test environment $env.CUSTOM_CLOUD_API_KEY = \\"test-api-key\\" $env.CUSTOM_CLOUD_API_URL = \\"https://api.test.custom-cloud.com/v1\\" let server_config = { name: \\"test-integration-server\\" machine_type: \\"micro\\" zone: \\"us-west-1a\\" } # Test server creation let create_result = (custom_cloud_create_server $server_config --check true) assert equal $create_result.status \\"planned\\" # Note: Actual creation would require valid API credentials # In integration tests, you might use a test/sandbox environment\\n} export def test_server_listing [] { # Mock API response for testing with-env [CUSTOM_CLOUD_API_KEY \\"test-key\\"] { # This would test against a real API in integration environment let servers = (custom_cloud_list_servers) assert ($servers | is-not-empty) }\\n}","breadcrumbs":"Extension Development » Integration Test","id":"2647","title":"Integration Test"},"2648":{"body":"","breadcrumbs":"Extension Development » Publishing Extensions","id":"2648","title":"Publishing Extensions"},"2649":{"body":"my-extension-package/\\n├── extension.toml # Extension metadata\\n├── README.md # Documentation\\n├── LICENSE # License file\\n├── CHANGELOG.md # Version history\\n├── examples/ # Usage examples\\n├── src/ # Source code\\n│ ├── kcl/\\n│ ├── nulib/\\n│ └── templates/\\n└── tests/ # Test files","breadcrumbs":"Extension Development » Extension Package Structure","id":"2649","title":"Extension Package Structure"},"265":{"body":"system.toml - System information (OS, architecture, paths) user_preferences.toml - User preferences (editor, format, etc.) platform/*.toml - Service endpoints and configuration providers/*.toml - Cloud provider settings","breadcrumbs":"Setup System Guide » Configuration Files","id":"265","title":"Configuration Files"},"2650":{"body":"extension.toml: [extension]\\nname = \\"my-custom-provider\\"\\nversion = \\"1.0.0\\"\\ndescription = \\"Custom cloud provider integration\\"\\nauthor = \\"Your Name \\"\\nlicense = \\"MIT\\"\\nhomepage = \\"https://github.com/username/my-custom-provider\\"\\nrepository = \\"https://github.com/username/my-custom-provider\\"\\nkeywords = [\\"cloud\\", \\"provider\\", \\"infrastructure\\"]\\ncategories = [\\"providers\\"] [compatibility]\\nprovisioning_version = \\">=1.0.0\\"\\nnickel_version = \\">=1.15.0\\" [provides]\\nproviders = [\\"custom-cloud\\"]\\ntaskservs = []\\nclusters = [] [dependencies]\\nsystem_packages = [\\"curl\\", \\"jq\\"]\\nextensions = [] [build]\\ninclude = [\\"src/**\\", \\"examples/**\\", \\"README.md\\", \\"LICENSE\\"]\\nexclude = [\\"tests/**\\", \\".git/**\\", \\"*.tmp\\"]","breadcrumbs":"Extension Development » Publishing Configuration","id":"2650","title":"Publishing Configuration"},"2651":{"body":"# 1. Validate extension\\nprovisioning extension validate . # 2. Run tests\\nprovisioning extension test . # 3. Build package\\nprovisioning extension build . # 4. Publish to registry\\nprovisioning extension publish ./dist/my-custom-provider-1.0.0.tar.gz","breadcrumbs":"Extension Development » Publishing Process","id":"2651","title":"Publishing Process"},"2652":{"body":"","breadcrumbs":"Extension Development » Best Practices","id":"2652","title":"Best Practices"},"2653":{"body":"# Follow standard structure\\nextension/\\n├── schemas/ # Nickel schemas and models\\n├── nulib/ # Nushell implementation\\n├── templates/ # Configuration templates\\n├── tests/ # Comprehensive tests\\n└── docs/ # Documentation","breadcrumbs":"Extension Development » 1. Code Organization","id":"2653","title":"1. Code Organization"},"2654":{"body":"# Always provide meaningful error messages\\nif ($api_response | get -o status | default \\"\\" | str contains \\"error\\") { error make { msg: $\\"API Error: ($api_response.message)\\" label: { text: \\"Custom Cloud API failure\\" span: (metadata $api_response | get span) } help: \\"Check your API key and network connectivity\\" }\\n}","breadcrumbs":"Extension Development » 2. Error Handling","id":"2654","title":"2. Error Handling"},"2655":{"body":"# Use Nickel\'s validation features with contracts\\n{ CustomConfig = { # Configuration with validation name | String | doc \\"Name must not be empty\\", size | Number | doc \\"Size must be positive and at most 1000\\", }, # Validation rules validate_config = fun config => let valid_name = (std.string.length config.name) > 0 in let valid_size = config.size > 0 && config.size <= 1000 in if valid_name && valid_size then config else (std.fail \\"Configuration validation failed\\"),\\n}","breadcrumbs":"Extension Development » 3. Configuration Validation","id":"2655","title":"3. Configuration Validation"},"2656":{"body":"Write comprehensive unit tests Include integration tests Test error conditions Use fixtures for consistent test data Mock external dependencies","breadcrumbs":"Extension Development » 4. Testing","id":"2656","title":"4. Testing"},"2657":{"body":"Include README with examples Document all configuration options Provide troubleshooting guide Include architecture diagrams Write API documentation","breadcrumbs":"Extension Development » 5. Documentation","id":"2657","title":"5. Documentation"},"2658":{"body":"Now that you understand extension development: Study existing extensions in the providers/ and taskservs/ directories Practice with simple extensions before building complex ones Join the community to share and collaborate on extensions Contribute to the core system by improving extension APIs Build a library of reusable templates and patterns You\'re now equipped to extend provisioning for any custom requirements!","breadcrumbs":"Extension Development » Next Steps","id":"2658","title":"Next Steps"},"2659":{"body":"A high-performance Rust microservice that provides a unified REST API for extension discovery, versioning, and download from multiple Git-based sources and OCI registries. Source : provisioning/platform/crates/extension-registry/","breadcrumbs":"Extension Registry » Extension Registry Service","id":"2659","title":"Extension Registry Service"},"266":{"body":"Create and manage multiple isolated environments: # Create workspace\\nprovisioning setup workspace dev\\nprovisioning setup workspace prod # List workspaces\\nprovisioning workspace list # Activate workspace\\nprovisioning workspace activate prod","breadcrumbs":"Setup System Guide » Multiple Workspaces","id":"266","title":"Multiple Workspaces"},"2660":{"body":"Multi-Backend Source Support : Fetch extensions from Gitea, Forgejo, and GitHub releases Multi-Registry Distribution Support : Distribute extensions to Zot, Harbor, Docker Hub, GHCR, Quay, and other OCI-compliant registries Unified REST API : Single API for all extension operations across all backends Smart Caching : LRU cache with TTL to reduce backend API calls Prometheus Metrics : Built-in metrics for monitoring Health Monitoring : Parallel health checks for all backends with aggregated status Aggregation & Fallback : Intelligent request routing with aggregation and fallback strategies Type-Safe : Strong typing for extension metadata Async/Await : High-performance async operations with Tokio Backward Compatible : Old single-instance configs auto-migrate to new multi-instance format","breadcrumbs":"Extension Registry » Features","id":"2660","title":"Features"},"2661":{"body":"","breadcrumbs":"Extension Registry » Architecture","id":"2661","title":"Architecture"},"2662":{"body":"The extension registry uses a trait-based architecture separating source and distribution backends: ┌────────────────────────────────────────────────────────────────────┐\\n│ Extension Registry API │\\n│ (axum) │\\n├────────────────────────────────────────────────────────────────────┤\\n│ │\\n│ ┌─ SourceClients ────────────┐ ┌─ DistributionClients ────────┐ │\\n│ │ │ │ │ │\\n│ │ • Gitea (Git releases) │ │ • OCI Registries │ │\\n│ │ • Forgejo (Git releases) │ │ - Zot │ │\\n│ │ • GitHub (Releases API) │ │ - Harbor │ │\\n│ │ │ │ - Docker Hub │ │\\n│ │ Strategy: Aggregation + │ │ - GHCR / Quay │ │\\n│ │ Fallback across all sources │ │ - Any OCI-compliant │ │\\n│ │ │ │ │ │\\n│ └─────────────────────────────┘ └──────────────────────────────┘ │\\n│ │\\n│ ┌─ LRU Cache ───────────────────────────────────────────────────┐ │\\n│ │ • Metadata cache (with TTL) │ │\\n│ │ • List cache (with TTL) │ │\\n│ │ • Version cache (version strings only) │ │\\n│ └───────────────────────────────────────────────────────────────┘ │\\n│ │\\n└────────────────────────────────────────────────────────────────────┘","breadcrumbs":"Extension Registry » Dual-Trait System","id":"2662","title":"Dual-Trait System"},"2663":{"body":"Aggregation Strategy (list_extensions, list_versions, search) Parallel Execution : Spawn concurrent tasks for all source and distribution clients Merge Results : Combine results from all backends Deduplication : Remove duplicates, preferring more recent versions Pagination : Apply limit/offset to merged results Caching : Store merged results with composite cache key Fallback Strategy (get_extension, download_extension) Sequential Retry : Try source clients first (in configured order) Distribution Fallback : If all sources fail, try distribution clients Return First Success : Return result from first successful client Caching : Cache successful result with backend-specific key","breadcrumbs":"Extension Registry » Request Strategies","id":"2663","title":"Request Strategies"},"2664":{"body":"cd provisioning/platform/extension-registry\\ncargo build --release","breadcrumbs":"Extension Registry » Installation","id":"2664","title":"Installation"},"2665":{"body":"","breadcrumbs":"Extension Registry » Configuration","id":"2665","title":"Configuration"},"2666":{"body":"Old format is automatically migrated to new multi-instance format: [server]\\nhost = \\"0.0.0.0\\"\\nport = 8082 # Single Gitea instance (auto-migrated to sources.gitea[0])\\n[gitea]\\nurl = \\"https://gitea.example.com\\"\\norganization = \\"provisioning-extensions\\"\\ntoken_path = \\"/path/to/gitea-token.txt\\" # Single OCI registry (auto-migrated to distributions.oci[0])\\n[oci]\\nregistry = \\"registry.example.com\\"\\nnamespace = \\"provisioning\\"\\nauth_token_path = \\"/path/to/oci-token.txt\\" [cache]\\ncapacity = 1000\\nttl_seconds = 300","breadcrumbs":"Extension Registry » Single-Instance Configuration (Legacy - Auto-Migrated)","id":"2666","title":"Single-Instance Configuration (Legacy - Auto-Migrated)"},"2667":{"body":"New format supporting multiple backends of each type: [server]\\nhost = \\"0.0.0.0\\"\\nport = 8082\\nworkers = 4\\nenable_cors = false\\nenable_compression = true # Multiple Gitea sources\\n[sources.gitea] [[sources.gitea]]\\nid = \\"internal-gitea\\"\\nurl = \\"https://gitea.internal.example.com\\"\\norganization = \\"provisioning\\"\\ntoken_path = \\"/etc/secrets/gitea-internal-token.txt\\"\\ntimeout_seconds = 30\\nverify_ssl = true [[sources.gitea]]\\nid = \\"public-gitea\\"\\nurl = \\"https://gitea.public.example.com\\"\\norganization = \\"extensions\\"\\ntoken_path = \\"/etc/secrets/gitea-public-token.txt\\"\\ntimeout_seconds = 30\\nverify_ssl = true # Forgejo sources (API compatible with Gitea)\\n[sources.forgejo] [[sources.forgejo]]\\nid = \\"community-forgejo\\"\\nurl = \\"https://forgejo.community.example.com\\"\\norganization = \\"provisioning\\"\\ntoken_path = \\"/etc/secrets/forgejo-token.txt\\"\\ntimeout_seconds = 30\\nverify_ssl = true # GitHub sources\\n[sources.github] [[sources.github]]\\nid = \\"org-github\\"\\norganization = \\"my-organization\\"\\ntoken_path = \\"/etc/secrets/github-token.txt\\"\\ntimeout_seconds = 30\\nverify_ssl = true # Multiple OCI distribution registries\\n[distributions.oci] [[distributions.oci]]\\nid = \\"internal-zot\\"\\nregistry = \\"zot.internal.example.com\\"\\nnamespace = \\"extensions\\"\\ntimeout_seconds = 30\\nverify_ssl = true [[distributions.oci]]\\nid = \\"public-harbor\\"\\nregistry = \\"harbor.public.example.com\\"\\nnamespace = \\"extensions\\"\\nauth_token_path = \\"/etc/secrets/harbor-token.txt\\"\\ntimeout_seconds = 30\\nverify_ssl = true [[distributions.oci]]\\nid = \\"docker-hub\\"\\nregistry = \\"docker.io\\"\\nnamespace = \\"myorg\\"\\nauth_token_path = \\"/etc/secrets/docker-hub-token.txt\\"\\ntimeout_seconds = 30\\nverify_ssl = true # Cache configuration\\n[cache]\\ncapacity = 1000\\nttl_seconds = 300\\nenable_metadata_cache = true\\nenable_list_cache = true","breadcrumbs":"Extension Registry » Multi-Instance Configuration (Recommended)","id":"2667","title":"Multi-Instance Configuration (Recommended)"},"2668":{"body":"Backend Identifiers : Use id field to uniquely identify each backend instance (auto-generated if omitted) Gitea/Forgejo Compatible : Both use same config format; organization field is required for Git repos GitHub Configuration : Uses organization as owner; token_path points to GitHub Personal Access Token OCI Registries : Support any OCI-compliant registry (Zot, Harbor, Docker Hub, GHCR, Quay, etc.) Optional Fields : id, verify_ssl, timeout_seconds have sensible defaults Token Files : Should contain only the token with no extra whitespace; permissions should be 0600","breadcrumbs":"Extension Registry » Configuration Notes","id":"2668","title":"Configuration Notes"},"2669":{"body":"Legacy environment variable support (for backward compatibility): REGISTRY_SERVER_HOST=127.0.0.1\\nREGISTRY_SERVER_PORT=8083\\nREGISTRY_SERVER_WORKERS=8\\nREGISTRY_GITEA_URL=https://gitea.example.com\\nREGISTRY_GITEA_ORG=extensions\\nREGISTRY_GITEA_TOKEN_PATH=/path/to/token\\nREGISTRY_OCI_REGISTRY=registry.example.com\\nREGISTRY_OCI_NAMESPACE=extensions\\nREGISTRY_CACHE_CAPACITY=2000\\nREGISTRY_CACHE_TTL=600","breadcrumbs":"Extension Registry » Environment Variable Overrides","id":"2669","title":"Environment Variable Overrides"},"267":{"body":"Update any setting: # Update platform configuration\\nprovisioning setup platform --config new-config.toml # Update provider settings\\nprovisioning setup provider upcloud --config upcloud-config.toml # Validate changes\\nprovisioning setup validate","breadcrumbs":"Setup System Guide » Configuration Updates","id":"267","title":"Configuration Updates"},"2670":{"body":"","breadcrumbs":"Extension Registry » API Endpoints","id":"2670","title":"API Endpoints"},"2671":{"body":"List Extensions GET /api/v1/extensions?type=provider&limit=10 Get Extension GET /api/v1/extensions/{type}/{name} List Versions GET /api/v1/extensions/{type}/{name}/versions Download Extension GET /api/v1/extensions/{type}/{name}/{version} Search Extensions GET /api/v1/extensions/search?q=kubernetes&type=taskserv","breadcrumbs":"Extension Registry » Extension Operations","id":"2671","title":"Extension Operations"},"2672":{"body":"Health Check GET /api/v1/health Response (with multi-backend aggregation): { \\"status\\": \\"healthy|degraded|unhealthy\\", \\"version\\": \\"0.1.0\\", \\"uptime\\": 3600, \\"backends\\": { \\"gitea\\": { \\"enabled\\": true, \\"healthy\\": true, \\"error\\": null }, \\"oci\\": { \\"enabled\\": true, \\"healthy\\": true, \\"error\\": null } }\\n} Status Values : healthy: All configured backends are healthy degraded: At least one backend is healthy, but some are failing unhealthy: No backends are responding Metrics GET /api/v1/metrics Cache Statistics GET /api/v1/cache/stats Response : { \\"metadata_hits\\": 1024, \\"metadata_misses\\": 256, \\"list_hits\\": 512, \\"list_misses\\": 128, \\"version_hits\\": 2048, \\"version_misses\\": 512, \\"size\\": 4096\\n}","breadcrumbs":"Extension Registry » System Endpoints","id":"2672","title":"System Endpoints"},"2673":{"body":"","breadcrumbs":"Extension Registry » Extension Naming Conventions","id":"2673","title":"Extension Naming Conventions"},"2674":{"body":"Providers : {name}_prov (for example, aws_prov) Task Services : {name}_taskserv (for example, kubernetes_taskserv) Clusters : {name}_cluster (for example, buildkit_cluster)","breadcrumbs":"Extension Registry » Gitea Repositories","id":"2674","title":"Gitea Repositories"},"2675":{"body":"Providers : {namespace}/{name}-provider Task Services : {namespace}/{name}-taskserv Clusters : {namespace}/{name}-cluster","breadcrumbs":"Extension Registry » OCI Artifacts","id":"2675","title":"OCI Artifacts"},"2676":{"body":"","breadcrumbs":"Extension Registry » Deployment","id":"2676","title":"Deployment"},"2677":{"body":"docker build -t extension-registry:latest .\\ndocker run -d -p 8082:8082 -v $(pwd)/config.toml:/app/config.toml:ro extension-registry:latest","breadcrumbs":"Extension Registry » Docker","id":"2677","title":"Docker"},"2678":{"body":"apiVersion: apps/v1\\nkind: Deployment\\nmetadata: name: extension-registry\\nspec: replicas: 3 template: spec: containers: - name: extension-registry image: extension-registry:latest ports: - containerPort: 8082","breadcrumbs":"Extension Registry » Kubernetes","id":"2678","title":"Kubernetes"},"2679":{"body":"","breadcrumbs":"Extension Registry » Migration Guide: Single to Multi-Instance","id":"2679","title":"Migration Guide: Single to Multi-Instance"},"268":{"body":"# Backup current configuration\\nprovisioning setup backup --path ./backup.tar.gz # Restore from backup\\nprovisioning setup restore --path ./backup.tar.gz # Migrate from old setup\\nprovisioning setup migrate --from-existing","breadcrumbs":"Setup System Guide » Backup & Restore","id":"268","title":"Backup & Restore"},"2680":{"body":"Old single-instance configs are automatically detected and migrated to the new multi-instance format during startup: Detection : Registry checks if old-style fields (gitea, oci) contain values Migration : Single instances are moved to new Vec-based format (sources.gitea[0], distributions.oci[0]) Logging : Migration event is logged for audit purposes Transparency : No user action required; old configs continue to work","breadcrumbs":"Extension Registry » Automatic Migration","id":"2680","title":"Automatic Migration"},"2681":{"body":"[gitea]\\nurl = \\"https://gitea.example.com\\"\\norganization = \\"extensions\\"\\ntoken_path = \\"/path/to/token\\" [oci]\\nregistry = \\"registry.example.com\\"\\nnamespace = \\"extensions\\"","breadcrumbs":"Extension Registry » Before Migration","id":"2681","title":"Before Migration"},"2682":{"body":"[sources.gitea]\\n[[sources.gitea]]\\nurl = \\"https://gitea.example.com\\"\\norganization = \\"extensions\\"\\ntoken_path = \\"/path/to/token\\" [distributions.oci]\\n[[distributions.oci]]\\nregistry = \\"registry.example.com\\"\\nnamespace = \\"extensions\\"","breadcrumbs":"Extension Registry » After Migration (Automatic)","id":"2682","title":"After Migration (Automatic)"},"2683":{"body":"To adopt the new format manually: Backup current config - Keep old format as reference Adopt new format - Replace old fields with new structure Test - Verify all backends are reachable and extensions are discovered Add new backends - Use new format to add Forgejo, GitHub, or additional OCI registries Remove old fields - Delete deprecated gitea and oci top-level sections","breadcrumbs":"Extension Registry » Gradual Upgrade Path","id":"2683","title":"Gradual Upgrade Path"},"2684":{"body":"Multiple Sources : Support Gitea, Forgejo, and GitHub simultaneously Multiple Registries : Distribute to multiple OCI registries Better Resilience : If one backend fails, others continue to work Flexible Configuration : Each backend can have different credentials and timeouts Future-Proof : New backends can be added without config restructuring","breadcrumbs":"Extension Registry » Benefits of Upgrading","id":"2684","title":"Benefits of Upgrading"},"2685":{"body":"Extension Development : Module System Extension Development Quickstart : Getting Started Guide ADR-005 : Extension Framework Architecture OCI Registry Integration : OCI Registry Guide","breadcrumbs":"Extension Registry » Related Documentation","id":"2685","title":"Related Documentation"},"2686":{"body":"This guide shows how to quickly add a new provider to the provider-agnostic infrastructure system.","breadcrumbs":"Quick Provider Guide » Quick Developer Guide: Adding New Providers","id":"2686","title":"Quick Developer Guide: Adding New Providers"},"2687":{"body":"Understand the Provider-Agnostic Architecture Have the provider\'s SDK or API available Know the provider\'s authentication requirements","breadcrumbs":"Quick Provider Guide » Prerequisites","id":"2687","title":"Prerequisites"},"2688":{"body":"","breadcrumbs":"Quick Provider Guide » 5-Minute Provider Addition","id":"2688","title":"5-Minute Provider Addition"},"2689":{"body":"mkdir -p provisioning/extensions/providers/{provider_name}\\nmkdir -p provisioning/extensions/providers/{provider_name}/nulib/{provider_name}","breadcrumbs":"Quick Provider Guide » Step 1: Create Provider Directory","id":"2689","title":"Step 1: Create Provider Directory"},"269":{"body":"","breadcrumbs":"Setup System Guide » Troubleshooting","id":"269","title":"Troubleshooting"},"2690":{"body":"# Copy the local provider as a template\\ncp provisioning/extensions/providers/local/provider.nu \\\\ provisioning/extensions/providers/{provider_name}/provider.nu","breadcrumbs":"Quick Provider Guide » Step 2: Copy Template and Customize","id":"2690","title":"Step 2: Copy Template and Customize"},"2691":{"body":"Edit provisioning/extensions/providers/{provider_name}/provider.nu: export def get-provider-metadata []: nothing -> record { { name: \\"your_provider_name\\" version: \\"1.0.0\\" description: \\"Your Provider Description\\" capabilities: { server_management: true network_management: true # Set based on provider features auto_scaling: false # Set based on provider features multi_region: true # Set based on provider features serverless: false # Set based on provider features # ... customize other capabilities } }\\n}","breadcrumbs":"Quick Provider Guide » Step 3: Update Provider Metadata","id":"2691","title":"Step 3: Update Provider Metadata"},"2692":{"body":"The provider interface requires these essential functions: # Required: Server operations\\nexport def query_servers [find?: string, cols?: string]: nothing -> list { # Call your provider\'s server listing API your_provider_query_servers $find $cols\\n} export def create_server [settings: record, server: record, check: bool, wait: bool]: nothing -> bool { # Call your provider\'s server creation API your_provider_create_server $settings $server $check $wait\\n} export def server_exists [server: record, error_exit: bool]: nothing -> bool { # Check if server exists in your provider your_provider_server_exists $server $error_exit\\n} export def get_ip [settings: record, server: record, ip_type: string, error_exit: bool]: nothing -> string { # Get server IP from your provider your_provider_get_ip $settings $server $ip_type $error_exit\\n} # Required: Infrastructure operations\\nexport def delete_server [settings: record, server: record, keep_storage: bool, error_exit: bool]: nothing -> bool { your_provider_delete_server $settings $server $keep_storage $error_exit\\n} export def server_state [server: record, new_state: string, error_exit: bool, wait: bool, settings: record]: nothing -> bool { your_provider_server_state $server $new_state $error_exit $wait $settings\\n}","breadcrumbs":"Quick Provider Guide » Step 4: Implement Core Functions","id":"2692","title":"Step 4: Implement Core Functions"},"2693":{"body":"Create provisioning/extensions/providers/{provider_name}/nulib/{provider_name}/servers.nu: # Example: DigitalOcean provider functions\\nexport def digitalocean_query_servers [find?: string, cols?: string]: nothing -> list { # Use DigitalOcean API to list droplets let droplets = (http get \\"https://api.digitalocean.com/v2/droplets\\" --headers { Authorization: $\\"Bearer ($env.DO_TOKEN)\\" }) $droplets.droplets | select name status memory disk region.name networks.v4\\n} export def digitalocean_create_server [settings: record, server: record, check: bool, wait: bool]: nothing -> bool { # Use DigitalOcean API to create droplet let payload = { name: $server.hostname region: $server.zone size: $server.plan image: ($server.image? | default \\"ubuntu-20-04-x64\\") } if $check { print $\\"Would create DigitalOcean droplet: ($payload)\\" return true } let result = (http post \\"https://api.digitalocean.com/v2/droplets\\" --headers { Authorization: $\\"Bearer ($env.DO_TOKEN)\\" } --content-type application/json $payload) $result.droplet.id != null\\n}","breadcrumbs":"Quick Provider Guide » Step 5: Create Provider-Specific Functions","id":"2693","title":"Step 5: Create Provider-Specific Functions"},"2694":{"body":"# Test provider discovery\\nnu -c \\"use provisioning/core/nulib/lib_provisioning/providers/registry.nu *; init-provider-registry; list-providers\\" # Test provider loading\\nnu -c \\"use provisioning/core/nulib/lib_provisioning/providers/loader.nu *; load-provider \'your_provider_name\'\\" # Test provider functions\\nnu -c \\"use provisioning/extensions/providers/your_provider_name/provider.nu *; query_servers\\"","breadcrumbs":"Quick Provider Guide » Step 6: Test Your Provider","id":"2694","title":"Step 6: Test Your Provider"},"2695":{"body":"Add to your Nickel configuration: # workspace/infra/example/servers.ncl\\nlet servers = [ { hostname = \\"test-server\\", provider = \\"your_provider_name\\", zone = \\"your-region-1\\", plan = \\"your-instance-type\\", }\\n] in\\nservers","breadcrumbs":"Quick Provider Guide » Step 7: Add Provider to Infrastructure","id":"2695","title":"Step 7: Add Provider to Infrastructure"},"2696":{"body":"","breadcrumbs":"Quick Provider Guide » Provider Templates","id":"2696","title":"Provider Templates"},"2697":{"body":"For cloud providers (AWS, GCP, Azure, etc.): # Use HTTP calls to cloud APIs\\nexport def cloud_query_servers [find?: string, cols?: string]: nothing -> list { let auth_header = { Authorization: $\\"Bearer ($env.PROVIDER_TOKEN)\\" } let servers = (http get $\\"($env.PROVIDER_API_URL)/servers\\" --headers $auth_header) $servers | select name status region instance_type public_ip\\n}","breadcrumbs":"Quick Provider Guide » Cloud Provider Template","id":"2697","title":"Cloud Provider Template"},"2698":{"body":"For container platforms (Docker, Podman, etc.): # Use CLI commands for container platforms\\nexport def container_query_servers [find?: string, cols?: string]: nothing -> list { let containers = (docker ps --format json | from json) $containers | select Names State Status Image\\n}","breadcrumbs":"Quick Provider Guide » Container Platform Template","id":"2698","title":"Container Platform Template"},"2699":{"body":"For bare metal or existing servers: # Use SSH or local commands\\nexport def baremetal_query_servers [find?: string, cols?: string]: nothing -> list { # Read from inventory file or ping servers let inventory = (open inventory.yaml | from yaml) $inventory.servers | select hostname ip_address status\\n}","breadcrumbs":"Quick Provider Guide » Bare Metal Provider Template","id":"2699","title":"Bare Metal Provider Template"},"27":{"body":"Solo: Local development Multi-user: Team collaboration CI/CD: Automated pipelines Enterprise: Production deployment","breadcrumbs":"Home » ✅ Mode-Based Operation","id":"27","title":"✅ Mode-Based Operation"},"270":{"body":"export PATH=\\"/usr/local/bin:$PATH\\"","breadcrumbs":"Setup System Guide » \\"Command not found: provisioning\\"","id":"270","title":"\\"Command not found: provisioning\\""},"2700":{"body":"","breadcrumbs":"Quick Provider Guide » Best Practices","id":"2700","title":"Best Practices"},"2701":{"body":"export def provider_operation []: nothing -> any { try { # Your provider operation provider_api_call } catch {|err| log-error $\\"Provider operation failed: ($err.msg)\\" \\"provider\\" if $error_exit { exit 1 } null }\\n}","breadcrumbs":"Quick Provider Guide » 1. Error Handling","id":"2701","title":"1. Error Handling"},"2702":{"body":"# Check for required environment variables\\ndef check_auth []: nothing -> bool { if ($env | get -o PROVIDER_TOKEN) == null { log-error \\"PROVIDER_TOKEN environment variable required\\" \\"auth\\" return false } true\\n}","breadcrumbs":"Quick Provider Guide » 2. Authentication","id":"2702","title":"2. Authentication"},"2703":{"body":"# Add delays for API rate limits\\ndef api_call_with_retry [url: string]: nothing -> any { mut attempts = 0 mut max_attempts = 3 while $attempts < $max_attempts { try { return (http get $url) } catch { $attempts += 1 sleep 1sec } } error make { msg: \\"API call failed after retries\\" }\\n}","breadcrumbs":"Quick Provider Guide » 3. Rate Limiting","id":"2703","title":"3. Rate Limiting"},"2704":{"body":"Set capabilities accurately: capabilities: { server_management: true # Can create/delete servers network_management: true # Can manage networks/VPCs storage_management: true # Can manage block storage load_balancer: false # No load balancer support dns_management: false # No DNS support auto_scaling: true # Supports auto-scaling spot_instances: false # No spot instance support multi_region: true # Supports multiple regions containers: false # No container support serverless: false # No serverless support encryption_at_rest: true # Supports encryption compliance_certifications: [\\"SOC2\\"] # Available certifications\\n}","breadcrumbs":"Quick Provider Guide » 4. Provider Capabilities","id":"2704","title":"4. Provider Capabilities"},"2705":{"body":"Provider discovered by registry Provider loads without errors All required interface functions implemented Provider metadata correct Authentication working Can query existing resources Can create new resources (in test mode) Error handling working Compatible with existing infrastructure configs","breadcrumbs":"Quick Provider Guide » Testing Checklist","id":"2705","title":"Testing Checklist"},"2706":{"body":"","breadcrumbs":"Quick Provider Guide » Common Issues","id":"2706","title":"Common Issues"},"2707":{"body":"# Check provider directory structure\\nls -la provisioning/extensions/providers/your_provider_name/ # Ensure provider.nu exists and has get-provider-metadata function\\ngrep \\"get-provider-metadata\\" provisioning/extensions/providers/your_provider_name/provider.nu","breadcrumbs":"Quick Provider Guide » Provider Not Found","id":"2707","title":"Provider Not Found"},"2708":{"body":"# Check which functions are missing\\nnu -c \\"use provisioning/core/nulib/lib_provisioning/providers/interface.nu *; validate-provider-interface \'your_provider_name\'\\"","breadcrumbs":"Quick Provider Guide » Interface Validation Failed","id":"2708","title":"Interface Validation Failed"},"2709":{"body":"# Check environment variables\\nenv | grep PROVIDER # Test API access manually\\ncurl -H \\"Authorization: Bearer $PROVIDER_TOKEN\\" https://api.provider.com/test","breadcrumbs":"Quick Provider Guide » Authentication Errors","id":"2709","title":"Authentication Errors"},"271":{"body":"curl -sSL https://raw.githubusercontent.com/nushell/nushell/main/install.sh | bash","breadcrumbs":"Setup System Guide » \\"Nushell not found\\"","id":"271","title":"\\"Nushell not found\\""},"2710":{"body":"Documentation : Add provider-specific documentation to docs/providers/ Examples : Create example infrastructure using your provider Testing : Add integration tests for your provider Optimization : Implement caching and performance optimizations Features : Add provider-specific advanced features","breadcrumbs":"Quick Provider Guide » Next Steps","id":"2710","title":"Next Steps"},"2711":{"body":"Check existing providers for implementation patterns Review the Provider Interface Documentation Test with the provider test suite: ./provisioning/tools/test-provider-agnostic.nu Run migration checks: ./provisioning/tools/migrate-to-provider-agnostic.nu status","breadcrumbs":"Quick Provider Guide » Getting Help","id":"2711","title":"Getting Help"},"2712":{"body":"","breadcrumbs":"Provider Agnostic Architecture » Provider-Agnostic Architecture Documentation","id":"2712","title":"Provider-Agnostic Architecture Documentation"},"2713":{"body":"The new provider-agnostic architecture eliminates hardcoded provider dependencies and enables true multi-provider infrastructure deployments. This addresses two critical limitations of the previous middleware: Hardcoded provider dependencies - No longer requires importing specific provider modules Single-provider limitation - Now supports mixing multiple providers in the same deployment (for example, AWS compute + Cloudflare DNS + UpCloud backup)","breadcrumbs":"Provider Agnostic Architecture » Overview","id":"2713","title":"Overview"},"2714":{"body":"","breadcrumbs":"Provider Agnostic Architecture » Architecture Components","id":"2714","title":"Architecture Components"},"2715":{"body":"Defines the contract that all providers must implement: # Standard interface functions\\n- query_servers\\n- server_info\\n- server_exists\\n- create_server\\n- delete_server\\n- server_state\\n- get_ip\\n# ... and 20+ other functions Key Features: Type-safe function signatures Comprehensive validation Provider capability flags Interface versioning","breadcrumbs":"Provider Agnostic Architecture » 1. Provider Interface (interface.nu)","id":"2715","title":"1. Provider Interface (interface.nu)"},"2716":{"body":"Manages provider discovery and registration: # Initialize registry\\ninit-provider-registry # List available providers\\nlist-providers --available-only # Check provider availability\\nis-provider-available \\"aws\\" Features: Automatic provider discovery Core and extension provider support Caching for performance Provider capability tracking","breadcrumbs":"Provider Agnostic Architecture » 2. Provider Registry (registry.nu)","id":"2716","title":"2. Provider Registry (registry.nu)"},"2717":{"body":"Handles dynamic provider loading and validation: # Load provider dynamically\\nload-provider \\"aws\\" # Get provider with auto-loading\\nget-provider \\"upcloud\\" # Call provider function\\ncall-provider-function \\"aws\\" \\"query_servers\\" $find $cols Features: Lazy loading (load only when needed) Interface compliance validation Error handling and recovery Provider health checking","breadcrumbs":"Provider Agnostic Architecture » 3. Provider Loader (loader.nu)","id":"2717","title":"3. Provider Loader (loader.nu)"},"2718":{"body":"Each provider implements a standard adapter: provisioning/extensions/providers/\\n├── aws/provider.nu # AWS adapter\\n├── upcloud/provider.nu # UpCloud adapter\\n├── local/provider.nu # Local adapter\\n└── {custom}/provider.nu # Custom providers Adapter Structure: # AWS Provider Adapter\\nexport def query_servers [find?: string, cols?: string] { aws_query_servers $find $cols\\n} export def create_server [settings: record, server: record, check: bool, wait: bool] { # AWS-specific implementation\\n}","breadcrumbs":"Provider Agnostic Architecture » 4. Provider Adapters","id":"2718","title":"4. Provider Adapters"},"2719":{"body":"The new middleware that uses dynamic dispatch: # No hardcoded imports!\\nexport def mw_query_servers [settings: record, find?: string, cols?: string] { $settings.data.servers | each { |server| # Dynamic provider loading and dispatch dispatch_provider_function $server.provider \\"query_servers\\" $find $cols }\\n}","breadcrumbs":"Provider Agnostic Architecture » 5. Provider-Agnostic Middleware (middleware_provider_agnostic.nu)","id":"2719","title":"5. Provider-Agnostic Middleware (middleware_provider_agnostic.nu)"},"272":{"body":"chmod 755 ~/Library/Application\\\\ Support/provisioning/","breadcrumbs":"Setup System Guide » \\"Cannot write to directory\\"","id":"272","title":"\\"Cannot write to directory\\""},"2720":{"body":"","breadcrumbs":"Provider Agnostic Architecture » Multi-Provider Support","id":"2720","title":"Multi-Provider Support"},"2721":{"body":"let servers = [ { hostname = \\"compute-01\\", provider = \\"aws\\", # AWS-specific config }, { hostname = \\"backup-01\\", provider = \\"upcloud\\", # UpCloud-specific config }, { hostname = \\"api.example.com\\", provider = \\"cloudflare\\", # DNS-specific config },\\n] in\\nservers","breadcrumbs":"Provider Agnostic Architecture » Example: Mixed Provider Infrastructure","id":"2721","title":"Example: Mixed Provider Infrastructure"},"2722":{"body":"# Deploy across multiple providers automatically\\nmw_deploy_multi_provider_infra $settings $deployment_plan # Get deployment strategy recommendations\\nmw_suggest_deployment_strategy { regions: [\\"us-east-1\\", \\"eu-west-1\\"] high_availability: true cost_optimization: true\\n}","breadcrumbs":"Provider Agnostic Architecture » Multi-Provider Deployment","id":"2722","title":"Multi-Provider Deployment"},"2723":{"body":"Providers declare their capabilities: capabilities: { server_management: true network_management: true auto_scaling: true # AWS: yes, Local: no multi_region: true # AWS: yes, Local: no serverless: true # AWS: yes, UpCloud: no compliance_certifications: [\\"SOC2\\", \\"HIPAA\\"]\\n}","breadcrumbs":"Provider Agnostic Architecture » Provider Capabilities","id":"2723","title":"Provider Capabilities"},"2724":{"body":"","breadcrumbs":"Provider Agnostic Architecture » Migration Guide","id":"2724","title":"Migration Guide"},"2725":{"body":"Before (hardcoded): # middleware.nu\\nuse ../aws/nulib/aws/servers.nu *\\nuse ../upcloud/nulib/upcloud/servers.nu * match $server.provider { \\"aws\\" => { aws_query_servers $find $cols } \\"upcloud\\" => { upcloud_query_servers $find $cols }\\n} After (provider-agnostic): # middleware_provider_agnostic.nu\\n# No hardcoded imports! # Dynamic dispatch\\ndispatch_provider_function $server.provider \\"query_servers\\" $find $cols","breadcrumbs":"Provider Agnostic Architecture » From Old Middleware","id":"2725","title":"From Old Middleware"},"2726":{"body":"Replace middleware file: cp provisioning/extensions/providers/prov_lib/middleware.nu \\\\ provisioning/extensions/providers/prov_lib/middleware_legacy.backup cp provisioning/extensions/providers/prov_lib/middleware_provider_agnostic.nu \\\\ provisioning/extensions/providers/prov_lib/middleware.nu Test with existing infrastructure: ./provisioning/tools/test-provider-agnostic.nu run-all-tests Update any custom code that directly imported provider modules","breadcrumbs":"Provider Agnostic Architecture » Migration Steps","id":"2726","title":"Migration Steps"},"2727":{"body":"","breadcrumbs":"Provider Agnostic Architecture » Adding New Providers","id":"2727","title":"Adding New Providers"},"2728":{"body":"Create provisioning/extensions/providers/{name}/provider.nu: # Digital Ocean Provider Example\\nexport def get-provider-metadata [] { { name: \\"digitalocean\\" version: \\"1.0.0\\" capabilities: { server_management: true # ... other capabilities } }\\n} # Implement required interface functions\\nexport def query_servers [find?: string, cols?: string] { # DigitalOcean-specific implementation\\n} export def create_server [settings: record, server: record, check: bool, wait: bool] { # DigitalOcean-specific implementation\\n} # ... implement all required functions","breadcrumbs":"Provider Agnostic Architecture » 1. Create Provider Adapter","id":"2728","title":"1. Create Provider Adapter"},"2729":{"body":"The registry will automatically discover the new provider on next initialization.","breadcrumbs":"Provider Agnostic Architecture » 2. Provider Discovery","id":"2729","title":"2. Provider Discovery"},"273":{"body":"provisioning setup validate --check-tools","breadcrumbs":"Setup System Guide » Check required tools","id":"273","title":"Check required tools"},"2730":{"body":"# Check if discovered\\nis-provider-available \\"digitalocean\\" # Load and test\\nload-provider \\"digitalocean\\"\\ncheck-provider-health \\"digitalocean\\"","breadcrumbs":"Provider Agnostic Architecture » 3. Test New Provider","id":"2730","title":"3. Test New Provider"},"2731":{"body":"","breadcrumbs":"Provider Agnostic Architecture » Best Practices","id":"2731","title":"Best Practices"},"2732":{"body":"Implement full interface - All functions must be implemented Handle errors gracefully - Return appropriate error values Follow naming conventions - Use consistent function naming Document capabilities - Accurately declare what your provider supports Test thoroughly - Validate against the interface specification","breadcrumbs":"Provider Agnostic Architecture » Provider Development","id":"2732","title":"Provider Development"},"2733":{"body":"Use capability-based selection - Choose providers based on required features Handle provider failures - Design for provider unavailability Optimize for cost/performance - Mix providers strategically Monitor cross-provider dependencies - Understand inter-provider communication","breadcrumbs":"Provider Agnostic Architecture » Multi-Provider Deployments","id":"2733","title":"Multi-Provider Deployments"},"2734":{"body":"# Environment profiles can restrict providers\\nPROVISIONING_PROFILE=production # Only allows certified providers\\nPROVISIONING_PROFILE=development # Allows all providers including local","breadcrumbs":"Provider Agnostic Architecture » Profile-Based Security","id":"2734","title":"Profile-Based Security"},"2735":{"body":"","breadcrumbs":"Provider Agnostic Architecture » Troubleshooting","id":"2735","title":"Troubleshooting"},"2736":{"body":"Provider not found Check provider is in correct directory Verify provider.nu exists and implements interface Run init-provider-registry to refresh Interface validation failed Use validate-provider-interface to check compliance Ensure all required functions are implemented Check function signatures match interface Provider loading errors Check Nushell module syntax Verify import paths are correct Use check-provider-health for diagnostics","breadcrumbs":"Provider Agnostic Architecture » Common Issues","id":"2736","title":"Common Issues"},"2737":{"body":"# Registry diagnostics\\nget-provider-stats\\nlist-providers --verbose # Provider diagnostics\\ncheck-provider-health \\"aws\\"\\ncheck-all-providers-health # Loader diagnostics\\nget-loader-stats","breadcrumbs":"Provider Agnostic Architecture » Debug Commands","id":"2737","title":"Debug Commands"},"2738":{"body":"Lazy Loading - Providers loaded only when needed Caching - Provider registry cached to disk Reduced Memory - No hardcoded imports reducing memory usage Parallel Operations - Multi-provider operations can run in parallel","breadcrumbs":"Provider Agnostic Architecture » Performance Benefits","id":"2738","title":"Performance Benefits"},"2739":{"body":"Provider Plugins - Support for external provider plugins Provider Versioning - Multiple versions of same provider Provider Composition - Compose providers for complex scenarios Provider Marketplace - Community provider sharing","breadcrumbs":"Provider Agnostic Architecture » Future Enhancements","id":"2739","title":"Future Enhancements"},"274":{"body":"Q: Do I need all optional tools? A: No. You need at least one deployment tool (Docker, Kubernetes, SSH, or systemd). Q: Can I use provisioning without Docker? A: Yes. Provisioning supports Docker, Kubernetes, SSH, systemd, or combinations. Q: How do I update configuration? A: provisioning setup update Q: Can I have multiple workspaces? A: Yes, unlimited workspaces. Q: Is my configuration secure? A: Yes. Credentials stored securely, never in config files. Q: Can I share workspaces with my team? A: Yes, via GitOps - configurations in Git, secrets in secure storage.","breadcrumbs":"Setup System Guide » FAQ","id":"274","title":"FAQ"},"2740":{"body":"See the interface specification for complete function documentation: get-provider-interface-docs | table This returns the complete API with signatures and descriptions for all provider interface functions.","breadcrumbs":"Provider Agnostic Architecture » API Reference","id":"2740","title":"API Reference"},"2741":{"body":"Version : 2.0 Status : Production Ready Based On : Hetzner, UpCloud, AWS (3 completed providers)","breadcrumbs":"Provider Development Guide » Cloud Provider Development Guide","id":"2741","title":"Cloud Provider Development Guide"},"2742":{"body":"A cloud provider is production-ready when it completes all 4 tasks: Task Requirements Reference 1. Nushell Compliance 0 deprecated patterns, full implementations provisioning/extensions/providers/hetzner/ 2. Test Infrastructure 51 tests (14 unit + 37 integration, mock-based) provisioning/extensions/providers/upcloud/tests/ 3. Runtime Templates 3+ Jinja2/Bash templates for core resources provisioning/extensions/providers/aws/templates/ 4. Nickel Validation Schemas pass nickel typecheck provisioning/extensions/providers/hetzner/nickel/","breadcrumbs":"Provider Development Guide » Overview: 4-Task Completion Framework","id":"2742","title":"Overview: 4-Task Completion Framework"},"2743":{"body":"Tarea 4 (5 min) ──────┐\\nTarea 1 (main) ───┐ ├──> Tarea 2 (tests)\\nTarea 3 (parallel)┘ │ └──> Production Ready ✅","breadcrumbs":"Provider Development Guide » Execution Sequence","id":"2743","title":"Execution Sequence"},"2744":{"body":"These rules are mandatory for all provider Nushell code:","breadcrumbs":"Provider Development Guide » Nushell 0.109.0+ Core Rules","id":"2744","title":"Nushell 0.109.0+ Core Rules"},"2745":{"body":"use mod.nu\\nuse api.nu\\nuse servers.nu","breadcrumbs":"Provider Development Guide » Rule 1: Module System & Imports","id":"2745","title":"Rule 1: Module System & Imports"},"2746":{"body":"def function_name [param: type, optional: type = default] { }","breadcrumbs":"Provider Development Guide » Rule 2: Function Signatures","id":"2746","title":"Rule 2: Function Signatures"},"2747":{"body":"def operation [resource: record] { if ($resource | get -o id | is-empty) { error make {msg: \\"Resource ID required\\"} }\\n}","breadcrumbs":"Provider Development Guide » Rule 3: Return Early, Fail Fast","id":"2747","title":"Rule 3: Return Early, Fail Fast"},"2748":{"body":"❌ FORBIDDEN - Deprecated try-catch: try { ^external_command\\n} catch {|err| print $\\"Error: ($err.msg)\\"\\n} ✅ REQUIRED - Modern do/complete pattern: let result = (do { ^external_command } | complete) if $result.exit_code != 0 { error make {msg: $\\"Command failed: ($result.stderr)\\"}\\n} $result.stdout","breadcrumbs":"Provider Development Guide » Rule 4: Modern Error Handling (CRITICAL)","id":"2748","title":"Rule 4: Modern Error Handling (CRITICAL)"},"2749":{"body":"All operations must fully succeed or fully fail. No partial state changes.","breadcrumbs":"Provider Development Guide » Rule 5: Atomic Operations","id":"2749","title":"Rule 5: Atomic Operations"},"275":{"body":"# General help\\nprovisioning help # Setup help\\nprovisioning help setup # Specific command help\\nprovisioning setup system --help","breadcrumbs":"Setup System Guide » Getting Help","id":"275","title":"Getting Help"},"2750":{"body":"error make { msg: \\"Human-readable message\\", label: {text: \\"Error context\\", span: (metadata error).span}\\n}","breadcrumbs":"Provider Development Guide » Rule 12: Structured Error Returns","id":"2750","title":"Rule 12: Structured Error Returns"},"2751":{"body":"❌ FORBIDDEN : try { } catch { } blocks let mut variable = value (mutable state) error make {msg: \\"Not implemented\\"} (stubs) Empty function bodies returning ok Deprecated error patterns","breadcrumbs":"Provider Development Guide » Critical Violations (INSTANT FAIL)","id":"2751","title":"Critical Violations (INSTANT FAIL)"},"2752":{"body":"All Nickel schemas follow this pattern:","breadcrumbs":"Provider Development Guide » Nickel IaC: Three-File Pattern","id":"2752","title":"Nickel IaC: Three-File Pattern"},"2753":{"body":"{ Server = { id | String, name | String, instance_type | String, zone | String, }, Volume = { id | String, name | String, size | Number, type | String, }\\n}","breadcrumbs":"Provider Development Guide » contracts.ncl: Type Definitions","id":"2753","title":"contracts.ncl: Type Definitions"},"2754":{"body":"{ Server = { instance_type = \\"t3.micro\\", zone = \\"us-east-1a\\", }, Volume = { size = 20, type = \\"gp3\\", }\\n}","breadcrumbs":"Provider Development Guide » defaults.ncl: Default Values","id":"2754","title":"defaults.ncl: Default Values"},"2755":{"body":"let contracts = import \\"contracts.ncl\\" in\\nlet defaults = import \\"defaults.ncl\\" in { make_server = fun config => defaults.Server & config, make_volume = fun config => defaults.Volume & config,\\n}","breadcrumbs":"Provider Development Guide » main.ncl: Public API","id":"2755","title":"main.ncl: Public API"},"2756":{"body":"{ provider_version = \\"1.0.0\\", cli_tools = { hcloud = \\"1.47.0+\\", }, nickel_version = \\"1.7.0+\\",\\n} Validation : nickel typecheck nickel/contracts.ncl\\nnickel typecheck nickel/defaults.ncl\\nnickel typecheck nickel/main.ncl\\nnickel typecheck nickel/version.ncl\\nnickel export nickel/main.ncl","breadcrumbs":"Provider Development Guide » version.ncl: Version Tracking","id":"2756","title":"version.ncl: Version Tracking"},"2757":{"body":"","breadcrumbs":"Provider Development Guide » Tarea 1: Nushell Compliance","id":"2757","title":"Tarea 1: Nushell Compliance"},"2758":{"body":"cd provisioning/extensions/providers/{PROVIDER} grep -r \\"try {\\" nulib/ --include=\\"*.nu\\" | wc -l\\ngrep -r \\"let mut \\" nulib/ --include=\\"*.nu\\" | wc -l\\ngrep -r \\"not implemented\\" nulib/ --include=\\"*.nu\\" | wc -l All three commands should return 0.","breadcrumbs":"Provider Development Guide » Identify Violations","id":"2758","title":"Identify Violations"},"2759":{"body":"def retry_with_backoff [ closure: closure, max_attempts: int\\n]: nothing -> any { let result = ( 0..$max_attempts | reduce --fold { success: false, value: null, delay: 100 ms } {|attempt, acc| if $acc.success { $acc } else { let op_result = (do { $closure | call } | complete) if $op_result.exit_code == 0 { {success: true, value: $op_result.stdout, delay: $acc.delay} } else if $attempt >= ($max_attempts - 1) { $acc } else { sleep $acc.delay {success: false, value: null, delay: ($acc.delay * 2)} } } } ) if $result.success { $result.value } else { error make {msg: $\\"Failed after ($max_attempts) attempts\\"} }\\n}","breadcrumbs":"Provider Development Guide » Fix Mutable Loops: Accumulation Pattern","id":"2759","title":"Fix Mutable Loops: Accumulation Pattern"},"276":{"body":"Installation Guide Workspace Setup Provider Configuration From Scratch Guide Status : Production Ready ✅ Version : 1.0.0 Last Updated : 2025-12-09","breadcrumbs":"Setup System Guide » Next Steps","id":"276","title":"Next Steps"},"2760":{"body":"def _wait_for_state [ resource_id: string, target_state: string, timeout_sec: int, elapsed: int = 0, interval: int = 2\\n]: nothing -> bool { let current = (^aws ec2 describe-volumes \\\\ --volume-ids $resource_id \\\\ --query \\"Volumes[0].State\\" \\\\ --output text) if ($current | str contains $target_state) { true } else if $elapsed > $timeout_sec { false } else { sleep ($\\"($interval)sec\\" | into duration) _wait_for_state $resource_id $target_state $timeout_sec ($elapsed + $interval) $interval }\\n}","breadcrumbs":"Provider Development Guide » Fix Mutable Loops: Recursive Pattern","id":"2760","title":"Fix Mutable Loops: Recursive Pattern"},"2761":{"body":"def create_server [config: record] { if ($config | get -o name | is-empty) { error make {msg: \\"Server name required\\"} } let api_result = (do { ^hcloud server create \\\\ --name $config.name \\\\ --type $config.instance_type \\\\ --format json } | complete) if $api_result.exit_code != 0 { error make {msg: $\\"Server creation failed: ($api_result.stderr)\\"} } let response = ($api_result.stdout | from json) { id: $response.server.id, name: $response.server.name, status: \\"created\\" }\\n}","breadcrumbs":"Provider Development Guide » Fix Error Handling","id":"2761","title":"Fix Error Handling"},"2762":{"body":"cd provisioning/extensions/providers/{PROVIDER} for file in nulib/*/\\\\*.nu; do nu --ide-check 100 \\"$file\\" 2>&1 | grep -i error && exit 1\\ndone nu -c \\"use nulib/{provider}/mod.nu; print \'OK\'\\" echo \\"✅ Nushell compliance complete\\"","breadcrumbs":"Provider Development Guide » Validation","id":"2762","title":"Validation"},"2763":{"body":"","breadcrumbs":"Provider Development Guide » Tarea 2: Test Infrastructure","id":"2763","title":"Tarea 2: Test Infrastructure"},"2764":{"body":"tests/\\n├── mocks/\\n│ └── mock_api_responses.json\\n├── unit/\\n│ └── test_utils.nu\\n├── integration/\\n│ ├── test_api_client.nu\\n│ ├── test_server_lifecycle.nu\\n│ └── test_pricing_cache.nu\\n└── run_{provider}_tests.nu","breadcrumbs":"Provider Development Guide » Directory Structure","id":"2764","title":"Directory Structure"},"2765":{"body":"{ \\"list_servers\\": { \\"servers\\": [ { \\"id\\": \\"srv-123\\", \\"name\\": \\"test-server\\", \\"status\\": \\"running\\" } ] }, \\"error_401\\": { \\"error\\": {\\"message\\": \\"Unauthorized\\", \\"code\\": 401} }, \\"error_429\\": { \\"error\\": {\\"message\\": \\"Rate limited\\", \\"code\\": 429} }\\n}","breadcrumbs":"Provider Development Guide » Mock API Responses","id":"2765","title":"Mock API Responses"},"2766":{"body":"def test-result [name: string, result: bool] { if $result { print $\\"✓ ($name)\\" } else { print $\\"✗ ($name)\\" } $result\\n} def test-validate-instance-id [] { let valid = \\"i-1234567890abcdef0\\" let invalid = \\"invalid-id\\" let test1 = (test-result \\"Instance ID valid\\" ($valid | str contains \\"i-\\")) let test2 = (test-result \\"Instance ID invalid\\" (($invalid | str contains \\"i-\\") == false)) $test1 and $test2\\n} def test-validate-ipv4 [] { let valid = \\"10.0.1.100\\" let parts = ($valid | split row \\".\\") test-result \\"IPv4 four octets\\" (($parts | length) == 4)\\n} def test-validate-instance-type [] { let valid_types = [\\"t3.micro\\" \\"t3.small\\" \\"m5.large\\"] let invalid = \\"invalid_type\\" let test1 = (test-result \\"Instance type valid\\" (($valid_types | contains [\\"t3.micro\\"]))) let test2 = (test-result \\"Instance type invalid\\" (($valid_types | contains [$invalid]) == false)) $test1 and $test2\\n} def test-validate-zone [] { let valid_zones = [\\"us-east-1a\\" \\"us-east-1b\\" \\"eu-west-1a\\"] let invalid = \\"invalid-zone\\" let test1 = (test-result \\"Zone valid\\" (($valid_zones | contains [\\"us-east-1a\\"]))) let test2 = (test-result \\"Zone invalid\\" (($valid_zones | contains [$invalid]) == false)) $test1 and $test2\\n} def test-validate-volume-id [] { let valid = \\"vol-12345678\\" let invalid = \\"invalid-vol\\" let test1 = (test-result \\"Volume ID valid\\" ($valid | str contains \\"vol-\\")) let test2 = (test-result \\"Volume ID invalid\\" (($invalid | str contains \\"vol-\\") == false)) $test1 and $test2\\n} def test-validate-volume-state [] { let valid_states = [\\"available\\" \\"in-use\\" \\"creating\\"] let invalid = \\"pending\\" let test1 = (test-result \\"Volume state valid\\" (($valid_states | contains [\\"available\\"]))) let test2 = (test-result \\"Volume state invalid\\" (($valid_states | contains [$invalid]) == false)) $test1 and $test2\\n} def test-validate-cidr [] { let valid = \\"10.0.0.0/16\\" let invalid = \\"10.0.0.1\\" let test1 = (test-result \\"CIDR valid\\" ($valid | str contains \\"/\\")) let test2 = (test-result \\"CIDR invalid\\" (($invalid | str contains \\"/\\") == false)) $test1 and $test2\\n} def test-validate-volume-type [] { let valid_types = [\\"gp2\\" \\"gp3\\" \\"io1\\" \\"io2\\"] let invalid = \\"invalid-type\\" let test1 = (test-result \\"Volume type valid\\" (($valid_types | contains [\\"gp3\\"]))) let test2 = (test-result \\"Volume type invalid\\" (($valid_types | contains [$invalid]) == false)) $test1 and $test2\\n} def test-validate-timestamp [] { let valid = \\"2025-01-07T10:00:00.000Z\\" let invalid = \\"not-a-timestamp\\" let test1 = (test-result \\"Timestamp valid\\" ($valid | str contains \\"T\\" and $valid | str contains \\"Z\\")) let test2 = (test-result \\"Timestamp invalid\\" (($invalid | str contains \\"T\\") == false)) $test1 and $test2\\n} def test-validate-server-state [] { let valid_states = [\\"running\\" \\"stopped\\" \\"pending\\"] let invalid = \\"hibernating\\" let test1 = (test-result \\"Server state valid\\" (($valid_states | contains [\\"running\\"]))) let test2 = (test-result \\"Server state invalid\\" (($valid_states | contains [$invalid]) == false)) $test1 and $test2\\n} def test-validate-security-group [] { let valid = \\"sg-12345678\\" let invalid = \\"invalid-sg\\" let test1 = (test-result \\"Security group valid\\" ($valid | str contains \\"sg-\\")) let test2 = (test-result \\"Security group invalid\\" (($invalid | str contains \\"sg-\\") == false)) $test1 and $test2\\n} def test-validate-memory [] { let valid_mems = [\\"512 MB\\" \\"1 GB\\" \\"2 GB\\" \\"4 GB\\"] let invalid = \\"0 GB\\" let test1 = (test-result \\"Memory valid\\" (($valid_mems | contains [\\"1 GB\\"]))) let test2 = (test-result \\"Memory invalid\\" (($valid_mems | contains [$invalid]) == false)) $test1 and $test2\\n} def test-validate-vcpu [] { let valid_cpus = [1, 2, 4, 8, 16] let invalid = 0 let test1 = (test-result \\"vCPU valid\\" (($valid_cpus | contains [1]))) let test2 = (test-result \\"vCPU invalid\\" (($valid_cpus | contains [$invalid]) == false)) $test1 and $test2\\n} def main [] { print \\"=== Unit Tests ===\\" print \\"\\" let results = [ (test-validate-instance-id), (test-validate-ipv4), (test-validate-instance-type), (test-validate-zone), (test-validate-volume-id), (test-validate-volume-state), (test-validate-cidr), (test-validate-volume-type), (test-validate-timestamp), (test-validate-server-state), (test-validate-security-group), (test-validate-memory), (test-validate-vcpu) ] let passed = ($results | where {|it| $it == true} | length) let failed = ($results | where {|it| $it == false} | length) print \\"\\" print $\\"Results: ($passed) passed, ($failed) failed\\" { passed: $passed, failed: $failed, total: ($passed + $failed) }\\n} main","breadcrumbs":"Provider Development Guide » Unit Tests: 14 Tests","id":"2766","title":"Unit Tests: 14 Tests"},"2767":{"body":"Module 1: test_api_client.nu (13 tests) Response structure validation Error handling for 401, 404, 429 Resource listing operations Pricing data validation Module 2: test_server_lifecycle.nu (12 tests) Server creation, listing, state Instance type and zone info Storage and security attachment Server state transitions Module 3: test_pricing_cache.nu (12 tests) Pricing data structure validation On-demand vs reserved pricing Cost calculations Volume pricing operations","breadcrumbs":"Provider Development Guide » Integration Tests: 37 Tests across 3 Modules","id":"2767","title":"Integration Tests: 37 Tests across 3 Modules"},"2768":{"body":"def main [] { print \\"=== Provider Test Suite ===\\" let unit_result = (nu tests/unit/test_utils.nu) let api_result = (nu tests/integration/test_api_client.nu) let lifecycle_result = (nu tests/integration/test_server_lifecycle.nu) let pricing_result = (nu tests/integration/test_pricing_cache.nu) let total_passed = ( $unit_result.passed + $api_result.passed + $lifecycle_result.passed + $pricing_result.passed ) let total_failed = ( $unit_result.failed + $api_result.failed + $lifecycle_result.failed + $pricing_result.failed ) print $\\"Results: ($total_passed) passed, ($total_failed) failed\\" { passed: $total_passed, failed: $total_failed, success: ($total_failed == 0) }\\n} let result = (main)\\nexit (if $result.success {0} else {1})","breadcrumbs":"Provider Development Guide » Test Orchestrator","id":"2768","title":"Test Orchestrator"},"2769":{"body":"cd provisioning/extensions/providers/{PROVIDER}\\nnu tests/run_{provider}_tests.nu Expected: 51 tests passing, exit code 0","breadcrumbs":"Provider Development Guide » Validation","id":"2769","title":"Validation"},"277":{"body":"This guide has moved to a multi-chapter format for better readability.","breadcrumbs":"Quick Start (Full) » Quick Start","id":"277","title":"Quick Start"},"2770":{"body":"","breadcrumbs":"Provider Development Guide » Tarea 3: Runtime Templates","id":"2770","title":"Tarea 3: Runtime Templates"},"2771":{"body":"templates/\\n├── {provider}_servers.j2\\n├── {provider}_networks.j2\\n└── {provider}_volumes.j2","breadcrumbs":"Provider Development Guide » Directory Structure","id":"2771","title":"Directory Structure"},"2772":{"body":"#!/bin/bash\\n# {{ provider_name }} Server Provisioning\\nset -e\\n{% if debug %}set -x{% endif %} {%- for server in servers %} {%- if server.name %} echo \\"Creating server: {{ server.name }}\\" {%- if server.instance_type %}\\nINSTANCE_TYPE=\\"{{ server.instance_type }}\\"\\n{%- else %}\\nINSTANCE_TYPE=\\"t3.micro\\"\\n{%- endif %} SERVER_ID=$(^hcloud server create \\\\ --name \\"{{ server.name }}\\" \\\\ --type $INSTANCE_TYPE \\\\ --query \'id\' \\\\ --output text 2>/dev/null) if [ -z \\"$SERVER_ID\\" ]; then echo \\"Failed to create server {{ server.name }}\\" exit 1\\nfi echo \\"✓ Server {{ server.name }} created: $SERVER_ID\\" {%- endif %}\\n{%- endfor %} echo \\"Server provisioning complete\\"","breadcrumbs":"Provider Development Guide » Template Example","id":"2772","title":"Template Example"},"2773":{"body":"cd provisioning/extensions/providers/{PROVIDER} for template in templates/*.j2; do bash -n <(sed \'s/{%.*%}//\' \\"$template\\" | sed \'s/{{.*}}/x/g\')\\ndone echo \\"✅ Templates valid\\"","breadcrumbs":"Provider Development Guide » Validation","id":"2773","title":"Validation"},"2774":{"body":"cd provisioning/extensions/providers/{PROVIDER} nickel typecheck nickel/contracts.ncl || exit 1\\nnickel typecheck nickel/defaults.ncl || exit 1\\nnickel typecheck nickel/main.ncl || exit 1\\nnickel typecheck nickel/version.ncl || exit 1 nickel export nickel/main.ncl || exit 1 echo \\"✅ Nickel schemas validated\\"","breadcrumbs":"Provider Development Guide » Tarea 4: Nickel Schema Validation","id":"2774","title":"Tarea 4: Nickel Schema Validation"},"2775":{"body":"#!/bin/bash\\nset -e PROVIDER=\\"hetzner\\"\\nPROV=\\"provisioning/extensions/providers/$PROVIDER\\" echo \\"=== Provider Completeness Check: $PROVIDER ===\\" echo \\"\\"\\necho \\"✓ Tarea 4: Validating Nickel...\\"\\nnickel typecheck \\"$PROV/nickel/main.ncl\\" echo \\"✓ Tarea 1: Checking Nushell...\\"\\n[ $(grep -r \\"try {\\" \\"$PROV/nulib\\" 2>/dev/null | wc -l) -eq 0 ]\\n[ $(grep -r \\"let mut \\" \\"$PROV/nulib\\" 2>/dev/null | wc -l) -eq 0 ]\\necho \\" - No deprecated patterns ✓\\" echo \\"✓ Tarea 3: Validating templates...\\"\\nfor f in \\"$PROV\\"/templates/*.j2; do bash -n <(sed \'s/{%.*%}//\' \\"$f\\" | sed \'s/{{.*}}/x/g\')\\ndone echo \\"✓ Tarea 2: Running tests...\\"\\nnu \\"$PROV/tests/run_${PROVIDER}_tests.nu\\" echo \\"\\"\\necho \\"╔════════════════════════════════════════╗\\"\\necho \\"║ ✅ ALL TASKS COMPLETE ║\\"\\necho \\"║ PRODUCTION READY ║\\"\\necho \\"╚════════════════════════════════════════╝\\"","breadcrumbs":"Provider Development Guide » Complete Validation Script","id":"2775","title":"Complete Validation Script"},"2776":{"body":"Hetzner : provisioning/extensions/providers/hetzner/ UpCloud : provisioning/extensions/providers/upcloud/ AWS : provisioning/extensions/providers/aws/ Use these as templates for new providers.","breadcrumbs":"Provider Development Guide » Reference Implementations","id":"2776","title":"Reference Implementations"},"2777":{"body":"cd provisioning/extensions/providers/{PROVIDER} # Validate completeness\\nnickel typecheck nickel/main.ncl && \\\\\\n[ $(grep -r \\"try {\\" nulib/ 2>/dev/null | wc -l) -eq 0 ] && \\\\\\nnu tests/run_{provider}_tests.nu && \\\\\\nfor f in templates/*.j2; do bash -n <(sed \'s/{%.*%}//\' \\"$f\\"); done && \\\\\\necho \\"✅ PRODUCTION READY\\"","breadcrumbs":"Provider Development Guide » Quick Start","id":"2777","title":"Quick Start"},"2778":{"body":"Strategic Guide for Provider Management and Distribution This guide explains the two complementary approaches for managing providers in the provisioning system and when to use each.","breadcrumbs":"Provider Distribution Guide » Provider Distribution Guide","id":"2778","title":"Provider Distribution Guide"},"2779":{"body":"Overview Module-Loader Approach Provider Packs Approach Comparison Matrix Recommended Hybrid Workflow Command Reference Real-World Scenarios Best Practices","breadcrumbs":"Provider Distribution Guide » Table of Contents","id":"2779","title":"Table of Contents"},"278":{"body":"Please see the complete quick start guide here: Prerequisites - System requirements and setup Installation - Install provisioning platform First Deployment - Deploy your first infrastructure Verification - Verify your deployment","breadcrumbs":"Quick Start (Full) » 📖 Navigate to Quick Start Guide","id":"278","title":"📖 Navigate to Quick Start Guide"},"2780":{"body":"The provisioning system supports two complementary approaches for provider management: Module-Loader : Symlink-based local development with dynamic discovery Provider Packs : Versioned, distributable artifacts for production Both approaches work seamlessly together and serve different phases of the development lifecycle.","breadcrumbs":"Provider Distribution Guide » Overview","id":"2780","title":"Overview"},"2781":{"body":"","breadcrumbs":"Provider Distribution Guide » Module-Loader Approach","id":"2781","title":"Module-Loader Approach"},"2782":{"body":"Fast, local development with direct access to provider source code.","breadcrumbs":"Provider Distribution Guide » Purpose","id":"2782","title":"Purpose"},"2783":{"body":"# Install provider for infrastructure (creates symlinks)\\nprovisioning providers install upcloud wuji # Internal Process:\\n# 1. Discovers provider in extensions/providers/upcloud/\\n# 2. Creates symlink: workspace/infra/wuji/.nickel-modules/upcloud_prov -> extensions/providers/upcloud/nickel/\\n# 3. Updates workspace/infra/wuji/manifest.toml with local path dependency\\n# 4. Updates workspace/infra/wuji/providers.manifest.yaml","breadcrumbs":"Provider Distribution Guide » How It Works","id":"2783","title":"How It Works"},"2784":{"body":"✅ Instant Changes : Edit code in extensions/providers/, immediately available in infrastructure ✅ Auto-Discovery : Automatically finds all providers in extensions/ ✅ Simple Commands : providers install/remove/list/validate ✅ Easy Debugging : Direct access to source code ✅ No Packaging : Skip build/package step during development","breadcrumbs":"Provider Distribution Guide » Key Features","id":"2784","title":"Key Features"},"2785":{"body":"🔧 Active Development : Writing new provider features 🧪 Testing : Rapid iteration and testing cycles 🏠 Local Infrastructure : Single machine or small team 📝 Debugging : Need to modify and test provider code 🎓 Learning : Understanding how providers work","breadcrumbs":"Provider Distribution Guide » Best Use Cases","id":"2785","title":"Best Use Cases"},"2786":{"body":"# 1. List available providers\\nprovisioning providers list # 2. Install provider for infrastructure\\nprovisioning providers install upcloud wuji # 3. Verify installation\\nprovisioning providers validate wuji # 4. Edit provider code\\nvim extensions/providers/upcloud/nickel/server_upcloud.ncl # 5. Test changes immediately (no repackaging!)\\ncd workspace/infra/wuji\\nnickel export main.ncl # 6. Remove when done\\nprovisioning providers remove upcloud wuji","breadcrumbs":"Provider Distribution Guide » Example Workflow","id":"2786","title":"Example Workflow"},"2787":{"body":"extensions/providers/upcloud/\\n├── nickel/\\n│ ├── manifest.toml\\n│ ├── server_upcloud.ncl\\n│ └── network_upcloud.ncl\\n└── README.md workspace/infra/wuji/\\n├── .nickel-modules/\\n│ └── upcloud_prov -> ../../../../extensions/providers/upcloud/nickel/ # Symlink\\n├── manifest.toml # Updated with local path dependency\\n├── providers.manifest.yaml # Tracks installed providers\\n└── schemas/ └── servers.ncl","breadcrumbs":"Provider Distribution Guide » File Structure","id":"2787","title":"File Structure"},"2788":{"body":"","breadcrumbs":"Provider Distribution Guide » Provider Packs Approach","id":"2788","title":"Provider Packs Approach"},"2789":{"body":"Create versioned, distributable artifacts for production deployments and team collaboration.","breadcrumbs":"Provider Distribution Guide » Purpose","id":"2789","title":"Purpose"},"279":{"body":"# Check system status\\nprovisioning status # Get next step suggestions\\nprovisioning next # View interactive guide\\nprovisioning guide from-scratch For the complete step-by-step walkthrough, start with Prerequisites.","breadcrumbs":"Quick Start (Full) » Quick Commands","id":"279","title":"Quick Commands"},"2790":{"body":"# Package providers into distributable artifacts\\nexport PROVISIONING=/Users/Akasha/project-provisioning/provisioning\\n./provisioning/core/cli/pack providers # Internal Process:\\n# 1. Enters each provider\'s nickel/ directory\\n# 2. Runs: nickel export . --format json (generates JSON for distribution)\\n# 3. Creates: upcloud_prov_0.0.1.tar\\n# 4. Generates metadata: distribution/registry/upcloud_prov.json","breadcrumbs":"Provider Distribution Guide » How It Works","id":"2790","title":"How It Works"},"2791":{"body":"✅ Versioned Artifacts : Immutable, reproducible packages ✅ Portable : Share across teams and environments ✅ Registry Publishing : Push to artifact registries ✅ Metadata : Version, maintainer, license information ✅ Production-Ready : What you package is what you deploy","breadcrumbs":"Provider Distribution Guide » Key Features","id":"2791","title":"Key Features"},"2792":{"body":"🚀 Production Deployments : Stable, tested provider versions 📦 Distribution : Share across teams or organizations 🔄 CI/CD Pipelines : Automated build and deploy 📊 Version Control : Track provider versions explicitly 🌐 Registry Publishing : Publish to artifact registries 🔒 Compliance : Immutable artifacts for auditing","breadcrumbs":"Provider Distribution Guide » Best Use Cases","id":"2792","title":"Best Use Cases"},"2793":{"body":"# Set environment variable\\nexport PROVISIONING=/Users/Akasha/project-provisioning/provisioning # 1. Package all providers\\n./provisioning/core/cli/pack providers # Output:\\n# ✅ Creates: distribution/packages/upcloud_prov_0.0.1.tar\\n# ✅ Creates: distribution/packages/aws_prov_0.0.1.tar\\n# ✅ Creates: distribution/packages/local_prov_0.0.1.tar\\n# ✅ Metadata: distribution/registry/*.json # 2. List packaged modules\\n./provisioning/core/cli/pack list # 3. Package only core schemas\\n./provisioning/core/cli/pack core # 4. Clean old packages (keep latest 3 versions)\\n./provisioning/core/cli/pack clean --keep-latest 3 # 5. Upload to registry (your implementation)\\n# rsync distribution/packages/*.tar repo.jesusperez.pro:/registry/","breadcrumbs":"Provider Distribution Guide » Example Workflow","id":"2793","title":"Example Workflow"},"2794":{"body":"provisioning/\\n├── distribution/\\n│ ├── packages/\\n│ │ ├── provisioning_0.0.1.tar # Core schemas\\n│ │ ├── upcloud_prov_0.0.1.tar # Provider packages\\n│ │ ├── aws_prov_0.0.1.tar\\n│ │ └── local_prov_0.0.1.tar\\n│ └── registry/\\n│ ├── provisioning_core.json # Metadata\\n│ ├── upcloud_prov.json\\n│ ├── aws_prov.json\\n│ └── local_prov.json\\n└── extensions/providers/ # Source code","breadcrumbs":"Provider Distribution Guide » File Structure","id":"2794","title":"File Structure"},"2795":{"body":"{ \\"name\\": \\"upcloud_prov\\", \\"version\\": \\"0.0.1\\", \\"package_file\\": \\"/path/to/upcloud_prov_0.0.1.tar\\", \\"created\\": \\"2025-09-29 20:47:21\\", \\"maintainer\\": \\"JesusPerezLorenzo\\", \\"repository\\": \\"https://repo.jesusperez.pro/provisioning\\", \\"license\\": \\"MIT\\", \\"homepage\\": \\"https://github.com/jesusperezlorenzo/provisioning\\"\\n}","breadcrumbs":"Provider Distribution Guide » Package Metadata Example","id":"2795","title":"Package Metadata Example"},"2796":{"body":"Feature Module-Loader Provider Packs Speed ⚡ Instant (symlinks) 📦 Requires packaging Versioning ❌ No explicit versions ✅ Semantic versioning Portability ❌ Local filesystem only ✅ Distributable archives Development ✅ Excellent (live reload) ⚠️ Need repackage cycle Production ⚠️ Mutable source ✅ Immutable artifacts Discovery ✅ Auto-discovery ⚠️ Manual tracking Team Sharing ⚠️ Git repository only ✅ Registry + Git Debugging ✅ Direct source access ❌ Need to unpack Rollback ⚠️ Git revert ✅ Version pinning Compliance ❌ Hard to audit ✅ Signed artifacts Setup Time ⚡ Seconds ⏱️ Minutes CI/CD ⚠️ Not ideal ✅ Perfect","breadcrumbs":"Provider Distribution Guide » Comparison Matrix","id":"2796","title":"Comparison Matrix"},"2797":{"body":"","breadcrumbs":"Provider Distribution Guide » Recommended Hybrid Workflow","id":"2797","title":"Recommended Hybrid Workflow"},"2798":{"body":"# 1. Start with module-loader for development\\nprovisioning providers list\\nprovisioning providers install upcloud wuji # 2. Develop and iterate quickly\\nvim extensions/providers/upcloud/nickel/server_upcloud.ncl\\n# Test immediately - no packaging needed # 3. Validate before release\\nprovisioning providers validate wuji\\nnickel export workspace/infra/wuji/main.ncl","breadcrumbs":"Provider Distribution Guide » Development Phase","id":"2798","title":"Development Phase"},"2799":{"body":"# 4. Create release packages\\nexport PROVISIONING=/Users/Akasha/project-provisioning/provisioning\\n./provisioning/core/cli/pack providers # 5. Verify packages\\n./provisioning/core/cli/pack list # 6. Tag release\\ngit tag v0.0.2\\ngit push origin v0.0.2 # 7. Publish to registry (your workflow)\\nrsync distribution/packages/*.tar user@repo.jesusperez.pro:/registry/v0.0.2/","breadcrumbs":"Provider Distribution Guide » Release Phase","id":"2799","title":"Release Phase"},"28":{"body":"OCI-native distribution Automatic dependency resolution Version management Local and remote sources","breadcrumbs":"Home » ✅ Extension Management","id":"28","title":"✅ Extension Management"},"280":{"body":"Before installing the Provisioning Platform, ensure your system meets the following requirements.","breadcrumbs":"Prerequisites » Prerequisites","id":"280","title":"Prerequisites"},"2800":{"body":"# 8. Download specific version from registry\\nwget https://repo.jesusperez.pro/registry/v0.0.2/upcloud_prov_0.0.2.tar # 9. Extract and install\\ntar -xf upcloud_prov_0.0.2.tar -C infrastructure/providers/ # 10. Use in production infrastructure\\n# (Configure manifest.toml to point to extracted package)","breadcrumbs":"Provider Distribution Guide » Production Deployment","id":"2800","title":"Production Deployment"},"2801":{"body":"","breadcrumbs":"Provider Distribution Guide » Command Reference","id":"2801","title":"Command Reference"},"2802":{"body":"# List all available providers\\nprovisioning providers list [--kcl] [--format table|json|yaml] # Show provider information\\nprovisioning providers info [--kcl] # Install provider for infrastructure\\nprovisioning providers install [--version 0.0.1] # Remove provider from infrastructure\\nprovisioning providers remove [--force] # List installed providers\\nprovisioning providers installed [--format table|json|yaml] # Validate provider installation\\nprovisioning providers validate # Sync KCL dependencies\\n./provisioning/core/cli/module-loader sync-kcl ","breadcrumbs":"Provider Distribution Guide » Module-Loader Commands","id":"2802","title":"Module-Loader Commands"},"2803":{"body":"# Set environment variable (required)\\nexport PROVISIONING=/path/to/provisioning # Package core provisioning schemas\\n./provisioning/core/cli/pack core [--output dir] [--version 0.0.1] # Package single provider\\n./provisioning/core/cli/pack provider [--output dir] [--version 0.0.1] # Package all providers\\n./provisioning/core/cli/pack providers [--output dir] # List all packages\\n./provisioning/core/cli/pack list [--format table|json|yaml] # Clean old packages\\n./provisioning/core/cli/pack clean [--keep-latest 3] [--dry-run]","breadcrumbs":"Provider Distribution Guide » Provider Pack Commands","id":"2803","title":"Provider Pack Commands"},"2804":{"body":"","breadcrumbs":"Provider Distribution Guide » Real-World Scenarios","id":"2804","title":"Real-World Scenarios"},"2805":{"body":"Situation : Working alone on local infrastructure projects Recommendation : Module-Loader only # Simple and fast\\nproviders install upcloud homelab\\nproviders install aws cloud-backup\\n# Edit and test freely Why : No need for versioning, packaging overhead unnecessary.","breadcrumbs":"Provider Distribution Guide » Scenario 1: Solo Developer - Local Infrastructure","id":"2805","title":"Scenario 1: Solo Developer - Local Infrastructure"},"2806":{"body":"Situation : 2-5 developers sharing code via Git Recommendation : Module-Loader + Git # Each developer\\ngit clone repo\\nproviders install upcloud project-x\\n# Make changes, commit to Git\\ngit commit -m \\"Add upcloud GPU support\\"\\ngit push\\n# Others pull changes\\ngit pull\\n# Changes immediately available via symlinks Why : Git provides version control, symlinks provide instant updates.","breadcrumbs":"Provider Distribution Guide » Scenario 2: Small Team - Shared Development","id":"2806","title":"Scenario 2: Small Team - Shared Development"},"2807":{"body":"Situation : 10+ developers, multiple infrastructure projects Recommendation : Hybrid (Module-Loader dev + Provider Packs releases) # Development (team member)\\nproviders install upcloud staging-env\\n# Make changes... # Release (release engineer)\\npack providers # Create v0.2.0\\ngit tag v0.2.0\\n# Upload to internal registry # Other projects\\n# Download upcloud_prov_0.2.0.tar\\n# Use stable, tested version Why : Developers iterate fast, other teams use stable versions.","breadcrumbs":"Provider Distribution Guide » Scenario 3: Medium Team - Multiple Projects","id":"2807","title":"Scenario 3: Medium Team - Multiple Projects"},"2808":{"body":"Situation : Critical production systems, compliance requirements Recommendation : Provider Packs only # CI/CD Pipeline\\npack providers # Build artifacts\\n# Run tests on packages\\n# Sign packages\\n# Publish to artifact registry # Production Deployment\\n# Download signed upcloud_prov_1.0.0.tar\\n# Verify signature\\n# Deploy immutable artifact\\n# Document exact versions for compliance Why : Immutability, auditability, and rollback capabilities required.","breadcrumbs":"Provider Distribution Guide » Scenario 4: Enterprise - Production Infrastructure","id":"2808","title":"Scenario 4: Enterprise - Production Infrastructure"},"2809":{"body":"Situation : Sharing providers with community Recommendation : Provider Packs + Registry # Maintainer\\npack providers\\n# Create release on GitHub\\ngh release create v1.0.0 distribution/packages/*.tar # Community User\\n# Download from GitHub releases\\nwget https://github.com/project/releases/v1.0.0/upcloud_prov_1.0.0.tar\\n# Extract and use Why : Easy distribution, versioning, and downloading for users.","breadcrumbs":"Provider Distribution Guide » Scenario 5: Open Source - Public Distribution","id":"2809","title":"Scenario 5: Open Source - Public Distribution"},"281":{"body":"","breadcrumbs":"Prerequisites » Hardware Requirements","id":"281","title":"Hardware Requirements"},"2810":{"body":"","breadcrumbs":"Provider Distribution Guide » Best Practices","id":"2810","title":"Best Practices"},"2811":{"body":"Use Module-Loader by default Fast iteration is crucial during development Symlinks allow immediate testing Keep providers.manifest.yaml in Git Documents which providers are used Team members can sync easily Validate before committing providers validate wuji\\nnickel eval defs/servers.ncl","breadcrumbs":"Provider Distribution Guide » For Development","id":"2811","title":"For Development"},"2812":{"body":"Version Everything Use semantic versioning (0.1.0, 0.2.0, 1.0.0) Update version in kcl.mod before packing Create Packs for Releases pack providers --version 0.2.0\\ngit tag v0.2.0 Test Packs Before Publishing Extract and test packages Verify metadata is correct","breadcrumbs":"Provider Distribution Guide » For Releases","id":"2812","title":"For Releases"},"2813":{"body":"Pin Versions Use exact versions in production kcl.mod Never use \\"latest\\" or symlinks Maintain Artifact Registry Store all production versions Keep old versions for rollback Document Deployments Record which versions deployed when Maintain change log","breadcrumbs":"Provider Distribution Guide » For Production","id":"2813","title":"For Production"},"2814":{"body":"Automate Pack Creation # .github/workflows/release.yml\\n- name: Pack Providers run: | export PROVISIONING=$GITHUB_WORKSPACE/provisioning ./provisioning/core/cli/pack providers Run Tests on Packs Extract packages Run validation tests Ensure they work in isolation Publish Automatically Upload to artifact registry on tag Update package index","breadcrumbs":"Provider Distribution Guide » For CI/CD","id":"2814","title":"For CI/CD"},"2815":{"body":"","breadcrumbs":"Provider Distribution Guide » Migration Path","id":"2815","title":"Migration Path"},"2816":{"body":"When you\'re ready to move to production: # 1. Clean up development setup\\nproviders remove upcloud wuji # 2. Create release pack\\npack providers --version 1.0.0 # 3. Extract pack in infrastructure\\ncd workspace/infra/wuji\\ntar -xf ../../../distribution/packages/upcloud_prov_1.0.0.tar vendor/ # 4. Update kcl.mod to use vendored path\\n# Change from: upcloud_prov = { path = \\"./.kcl-modules/upcloud_prov\\" }\\n# To: upcloud_prov = { path = \\"./vendor/upcloud_prov\\", version = \\"1.0.0\\" } # 5. Test\\nnickel eval defs/servers.ncl","breadcrumbs":"Provider Distribution Guide » From Module-Loader to Packs","id":"2816","title":"From Module-Loader to Packs"},"2817":{"body":"When you need to debug or develop: # 1. Remove vendored version\\nrm -rf workspace/infra/wuji/vendor/upcloud_prov # 2. Install via module-loader\\nproviders install upcloud wuji # 3. Make changes in extensions/providers/upcloud/kcl/ # 4. Test immediately\\ncd workspace/infra/wuji\\nnickel eval defs/servers.ncl","breadcrumbs":"Provider Distribution Guide » From Packs Back to Module-Loader","id":"2817","title":"From Packs Back to Module-Loader"},"2818":{"body":"","breadcrumbs":"Provider Distribution Guide » Configuration","id":"2818","title":"Configuration"},"2819":{"body":"# Required for pack commands\\nexport PROVISIONING=/path/to/provisioning # Alternative\\nexport PROVISIONING_CONFIG=/path/to/provisioning","breadcrumbs":"Provider Distribution Guide » Environment Variables","id":"2819","title":"Environment Variables"},"282":{"body":"CPU : 2 cores RAM : 4 GB Disk : 20 GB available space Network : Internet connection for downloading dependencies","breadcrumbs":"Prerequisites » Minimum Requirements (Solo Mode)","id":"282","title":"Minimum Requirements (Solo Mode)"},"2820":{"body":"Distribution settings in provisioning/config/config.defaults.toml: [distribution]\\npack_path = \\"{{paths.base}}/distribution/packages\\"\\nregistry_path = \\"{{paths.base}}/distribution/registry\\"\\ncache_path = \\"{{paths.base}}/distribution/cache\\"\\nregistry_type = \\"local\\" [distribution.metadata]\\nmaintainer = \\"JesusPerezLorenzo\\"\\nrepository = \\"https://repo.jesusperez.pro/provisioning\\"\\nlicense = \\"MIT\\"\\nhomepage = \\"https://github.com/jesusperezlorenzo/provisioning\\" [kcl]\\ncore_module = \\"{{paths.base}}/kcl\\"\\ncore_version = \\"0.0.1\\"\\ncore_package_name = \\"provisioning_core\\"\\nuse_module_loader = true\\nmodules_dir = \\".kcl-modules\\"","breadcrumbs":"Provider Distribution Guide » Config Files","id":"2820","title":"Config Files"},"2821":{"body":"","breadcrumbs":"Provider Distribution Guide » Troubleshooting","id":"2821","title":"Troubleshooting"},"2822":{"body":"Problem : Provider not found after install # Check provider exists\\nproviders list | grep upcloud # Validate installation\\nproviders validate wuji # Check symlink\\nls -la workspace/infra/wuji/.kcl-modules/ Problem : Changes not reflected # Verify symlink is correct\\nreadlink workspace/infra/wuji/.kcl-modules/upcloud_prov # Should point to extensions/providers/upcloud/kcl/","breadcrumbs":"Provider Distribution Guide » Module-Loader Issues","id":"2822","title":"Module-Loader Issues"},"2823":{"body":"Problem : No .tar file created # Check KCL version (need 0.11.3+)\\nkcl version # Check kcl.mod exists\\nls extensions/providers/upcloud/kcl/kcl.mod Problem : PROVISIONING environment variable not set # Set it\\nexport PROVISIONING=/Users/Akasha/project-provisioning/provisioning # Or add to shell profile\\necho \'export PROVISIONING=/path/to/provisioning\' >> ~/.zshrc","breadcrumbs":"Provider Distribution Guide » Provider Pack Issues","id":"2823","title":"Provider Pack Issues"},"2824":{"body":"Both approaches are valuable and complementary: Module-Loader : Development velocity, rapid iteration Provider Packs : Production stability, version control Default Strategy: Use Module-Loader for day-to-day development Create Provider Packs for releases and production Both systems work seamlessly together The system is designed for flexibility - choose the right tool for your current phase of work!","breadcrumbs":"Provider Distribution Guide » Conclusion","id":"2824","title":"Conclusion"},"2825":{"body":"Module-Loader Implementation KCL Packaging Implementation [Providers CLI](.provisioning providers) Pack CLI KCL Documentation Document Version : 1.0.0 Last Updated : 2025-09-29 Maintained by : JesusPerezLorenzo","breadcrumbs":"Provider Distribution Guide » Additional Resources","id":"2825","title":"Additional Resources"},"2826":{"body":"This document provides a comprehensive comparison of supported cloud providers: Hetzner, UpCloud, AWS, and DigitalOcean. Use this matrix to make informed decisions about which provider is best suited for your workloads.","breadcrumbs":"Provider Comparison Matrix » Provider Comparison Matrix","id":"2826","title":"Provider Comparison Matrix"},"2827":{"body":"","breadcrumbs":"Provider Comparison Matrix » Feature Comparison","id":"2827","title":"Feature Comparison"},"2828":{"body":"Feature Hetzner UpCloud AWS DigitalOcean Product Name Cloud Servers Servers EC2 Droplets Instance Sizing Standard, dedicated cores 2-32 vCPUs Extensive (t2, t3, m5, c5, etc) 1-48 vCPUs Custom CPU/RAM ✓ ✓ Limited ✗ Hourly Billing ✓ ✓ ✓ ✓ Monthly Discount 30% 25% ~30% (RI) ~25% GPU Instances ✓ ✗ ✓ ✗ Auto-scaling Via API Via API Native (ASG) Via API Bare Metal ✓ ✗ ✓ (EC2) ✗","breadcrumbs":"Provider Comparison Matrix » Compute","id":"2828","title":"Compute"},"2829":{"body":"Feature Hetzner UpCloud AWS DigitalOcean Product Name Volumes Storage EBS Volumes SSD Volumes ✓ ✓ ✓ (gp3, io1) ✓ HDD Volumes ✗ ✓ ✓ (st1, sc1) ✗ Max Volume Size 10 TB Unlimited 16 TB 100 TB IOPS Provisioning Limited ✓ ✓ ✗ Snapshots ✓ ✓ ✓ ✓ Encryption ✓ ✓ ✓ ✓ Backup Service ✗ ✗ ✓ (AWS Backup) ✓","breadcrumbs":"Provider Comparison Matrix » Block Storage","id":"2829","title":"Block Storage"},"283":{"body":"CPU : 4 cores RAM : 8 GB Disk : 50 GB available space Network : Reliable internet connection","breadcrumbs":"Prerequisites » Recommended Requirements (Multi-User Mode)","id":"283","title":"Recommended Requirements (Multi-User Mode)"},"2830":{"body":"Feature Hetzner UpCloud AWS DigitalOcean Product Name Object Storage — S3 Spaces API Compatibility S3-compatible — S3 (native) S3-compatible Pricing (per GB) €0.025 N/A $0.023 $0.015 Regions 2 N/A 30+ 4 Versioning ✓ N/A ✓ ✓ Lifecycle Rules ✓ N/A ✓ ✓ CDN Integration ✗ N/A ✓ (CloudFront) ✓ (CDN add-on) Access Control Bucket policies N/A IAM + bucket policies Token-based","breadcrumbs":"Provider Comparison Matrix » Object Storage","id":"2830","title":"Object Storage"},"2831":{"body":"Feature Hetzner UpCloud AWS DigitalOcean Product Name Load Balancer Load Balancer ELB/ALB/NLB Load Balancer Type Layer 4/7 Layer 4 Layer 4/7 Layer 4/7 Health Checks ✓ ✓ ✓ ✓ SSL/TLS Termination ✓ Limited ✓ ✓ Path-based Routing ✓ ✗ ✓ (ALB) ✗ Host-based Routing ✓ ✗ ✓ (ALB) ✗ Sticky Sessions ✓ ✓ ✓ ✓ Geographic Distribution ✗ ✗ ✓ (multi-region) ✗ DDoS Protection Basic ✓ ✓ (Shield) ✓","breadcrumbs":"Provider Comparison Matrix » Load Balancing","id":"2831","title":"Load Balancing"},"2832":{"body":"Feature Hetzner UpCloud AWS DigitalOcean PostgreSQL ✗ ✗ ✓ (RDS) ✓ MySQL ✗ ✗ ✓ (RDS) ✓ Redis ✗ ✗ ✓ (ElastiCache) ✓ MongoDB ✗ ✗ ✓ (DocumentDB) ✗ Multi-AZ N/A N/A ✓ ✓ Automatic Backups N/A N/A ✓ ✓ Read Replicas N/A N/A ✓ ✓ Param Groups N/A N/A ✓ ✗","breadcrumbs":"Provider Comparison Matrix » Managed Databases","id":"2832","title":"Managed Databases"},"2833":{"body":"Feature Hetzner UpCloud AWS DigitalOcean Service Manual K8s Manual K8s EKS DOKS Managed Service ✗ ✗ ✓ ✓ Control Plane Managed ✗ ✗ ✓ ✓ Node Management ✗ ✗ ✓ (node groups) ✓ (node pools) Multi-AZ ✗ ✗ ✓ ✓ Ingress Support Via add-on Via add-on ✓ (ALB) ✓ Storage Classes Via add-on Via add-on ✓ (EBS) ✓","breadcrumbs":"Provider Comparison Matrix » Kubernetes","id":"2833","title":"Kubernetes"},"2834":{"body":"Feature Hetzner UpCloud AWS DigitalOcean CDN Service ✗ ✗ ✓ (CloudFront) ✓ Edge Locations — — 600+ 12+ Geographic Routing — — ✓ ✗ Cache Invalidation — — ✓ ✓ Origins — — Any HTTP/S, Object Storage SSL/TLS — — ✓ ✓ DDoS Protection — — ✓ (Shield) ✓","breadcrumbs":"Provider Comparison Matrix » CDN/Edge","id":"2834","title":"CDN/Edge"},"2835":{"body":"Feature Hetzner UpCloud AWS DigitalOcean DNS Service ✓ (Basic) ✗ ✓ (Route53) ✓ Zones ✓ N/A ✓ ✓ Failover Manual N/A ✓ (health checks) ✓ (health checks) Geolocation ✗ N/A ✓ ✗ DNSSEC ✓ N/A ✓ ✗ API Management Limited N/A Full Full","breadcrumbs":"Provider Comparison Matrix » DNS","id":"2835","title":"DNS"},"2836":{"body":"","breadcrumbs":"Provider Comparison Matrix » Pricing Comparison","id":"2836","title":"Pricing Comparison"},"2837":{"body":"Comparison for 1-year term where applicable: Configuration Hetzner UpCloud AWS* DigitalOcean 1 vCPU, 1 GB RAM €3.29 $5 $18 (t3.micro) $6 2 vCPU, 4 GB RAM €6.90 $15 $36 (t3.small) $24 4 vCPU, 8 GB RAM €13.80 $30 $73 (t3.medium) $48 8 vCPU, 16 GB RAM €27.60 $60 $146 (t3.large) $96 16 vCPU, 32 GB RAM €55.20 $120 $291 (t3.xlarge) $192 *AWS pricing: on-demand; reserved instances 25-30% discount","breadcrumbs":"Provider Comparison Matrix » Compute Pricing (Monthly)","id":"2837","title":"Compute Pricing (Monthly)"},"2838":{"body":"Per GB for block storage: Provider Price/GB Monthly Cost (100 GB) Hetzner €0.026 €2.60 UpCloud $0.025 $2.50 AWS EBS $0.10 $10.00 DigitalOcean $0.10 $10.00","breadcrumbs":"Provider Comparison Matrix » Storage Pricing (Monthly)","id":"2838","title":"Storage Pricing (Monthly)"},"2839":{"body":"Outbound data transfer (per GB): Provider First 1 TB Beyond 1 TB Hetzner Included €0.12/GB UpCloud $0.02/GB $0.01/GB AWS $0.09/GB $0.085/GB DigitalOcean $0.01/GB $0.01/GB","breadcrumbs":"Provider Comparison Matrix » Data Transfer Pricing","id":"2839","title":"Data Transfer Pricing"},"284":{"body":"CPU : 16 cores RAM : 32 GB Disk : 500 GB available space (SSD recommended) Network : High-bandwidth connection with static IP","breadcrumbs":"Prerequisites » Production Requirements (Enterprise Mode)","id":"284","title":"Production Requirements (Enterprise Mode)"},"2840":{"body":"Small Application (2 servers, 100 GB storage) Provider Compute Storage Data Transfer Monthly Hetzner €13.80 €2.60 Included €16.40 UpCloud $30 $2.50 $20 $52.50 AWS $72 $10 $45 $127 DigitalOcean $48 $10 Included $58 Medium Application (5 servers, 500 GB storage, 10 TB data transfer) Provider Compute Storage Data Transfer Monthly Hetzner €69 €13 €1,200 €1,282 UpCloud $150 $12.50 $200 $362.50 AWS $360 $50 $900 $1,310 DigitalOcean $240 $50 Included $290","breadcrumbs":"Provider Comparison Matrix » Total Cost of Ownership (TCO) Examples","id":"2840","title":"Total Cost of Ownership (TCO) Examples"},"2841":{"body":"","breadcrumbs":"Provider Comparison Matrix » Regional Availability","id":"2841","title":"Regional Availability"},"2842":{"body":"Region Location Data Center Highlights nbg1 Nuremberg, Germany 3 EU hub, good performance fsn1 Falkenstein, Germany 1 Lower latency, German regulations hel1 Helsinki, Finland 1 Nordic region option ash Ashburn, USA 1 North American presence","breadcrumbs":"Provider Comparison Matrix » Hetzner Regions","id":"2842","title":"Hetzner Regions"},"2843":{"body":"Region Location Highlights fi-hel1 Helsinki, Finland Primary EU location de-fra1 Frankfurt, Germany EU alternative gb-lon1 London, UK European coverage us-nyc1 New York, USA North America sg-sin1 Singapore Asia Pacific jp-tok1 Tokyo, Japan APAC alternative","breadcrumbs":"Provider Comparison Matrix » UpCloud Regions","id":"2843","title":"UpCloud Regions"},"2844":{"body":"Region Location Availability Zones Highlights us-east-1 N. Virginia, USA 6 Largest, most services eu-west-1 Ireland 3 EU primary, GDPR compliant eu-central-1 Frankfurt, Germany 3 German data residency ap-southeast-1 Singapore 3 APAC primary ap-northeast-1 Tokyo, Japan 4 Asia alternative","breadcrumbs":"Provider Comparison Matrix » AWS Regions (Selection)","id":"2844","title":"AWS Regions (Selection)"},"2845":{"body":"Region Location Highlights nyc3 New York, USA Primary US location sfo3 San Francisco, USA US West Coast lon1 London, UK European hub fra1 Frankfurt, Germany German regulations sgp1 Singapore APAC coverage blr1 Bangalore, India India region","breadcrumbs":"Provider Comparison Matrix » DigitalOcean Regions","id":"2845","title":"DigitalOcean Regions"},"2846":{"body":"Best Global Coverage : AWS (30+ regions, most services) Best EU Coverage : All providers have good EU options Best APAC Coverage : AWS (most regions), DigitalOcean (Singapore) Best North America : All providers have coverage Emerging Markets : DigitalOcean (India via Bangalore)","breadcrumbs":"Provider Comparison Matrix » Regional Coverage Summary","id":"2846","title":"Regional Coverage Summary"},"2847":{"body":"","breadcrumbs":"Provider Comparison Matrix » Compliance and Certifications","id":"2847","title":"Compliance and Certifications"},"2848":{"body":"Standard Hetzner UpCloud AWS DigitalOcean GDPR ✓ ✓ ✓ ✓ CCPA ✓ ✓ ✓ ✓ SOC 2 Type II ✓ ✓ ✓ ✓ ISO 27001 ✓ ✓ ✓ ✓ ISO 9001 ✗ ✗ ✓ ✓ FedRAMP ✗ ✗ ✓ ✗","breadcrumbs":"Provider Comparison Matrix » Security Standards","id":"2848","title":"Security Standards"},"2849":{"body":"Standard Hetzner UpCloud AWS DigitalOcean HIPAA ✗ ✗ ✓ ✓** PCI-DSS ✓ ✓ ✓ ✓ HITRUST ✗ ✗ ✓ ✗ FIPS 140-2 ✗ ✗ ✓ ✗ SOX (Sarbanes-Oxley) Limited Limited ✓ Limited **DigitalOcean: Requires BAA for HIPAA compliance","breadcrumbs":"Provider Comparison Matrix » Industry-Specific Compliance","id":"2849","title":"Industry-Specific Compliance"},"285":{"body":"","breadcrumbs":"Prerequisites » Operating System","id":"285","title":"Operating System"},"2850":{"body":"Region Hetzner UpCloud AWS DigitalOcean EU (GDPR) ✓ DE,FI ✓ FI,DE,GB ✓ (multiple) ✓ (multiple) Germany (NIS2) ✓ ✓ ✓ ✓ UK (Post-Brexit) ✗ ✓ GB ✓ ✓ USA (CCPA) ✗ ✓ ✓ ✓ Canada ✗ ✗ ✓ ✗ Australia ✗ ✗ ✓ ✗ India ✗ ✗ ✓ ✓","breadcrumbs":"Provider Comparison Matrix » Data Residency Support","id":"2850","title":"Data Residency Support"},"2851":{"body":"","breadcrumbs":"Provider Comparison Matrix » Use Case Recommendations","id":"2851","title":"Use Case Recommendations"},"2852":{"body":"Recommended : Hetzner primary + DigitalOcean backup Rationale : Hetzner has best price/performance ratio DigitalOcean for geographic diversification Both have simple interfaces and good documentation Monthly cost: $30-80 for basic HA setup Example Setup : Primary: Hetzner cx31 (2 vCPU, 4 GB) Backup: DigitalOcean $24/month droplet Database: Self-managed PostgreSQL or Hetzner volume Total: ~$35/month","breadcrumbs":"Provider Comparison Matrix » 1. Cost-Sensitive Startups","id":"2852","title":"1. Cost-Sensitive Startups"},"2853":{"body":"Recommended : AWS primary + UpCloud backup Rationale : AWS for managed services and compliance UpCloud for cost-effective disaster recovery AWS compliance certifications (HIPAA, FIPS, SOC2) Multiple regions within AWS Mature enterprise support Example Setup : Primary: AWS RDS (managed DB) Secondary: UpCloud for compute burst Compliance: Full audit trail and encryption","breadcrumbs":"Provider Comparison Matrix » 2. Enterprise Production","id":"2853","title":"2. Enterprise Production"},"2854":{"body":"Recommended : Hetzner + AWS spot instances Rationale : Hetzner for sustained compute (good price) AWS spot for burst workloads (70-90% discount) Hetzner bare metal for specialized workloads Cost-effective scaling","breadcrumbs":"Provider Comparison Matrix » 3. High-Performance Computing","id":"2854","title":"3. High-Performance Computing"},"2855":{"body":"Recommended : AWS + DigitalOcean + Hetzner Rationale : AWS for primary regions and managed services DigitalOcean for edge locations and simpler regions Hetzner for EU cost optimization Geographic redundancy across 3 providers Example Setup : US: AWS (primary region) EU: Hetzner (cost-optimized) APAC: DigitalOcean (Singapore) Global: CloudFront CDN","breadcrumbs":"Provider Comparison Matrix » 4. Multi-Region Global Application","id":"2855","title":"4. Multi-Region Global Application"},"2856":{"body":"Recommended : AWS RDS/ElastiCache + DigitalOcean Spaces Rationale : AWS managed databases are feature-rich DigitalOcean managed DB for simpler needs Both support replicas and backups Cost: $60-200/month for medium database","breadcrumbs":"Provider Comparison Matrix » 5. Database-Heavy Applications","id":"2856","title":"5. Database-Heavy Applications"},"2857":{"body":"Recommended : DigitalOcean + AWS Rationale : DigitalOcean for simplicity and speed Droplets easy to manage and scale AWS for advanced features and multi-region Good community and documentation","breadcrumbs":"Provider Comparison Matrix » 6. Web Applications","id":"2857","title":"6. Web Applications"},"2858":{"body":"","breadcrumbs":"Provider Comparison Matrix » Provider Strength Matrix","id":"2858","title":"Provider Strength Matrix"},"2859":{"body":"Category Winner Notes CPU Performance Hetzner Dedicated cores, good specs per price Network Bandwidth AWS 1Gbps+ guaranteed in multiple regions Storage IOPS AWS gp3 with 16K IOPS provisioning Latency (Global) AWS Most regions, best infrastructure","breadcrumbs":"Provider Comparison Matrix » Performance ⚡","id":"2859","title":"Performance ⚡"},"286":{"body":"macOS : 12.0 (Monterey) or later Linux : Ubuntu 22.04 LTS or later Fedora 38 or later Debian 12 (Bookworm) or later RHEL 9 or later","breadcrumbs":"Prerequisites » Supported Platforms","id":"286","title":"Supported Platforms"},"2860":{"body":"Category Winner Notes Compute Hetzner 50% cheaper than AWS on-demand Managed Services AWS Only provider with full managed stack Data Transfer DigitalOcean Included with many services Storage Hetzner Object Storage €0.025/GB vs AWS S3 $0.023/GB","breadcrumbs":"Provider Comparison Matrix » Cost 💰","id":"2860","title":"Cost 💰"},"2861":{"body":"Category Winner Notes UI/Dashboard DigitalOcean Simple, intuitive, clear pricing CLI Tools AWS Comprehensive aws-cli (but steep) API Documentation DigitalOcean Clear examples, community-driven Getting Started DigitalOcean Fastest path to first deployment","breadcrumbs":"Provider Comparison Matrix » Ease of Use 🎯","id":"2861","title":"Ease of Use 🎯"},"2862":{"body":"Category Winner Notes Managed Services AWS RDS, ElastiCache, SQS, SNS, etc Compliance AWS Most certifications (HIPAA, FIPS, etc) Support AWS 24/7 support with paid plans Scale AWS Best for 1000+ servers","breadcrumbs":"Provider Comparison Matrix » Enterprise Features 🏢","id":"2862","title":"Enterprise Features 🏢"},"2863":{"body":"Use this matrix to quickly select a provider: If you need: Then use:\\n─────────────────────────────────────────────────────────────\\nLowest cost compute Hetzner\\nSimplest interface DigitalOcean\\nManaged databases AWS or DigitalOcean\\nGlobal multi-region AWS\\nCompliance (HIPAA/FIPS) AWS\\nEuropean data residency Hetzner or DigitalOcean\\nHigh performance compute Hetzner or AWS (bare metal)\\nDisaster recovery setup UpCloud or Hetzner\\nQuick startup DigitalOcean\\nEnterprise SLA AWS or UpCloud","breadcrumbs":"Provider Comparison Matrix » Decision Matrix","id":"2863","title":"Decision Matrix"},"2864":{"body":"Hetzner : Best for cost-conscious teams, European focus, good performance UpCloud : Mid-market option, Nordic/EU focus, reliable alternative AWS : Enterprise standard, global coverage, most services, highest cost DigitalOcean : Developer-friendly, simplicity-focused, good value For most organizations, a multi-provider strategy combining Hetzner (compute), AWS (managed services), and DigitalOcean (edge) provides the best balance of cost, capability, and resilience.","breadcrumbs":"Provider Comparison Matrix » Conclusion","id":"2864","title":"Conclusion"},"2865":{"body":"","breadcrumbs":"TaskServ Quick Guide » Taskserv Quick Guide","id":"2865","title":"Taskserv Quick Guide"},"2866":{"body":"","breadcrumbs":"TaskServ Quick Guide » 🚀 Quick Start","id":"2866","title":"🚀 Quick Start"},"2867":{"body":"nu provisioning/tools/create-taskserv-helper.nu interactive","breadcrumbs":"TaskServ Quick Guide » Create a New Taskserv (Interactive)","id":"2867","title":"Create a New Taskserv (Interactive)"},"2868":{"body":"nu provisioning/tools/create-taskserv-helper.nu create my-api \\\\ --category development \\\\ --port 8080 \\\\ --description \\"My REST API service\\"","breadcrumbs":"TaskServ Quick Guide » Create a New Taskserv (Direct)","id":"2868","title":"Create a New Taskserv (Direct)"},"2869":{"body":"","breadcrumbs":"TaskServ Quick Guide » 📋 5-Minute Setup","id":"2869","title":"📋 5-Minute Setup"},"287":{"body":"macOS : Xcode Command Line Tools required Homebrew recommended for package management Linux : systemd-based distribution recommended sudo access required for some operations","breadcrumbs":"Prerequisites » Platform-Specific Notes","id":"287","title":"Platform-Specific Notes"},"2870":{"body":"Interactive : nu provisioning/tools/create-taskserv-helper.nu interactive Command Line : Use the direct command above Manual : Follow the structure guide below","breadcrumbs":"TaskServ Quick Guide » 1. Choose Your Method","id":"2870","title":"1. Choose Your Method"},"2871":{"body":"my-service/\\n├── nickel/\\n│ ├── manifest.toml # Package definition\\n│ ├── my-service.ncl # Main schema\\n│ └── version.ncl # Version info\\n├── default/\\n│ ├── defs.toml # Default config\\n│ └── install-*.sh # Install script\\n└── README.md # Documentation","breadcrumbs":"TaskServ Quick Guide » 2. Basic Structure","id":"2871","title":"2. Basic Structure"},"2872":{"body":"manifest.toml (package definition): [package]\\nname = \\"my-service\\"\\nversion = \\"1.0.0\\"\\ndescription = \\"My service\\" [dependencies]\\nk8s = { oci = \\"oci://ghcr.io/kcl-lang/k8s\\", tag = \\"1.30\\" } my-service.ncl (main schema): let MyService = { name | String, version | String, port | Number, replicas | Number,\\n} in { my_service_config = { name = \\"my-service\\", version = \\"latest\\", port = 8080, replicas = 1, }\\n}","breadcrumbs":"TaskServ Quick Guide » 3. Essential Files","id":"2872","title":"3. Essential Files"},"2873":{"body":"# Discover your taskserv\\nnu -c \\"use provisioning/core/nulib/taskservs/discover.nu *; get-taskserv-info my-service\\" # Test layer resolution\\nnu -c \\"use provisioning/workspace/tools/layer-utils.nu *; test_layer_resolution my-service wuji upcloud\\" # Deploy with check\\nprovisioning/core/cli/provisioning taskserv create my-service --infra wuji --check","breadcrumbs":"TaskServ Quick Guide » 4. Test Your Taskserv","id":"2873","title":"4. Test Your Taskserv"},"2874":{"body":"","breadcrumbs":"TaskServ Quick Guide » 🎯 Common Patterns","id":"2874","title":"🎯 Common Patterns"},"2875":{"body":"let WebService = { name | String, version | String | default = \\"latest\\", port | Number | default = 8080, replicas | Number | default = 1, ingress | { enabled | Bool | default = true, hostname | String, tls | Bool | default = false, }, resources | { cpu | String | default = \\"100m\\", memory | String | default = \\"128Mi\\", },\\n} in\\nWebService","breadcrumbs":"TaskServ Quick Guide » Web Service","id":"2875","title":"Web Service"},"2876":{"body":"let DatabaseService = { name | String, version | String | default = \\"latest\\", port | Number | default = 5432, persistence | { enabled | Bool | default = true, size | String | default = \\"10Gi\\", storage_class | String | default = \\"ssd\\", }, auth | { database | String | default = \\"app\\", username | String | default = \\"user\\", password_secret | String, },\\n} in\\nDatabaseService","breadcrumbs":"TaskServ Quick Guide » Database Service","id":"2876","title":"Database Service"},"2877":{"body":"let BackgroundWorker = { name | String, version | String | default = \\"latest\\", replicas | Number | default = 1, job | { schedule | String | optional, # Cron format for scheduled jobs parallelism | Number | default = 1, completions | Number | default = 1, }, resources | { cpu | String | default = \\"500m\\", memory | String | default = \\"512Mi\\", },\\n} in\\nBackgroundWorker","breadcrumbs":"TaskServ Quick Guide » Background Worker","id":"2877","title":"Background Worker"},"2878":{"body":"","breadcrumbs":"TaskServ Quick Guide » 🛠️ CLI Shortcuts","id":"2878","title":"🛠️ CLI Shortcuts"},"2879":{"body":"# List all taskservs\\nnu -c \\"use provisioning/core/nulib/taskservs/discover.nu *; discover-taskservs | select name group\\" # Search taskservs\\nnu -c \\"use provisioning/core/nulib/taskservs/discover.nu *; search-taskservs redis\\" # Show stats\\nnu -c \\"use provisioning/workspace/tools/layer-utils.nu *; show_layer_stats\\"","breadcrumbs":"TaskServ Quick Guide » Discovery","id":"2879","title":"Discovery"},"288":{"body":"","breadcrumbs":"Prerequisites » Required Software","id":"288","title":"Required Software"},"2880":{"body":"# Check Nickel syntax\\nnickel typecheck provisioning/extensions/taskservs/{category}/{name}/schemas/{name}.ncl # Generate configuration\\nprovisioning/core/cli/provisioning taskserv generate {name} --infra {infra} # Version management\\nprovisioning/core/cli/provisioning taskserv versions {name}\\nprovisioning/core/cli/provisioning taskserv check-updates","breadcrumbs":"TaskServ Quick Guide » Development","id":"2880","title":"Development"},"2881":{"body":"# Dry run deployment\\nprovisioning/core/cli/provisioning taskserv create {name} --infra {infra} --check # Layer resolution debug\\nnu -c \\"use provisioning/workspace/tools/layer-utils.nu *; test_layer_resolution {name} {infra} {provider}\\"","breadcrumbs":"TaskServ Quick Guide » Testing","id":"2881","title":"Testing"},"2882":{"body":"Category Examples Use Case container-runtime containerd, crio, podman Container runtime engines databases postgres, redis Database services development coder, gitea, desktop Development tools infrastructure kms, webhook, os System infrastructure kubernetes kubernetes Kubernetes orchestration networking cilium, coredns, etcd Network services storage rook-ceph, external-nfs Storage solutions","breadcrumbs":"TaskServ Quick Guide » 📚 Categories Reference","id":"2882","title":"📚 Categories Reference"},"2883":{"body":"","breadcrumbs":"TaskServ Quick Guide » 🔧 Troubleshooting","id":"2883","title":"🔧 Troubleshooting"},"2884":{"body":"# Check if discovered\\nnu -c \\"use provisioning/core/nulib/taskservs/discover.nu *; discover-taskservs | where name == my-service\\" # Verify kcl.mod exists\\nls provisioning/extensions/taskservs/{category}/my-service/kcl/kcl.mod","breadcrumbs":"TaskServ Quick Guide » Taskserv Not Found","id":"2884","title":"Taskserv Not Found"},"2885":{"body":"# Debug resolution\\nnu -c \\"use provisioning/workspace/tools/layer-utils.nu *; test_layer_resolution my-service wuji upcloud\\" # Check template exists\\nls provisioning/workspace/templates/taskservs/{category}/my-service.ncl","breadcrumbs":"TaskServ Quick Guide » Layer Resolution Issues","id":"2885","title":"Layer Resolution Issues"},"2886":{"body":"# Check syntax\\nnickel typecheck provisioning/extensions/taskservs/{category}/my-service/schemas/my-service.ncl # Format code\\nnickel format provisioning/extensions/taskservs/{category}/my-service/schemas/","breadcrumbs":"TaskServ Quick Guide » Nickel Syntax Errors","id":"2886","title":"Nickel Syntax Errors"},"2887":{"body":"Use existing taskservs as templates - Copy and modify similar services Test with --check first - Always use dry run before actual deployment Follow naming conventions - Use kebab-case for consistency Document thoroughly - Good docs save time later Version your schemas - Include version.ncl for compatibility tracking","breadcrumbs":"TaskServ Quick Guide » 💡 Pro Tips","id":"2887","title":"💡 Pro Tips"},"2888":{"body":"Read the full Taskserv Developer Guide Explore existing taskservs in provisioning/extensions/taskservs/ Check out templates in provisioning/workspace/templates/taskservs/ Join the development community for support","breadcrumbs":"TaskServ Quick Guide » 🔗 Next Steps","id":"2888","title":"🔗 Next Steps"},"2889":{"body":"","breadcrumbs":"TaskServ Categorization » Taskserv Categorization Plan","id":"2889","title":"Taskserv Categorization Plan"},"289":{"body":"Software Version Purpose Nushell 0.107.1+ Shell and scripting language Nickel 1.15.0+ Configuration language Docker 20.10+ Container runtime (for platform services) SOPS 3.10.2+ Secrets management Age 1.2.1+ Encryption tool","breadcrumbs":"Prerequisites » Core Dependencies","id":"289","title":"Core Dependencies"},"2890":{"body":"","breadcrumbs":"TaskServ Categorization » Categories and Taskservs (38 total)","id":"2890","title":"Categories and Taskservs (38 total)"},"2891":{"body":"kubernetes","breadcrumbs":"TaskServ Categorization » kubernetes/ (1)","id":"2891","title":"kubernetes/ (1)"},"2892":{"body":"cilium coredns etcd ip-aliases proxy resolv","breadcrumbs":"TaskServ Categorization » networking/ (6)","id":"2892","title":"networking/ (6)"},"2893":{"body":"containerd crio crun podman runc youki","breadcrumbs":"TaskServ Categorization » container-runtime/ (6)","id":"2893","title":"container-runtime/ (6)"},"2894":{"body":"external-nfs mayastor oci-reg rook-ceph","breadcrumbs":"TaskServ Categorization » storage/ (4)","id":"2894","title":"storage/ (4)"},"2895":{"body":"postgres redis","breadcrumbs":"TaskServ Categorization » databases/ (2)","id":"2895","title":"databases/ (2)"},"2896":{"body":"coder desktop gitea nushell oras radicle","breadcrumbs":"TaskServ Categorization » development/ (6)","id":"2896","title":"development/ (6)"},"2897":{"body":"kms os provisioning polkadot webhook kubectl","breadcrumbs":"TaskServ Categorization » infrastructure/ (6)","id":"2897","title":"infrastructure/ (6)"},"2898":{"body":"generate","breadcrumbs":"TaskServ Categorization » misc/ (1)","id":"2898","title":"misc/ (1)"},"2899":{"body":"info.md manifest.toml manifest.lock README.md REFERENCE.md version.ncl Total categorized: 32 taskservs + 6 root files = 38 items ✓","breadcrumbs":"TaskServ Categorization » Keep in root/ (6)","id":"2899","title":"Keep in root/ (6)"},"29":{"body":"","breadcrumbs":"Home » Key Achievements","id":"29","title":"Key Achievements"},"290":{"body":"Software Version Purpose Podman 4.0+ Alternative container runtime OrbStack Latest macOS-optimized container runtime K9s 0.50.6+ Kubernetes management interface glow Latest Markdown renderer for guides bat Latest Syntax highlighting for file viewing","breadcrumbs":"Prerequisites » Optional Dependencies","id":"290","title":"Optional Dependencies"},"2900":{"body":"Version : 1.0.0 Last Updated : 2026-01-05 Target Audience : DevOps Engineers, Platform Operators Status : Production Ready Practical guide for deploying the 9-service provisioning platform in any environment using mode-based configuration.","breadcrumbs":"Platform Deployment Guide » Platform Deployment Guide","id":"2900","title":"Platform Deployment Guide"},"2901":{"body":"Prerequisites Deployment Modes Quick Start Solo Mode Deployment Multiuser Mode Deployment CICD Mode Deployment Enterprise Mode Deployment Service Management Health Checks & Monitoring Troubleshooting","breadcrumbs":"Platform Deployment Guide » Table of Contents","id":"2901","title":"Table of Contents"},"2902":{"body":"","breadcrumbs":"Platform Deployment Guide » Prerequisites","id":"2902","title":"Prerequisites"},"2903":{"body":"Rust : 1.70+ (for building services) Nickel : Latest (for config validation) Nushell : 0.109.1+ (for scripts) Cargo : Included with Rust Git : For cloning and pulling updates","breadcrumbs":"Platform Deployment Guide » Required Software","id":"2903","title":"Required Software"},"2904":{"body":"Tool Solo Multiuser CICD Enterprise Docker/Podman No Optional Yes Yes SurrealDB No Yes No No Etcd No No No Yes PostgreSQL No Optional No Optional OpenAI/Anthropic API No Optional Yes Yes","breadcrumbs":"Platform Deployment Guide » Required Tools (Mode-Dependent)","id":"2904","title":"Required Tools (Mode-Dependent)"},"2905":{"body":"Resource Solo Multiuser CICD Enterprise CPU Cores 2+ 4+ 8+ 16+ Memory 2 GB 4 GB 8 GB 16 GB Disk 10 GB 50 GB 100 GB 500 GB Network Local Local/Cloud Cloud HA Cloud","breadcrumbs":"Platform Deployment Guide » System Requirements","id":"2905","title":"System Requirements"},"2906":{"body":"# Ensure base directories exist\\nmkdir -p provisioning/schemas/platform\\nmkdir -p provisioning/platform/logs\\nmkdir -p provisioning/platform/data\\nmkdir -p provisioning/.typedialog/platform\\nmkdir -p provisioning/config/runtime","breadcrumbs":"Platform Deployment Guide » Directory Structure","id":"2906","title":"Directory Structure"},"2907":{"body":"","breadcrumbs":"Platform Deployment Guide » Deployment Modes","id":"2907","title":"Deployment Modes"},"2908":{"body":"Requirement Recommended Mode Development & testing solo Team environment (2-10 people) multiuser CI/CD pipelines & automation cicd Production with HA enterprise","breadcrumbs":"Platform Deployment Guide » Mode Selection Matrix","id":"2908","title":"Mode Selection Matrix"},"2909":{"body":"Solo Mode Use Case : Development, testing, demonstration Characteristics : All services run locally with minimal resources Filesystem-based storage (no external databases) No TLS/SSL required Embedded/in-memory backends Single machine only Services Configuration : 2-4 workers per service 30-60 second timeouts No replication or clustering Debug-level logging enabled Startup Time : ~2-5 minutes Data Persistence : Local files only Multiuser Mode Use Case : Team environments, shared infrastructure Characteristics : Shared database backends (SurrealDB) Multiple concurrent users CORS and multi-user features enabled Optional TLS support 2-4 machines (or containerized) Services Configuration : 4-6 workers per service 60-120 second timeouts Basic replication available Info-level logging Startup Time : ~3-8 minutes (database dependent) Data Persistence : SurrealDB (shared) CICD Mode Use Case : CI/CD pipelines, ephemeral environments Characteristics : Ephemeral storage (memory, temporary) High throughput RAG system disabled Minimal logging Stateless services Services Configuration : 8-12 workers per service 10-30 second timeouts No persistence Warn-level logging Startup Time : ~1-2 minutes Data Persistence : None (ephemeral) Enterprise Mode Use Case : Production, high availability, compliance Characteristics : Distributed, replicated backends High availability (HA) clustering TLS/SSL encryption Audit logging Full monitoring and observability Services Configuration : 16-32 workers per service 120-300 second timeouts Active replication across 3+ nodes Info-level logging with audit trails Startup Time : ~5-15 minutes (cluster initialization) Data Persistence : Replicated across cluster","breadcrumbs":"Platform Deployment Guide » Mode Characteristics","id":"2909","title":"Mode Characteristics"},"291":{"body":"Before proceeding, verify your system has the core dependencies installed:","breadcrumbs":"Prerequisites » Installation Verification","id":"291","title":"Installation Verification"},"2910":{"body":"","breadcrumbs":"Platform Deployment Guide » Quick Start","id":"2910","title":"Quick Start"},"2911":{"body":"git clone https://github.com/your-org/project-provisioning.git\\ncd project-provisioning","breadcrumbs":"Platform Deployment Guide » 1. Clone Repository","id":"2911","title":"1. Clone Repository"},"2912":{"body":"Choose your mode based on use case: # For development\\nexport DEPLOYMENT_MODE=solo # For team environments\\nexport DEPLOYMENT_MODE=multiuser # For CI/CD\\nexport DEPLOYMENT_MODE=cicd # For production\\nexport DEPLOYMENT_MODE=enterprise","breadcrumbs":"Platform Deployment Guide » 2. Select Deployment Mode","id":"2912","title":"2. Select Deployment Mode"},"2913":{"body":"All services use mode-specific TOML configs automatically loaded via environment variables: # Vault Service\\nexport VAULT_MODE=$DEPLOYMENT_MODE # Extension Registry\\nexport REGISTRY_MODE=$DEPLOYMENT_MODE # RAG System\\nexport RAG_MODE=$DEPLOYMENT_MODE # AI Service\\nexport AI_SERVICE_MODE=$DEPLOYMENT_MODE # Provisioning Daemon\\nexport DAEMON_MODE=$DEPLOYMENT_MODE","breadcrumbs":"Platform Deployment Guide » 3. Set Environment Variables","id":"2913","title":"3. Set Environment Variables"},"2914":{"body":"# Build all platform crates\\ncargo build --release -p vault-service \\\\ -p extension-registry \\\\ -p provisioning-rag \\\\ -p ai-service \\\\ -p provisioning-daemon \\\\ -p orchestrator \\\\ -p control-center \\\\ -p mcp-server \\\\ -p installer","breadcrumbs":"Platform Deployment Guide » 4. Build All Services","id":"2914","title":"4. Build All Services"},"2915":{"body":"# Start in dependency order: # 1. Core infrastructure (KMS, storage)\\ncargo run --release -p vault-service & # 2. Configuration and extensions\\ncargo run --release -p extension-registry & # 3. AI/RAG layer\\ncargo run --release -p provisioning-rag &\\ncargo run --release -p ai-service & # 4. Orchestration layer\\ncargo run --release -p orchestrator &\\ncargo run --release -p control-center &\\ncargo run --release -p mcp-server & # 5. Background operations\\ncargo run --release -p provisioning-daemon & # 6. Installer (optional, for new deployments)\\ncargo run --release -p installer &","breadcrumbs":"Platform Deployment Guide » 5. Start Services (Order Matters)","id":"2915","title":"5. Start Services (Order Matters)"},"2916":{"body":"# Check all services are running\\npgrep -l \\"vault-service|extension-registry|provisioning-rag|ai-service\\" # Test endpoints\\ncurl http://localhost:8200/health # Vault\\ncurl http://localhost:8081/health # Registry\\ncurl http://localhost:8083/health # RAG\\ncurl http://localhost:8082/health # AI Service\\ncurl http://localhost:9090/health # Orchestrator\\ncurl http://localhost:8080/health # Control Center","breadcrumbs":"Platform Deployment Guide » 6. Verify Services","id":"2916","title":"6. Verify Services"},"2917":{"body":"Perfect for : Development, testing, learning","breadcrumbs":"Platform Deployment Guide » Solo Mode Deployment","id":"2917","title":"Solo Mode Deployment"},"2918":{"body":"# Check that solo schemas are available\\nls -la provisioning/schemas/platform/defaults/deployment/solo-defaults.ncl # Available schemas for each service:\\n# - provisioning/schemas/platform/schemas/vault-service.ncl\\n# - provisioning/schemas/platform/schemas/extension-registry.ncl\\n# - provisioning/schemas/platform/schemas/rag.ncl\\n# - provisioning/schemas/platform/schemas/ai-service.ncl\\n# - provisioning/schemas/platform/schemas/provisioning-daemon.ncl","breadcrumbs":"Platform Deployment Guide » Step 1: Verify Solo Configuration Files","id":"2918","title":"Step 1: Verify Solo Configuration Files"},"2919":{"body":"# Set all services to solo mode\\nexport VAULT_MODE=solo\\nexport REGISTRY_MODE=solo\\nexport RAG_MODE=solo\\nexport AI_SERVICE_MODE=solo\\nexport DAEMON_MODE=solo # Verify settings\\necho $VAULT_MODE # Should output: solo","breadcrumbs":"Platform Deployment Guide » Step 2: Set Solo Environment Variables","id":"2919","title":"Step 2: Set Solo Environment Variables"},"292":{"body":"# Check Nushell version\\nnu --version # Expected output: 0.107.1 or higher","breadcrumbs":"Prerequisites » Nushell","id":"292","title":"Nushell"},"2920":{"body":"# Build in release mode for better performance\\ncargo build --release","breadcrumbs":"Platform Deployment Guide » Step 3: Build Services","id":"2920","title":"Step 3: Build Services"},"2921":{"body":"# Create storage directories for solo mode\\nmkdir -p /tmp/provisioning-solo/{vault,registry,rag,ai,daemon}\\nchmod 755 /tmp/provisioning-solo/{vault,registry,rag,ai,daemon}","breadcrumbs":"Platform Deployment Guide » Step 4: Create Local Data Directories","id":"2921","title":"Step 4: Create Local Data Directories"},"2922":{"body":"# Start each service in a separate terminal or use tmux: # Terminal 1: Vault\\ncargo run --release -p vault-service # Terminal 2: Registry\\ncargo run --release -p extension-registry # Terminal 3: RAG\\ncargo run --release -p provisioning-rag # Terminal 4: AI Service\\ncargo run --release -p ai-service # Terminal 5: Orchestrator\\ncargo run --release -p orchestrator # Terminal 6: Control Center\\ncargo run --release -p control-center # Terminal 7: Daemon\\ncargo run --release -p provisioning-daemon","breadcrumbs":"Platform Deployment Guide » Step 5: Start Services","id":"2922","title":"Step 5: Start Services"},"2923":{"body":"# Wait 10-15 seconds for services to start, then test # Check service health\\ncurl -s http://localhost:8200/health | jq .\\ncurl -s http://localhost:8081/health | jq .\\ncurl -s http://localhost:8083/health | jq . # Try a simple operation\\ncurl -X GET http://localhost:9090/api/v1/health","breadcrumbs":"Platform Deployment Guide » Step 6: Test Services","id":"2923","title":"Step 6: Test Services"},"2924":{"body":"# Check that data is stored locally\\nls -la /tmp/provisioning-solo/vault/\\nls -la /tmp/provisioning-solo/registry/ # Data should accumulate as you use the services","breadcrumbs":"Platform Deployment Guide » Step 7: Verify Persistence (Optional)","id":"2924","title":"Step 7: Verify Persistence (Optional)"},"2925":{"body":"# Stop all services\\npkill -f \\"cargo run --release\\" # Remove temporary data (optional)\\nrm -rf /tmp/provisioning-solo","breadcrumbs":"Platform Deployment Guide » Cleanup","id":"2925","title":"Cleanup"},"2926":{"body":"Perfect for : Team environments, shared infrastructure","breadcrumbs":"Platform Deployment Guide » Multiuser Mode Deployment","id":"2926","title":"Multiuser Mode Deployment"},"2927":{"body":"SurrealDB : Running and accessible at http://surrealdb:8000 Network Access : All machines can reach SurrealDB DNS/Hostnames : Services accessible via hostnames (not just localhost)","breadcrumbs":"Platform Deployment Guide » Prerequisites","id":"2927","title":"Prerequisites"},"2928":{"body":"# Using Docker (recommended)\\ndocker run -d \\\\ --name surrealdb \\\\ -p 8000:8000 \\\\ surrealdb/surrealdb:latest \\\\ start --user root --pass root # Or using native installation:\\nsurreal start --user root --pass root","breadcrumbs":"Platform Deployment Guide » Step 1: Deploy SurrealDB","id":"2928","title":"Step 1: Deploy SurrealDB"},"2929":{"body":"# Test SurrealDB connection\\ncurl -s http://localhost:8000/health # Should return: {\\"version\\":\\"v1.x.x\\"}","breadcrumbs":"Platform Deployment Guide » Step 2: Verify SurrealDB Connectivity","id":"2929","title":"Step 2: Verify SurrealDB Connectivity"},"293":{"body":"# Check Nickel version\\nnickel --version # Expected output: 1.15.0 or higher","breadcrumbs":"Prerequisites » Nickel","id":"293","title":"Nickel"},"2930":{"body":"# Configure all services for multiuser mode\\nexport VAULT_MODE=multiuser\\nexport REGISTRY_MODE=multiuser\\nexport RAG_MODE=multiuser\\nexport AI_SERVICE_MODE=multiuser\\nexport DAEMON_MODE=multiuser # Set database connection\\nexport SURREALDB_URL=http://surrealdb:8000\\nexport SURREALDB_USER=root\\nexport SURREALDB_PASS=root # Set service hostnames (if not localhost)\\nexport VAULT_SERVICE_HOST=vault.internal\\nexport REGISTRY_HOST=registry.internal\\nexport RAG_HOST=rag.internal","breadcrumbs":"Platform Deployment Guide » Step 3: Set Multiuser Environment Variables","id":"2930","title":"Step 3: Set Multiuser Environment Variables"},"2931":{"body":"cargo build --release","breadcrumbs":"Platform Deployment Guide » Step 4: Build Services","id":"2931","title":"Step 4: Build Services"},"2932":{"body":"# Create directories on shared storage (NFS, etc.)\\nmkdir -p /mnt/provisioning-data/{vault,registry,rag,ai}\\nchmod 755 /mnt/provisioning-data/{vault,registry,rag,ai} # Or use local directories if on separate machines\\nmkdir -p /var/lib/provisioning/{vault,registry,rag,ai}","breadcrumbs":"Platform Deployment Guide » Step 5: Create Shared Data Directories","id":"2932","title":"Step 5: Create Shared Data Directories"},"2933":{"body":"# Machine 1: Infrastructure services\\nssh ops@machine1\\nexport VAULT_MODE=multiuser\\ncargo run --release -p vault-service &\\ncargo run --release -p extension-registry & # Machine 2: AI services\\nssh ops@machine2\\nexport RAG_MODE=multiuser\\nexport AI_SERVICE_MODE=multiuser\\ncargo run --release -p provisioning-rag &\\ncargo run --release -p ai-service & # Machine 3: Orchestration\\nssh ops@machine3\\ncargo run --release -p orchestrator &\\ncargo run --release -p control-center & # Machine 4: Background tasks\\nssh ops@machine4\\nexport DAEMON_MODE=multiuser\\ncargo run --release -p provisioning-daemon &","breadcrumbs":"Platform Deployment Guide » Step 6: Start Services on Multiple Machines","id":"2933","title":"Step 6: Start Services on Multiple Machines"},"2934":{"body":"# From any machine, test cross-machine connectivity\\ncurl -s http://machine1:8200/health\\ncurl -s http://machine2:8083/health\\ncurl -s http://machine3:9090/health # Test integration\\ncurl -X POST http://machine3:9090/api/v1/provision \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{\\"workspace\\": \\"test\\"}\'","breadcrumbs":"Platform Deployment Guide » Step 7: Test Multi-Machine Setup","id":"2934","title":"Step 7: Test Multi-Machine Setup"},"2935":{"body":"# Create shared credentials\\nexport VAULT_TOKEN=s.xxxxxxxxxxx # Configure TLS (optional but recommended)\\n# Update configs to use https:// URLs\\nexport VAULT_MODE=multiuser\\n# Edit provisioning/schemas/platform/schemas/vault-service.ncl\\n# Add TLS configuration in the schema definition\\n# See: provisioning/schemas/platform/validators/ for constraints","breadcrumbs":"Platform Deployment Guide » Step 8: Enable User Access","id":"2935","title":"Step 8: Enable User Access"},"2936":{"body":"# Check all services are connected to SurrealDB\\nfor host in machine1 machine2 machine3 machine4; do ssh ops@$host \\"curl -s http://localhost/api/v1/health | jq .database_connected\\"\\ndone # Monitor SurrealDB\\ncurl -s http://surrealdb:8000/version","breadcrumbs":"Platform Deployment Guide » Monitoring Multiuser Deployment","id":"2936","title":"Monitoring Multiuser Deployment"},"2937":{"body":"Perfect for : GitHub Actions, GitLab CI, Jenkins, cloud automation","breadcrumbs":"Platform Deployment Guide » CICD Mode Deployment","id":"2937","title":"CICD Mode Deployment"},"2938":{"body":"CICD mode services: Don\'t persist data between runs Use in-memory storage Have RAG disabled Optimize for startup speed Suitable for containerized deployments","breadcrumbs":"Platform Deployment Guide » Step 1: Understand Ephemeral Nature","id":"2938","title":"Step 1: Understand Ephemeral Nature"},"2939":{"body":"# Use cicd mode for all services\\nexport VAULT_MODE=cicd\\nexport REGISTRY_MODE=cicd\\nexport RAG_MODE=cicd\\nexport AI_SERVICE_MODE=cicd\\nexport DAEMON_MODE=cicd # Disable TLS (not needed in CI)\\nexport CI_ENVIRONMENT=true","breadcrumbs":"Platform Deployment Guide » Step 2: Set CICD Environment Variables","id":"2939","title":"Step 2: Set CICD Environment Variables"},"294":{"body":"# Check Docker version\\ndocker --version # Check Docker is running\\ndocker ps # Expected: Docker version 20.10+ and connection successful","breadcrumbs":"Prerequisites » Docker","id":"294","title":"Docker"},"2940":{"body":"# Dockerfile for CICD deployments\\nFROM rust:1.75-slim WORKDIR /app\\nCOPY . . # Build all services\\nRUN cargo build --release # Set CICD mode\\nENV VAULT_MODE=cicd\\nENV REGISTRY_MODE=cicd\\nENV RAG_MODE=cicd\\nENV AI_SERVICE_MODE=cicd # Expose ports\\nEXPOSE 8200 8081 8083 8082 9090 8080 # Run services\\nCMD [\\"sh\\", \\"-c\\", \\"\\\\ cargo run --release -p vault-service & \\\\ cargo run --release -p extension-registry & \\\\ cargo run --release -p provisioning-rag & \\\\ cargo run --release -p ai-service & \\\\ cargo run --release -p orchestrator & \\\\ wait\\"]","breadcrumbs":"Platform Deployment Guide » Step 3: Containerize Services (Optional)","id":"2940","title":"Step 3: Containerize Services (Optional)"},"2941":{"body":"name: CICD Platform Deployment on: push: branches: [main, develop] jobs: test-deployment: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust uses: actions-rs/toolchain@v1 with: toolchain: 1.75 profile: minimal - name: Set CICD Mode run: | echo \\"VAULT_MODE=cicd\\" >> $GITHUB_ENV echo \\"REGISTRY_MODE=cicd\\" >> $GITHUB_ENV echo \\"RAG_MODE=cicd\\" >> $GITHUB_ENV echo \\"AI_SERVICE_MODE=cicd\\" >> $GITHUB_ENV echo \\"DAEMON_MODE=cicd\\" >> $GITHUB_ENV - name: Build Services run: cargo build --release - name: Run Integration Tests run: | # Start services in background cargo run --release -p vault-service & cargo run --release -p extension-registry & cargo run --release -p orchestrator & # Wait for startup sleep 10 # Run tests cargo test --release - name: Health Checks run: | curl -f http://localhost:8200/health curl -f http://localhost:8081/health curl -f http://localhost:9090/health deploy: needs: test-deployment runs-on: ubuntu-latest if: github.ref == \'refs/heads/main\' steps: - uses: actions/checkout@v3 - name: Deploy to Production run: | # Deploy production enterprise cluster ./scripts/deploy-enterprise.sh","breadcrumbs":"Platform Deployment Guide » Step 4: GitHub Actions Example","id":"2941","title":"Step 4: GitHub Actions Example"},"2942":{"body":"# Simulate CI environment locally\\nexport VAULT_MODE=cicd\\nexport CI_ENVIRONMENT=true # Build\\ncargo build --release # Run short-lived services for testing\\ntimeout 30 cargo run --release -p vault-service &\\ntimeout 30 cargo run --release -p extension-registry &\\ntimeout 30 cargo run --release -p orchestrator & # Run tests while services are running\\nsleep 5\\ncargo test --release # Services auto-cleanup after timeout","breadcrumbs":"Platform Deployment Guide » Step 5: Run CICD Tests","id":"2942","title":"Step 5: Run CICD Tests"},"2943":{"body":"Perfect for : Production, high availability, compliance","breadcrumbs":"Platform Deployment Guide » Enterprise Mode Deployment","id":"2943","title":"Enterprise Mode Deployment"},"2944":{"body":"3+ Machines : Minimum 3 for HA Etcd Cluster : For distributed consensus Load Balancer : HAProxy, nginx, or cloud LB TLS Certificates : Valid certificates for all services Monitoring : Prometheus, ELK, or cloud monitoring Backup System : Daily snapshots to S3 or similar","breadcrumbs":"Platform Deployment Guide » Prerequisites","id":"2944","title":"Prerequisites"},"2945":{"body":"1.1 Deploy Etcd Cluster # Node 1, 2, 3\\netcd --name=node-1 \\\\ --listen-client-urls=http://0.0.0.0:2379 \\\\ --advertise-client-urls=http://node-1.internal:2379 \\\\ --initial-cluster=\\"node-1=http://node-1.internal:2380,node-2=http://node-2.internal:2380,node-3=http://node-3.internal:2380\\" \\\\ --initial-cluster-state=new # Verify cluster\\netcdctl --endpoints=http://localhost:2379 member list 1.2 Deploy Load Balancer # HAProxy configuration for vault-service (example)\\nfrontend vault_frontend bind *:8200 mode tcp default_backend vault_backend backend vault_backend mode tcp balance roundrobin server vault-1 10.0.1.10:8200 check server vault-2 10.0.1.11:8200 check server vault-3 10.0.1.12:8200 check 1.3 Configure TLS # Generate certificates (or use existing)\\nmkdir -p /etc/provisioning/tls # For each service:\\nopenssl req -x509 -newkey rsa:4096 \\\\ -keyout /etc/provisioning/tls/vault-key.pem \\\\ -out /etc/provisioning/tls/vault-cert.pem \\\\ -days 365 -nodes \\\\ -subj \\"/CN=vault.provisioning.prod\\" # Set permissions\\nchmod 600 /etc/provisioning/tls/*-key.pem\\nchmod 644 /etc/provisioning/tls/*-cert.pem","breadcrumbs":"Platform Deployment Guide » Step 1: Deploy Infrastructure","id":"2945","title":"Step 1: Deploy Infrastructure"},"2946":{"body":"# All machines: Set enterprise mode\\nexport VAULT_MODE=enterprise\\nexport REGISTRY_MODE=enterprise\\nexport RAG_MODE=enterprise\\nexport AI_SERVICE_MODE=enterprise\\nexport DAEMON_MODE=enterprise # Database cluster\\nexport SURREALDB_URL=\\"ws://surrealdb-cluster.internal:8000\\"\\nexport SURREALDB_REPLICAS=3 # Etcd cluster\\nexport ETCD_ENDPOINTS=\\"http://node-1.internal:2379,http://node-2.internal:2379,http://node-3.internal:2379\\" # TLS configuration\\nexport TLS_CERT_PATH=/etc/provisioning/tls\\nexport TLS_VERIFY=true\\nexport TLS_CA_CERT=/etc/provisioning/tls/ca.crt # Monitoring\\nexport PROMETHEUS_URL=http://prometheus.internal:9090\\nexport METRICS_ENABLED=true\\nexport AUDIT_LOG_ENABLED=true","breadcrumbs":"Platform Deployment Guide » Step 2: Set Enterprise Environment Variables","id":"2946","title":"Step 2: Set Enterprise Environment Variables"},"2947":{"body":"# Ansible playbook (simplified)\\n---\\n- hosts: provisioning_cluster tasks: - name: Build services shell: cargo build --release - name: Start vault-service (machine 1-3) shell: \\"cargo run --release -p vault-service\\" when: \\"\'vault\' in group_names\\" - name: Start orchestrator (machine 2-3) shell: \\"cargo run --release -p orchestrator\\" when: \\"\'orchestrator\' in group_names\\" - name: Start daemon (machine 3) shell: \\"cargo run --release -p provisioning-daemon\\" when: \\"\'daemon\' in group_names\\" - name: Verify cluster health uri: url: \\"https://{{ inventory_hostname }}:9090/health\\" validate_certs: yes","breadcrumbs":"Platform Deployment Guide » Step 3: Deploy Services Across Cluster","id":"2947","title":"Step 3: Deploy Services Across Cluster"},"2948":{"body":"# Check cluster status\\ncurl -s https://vault.internal:8200/health | jq .state # Check replication\\ncurl -s https://orchestrator.internal:9090/api/v1/cluster/status # Monitor etcd\\netcdctl --endpoints=https://node-1.internal:2379 endpoint health # Check leader election\\netcdctl --endpoints=https://node-1.internal:2379 election list","breadcrumbs":"Platform Deployment Guide » Step 4: Monitor Cluster Health","id":"2948","title":"Step 4: Monitor Cluster Health"},"2949":{"body":"# Prometheus configuration\\nglobal: scrape_interval: 30s evaluation_interval: 30s scrape_configs: - job_name: \'vault-service\' scheme: https tls_config: ca_file: /etc/provisioning/tls/ca.crt static_configs: - targets: [\'vault-1.internal:8200\', \'vault-2.internal:8200\', \'vault-3.internal:8200\'] - job_name: \'orchestrator\' scheme: https static_configs: - targets: [\'orch-1.internal:9090\', \'orch-2.internal:9090\', \'orch-3.internal:9090\']","breadcrumbs":"Platform Deployment Guide » Step 5: Enable Monitoring & Alerting","id":"2949","title":"Step 5: Enable Monitoring & Alerting"},"295":{"body":"# Check SOPS version\\nsops --version # Expected output: 3.10.2 or higher","breadcrumbs":"Prerequisites » SOPS","id":"295","title":"SOPS"},"2950":{"body":"# Daily backup script\\n#!/bin/bash\\nBACKUP_DIR=\\"/mnt/provisioning-backups\\"\\nDATE=$(date +%Y%m%d_%H%M%S) # Backup etcd\\netcdctl --endpoints=https://node-1.internal:2379 \\\\ snapshot save \\"$BACKUP_DIR/etcd-$DATE.db\\" # Backup SurrealDB\\ncurl -X POST https://surrealdb.internal:8000/backup \\\\ -H \\"Authorization: Bearer $SURREALDB_TOKEN\\" \\\\ > \\"$BACKUP_DIR/surreal-$DATE.sql\\" # Upload to S3\\naws s3 cp \\"$BACKUP_DIR/etcd-$DATE.db\\" \\\\ s3://provisioning-backups/etcd/ # Cleanup old backups (keep 30 days)\\nfind \\"$BACKUP_DIR\\" -mtime +30 -delete","breadcrumbs":"Platform Deployment Guide » Step 6: Backup & Recovery","id":"2950","title":"Step 6: Backup & Recovery"},"2951":{"body":"","breadcrumbs":"Platform Deployment Guide » Service Management","id":"2951","title":"Service Management"},"2952":{"body":"Individual Service Startup # Start one service\\nexport VAULT_MODE=enterprise\\ncargo run --release -p vault-service # In another terminal\\nexport REGISTRY_MODE=enterprise\\ncargo run --release -p extension-registry Batch Startup # Start all services (dependency order)\\n#!/bin/bash\\nset -e MODE=${1:-solo}\\nexport VAULT_MODE=$MODE\\nexport REGISTRY_MODE=$MODE\\nexport RAG_MODE=$MODE\\nexport AI_SERVICE_MODE=$MODE\\nexport DAEMON_MODE=$MODE echo \\"Starting provisioning platform in $MODE mode...\\" # Core services first\\necho \\"Starting infrastructure...\\"\\ncargo run --release -p vault-service &\\nVAULT_PID=$! echo \\"Starting extension registry...\\"\\ncargo run --release -p extension-registry &\\nREGISTRY_PID=$! # AI layer\\necho \\"Starting AI services...\\"\\ncargo run --release -p provisioning-rag &\\nRAG_PID=$! cargo run --release -p ai-service &\\nAI_PID=$! # Orchestration\\necho \\"Starting orchestration...\\"\\ncargo run --release -p orchestrator &\\nORCH_PID=$! echo \\"All services started. PIDs: $VAULT_PID $REGISTRY_PID $RAG_PID $AI_PID $ORCH_PID\\"","breadcrumbs":"Platform Deployment Guide » Starting Services","id":"2952","title":"Starting Services"},"2953":{"body":"# Stop all services gracefully\\npkill -SIGTERM -f \\"cargo run --release -p\\" # Wait for graceful shutdown\\nsleep 5 # Force kill if needed\\npkill -9 -f \\"cargo run --release -p\\" # Verify all stopped\\npgrep -f \\"cargo run --release -p\\" && echo \\"Services still running\\" || echo \\"All stopped\\"","breadcrumbs":"Platform Deployment Guide » Stopping Services","id":"2953","title":"Stopping Services"},"2954":{"body":"# Restart single service\\npkill -SIGTERM vault-service\\nsleep 2\\ncargo run --release -p vault-service & # Restart all services\\n./scripts/restart-all.sh $MODE # Restart with config reload\\nexport VAULT_MODE=multiuser\\npkill -SIGTERM vault-service\\nsleep 2\\ncargo run --release -p vault-service &","breadcrumbs":"Platform Deployment Guide » Restarting Services","id":"2954","title":"Restarting Services"},"2955":{"body":"# Check running processes\\npgrep -a \\"cargo run --release\\" # Check listening ports\\nnetstat -tlnp | grep -E \\"8200|8081|8083|8082|9090|8080\\" # Or using ss (modern alternative)\\nss -tlnp | grep -E \\"8200|8081|8083|8082|9090|8080\\" # Health endpoint checks\\nfor service in vault registry rag ai orchestrator; do echo \\"=== $service ===\\" curl -s http://localhost:${port[$service]}/health | jq .\\ndone","breadcrumbs":"Platform Deployment Guide » Checking Service Status","id":"2955","title":"Checking Service Status"},"2956":{"body":"","breadcrumbs":"Platform Deployment Guide » Health Checks & Monitoring","id":"2956","title":"Health Checks & Monitoring"},"2957":{"body":"# Vault Service\\ncurl -s http://localhost:8200/health | jq .\\n# Expected: {\\"status\\":\\"ok\\",\\"uptime\\":123.45} # Extension Registry\\ncurl -s http://localhost:8081/health | jq . # RAG System\\ncurl -s http://localhost:8083/health | jq .\\n# Expected: {\\"status\\":\\"ok\\",\\"embeddings\\":\\"ready\\",\\"vector_db\\":\\"connected\\"} # AI Service\\ncurl -s http://localhost:8082/health | jq . # Orchestrator\\ncurl -s http://localhost:9090/health | jq . # Control Center\\ncurl -s http://localhost:8080/health | jq .","breadcrumbs":"Platform Deployment Guide » Manual Health Verification","id":"2957","title":"Manual Health Verification"},"2958":{"body":"# Test vault <-> registry integration\\ncurl -X POST http://localhost:8200/api/encrypt \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{\\"plaintext\\":\\"secret\\"}\' | jq . # Test RAG system\\ncurl -X POST http://localhost:8083/api/ingest \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{\\"document\\":\\"test.md\\",\\"content\\":\\"# Test\\"}\' | jq . # Test orchestrator\\ncurl -X GET http://localhost:9090/api/v1/status | jq . # End-to-end workflow\\ncurl -X POST http://localhost:9090/api/v1/provision \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"workspace\\": \\"test\\", \\"services\\": [\\"vault\\", \\"registry\\"], \\"mode\\": \\"solo\\" }\' | jq .","breadcrumbs":"Platform Deployment Guide » Service Integration Tests","id":"2958","title":"Service Integration Tests"},"2959":{"body":"Prometheus Metrics # Query service uptime\\ncurl -s \'http://prometheus:9090/api/v1/query?query=up\' | jq . # Query request rate\\ncurl -s \'http://prometheus:9090/api/v1/query?query=rate(http_requests_total[5m])\' | jq . # Query error rate\\ncurl -s \'http://prometheus:9090/api/v1/query?query=rate(http_errors_total[5m])\' | jq . Log Aggregation # Follow vault logs\\ntail -f /var/log/provisioning/vault-service.log # Follow all service logs\\ntail -f /var/log/provisioning/*.log # Search for errors\\ngrep -r \\"ERROR\\" /var/log/provisioning/ # Follow with filtering\\ntail -f /var/log/provisioning/orchestrator.log | grep -E \\"ERROR|WARN\\"","breadcrumbs":"Platform Deployment Guide » Monitoring Dashboards","id":"2959","title":"Monitoring Dashboards"},"296":{"body":"# Check Age version\\nage --version # Expected output: 1.2.1 or higher","breadcrumbs":"Prerequisites » Age","id":"296","title":"Age"},"2960":{"body":"# AlertManager configuration\\ngroups: - name: provisioning rules: - alert: ServiceDown expr: up{job=~\\"vault|registry|rag|orchestrator\\"} == 0 for: 5m annotations: summary: \\"{{ $labels.job }} is down\\" - alert: HighErrorRate expr: rate(http_errors_total[5m]) > 0.05 annotations: summary: \\"High error rate detected\\" - alert: DiskSpaceWarning expr: node_filesystem_avail_bytes / node_filesystem_size_bytes < 0.2 annotations: summary: \\"Disk space below 20%\\"","breadcrumbs":"Platform Deployment Guide » Alerting","id":"2960","title":"Alerting"},"2961":{"body":"","breadcrumbs":"Platform Deployment Guide » Troubleshooting","id":"2961","title":"Troubleshooting"},"2962":{"body":"Problem : error: failed to bind to port 8200 Solutions : # Check if port is in use\\nlsof -i :8200\\nss -tlnp | grep 8200 # Kill existing process\\npkill -9 -f vault-service # Or use different port\\nexport VAULT_SERVER_PORT=8201\\ncargo run --release -p vault-service","breadcrumbs":"Platform Deployment Guide » Service Won\'t Start","id":"2962","title":"Service Won\'t Start"},"2963":{"body":"Problem : error: failed to load config from mode file Solutions : # Verify schemas exist\\nls -la provisioning/schemas/platform/schemas/vault-service.ncl # Validate schema syntax\\nnickel typecheck provisioning/schemas/platform/schemas/vault-service.ncl # Check defaults are present\\nnickel typecheck provisioning/schemas/platform/defaults/vault-service-defaults.ncl # Verify deployment mode overlay exists\\nls -la provisioning/schemas/platform/defaults/deployment/$VAULT_MODE-defaults.ncl # Run service with explicit mode\\nexport VAULT_MODE=solo\\ncargo run --release -p vault-service","breadcrumbs":"Platform Deployment Guide » Configuration Loading Fails","id":"2963","title":"Configuration Loading Fails"},"2964":{"body":"Problem : error: failed to connect to database Solutions : # Verify database is running\\ncurl http://surrealdb:8000/health\\netcdctl --endpoints=http://etcd:2379 endpoint health # Check connectivity\\nnc -zv surrealdb 8000\\nnc -zv etcd 2379 # Update connection string\\nexport SURREALDB_URL=ws://surrealdb:8000\\nexport ETCD_ENDPOINTS=http://etcd:2379 # Restart service with new config\\npkill -9 vault-service\\ncargo run --release -p vault-service","breadcrumbs":"Platform Deployment Guide » Database Connection Issues","id":"2964","title":"Database Connection Issues"},"2965":{"body":"Problem : Service exits with code 1 or 139 Solutions : # Run with verbose logging\\nRUST_LOG=debug cargo run -p vault-service 2>&1 | head -50 # Check system resources\\nfree -h\\ndf -h # Check for core dumps\\ncoredumpctl list # Run under debugger (if crash suspected)\\nrust-gdb --args target/release/vault-service","breadcrumbs":"Platform Deployment Guide » Service Crashes on Startup","id":"2965","title":"Service Crashes on Startup"},"2966":{"body":"Problem : Service consuming > expected memory Solutions : # Check memory usage\\nps aux | grep vault-service | grep -v grep # Monitor over time\\nwatch -n 1 \'ps aux | grep vault-service | grep -v grep\' # Reduce worker count\\nexport VAULT_SERVER_WORKERS=2\\ncargo run --release -p vault-service # Check for memory leaks\\nvalgrind --leak-check=full target/release/vault-service","breadcrumbs":"Platform Deployment Guide » High Memory Usage","id":"2966","title":"High Memory Usage"},"2967":{"body":"Problem : error: failed to resolve hostname Solutions : # Test DNS resolution\\nnslookup vault.internal\\ndig vault.internal # Test connectivity to service\\ncurl -v http://vault.internal:8200/health # Add to /etc/hosts if needed\\necho \\"10.0.1.10 vault.internal\\" >> /etc/hosts # Check network interface\\nip addr show\\nnetstat -nr","breadcrumbs":"Platform Deployment Guide » Network/DNS Issues","id":"2967","title":"Network/DNS Issues"},"2968":{"body":"Problem : Data lost after restart Solutions : # Verify backup exists\\nls -la /mnt/provisioning-backups/\\nls -la /var/lib/provisioning/ # Check disk space\\ndf -h /var/lib/provisioning # Verify file permissions\\nls -l /var/lib/provisioning/vault/\\nchmod 755 /var/lib/provisioning/vault/* # Restore from backup\\n./scripts/restore-backup.sh /mnt/provisioning-backups/vault-20260105.sql","breadcrumbs":"Platform Deployment Guide » Data Persistence Issues","id":"2968","title":"Data Persistence Issues"},"2969":{"body":"When troubleshooting, use this systematic approach: # 1. Check service is running\\npgrep -f vault-service || echo \\"Service not running\\" # 2. Check port is listening\\nss -tlnp | grep 8200 || echo \\"Port not listening\\" # 3. Check logs for errors\\ntail -20 /var/log/provisioning/vault-service.log | grep -i error # 4. Test HTTP endpoint\\ncurl -i http://localhost:8200/health # 5. Check dependencies\\ncurl http://surrealdb:8000/health\\netcdctl --endpoints=http://etcd:2379 endpoint health # 6. Check schema definition\\nnickel typecheck provisioning/schemas/platform/schemas/vault-service.ncl # 7. Verify environment variables\\nenv | grep -E \\"VAULT_|SURREALDB_|ETCD_\\" # 8. Check system resources\\nfree -h && df -h && top -bn1 | head -10","breadcrumbs":"Platform Deployment Guide » Debugging Checklist","id":"2969","title":"Debugging Checklist"},"297":{"body":"","breadcrumbs":"Prerequisites » Installing Missing Dependencies","id":"297","title":"Installing Missing Dependencies"},"2970":{"body":"","breadcrumbs":"Platform Deployment Guide » Configuration Updates","id":"2970","title":"Configuration Updates"},"2971":{"body":"# 1. Edit the schema definition\\nvim provisioning/schemas/platform/schemas/vault-service.ncl # 2. Update defaults if needed\\nvim provisioning/schemas/platform/defaults/vault-service-defaults.ncl # 3. Validate syntax\\nnickel typecheck provisioning/schemas/platform/schemas/vault-service.ncl # 4. Re-export configuration from schemas\\n./provisioning/.typedialog/platform/scripts/generate-configs.nu vault-service multiuser # 5. Restart affected service (no downtime for clients)\\npkill -SIGTERM vault-service\\nsleep 2\\ncargo run --release -p vault-service & # 4. Verify configuration loaded\\ncurl http://localhost:8200/api/config | jq .","breadcrumbs":"Platform Deployment Guide » Updating Service Configuration","id":"2971","title":"Updating Service Configuration"},"2972":{"body":"# Migrate from solo to multiuser: # 1. Stop services\\npkill -SIGTERM -f \\"cargo run\\"\\nsleep 5 # 2. Backup current data\\ntar -czf /backup/provisioning-solo-$(date +%s).tar.gz /var/lib/provisioning/ # 3. Set new mode\\nexport VAULT_MODE=multiuser\\nexport REGISTRY_MODE=multiuser\\nexport RAG_MODE=multiuser # 4. Start services with new config\\ncargo run --release -p vault-service &\\ncargo run --release -p extension-registry & # 5. Verify new mode\\ncurl http://localhost:8200/api/config | jq .deployment_mode","breadcrumbs":"Platform Deployment Guide » Mode Migration","id":"2972","title":"Mode Migration"},"2973":{"body":"Before deploying to production: All services compiled in release mode (--release) TLS certificates installed and valid Database cluster deployed and healthy Load balancer configured and routing traffic Monitoring and alerting configured Backup system tested and working High availability verified (failover tested) Security hardening applied (firewall rules, etc.) Documentation updated for your environment Team trained on deployment procedures Runbooks created for common operations Disaster recovery plan tested","breadcrumbs":"Platform Deployment Guide » Production Checklist","id":"2973","title":"Production Checklist"},"2974":{"body":"","breadcrumbs":"Platform Deployment Guide » Getting Help","id":"2974","title":"Getting Help"},"2975":{"body":"GitHub Issues : Report bugs at github.com/your-org/provisioning/issues Documentation : Full docs at provisioning/docs/ Slack Channel : #provisioning-platform","breadcrumbs":"Platform Deployment Guide » Community Resources","id":"2975","title":"Community Resources"},"2976":{"body":"Platform Team : platform@your-org.com On-Call : Check PagerDuty for active rotation Escalation : Contact infrastructure leadership","breadcrumbs":"Platform Deployment Guide » Internal Support","id":"2976","title":"Internal Support"},"2977":{"body":"# View all available commands\\ncargo run -- --help # View service schemas\\nls -la provisioning/schemas/platform/schemas/\\nls -la provisioning/schemas/platform/defaults/ # List running services\\nps aux | grep cargo # Monitor service logs in real-time\\njournalctl -fu provisioning-vault # Generate diagnostics bundle\\n./scripts/generate-diagnostics.sh > /tmp/diagnostics-$(date +%s).tar.gz","breadcrumbs":"Platform Deployment Guide » Useful Commands Reference","id":"2977","title":"Useful Commands Reference"},"2978":{"body":"Version : 1.0.0 Last Updated : 2025-10-06","breadcrumbs":"Service Management Guide » Service Management Guide","id":"2978","title":"Service Management Guide"},"2979":{"body":"Overview Service Architecture Service Registry Platform Commands Service Commands Deployment Modes Health Monitoring Dependency Management Pre-flight Checks Troubleshooting","breadcrumbs":"Service Management Guide » Table of Contents","id":"2979","title":"Table of Contents"},"298":{"body":"# Install Homebrew if not already installed\\n/bin/bash -c \\"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\\" # Install Nushell\\nbrew install nushell # Install Nickel\\nbrew install nickel # Install Docker Desktop\\nbrew install --cask docker # Install SOPS\\nbrew install sops # Install Age\\nbrew install age # Optional: Install extras\\nbrew install k9s glow bat","breadcrumbs":"Prerequisites » macOS (using Homebrew)","id":"298","title":"macOS (using Homebrew)"},"2980":{"body":"The Service Management System provides comprehensive lifecycle management for all platform services (orchestrator, control-center, CoreDNS, Gitea, OCI registry, MCP server, API gateway).","breadcrumbs":"Service Management Guide » Overview","id":"2980","title":"Overview"},"2981":{"body":"Unified Service Management : Single interface for all services Automatic Dependency Resolution : Start services in correct order Health Monitoring : Continuous health checks with automatic recovery Multiple Deployment Modes : Binary, Docker, Docker Compose, Kubernetes, Remote Pre-flight Checks : Validate prerequisites before operations Service Registry : Centralized service configuration","breadcrumbs":"Service Management Guide » Key Features","id":"2981","title":"Key Features"},"2982":{"body":"Service Type Category Description orchestrator Platform Orchestration Rust-based workflow coordinator control-center Platform UI Web-based management interface coredns Infrastructure DNS Local DNS resolution gitea Infrastructure Git Self-hosted Git service oci-registry Infrastructure Registry OCI-compliant container registry mcp-server Platform API Model Context Protocol server api-gateway Platform API Unified REST API gateway","breadcrumbs":"Service Management Guide » Supported Services","id":"2982","title":"Supported Services"},"2983":{"body":"","breadcrumbs":"Service Management Guide » Service Architecture","id":"2983","title":"Service Architecture"},"2984":{"body":"┌─────────────────────────────────────────┐\\n│ Service Management CLI │\\n│ (platform/services commands) │\\n└─────────────────┬───────────────────────┘ │ ┌──────────┴──────────┐ │ │ ▼ ▼\\n┌──────────────┐ ┌───────────────┐\\n│ Manager │ │ Lifecycle │\\n│ (Core) │ │ (Start/Stop)│\\n└──────┬───────┘ └───────┬───────┘ │ │ ▼ ▼\\n┌──────────────┐ ┌───────────────┐\\n│ Health │ │ Dependencies │\\n│ (Checks) │ │ (Resolution) │\\n└──────────────┘ └───────────────┘ │ │ └────────┬───────────┘ │ ▼ ┌────────────────┐ │ Pre-flight │ │ (Validation) │ └────────────────┘","breadcrumbs":"Service Management Guide » System Architecture","id":"2984","title":"System Architecture"},"2985":{"body":"Manager (manager.nu) Service registry loading Service status tracking State persistence Lifecycle (lifecycle.nu) Service start/stop operations Deployment mode handling Process management Health (health.nu) Health check execution HTTP/TCP/Command/File checks Continuous monitoring Dependencies (dependencies.nu) Dependency graph analysis Topological sorting Startup order calculation Pre-flight (preflight.nu) Prerequisite validation Conflict detection Auto-start orchestration","breadcrumbs":"Service Management Guide » Component Responsibilities","id":"2985","title":"Component Responsibilities"},"2986":{"body":"","breadcrumbs":"Service Management Guide » Service Registry","id":"2986","title":"Service Registry"},"2987":{"body":"Location : provisioning/config/services.toml","breadcrumbs":"Service Management Guide » Configuration File","id":"2987","title":"Configuration File"},"2988":{"body":"[services.]\\nname = \\"\\"\\ntype = \\"platform\\" | \\"infrastructure\\" | \\"utility\\"\\ncategory = \\"orchestration\\" | \\"auth\\" | \\"dns\\" | \\"git\\" | \\"registry\\" | \\"api\\" | \\"ui\\"\\ndescription = \\"Service description\\"\\nrequired_for = [\\"operation1\\", \\"operation2\\"]\\ndependencies = [\\"dependency1\\", \\"dependency2\\"]\\nconflicts = [\\"conflicting-service\\"] [services..deployment]\\nmode = \\"binary\\" | \\"docker\\" | \\"docker-compose\\" | \\"kubernetes\\" | \\"remote\\" # Mode-specific configuration\\n[services..deployment.binary]\\nbinary_path = \\"/path/to/binary\\"\\nargs = [\\"--arg1\\", \\"value1\\"]\\nworking_dir = \\"/working/directory\\"\\nenv = { KEY = \\"value\\" } [services..health_check]\\ntype = \\"http\\" | \\"tcp\\" | \\"command\\" | \\"file\\" | \\"none\\"\\ninterval = 10\\nretries = 3\\ntimeout = 5 [services..health_check.http]\\nendpoint = \\"http://localhost:9090/health\\"\\nexpected_status = 200\\nmethod = \\"GET\\" [services..startup]\\nauto_start = true\\nstart_timeout = 30\\nstart_order = 10\\nrestart_on_failure = true\\nmax_restarts = 3","breadcrumbs":"Service Management Guide » Service Definition Structure","id":"2988","title":"Service Definition Structure"},"2989":{"body":"[services.orchestrator]\\nname = \\"orchestrator\\"\\ntype = \\"platform\\"\\ncategory = \\"orchestration\\"\\ndescription = \\"Rust-based orchestrator for workflow coordination\\"\\nrequired_for = [\\"server\\", \\"taskserv\\", \\"cluster\\", \\"workflow\\", \\"batch\\"] [services.orchestrator.deployment]\\nmode = \\"binary\\" [services.orchestrator.deployment.binary]\\nbinary_path = \\"${HOME}/.provisioning/bin/provisioning-orchestrator\\"\\nargs = [\\"--port\\", \\"8080\\", \\"--data-dir\\", \\"${HOME}/.provisioning/orchestrator/data\\"] [services.orchestrator.health_check]\\ntype = \\"http\\" [services.orchestrator.health_check.http]\\nendpoint = \\"http://localhost:9090/health\\"\\nexpected_status = 200 [services.orchestrator.startup]\\nauto_start = true\\nstart_timeout = 30\\nstart_order = 10","breadcrumbs":"Service Management Guide » Example: Orchestrator Service","id":"2989","title":"Example: Orchestrator Service"},"299":{"body":"# Update package list\\nsudo apt update # Install prerequisites\\nsudo apt install -y curl git build-essential # Install Nushell (from GitHub releases)\\ncurl -LO https://github.com/nushell/nushell/releases/download/0.107.1/nu-0.107.1-x86_64-linux-musl.tar.gz\\ntar xzf nu-0.107.1-x86_64-linux-musl.tar.gz\\nsudo mv nu /usr/local/bin/ # Install Nickel (using Rust cargo)\\ncurl --proto \'=https\' --tlsv1.2 -sSf https://sh.rustup.rs | sh\\nsource $HOME/.cargo/env\\ncargo install nickel # Install Docker\\nsudo apt install -y docker.io\\nsudo systemctl enable --now docker\\nsudo usermod -aG docker $USER # Install SOPS\\ncurl -LO https://github.com/getsops/sops/releases/download/v3.10.2/sops-v3.10.2.linux.amd64\\nchmod +x sops-v3.10.2.linux.amd64\\nsudo mv sops-v3.10.2.linux.amd64 /usr/local/bin/sops # Install Age\\nsudo apt install -y age","breadcrumbs":"Prerequisites » Ubuntu/Debian","id":"299","title":"Ubuntu/Debian"},"2990":{"body":"Platform commands manage all services as a cohesive system.","breadcrumbs":"Service Management Guide » Platform Commands","id":"2990","title":"Platform Commands"},"2991":{"body":"Start all auto-start services or specific services: # Start all auto-start services\\nprovisioning platform start # Start specific services (with dependencies)\\nprovisioning platform start orchestrator control-center # Force restart if already running\\nprovisioning platform start --force orchestrator Behavior : Resolves dependencies Calculates startup order (topological sort) Starts services in correct order Waits for health checks Reports success/failure","breadcrumbs":"Service Management Guide » Start Platform","id":"2991","title":"Start Platform"},"2992":{"body":"Stop all running services or specific services: # Stop all running services\\nprovisioning platform stop # Stop specific services\\nprovisioning platform stop orchestrator control-center # Force stop (kill -9)\\nprovisioning platform stop --force orchestrator Behavior : Checks for dependent services Stops in reverse dependency order Updates service state Cleans up PID files","breadcrumbs":"Service Management Guide » Stop Platform","id":"2992","title":"Stop Platform"},"2993":{"body":"Restart running services: # Restart all running services\\nprovisioning platform restart # Restart specific services\\nprovisioning platform restart orchestrator","breadcrumbs":"Service Management Guide » Restart Platform","id":"2993","title":"Restart Platform"},"2994":{"body":"Show status of all services: provisioning platform status Output : Platform Services Status Running: 3/7 === ORCHESTRATION === 🟢 orchestrator - running (uptime: 3600s) ✅ === UI === 🟢 control-center - running (uptime: 3550s) ✅ === DNS === ⚪ coredns - stopped ❓ === GIT === ⚪ gitea - stopped ❓ === REGISTRY === ⚪ oci-registry - stopped ❓ === API === 🟢 mcp-server - running (uptime: 3540s) ✅ ⚪ api-gateway - stopped ❓","breadcrumbs":"Service Management Guide » Platform Status","id":"2994","title":"Platform Status"},"2995":{"body":"Check health of all running services: provisioning platform health Output : Platform Health Check ✅ orchestrator: Healthy - HTTP health check passed\\n✅ control-center: Healthy - HTTP status 200 matches expected\\n⚪ coredns: Not running\\n✅ mcp-server: Healthy - HTTP health check passed Summary: 3 healthy, 0 unhealthy, 4 not running","breadcrumbs":"Service Management Guide » Platform Health","id":"2995","title":"Platform Health"},"2996":{"body":"View service logs: # View last 50 lines\\nprovisioning platform logs orchestrator # View last 100 lines\\nprovisioning platform logs orchestrator --lines 100 # Follow logs in real-time\\nprovisioning platform logs orchestrator --follow","breadcrumbs":"Service Management Guide » Platform Logs","id":"2996","title":"Platform Logs"},"2997":{"body":"Individual service management commands.","breadcrumbs":"Service Management Guide » Service Commands","id":"2997","title":"Service Commands"},"2998":{"body":"# List all services\\nprovisioning services list # List only running services\\nprovisioning services list --running # Filter by category\\nprovisioning services list --category orchestration Output : name type category status deployment_mode auto_start\\norchestrator platform orchestration running binary true\\ncontrol-center platform ui stopped binary false\\ncoredns infrastructure dns stopped docker false","breadcrumbs":"Service Management Guide » List Services","id":"2998","title":"List Services"},"2999":{"body":"Get detailed status of a service: provisioning services status orchestrator Output : Service: orchestrator\\nType: platform\\nCategory: orchestration\\nStatus: running\\nDeployment: binary\\nHealth: healthy\\nAuto-start: true\\nPID: 12345\\nUptime: 3600s\\nDependencies: []","breadcrumbs":"Service Management Guide » Service Status","id":"2999","title":"Service Status"},"3":{"body":"Document Description CLI Reference Complete command reference Workspace Management Workspace creation and management Workspace Switching Switch between workspaces Infrastructure Management Server, taskserv, cluster operations Service Management Platform service lifecycle management OCI Registry OCI artifact management Gitea Integration Git workflow and collaboration CoreDNS Guide DNS management Test Environments Containerized testing Extension Development Create custom extensions","breadcrumbs":"Home » 📚 User Guides","id":"3","title":"📚 User Guides"},"30":{"body":"Provider-agnostic batch operations Mixed provider support (UpCloud + AWS + local) Dependency resolution with soft/hard dependencies Real-time monitoring and rollback","breadcrumbs":"Home » 🚀 Batch Workflow System (v3.1.0)","id":"30","title":"🚀 Batch Workflow System (v3.1.0)"},"300":{"body":"# Install Nushell\\nsudo dnf install -y nushell # Install Nickel (using Rust cargo)\\ncurl --proto \'=https\' --tlsv1.2 -sSf https://sh.rustup.rs | sh\\nsource $HOME/.cargo/env\\ncargo install nickel # Install Docker\\nsudo dnf install -y docker\\nsudo systemctl enable --now docker\\nsudo usermod -aG docker $USER # Install SOPS\\nsudo dnf install -y sops # Install Age\\nsudo dnf install -y age","breadcrumbs":"Prerequisites » Fedora/RHEL","id":"300","title":"Fedora/RHEL"},"3000":{"body":"# Start service (with pre-flight checks)\\nprovisioning services start orchestrator # Force start (skip checks)\\nprovisioning services start orchestrator --force Pre-flight Checks : Validate prerequisites (binary exists, Docker running, etc.) Check for conflicts Verify dependencies are running Auto-start dependencies if needed","breadcrumbs":"Service Management Guide » Start Service","id":"3000","title":"Start Service"},"3001":{"body":"# Stop service (with dependency check)\\nprovisioning services stop orchestrator # Force stop (ignore dependents)\\nprovisioning services stop orchestrator --force","breadcrumbs":"Service Management Guide » Stop Service","id":"3001","title":"Stop Service"},"3002":{"body":"provisioning services restart orchestrator","breadcrumbs":"Service Management Guide » Restart Service","id":"3002","title":"Restart Service"},"3003":{"body":"Check service health: provisioning services health orchestrator Output : Service: orchestrator\\nStatus: healthy\\nHealthy: true\\nMessage: HTTP health check passed\\nCheck type: http\\nCheck duration: 15 ms","breadcrumbs":"Service Management Guide » Service Health","id":"3003","title":"Service Health"},"3004":{"body":"# View logs\\nprovisioning services logs orchestrator # Follow logs\\nprovisioning services logs orchestrator --follow # Custom line count\\nprovisioning services logs orchestrator --lines 200","breadcrumbs":"Service Management Guide » Service Logs","id":"3004","title":"Service Logs"},"3005":{"body":"Check which services are required for an operation: provisioning services check server Output : Operation: server\\nRequired services: orchestrator\\nAll running: true","breadcrumbs":"Service Management Guide » Check Required Services","id":"3005","title":"Check Required Services"},"3006":{"body":"View dependency graph: # View all dependencies\\nprovisioning services dependencies # View specific service dependencies\\nprovisioning services dependencies control-center","breadcrumbs":"Service Management Guide » Service Dependencies","id":"3006","title":"Service Dependencies"},"3007":{"body":"Validate all service configurations: provisioning services validate Output : Total services: 7\\nValid: 6\\nInvalid: 1 Invalid services: ❌ coredns: - Docker is not installed or not running","breadcrumbs":"Service Management Guide » Validate Services","id":"3007","title":"Validate Services"},"3008":{"body":"Get platform readiness report: provisioning services readiness Output : Platform Readiness Report Total services: 7\\nRunning: 3\\nReady to start: 6 Services: 🟢 orchestrator - platform - orchestration 🟢 control-center - platform - ui 🔴 coredns - infrastructure - dns Issues: 1 🟡 gitea - infrastructure - git","breadcrumbs":"Service Management Guide » Readiness Report","id":"3008","title":"Readiness Report"},"3009":{"body":"Continuous health monitoring: # Monitor with default interval (30s)\\nprovisioning services monitor orchestrator # Custom interval\\nprovisioning services monitor orchestrator --interval 10","breadcrumbs":"Service Management Guide » Monitor Service","id":"3009","title":"Monitor Service"},"301":{"body":"","breadcrumbs":"Prerequisites » Network Requirements","id":"301","title":"Network Requirements"},"3010":{"body":"","breadcrumbs":"Service Management Guide » Deployment Modes","id":"3010","title":"Deployment Modes"},"3011":{"body":"Run services as native binaries. Configuration : [services.orchestrator.deployment]\\nmode = \\"binary\\" [services.orchestrator.deployment.binary]\\nbinary_path = \\"${HOME}/.provisioning/bin/provisioning-orchestrator\\"\\nargs = [\\"--port\\", \\"8080\\"]\\nworking_dir = \\"${HOME}/.provisioning/orchestrator\\"\\nenv = { RUST_LOG = \\"info\\" } Process Management : PID tracking in ~/.provisioning/services/pids/ Log output to ~/.provisioning/services/logs/ State tracking in ~/.provisioning/services/state/","breadcrumbs":"Service Management Guide » Binary Deployment","id":"3011","title":"Binary Deployment"},"3012":{"body":"Run services as Docker containers. Configuration : [services.coredns.deployment]\\nmode = \\"docker\\" [services.coredns.deployment.docker]\\nimage = \\"coredns/coredns:1.11.1\\"\\ncontainer_name = \\"provisioning-coredns\\"\\nports = [\\"5353:53/udp\\"]\\nvolumes = [\\"${HOME}/.provisioning/coredns/Corefile:/Corefile:ro\\"]\\nrestart_policy = \\"unless-stopped\\" Prerequisites : Docker daemon running Docker CLI installed","breadcrumbs":"Service Management Guide » Docker Deployment","id":"3012","title":"Docker Deployment"},"3013":{"body":"Run services via Docker Compose. Configuration : [services.platform.deployment]\\nmode = \\"docker-compose\\" [services.platform.deployment.docker_compose]\\ncompose_file = \\"${HOME}/.provisioning/platform/docker-compose.yaml\\"\\nservice_name = \\"orchestrator\\"\\nproject_name = \\"provisioning\\" File : provisioning/platform/docker-compose.yaml","breadcrumbs":"Service Management Guide » Docker Compose Deployment","id":"3013","title":"Docker Compose Deployment"},"3014":{"body":"Run services on Kubernetes. Configuration : [services.orchestrator.deployment]\\nmode = \\"kubernetes\\" [services.orchestrator.deployment.kubernetes]\\nnamespace = \\"provisioning\\"\\ndeployment_name = \\"orchestrator\\"\\nmanifests_path = \\"${HOME}/.provisioning/k8s/orchestrator/\\" Prerequisites : kubectl installed and configured Kubernetes cluster accessible","breadcrumbs":"Service Management Guide » Kubernetes Deployment","id":"3014","title":"Kubernetes Deployment"},"3015":{"body":"Connect to remotely-running services. Configuration : [services.orchestrator.deployment]\\nmode = \\"remote\\" [services.orchestrator.deployment.remote]\\nendpoint = \\"https://orchestrator.example.com\\"\\ntls_enabled = true\\nauth_token_path = \\"${HOME}/.provisioning/tokens/orchestrator.token\\"","breadcrumbs":"Service Management Guide » Remote Deployment","id":"3015","title":"Remote Deployment"},"3016":{"body":"","breadcrumbs":"Service Management Guide » Health Monitoring","id":"3016","title":"Health Monitoring"},"3017":{"body":"HTTP Health Check [services.orchestrator.health_check]\\ntype = \\"http\\" [services.orchestrator.health_check.http]\\nendpoint = \\"http://localhost:9090/health\\"\\nexpected_status = 200\\nmethod = \\"GET\\" TCP Health Check [services.coredns.health_check]\\ntype = \\"tcp\\" [services.coredns.health_check.tcp]\\nhost = \\"localhost\\"\\nport = 5353 Command Health Check [services.custom.health_check]\\ntype = \\"command\\" [services.custom.health_check.command]\\ncommand = \\"systemctl is-active myservice\\"\\nexpected_exit_code = 0 File Health Check [services.custom.health_check]\\ntype = \\"file\\" [services.custom.health_check.file]\\npath = \\"/var/run/myservice.pid\\"\\nmust_exist = true","breadcrumbs":"Service Management Guide » Health Check Types","id":"3017","title":"Health Check Types"},"3018":{"body":"interval: Seconds between checks (default: 10) retries: Max retry attempts (default: 3) timeout: Check timeout in seconds (default: 5)","breadcrumbs":"Service Management Guide » Health Check Configuration","id":"3018","title":"Health Check Configuration"},"3019":{"body":"provisioning services monitor orchestrator --interval 30 Output : Starting health monitoring for orchestrator (interval: 30s)\\nPress Ctrl+C to stop\\n2025-10-06 14:30:00 ✅ orchestrator: HTTP health check passed\\n2025-10-06 14:30:30 ✅ orchestrator: HTTP health check passed\\n2025-10-06 14:31:00 ✅ orchestrator: HTTP health check passed","breadcrumbs":"Service Management Guide » Continuous Monitoring","id":"3019","title":"Continuous Monitoring"},"302":{"body":"If running platform services, ensure these ports are available: Service Port Protocol Purpose Orchestrator 8080 HTTP Workflow API Control Center 9090 HTTP Policy engine KMS Service 8082 HTTP Key management API Server 8083 HTTP REST API Extension Registry 8084 HTTP Extension discovery OCI Registry 5000 HTTP Artifact storage","breadcrumbs":"Prerequisites » Firewall Ports","id":"302","title":"Firewall Ports"},"3020":{"body":"","breadcrumbs":"Service Management Guide » Dependency Management","id":"3020","title":"Dependency Management"},"3021":{"body":"Services can depend on other services: [services.control-center]\\ndependencies = [\\"orchestrator\\"] [services.api-gateway]\\ndependencies = [\\"orchestrator\\", \\"control-center\\", \\"mcp-server\\"]","breadcrumbs":"Service Management Guide » Dependency Graph","id":"3021","title":"Dependency Graph"},"3022":{"body":"Services start in topological order: orchestrator (order: 10) └─> control-center (order: 20) └─> api-gateway (order: 45)","breadcrumbs":"Service Management Guide » Startup Order","id":"3022","title":"Startup Order"},"3023":{"body":"Automatic dependency resolution when starting services: # Starting control-center automatically starts orchestrator first\\nprovisioning services start control-center Output : Starting dependency: orchestrator\\n✅ Started orchestrator with PID 12345\\nWaiting for orchestrator to become healthy...\\n✅ Service orchestrator is healthy\\nStarting service: control-center\\n✅ Started control-center with PID 12346\\n✅ Service control-center is healthy","breadcrumbs":"Service Management Guide » Dependency Resolution","id":"3023","title":"Dependency Resolution"},"3024":{"body":"Services can conflict with each other: [services.coredns]\\nconflicts = [\\"dnsmasq\\", \\"systemd-resolved\\"] Attempting to start a conflicting service will fail: provisioning services start coredns Output : ❌ Pre-flight check failed: conflicts\\nConflicting services running: dnsmasq","breadcrumbs":"Service Management Guide » Conflicts","id":"3024","title":"Conflicts"},"3025":{"body":"Check which services depend on a service: provisioning services dependencies orchestrator Output : ## orchestrator\\n- Type: platform\\n- Category: orchestration\\n- Required by: - control-center - mcp-server - api-gateway","breadcrumbs":"Service Management Guide » Reverse Dependencies","id":"3025","title":"Reverse Dependencies"},"3026":{"body":"System prevents stopping services with running dependents: provisioning services stop orchestrator Output : ❌ Cannot stop orchestrator: Dependent services running: control-center, mcp-server, api-gateway Use --force to stop anyway","breadcrumbs":"Service Management Guide » Safe Stop","id":"3026","title":"Safe Stop"},"3027":{"body":"","breadcrumbs":"Service Management Guide » Pre-flight Checks","id":"3027","title":"Pre-flight Checks"},"3028":{"body":"Pre-flight checks ensure services can start successfully before attempting to start them.","breadcrumbs":"Service Management Guide » Purpose","id":"3028","title":"Purpose"},"3029":{"body":"Prerequisites : Binary exists, Docker running, etc. Conflicts : No conflicting services running Dependencies : All dependencies available","breadcrumbs":"Service Management Guide » Check Types","id":"3029","title":"Check Types"},"303":{"body":"The platform requires outbound internet access to: Download dependencies and updates Pull container images Access cloud provider APIs (AWS, UpCloud) Fetch extension packages","breadcrumbs":"Prerequisites » External Connectivity","id":"303","title":"External Connectivity"},"3030":{"body":"Pre-flight checks run automatically when starting services: provisioning services start orchestrator Check Process : Running pre-flight checks for orchestrator...\\n✅ Binary found: /Users/user/.provisioning/bin/provisioning-orchestrator\\n✅ No conflicts detected\\n✅ All dependencies available\\nStarting service: orchestrator","breadcrumbs":"Service Management Guide » Automatic Checks","id":"3030","title":"Automatic Checks"},"3031":{"body":"Validate all services: provisioning services validate Validate specific service: provisioning services status orchestrator","breadcrumbs":"Service Management Guide » Manual Validation","id":"3031","title":"Manual Validation"},"3032":{"body":"Services with auto_start = true can be started automatically when needed: # Orchestrator auto-starts if needed for server operations\\nprovisioning server create Output : Starting required services...\\n✅ Orchestrator started\\nCreating server...","breadcrumbs":"Service Management Guide » Auto-Start","id":"3032","title":"Auto-Start"},"3033":{"body":"","breadcrumbs":"Service Management Guide » Troubleshooting","id":"3033","title":"Troubleshooting"},"3034":{"body":"Check prerequisites : provisioning services validate\\nprovisioning services status Common issues : Binary not found: Check binary_path in config Docker not running: Start Docker daemon Port already in use: Check for conflicting processes Dependencies not running: Start dependencies first","breadcrumbs":"Service Management Guide » Service Won\'t Start","id":"3034","title":"Service Won\'t Start"},"3035":{"body":"View health status : provisioning services health Check logs : provisioning services logs --follow Common issues : Service not fully initialized: Wait longer or increase start_timeout Wrong health check endpoint: Verify endpoint in config Network issues: Check firewall, port bindings","breadcrumbs":"Service Management Guide » Service Health Check Failing","id":"3035","title":"Service Health Check Failing"},"3036":{"body":"View dependency tree : provisioning services dependencies Check dependency status : provisioning services status Start with dependencies : provisioning platform start ","breadcrumbs":"Service Management Guide » Dependency Issues","id":"3036","title":"Dependency Issues"},"3037":{"body":"Validate dependency graph : # This is done automatically but you can check manually\\nnu -c \\"use lib_provisioning/services/mod.nu *; validate-dependency-graph\\"","breadcrumbs":"Service Management Guide » Circular Dependencies","id":"3037","title":"Circular Dependencies"},"3038":{"body":"If service reports running but isn\'t: # Manual cleanup\\nrm ~/.provisioning/services/pids/.pid # Force restart\\nprovisioning services restart ","breadcrumbs":"Service Management Guide » PID File Stale","id":"3038","title":"PID File Stale"},"3039":{"body":"Find process using port : lsof -i :9090 Kill conflicting process : kill ","breadcrumbs":"Service Management Guide » Port Conflicts","id":"3039","title":"Port Conflicts"},"304":{"body":"If you plan to use cloud providers, prepare credentials:","breadcrumbs":"Prerequisites » Cloud Provider Credentials (Optional)","id":"304","title":"Cloud Provider Credentials (Optional)"},"3040":{"body":"Check Docker status : docker ps\\ndocker info View container logs : docker logs provisioning- Restart Docker daemon : # macOS\\nkillall Docker && open /Applications/Docker.app # Linux\\nsystemctl restart docker","breadcrumbs":"Service Management Guide » Docker Issues","id":"3040","title":"Docker Issues"},"3041":{"body":"View recent logs : tail -f ~/.provisioning/services/logs/.log Search logs : grep \\"ERROR\\" ~/.provisioning/services/logs/.log","breadcrumbs":"Service Management Guide » Service Logs","id":"3041","title":"Service Logs"},"3042":{"body":"","breadcrumbs":"Service Management Guide » Advanced Usage","id":"3042","title":"Advanced Usage"},"3043":{"body":"Add custom services by editing provisioning/config/services.toml.","breadcrumbs":"Service Management Guide » Custom Service Registration","id":"3043","title":"Custom Service Registration"},"3044":{"body":"Services automatically start when required by workflows: # Orchestrator starts automatically if not running\\nprovisioning workflow submit my-workflow","breadcrumbs":"Service Management Guide » Integration with Workflows","id":"3044","title":"Integration with Workflows"},"3045":{"body":"# GitLab CI\\nbefore_script: - provisioning platform start orchestrator - provisioning services health orchestrator test: script: - provisioning test quick kubernetes","breadcrumbs":"Service Management Guide » CI/CD Integration","id":"3045","title":"CI/CD Integration"},"3046":{"body":"Services can integrate with monitoring systems via health endpoints.","breadcrumbs":"Service Management Guide » Monitoring Integration","id":"3046","title":"Monitoring Integration"},"3047":{"body":"Orchestrator README Test Environment Guide Workflow Management","breadcrumbs":"Service Management Guide » Related Documentation","id":"3047","title":"Related Documentation"},"3048":{"body":"Version : 1.0.0","breadcrumbs":"Service Management Guide » Quick Reference","id":"3048","title":"Quick Reference"},"3049":{"body":"# Start all auto-start services\\nprovisioning platform start # Start specific services with dependencies\\nprovisioning platform start control-center mcp-server # Stop all running services\\nprovisioning platform stop # Stop specific services\\nprovisioning platform stop orchestrator # Restart services\\nprovisioning platform restart # Show platform status\\nprovisioning platform status # Check platform health\\nprovisioning platform health # View service logs\\nprovisioning platform logs orchestrator --follow","breadcrumbs":"Service Management Guide » Platform Commands (Manage All Services)","id":"3049","title":"Platform Commands (Manage All Services)"},"305":{"body":"AWS Access Key ID AWS Secret Access Key Configured via ~/.aws/credentials or environment variables","breadcrumbs":"Prerequisites » AWS","id":"305","title":"AWS"},"3050":{"body":"# List all services\\nprovisioning services list # List only running services\\nprovisioning services list --running # Filter by category\\nprovisioning services list --category orchestration # Service status\\nprovisioning services status orchestrator # Start service (with pre-flight checks)\\nprovisioning services start orchestrator # Force start (skip checks)\\nprovisioning services start orchestrator --force # Stop service\\nprovisioning services stop orchestrator # Force stop (ignore dependents)\\nprovisioning services stop orchestrator --force # Restart service\\nprovisioning services restart orchestrator # Check health\\nprovisioning services health orchestrator # View logs\\nprovisioning services logs orchestrator --follow --lines 100 # Monitor health continuously\\nprovisioning services monitor orchestrator --interval 30","breadcrumbs":"Service Management Guide » Service Commands (Individual Services)","id":"3050","title":"Service Commands (Individual Services)"},"3051":{"body":"# View dependency graph\\nprovisioning services dependencies # View specific service dependencies\\nprovisioning services dependencies control-center # Validate all services\\nprovisioning services validate # Check readiness\\nprovisioning services readiness # Check required services for operation\\nprovisioning services check server","breadcrumbs":"Service Management Guide » Dependency & Validation","id":"3051","title":"Dependency & Validation"},"3052":{"body":"Service Port Type Auto-Start Dependencies orchestrator 8080 Platform Yes - control-center 8081 Platform No orchestrator coredns 5353 Infrastructure No - gitea 3000, 222 Infrastructure No - oci-registry 5000 Infrastructure No - mcp-server 8082 Platform No orchestrator api-gateway 8083 Platform No orchestrator, control-center, mcp-server","breadcrumbs":"Service Management Guide » Registered Services","id":"3052","title":"Registered Services"},"3053":{"body":"# Start all services\\ncd provisioning/platform\\ndocker-compose up -d # Start specific services\\ndocker-compose up -d orchestrator control-center # Check status\\ndocker-compose ps # View logs\\ndocker-compose logs -f orchestrator # Stop all services\\ndocker-compose down # Stop and remove volumes\\ndocker-compose down -v","breadcrumbs":"Service Management Guide » Docker Compose","id":"3053","title":"Docker Compose"},"3054":{"body":"~/.provisioning/services/\\n├── pids/ # Process ID files\\n├── state/ # Service state (JSON)\\n└── logs/ # Service logs","breadcrumbs":"Service Management Guide » Service State Directories","id":"3054","title":"Service State Directories"},"3055":{"body":"Service Endpoint Type orchestrator http://localhost:9090/health HTTP control-center http://localhost:9080/health HTTP coredns localhost:5353 TCP gitea http://localhost:3000/api/healthz HTTP oci-registry http://localhost:5000/v2/ HTTP mcp-server http://localhost:8082/health HTTP api-gateway http://localhost:8083/health HTTP","breadcrumbs":"Service Management Guide » Health Check Endpoints","id":"3055","title":"Health Check Endpoints"},"3056":{"body":"Start Platform for Development # Start core services\\nprovisioning platform start orchestrator # Check status\\nprovisioning platform status # Check health\\nprovisioning platform health Start Full Platform Stack # Use Docker Compose\\ncd provisioning/platform\\ndocker-compose up -d # Verify\\ndocker-compose ps\\nprovisioning platform health Debug Service Issues # Check service status\\nprovisioning services status # View logs\\nprovisioning services logs --follow # Check health\\nprovisioning services health # Validate prerequisites\\nprovisioning services validate # Restart service\\nprovisioning services restart Safe Service Shutdown # Check dependents\\nnu -c \\"use lib_provisioning/services/mod.nu *; can-stop-service orchestrator\\" # Stop with dependency check\\nprovisioning services stop orchestrator # Force stop if needed\\nprovisioning services stop orchestrator --force","breadcrumbs":"Service Management Guide » Common Workflows","id":"3056","title":"Common Workflows"},"3057":{"body":"Service Won\'t Start # 1. Check prerequisites\\nprovisioning services validate # 2. View detailed status\\nprovisioning services status # 3. Check logs\\nprovisioning services logs # 4. Verify binary/image exists\\nls ~/.provisioning/bin/\\ndocker images | grep Health Check Failing # Check endpoint manually\\ncurl http://localhost:9090/health # View health details\\nprovisioning services health # Monitor continuously\\nprovisioning services monitor --interval 10 PID File Stale # Remove stale PID file\\nrm ~/.provisioning/services/pids/.pid # Restart service\\nprovisioning services restart Port Already in Use # Find process using port\\nlsof -i :9090 # Kill process\\nkill # Restart service\\nprovisioning services start ","breadcrumbs":"Service Management Guide » Troubleshooting","id":"3057","title":"Troubleshooting"},"3058":{"body":"Server Operations # Orchestrator auto-starts if needed\\nprovisioning server create # Manual check\\nprovisioning services check server Workflow Operations # Orchestrator auto-starts\\nprovisioning workflow submit my-workflow # Check status\\nprovisioning services status orchestrator Test Operations # Orchestrator required for test environments\\nprovisioning test quick kubernetes # Pre-flight check\\nprovisioning services check test-env","breadcrumbs":"Service Management Guide » Integration with Operations","id":"3058","title":"Integration with Operations"},"3059":{"body":"Custom Service Startup Order Services start based on: Dependency order (topological sort) start_order field (lower = earlier) Auto-Start Configuration Edit provisioning/config/services.toml: [services..startup]\\nauto_start = true # Enable auto-start\\nstart_timeout = 30 # Timeout in seconds\\nstart_order = 10 # Startup priority Health Check Configuration [services..health_check]\\ntype = \\"http\\" # http, tcp, command, file\\ninterval = 10 # Seconds between checks\\nretries = 3 # Max retry attempts\\ntimeout = 5 # Check timeout [services..health_check.http]\\nendpoint = \\"http://localhost:9090/health\\"\\nexpected_status = 200","breadcrumbs":"Service Management Guide » Advanced Usage","id":"3059","title":"Advanced Usage"},"306":{"body":"UpCloud username UpCloud password Configured via environment variables or config files","breadcrumbs":"Prerequisites » UpCloud","id":"306","title":"UpCloud"},"3060":{"body":"Service Registry : provisioning/config/services.toml KCL Schema : provisioning/kcl/services.k Docker Compose : provisioning/platform/docker-compose.yaml User Guide : docs/user/SERVICE_MANAGEMENT_GUIDE.md","breadcrumbs":"Service Management Guide » Key Files","id":"3060","title":"Key Files"},"3061":{"body":"# View documentation\\ncat docs/user/SERVICE_MANAGEMENT_GUIDE.md | less # Run verification\\nnu provisioning/core/nulib/tests/verify_services.nu # Check readiness\\nprovisioning services readiness Quick Tip : Use --help flag with any command for detailed usage information. Maintained By : Platform Team Support : GitHub Issues","breadcrumbs":"Service Management Guide » Getting Help","id":"3061","title":"Getting Help"},"3062":{"body":"Complete guide for monitoring the 9-service platform with Prometheus, Grafana, and AlertManager Version : 1.0.0 Last Updated : 2026-01-05 Target Audience : DevOps Engineers, Platform Operators Status : Production Ready","breadcrumbs":"Monitoring & Alerting Setup » Service Monitoring & Alerting Setup","id":"3062","title":"Service Monitoring & Alerting Setup"},"3063":{"body":"This guide provides complete setup instructions for monitoring and alerting on the provisioning platform using industry-standard tools: Prometheus : Metrics collection and time-series database Grafana : Visualization and dashboarding AlertManager : Alert routing and notification","breadcrumbs":"Monitoring & Alerting Setup » Overview","id":"3063","title":"Overview"},"3064":{"body":"Services (metrics endpoints) ↓\\nPrometheus (scrapes every 30s) ↓\\nAlertManager (evaluates rules) ↓\\nNotification Channels (email, slack, pagerduty) Prometheus Data ↓\\nGrafana (queries) ↓\\nDashboards & Visualization","breadcrumbs":"Monitoring & Alerting Setup » Architecture","id":"3064","title":"Architecture"},"3065":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » Prerequisites","id":"3065","title":"Prerequisites"},"3066":{"body":"# Prometheus (for metrics)\\nwget https://github.com/prometheus/prometheus/releases/download/v2.48.0/prometheus-2.48.0.linux-amd64.tar.gz\\ntar xvfz prometheus-2.48.0.linux-amd64.tar.gz\\nsudo mv prometheus-2.48.0.linux-amd64 /opt/prometheus # Grafana (for dashboards)\\nsudo apt-get install -y grafana-server # AlertManager (for alerting)\\nwget https://github.com/prometheus/alertmanager/releases/download/v0.26.0/alertmanager-0.26.0.linux-amd64.tar.gz\\ntar xvfz alertmanager-0.26.0.linux-amd64.tar.gz\\nsudo mv alertmanager-0.26.0.linux-amd64 /opt/alertmanager","breadcrumbs":"Monitoring & Alerting Setup » Software Requirements","id":"3066","title":"Software Requirements"},"3067":{"body":"CPU : 2+ cores Memory : 4 GB minimum, 8 GB recommended Disk : 100 GB for metrics retention (30 days) Network : Access to all service endpoints","breadcrumbs":"Monitoring & Alerting Setup » System Requirements","id":"3067","title":"System Requirements"},"3068":{"body":"Component Port Purpose Prometheus 9090 Web UI & API Grafana 3000 Web UI AlertManager 9093 Web UI & API Node Exporter 9100 System metrics","breadcrumbs":"Monitoring & Alerting Setup » Ports","id":"3068","title":"Ports"},"3069":{"body":"All platform services expose metrics on the /metrics endpoint: # Health and metrics endpoints for each service\\ncurl http://localhost:8200/health # Vault health\\ncurl http://localhost:8200/metrics # Vault metrics (Prometheus format) curl http://localhost:8081/health # Registry health\\ncurl http://localhost:8081/metrics # Registry metrics curl http://localhost:8083/health # RAG health\\ncurl http://localhost:8083/metrics # RAG metrics curl http://localhost:8082/health # AI Service health\\ncurl http://localhost:8082/metrics # AI Service metrics curl http://localhost:9090/health # Orchestrator health\\ncurl http://localhost:9090/metrics # Orchestrator metrics curl http://localhost:8080/health # Control Center health\\ncurl http://localhost:8080/metrics # Control Center metrics curl http://localhost:8084/health # MCP Server health\\ncurl http://localhost:8084/metrics # MCP Server metrics","breadcrumbs":"Monitoring & Alerting Setup » Service Metrics Endpoints","id":"3069","title":"Service Metrics Endpoints"},"307":{"body":"Once all prerequisites are met, proceed to: → Installation","breadcrumbs":"Prerequisites » Next Steps","id":"307","title":"Next Steps"},"3070":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » Prometheus Configuration","id":"3070","title":"Prometheus Configuration"},"3071":{"body":"# /etc/prometheus/prometheus.yml\\nglobal: scrape_interval: 30s evaluation_interval: 30s external_labels: monitor: \'provisioning-platform\' environment: \'production\' alerting: alertmanagers: - static_configs: - targets: - localhost:9093 rule_files: - \'/etc/prometheus/rules/*.yml\' scrape_configs: # Core Platform Services - job_name: \'vault-service\' metrics_path: \'/metrics\' static_configs: - targets: [\'localhost:8200\'] relabel_configs: - source_labels: [__address__] target_label: instance replacement: \'vault-service\' - job_name: \'extension-registry\' metrics_path: \'/metrics\' static_configs: - targets: [\'localhost:8081\'] relabel_configs: - source_labels: [__address__] target_label: instance replacement: \'registry\' - job_name: \'rag-service\' metrics_path: \'/metrics\' static_configs: - targets: [\'localhost:8083\'] relabel_configs: - source_labels: [__address__] target_label: instance replacement: \'rag\' - job_name: \'ai-service\' metrics_path: \'/metrics\' static_configs: - targets: [\'localhost:8082\'] relabel_configs: - source_labels: [__address__] target_label: instance replacement: \'ai-service\' - job_name: \'orchestrator\' metrics_path: \'/metrics\' static_configs: - targets: [\'localhost:9090\'] relabel_configs: - source_labels: [__address__] target_label: instance replacement: \'orchestrator\' - job_name: \'control-center\' metrics_path: \'/metrics\' static_configs: - targets: [\'localhost:8080\'] relabel_configs: - source_labels: [__address__] target_label: instance replacement: \'control-center\' - job_name: \'mcp-server\' metrics_path: \'/metrics\' static_configs: - targets: [\'localhost:8084\'] relabel_configs: - source_labels: [__address__] target_label: instance replacement: \'mcp-server\' # System Metrics (Node Exporter) - job_name: \'node\' static_configs: - targets: [\'localhost:9100\'] labels: instance: \'system\' # SurrealDB (if multiuser/enterprise) - job_name: \'surrealdb\' metrics_path: \'/metrics\' static_configs: - targets: [\'surrealdb:8000\'] # Etcd (if enterprise) - job_name: \'etcd\' metrics_path: \'/metrics\' static_configs: - targets: [\'etcd:2379\']","breadcrumbs":"Monitoring & Alerting Setup » 1. Create Prometheus Config","id":"3071","title":"1. Create Prometheus Config"},"3072":{"body":"# Create necessary directories\\nsudo mkdir -p /etc/prometheus /var/lib/prometheus\\nsudo mkdir -p /etc/prometheus/rules # Start Prometheus\\ncd /opt/prometheus\\nsudo ./prometheus --config.file=/etc/prometheus/prometheus.yml \\\\ --storage.tsdb.path=/var/lib/prometheus \\\\ --web.console.templates=consoles \\\\ --web.console.libraries=console_libraries # Or as systemd service\\nsudo tee /etc/systemd/system/prometheus.service > /dev/null << EOF\\n[Unit]\\nDescription=Prometheus\\nWants=network-online.target\\nAfter=network-online.target [Service]\\nUser=prometheus\\nType=simple\\nExecStart=/opt/prometheus/prometheus \\\\ --config.file=/etc/prometheus/prometheus.yml \\\\ --storage.tsdb.path=/var/lib/prometheus Restart=on-failure\\nRestartSec=10 [Install]\\nWantedBy=multi-user.target\\nEOF sudo systemctl daemon-reload\\nsudo systemctl enable prometheus\\nsudo systemctl start prometheus","breadcrumbs":"Monitoring & Alerting Setup » 2. Start Prometheus","id":"3072","title":"2. Start Prometheus"},"3073":{"body":"# Check Prometheus is running\\ncurl -s http://localhost:9090/-/healthy # List scraped targets\\ncurl -s http://localhost:9090/api/v1/targets | jq . # Query test metric\\ncurl -s \'http://localhost:9090/api/v1/query?query=up\' | jq .","breadcrumbs":"Monitoring & Alerting Setup » 3. Verify Prometheus","id":"3073","title":"3. Verify Prometheus"},"3074":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » Alert Rules Configuration","id":"3074","title":"Alert Rules Configuration"},"3075":{"body":"# /etc/prometheus/rules/platform-alerts.yml\\ngroups: - name: platform_availability interval: 30s rules: - alert: ServiceDown expr: up{job=~\\"vault-service|registry|rag|ai-service|orchestrator\\"} == 0 for: 5m labels: severity: critical service: \'{{ $labels.job }}\' annotations: summary: \\"{{ $labels.job }} is DOWN\\" description: \\"{{ $labels.job }} has been down for 5+ minutes\\" - alert: ServiceSlowResponse expr: histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m])) > 1 for: 5m labels: severity: warning service: \'{{ $labels.job }}\' annotations: summary: \\"{{ $labels.job }} slow response times\\" description: \\"95th percentile latency above 1 second\\" - name: platform_errors interval: 30s rules: - alert: HighErrorRate expr: rate(http_requests_total{status=~\\"5..\\"}[5m]) > 0.05 for: 5m labels: severity: warning service: \'{{ $labels.job }}\' annotations: summary: \\"{{ $labels.job }} high error rate\\" description: \\"Error rate above 5% for 5 minutes\\" - alert: DatabaseConnectionError expr: increase(database_connection_errors_total[5m]) > 10 for: 2m labels: severity: critical component: database annotations: summary: \\"Database connection failures detected\\" description: \\"{{ $value }} connection errors in last 5 minutes\\" - alert: QueueBacklog expr: orchestrator_queue_depth > 1000 for: 5m labels: severity: warning component: orchestrator annotations: summary: \\"Orchestrator queue backlog growing\\" description: \\"Queue depth: {{ $value }} tasks\\" - name: platform_resources interval: 30s rules: - alert: HighMemoryUsage expr: container_memory_usage_bytes / container_spec_memory_limit_bytes > 0.9 for: 5m labels: severity: warning resource: memory annotations: summary: \\"{{ $labels.container_name }} memory usage critical\\" description: \\"Memory usage: {{ $value | humanizePercentage }}\\" - alert: HighDiskUsage expr: node_filesystem_avail_bytes{mountpoint=\\"/\\"} / node_filesystem_size_bytes < 0.1 for: 5m labels: severity: warning resource: disk annotations: summary: \\"Disk space critically low\\" description: \\"Available disk space: {{ $value | humanizePercentage }}\\" - alert: HighCPUUsage expr: (1 - avg(rate(node_cpu_seconds_total{mode=\\"idle\\"}[5m])) by (instance)) > 0.9 for: 10m labels: severity: warning resource: cpu annotations: summary: \\"High CPU usage detected\\" description: \\"CPU usage: {{ $value | humanizePercentage }}\\" - alert: DiskIOLatency expr: node_disk_io_time_seconds_total > 100 for: 5m labels: severity: warning resource: disk annotations: summary: \\"High disk I/O latency\\" description: \\"I/O latency: {{ $value }}ms\\" - name: platform_network interval: 30s rules: - alert: HighNetworkLatency expr: probe_duration_seconds > 0.5 for: 5m labels: severity: warning component: network annotations: summary: \\"High network latency detected\\" description: \\"Latency: {{ $value }}ms\\" - alert: PacketLoss expr: node_network_transmit_errors_total > 100 for: 5m labels: severity: warning component: network annotations: summary: \\"Packet loss detected\\" description: \\"Transmission errors: {{ $value }}\\" - name: platform_services interval: 30s rules: - alert: VaultSealed expr: vault_core_unsealed == 0 for: 1m labels: severity: critical service: vault annotations: summary: \\"Vault is sealed\\" description: \\"Vault instance is sealed and requires unseal operation\\" - alert: RegistryAuthError expr: increase(registry_auth_failures_total[5m]) > 5 for: 2m labels: severity: warning service: registry annotations: summary: \\"Registry authentication failures\\" description: \\"{{ $value }} auth failures in last 5 minutes\\" - alert: RAGVectorDBDown expr: rag_vectordb_connection_status == 0 for: 2m labels: severity: critical service: rag annotations: summary: \\"RAG Vector Database disconnected\\" description: \\"Vector DB connection lost\\" - alert: AIServiceMCPError expr: increase(ai_service_mcp_errors_total[5m]) > 10 for: 2m labels: severity: warning service: ai_service annotations: summary: \\"AI Service MCP integration errors\\" description: \\"{{ $value }} errors in last 5 minutes\\" - alert: OrchestratorLeaderElectionIssue expr: orchestrator_leader_elected == 0 for: 5m labels: severity: critical service: orchestrator annotations: summary: \\"Orchestrator leader election failed\\" description: \\"No leader elected in cluster\\"","breadcrumbs":"Monitoring & Alerting Setup » 1. Create Alert Rules","id":"3075","title":"1. Create Alert Rules"},"3076":{"body":"# Check rule syntax\\n/opt/prometheus/promtool check rules /etc/prometheus/rules/platform-alerts.yml # Reload Prometheus with new rules (without restart)\\ncurl -X POST http://localhost:9090/-/reload","breadcrumbs":"Monitoring & Alerting Setup » 2. Validate Alert Rules","id":"3076","title":"2. Validate Alert Rules"},"3077":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » AlertManager Configuration","id":"3077","title":"AlertManager Configuration"},"3078":{"body":"# /etc/alertmanager/alertmanager.yml\\nglobal: resolve_timeout: 5m slack_api_url: \'YOUR_SLACK_WEBHOOK_URL\' pagerduty_url: \'https://events.pagerduty.com/v2/enqueue\' route: receiver: \'platform-notifications\' group_by: [\'alertname\', \'service\', \'severity\'] group_wait: 10s group_interval: 10s repeat_interval: 12h routes: # Critical alerts go to PagerDuty - match: severity: critical receiver: \'pagerduty-critical\' group_wait: 0s repeat_interval: 5m # Warnings go to Slack - match: severity: warning receiver: \'slack-warnings\' repeat_interval: 1h # Service-specific routing - match: service: vault receiver: \'vault-team\' group_by: [\'service\', \'severity\'] - match: service: orchestrator receiver: \'orchestrator-team\' group_by: [\'service\', \'severity\'] receivers: - name: \'platform-notifications\' slack_configs: - channel: \'#platform-alerts\' title: \'Platform Alert\' text: \'{{ range .Alerts }}{{ .Annotations.description }}{{ end }}\' send_resolved: true - name: \'slack-warnings\' slack_configs: - channel: \'#platform-warnings\' title: \'Warning: {{ .GroupLabels.alertname }}\' text: \'{{ range .Alerts }}{{ .Annotations.description }}{{ end }}\' - name: \'pagerduty-critical\' pagerduty_configs: - service_key: \'YOUR_PAGERDUTY_SERVICE_KEY\' description: \'{{ .GroupLabels.alertname }}\' details: firing: \'{{ template \\"pagerduty.default.instances\\" .Alerts.Firing }}\' - name: \'vault-team\' email_configs: - to: \'vault-team@company.com\' from: \'alertmanager@company.com\' smarthost: \'smtp.company.com:587\' auth_username: \'alerts@company.com\' auth_password: \'PASSWORD\' headers: Subject: \'Vault Alert: {{ .GroupLabels.alertname }}\' - name: \'orchestrator-team\' email_configs: - to: \'orchestrator-team@company.com\' from: \'alertmanager@company.com\' smarthost: \'smtp.company.com:587\' inhibit_rules: # Don\'t alert on errors if service is already down - source_match: severity: \'critical\' alertname: \'ServiceDown\' target_match_re: severity: \'warning|info\' equal: [\'service\', \'instance\'] # Don\'t alert on resource exhaustion if service is down - source_match: alertname: \'ServiceDown\' target_match_re: alertname: \'HighMemoryUsage|HighCPUUsage\' equal: [\'instance\']","breadcrumbs":"Monitoring & Alerting Setup » 1. Create AlertManager Config","id":"3078","title":"1. Create AlertManager Config"},"3079":{"body":"cd /opt/alertmanager\\nsudo ./alertmanager --config.file=/etc/alertmanager/alertmanager.yml \\\\ --storage.path=/var/lib/alertmanager # Or as systemd service\\nsudo tee /etc/systemd/system/alertmanager.service > /dev/null << EOF\\n[Unit]\\nDescription=AlertManager\\nWants=network-online.target\\nAfter=network-online.target [Service]\\nUser=alertmanager\\nType=simple\\nExecStart=/opt/alertmanager/alertmanager \\\\ --config.file=/etc/alertmanager/alertmanager.yml \\\\ --storage.path=/var/lib/alertmanager Restart=on-failure\\nRestartSec=10 [Install]\\nWantedBy=multi-user.target\\nEOF sudo systemctl daemon-reload\\nsudo systemctl enable alertmanager\\nsudo systemctl start alertmanager","breadcrumbs":"Monitoring & Alerting Setup » 2. Start AlertManager","id":"3079","title":"2. Start AlertManager"},"308":{"body":"This guide walks you through installing the Provisioning Platform on your system.","breadcrumbs":"Installation Steps » Installation","id":"308","title":"Installation"},"3080":{"body":"# Check AlertManager is running\\ncurl -s http://localhost:9093/-/healthy # List active alerts\\ncurl -s http://localhost:9093/api/v1/alerts | jq . # Check configuration\\ncurl -s http://localhost:9093/api/v1/status | jq .","breadcrumbs":"Monitoring & Alerting Setup » 3. Verify AlertManager","id":"3080","title":"3. Verify AlertManager"},"3081":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » Grafana Dashboards","id":"3081","title":"Grafana Dashboards"},"3082":{"body":"# Install Grafana\\nsudo apt-get install -y grafana-server # Start Grafana\\nsudo systemctl enable grafana-server\\nsudo systemctl start grafana-server # Access at http://localhost:3000\\n# Default: admin/admin","breadcrumbs":"Monitoring & Alerting Setup » 1. Install Grafana","id":"3082","title":"1. Install Grafana"},"3083":{"body":"# Via API\\ncurl -X POST http://localhost:3000/api/datasources \\\\ -H \\"Content-Type: application/json\\" \\\\ -u admin:admin \\\\ -d \'{ \\"name\\": \\"Prometheus\\", \\"type\\": \\"prometheus\\", \\"url\\": \\"http://localhost:9090\\", \\"access\\": \\"proxy\\", \\"isDefault\\": true }\'","breadcrumbs":"Monitoring & Alerting Setup » 2. Add Prometheus Data Source","id":"3083","title":"2. Add Prometheus Data Source"},"3084":{"body":"{ \\"dashboard\\": { \\"title\\": \\"Platform Overview\\", \\"description\\": \\"9-service provisioning platform metrics\\", \\"tags\\": [\\"platform\\", \\"overview\\"], \\"timezone\\": \\"browser\\", \\"panels\\": [ { \\"title\\": \\"Service Status\\", \\"type\\": \\"stat\\", \\"targets\\": [ { \\"expr\\": \\"up{job=~\\\\\\"vault-service|registry|rag|ai-service|orchestrator|control-center|mcp-server\\\\\\"}\\" } ], \\"fieldConfig\\": { \\"defaults\\": { \\"mappings\\": [ { \\"type\\": \\"value\\", \\"value\\": \\"1\\", \\"text\\": \\"UP\\" }, { \\"type\\": \\"value\\", \\"value\\": \\"0\\", \\"text\\": \\"DOWN\\" } ] } } }, { \\"title\\": \\"Request Rate\\", \\"type\\": \\"graph\\", \\"targets\\": [ { \\"expr\\": \\"rate(http_requests_total[5m])\\" } ] }, { \\"title\\": \\"Error Rate\\", \\"type\\": \\"graph\\", \\"targets\\": [ { \\"expr\\": \\"rate(http_requests_total{status=~\\\\\\"5..\\\\\\"}[5m])\\" } ] }, { \\"title\\": \\"Latency (p95)\\", \\"type\\": \\"graph\\", \\"targets\\": [ { \\"expr\\": \\"histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))\\" } ] }, { \\"title\\": \\"Memory Usage\\", \\"type\\": \\"graph\\", \\"targets\\": [ { \\"expr\\": \\"container_memory_usage_bytes / 1024 / 1024\\" } ] }, { \\"title\\": \\"Disk Usage\\", \\"type\\": \\"gauge\\", \\"targets\\": [ { \\"expr\\": \\"(1 - (node_filesystem_avail_bytes / node_filesystem_size_bytes)) * 100\\" } ] } ] }\\n}","breadcrumbs":"Monitoring & Alerting Setup » 3. Create Platform Overview Dashboard","id":"3084","title":"3. Create Platform Overview Dashboard"},"3085":{"body":"# Save dashboard JSON to file\\ncat > platform-overview.json << \'EOF\'\\n{ \\"dashboard\\": { ... }\\n}\\nEOF # Import dashboard\\ncurl -X POST http://localhost:3000/api/dashboards/db \\\\ -H \\"Content-Type: application/json\\" \\\\ -u admin:admin \\\\ -d @platform-overview.json","breadcrumbs":"Monitoring & Alerting Setup » 4. Import Dashboard via API","id":"3085","title":"4. Import Dashboard via API"},"3086":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » Health Check Monitoring","id":"3086","title":"Health Check Monitoring"},"3087":{"body":"#!/bin/bash\\n# scripts/check-service-health.sh SERVICES=( \\"vault:8200\\" \\"registry:8081\\" \\"rag:8083\\" \\"ai-service:8082\\" \\"orchestrator:9090\\" \\"control-center:8080\\" \\"mcp-server:8084\\"\\n) UNHEALTHY=0 for service in \\"${SERVICES[@]}\\"; do IFS=\':\' read -r name port <<< \\"$service\\" response=$(curl -s -o /dev/null -w \\"%{http_code}\\" http://localhost:$port/health) if [ \\"$response\\" = \\"200\\" ]; then echo \\"✓ $name is healthy\\" else echo \\"✗ $name is UNHEALTHY (HTTP $response)\\" ((UNHEALTHY++)) fi\\ndone if [ $UNHEALTHY -gt 0 ]; then echo \\"\\" echo \\"WARNING: $UNHEALTHY service(s) unhealthy\\" exit 1\\nfi exit 0","breadcrumbs":"Monitoring & Alerting Setup » 1. Service Health Check Script","id":"3087","title":"1. Service Health Check Script"},"3088":{"body":"# For Kubernetes deployments\\napiVersion: v1\\nkind: Pod\\nmetadata: name: vault-service\\nspec: containers: - name: vault-service image: vault-service:latest livenessProbe: httpGet: path: /health port: 8200 initialDelaySeconds: 30 periodSeconds: 10 failureThreshold: 3 readinessProbe: httpGet: path: /health port: 8200 initialDelaySeconds: 10 periodSeconds: 5 failureThreshold: 2","breadcrumbs":"Monitoring & Alerting Setup » 2. Liveness Probe Configuration","id":"3088","title":"2. Liveness Probe Configuration"},"3089":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » Log Aggregation (ELK Stack)","id":"3089","title":"Log Aggregation (ELK Stack)"},"309":{"body":"The installation process involves: Cloning the repository Installing Nushell plugins Setting up configuration Initializing your first workspace Estimated time: 15-20 minutes","breadcrumbs":"Installation Steps » Overview","id":"309","title":"Overview"},"3090":{"body":"# Install Elasticsearch\\nwget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-8.11.0-linux-x86_64.tar.gz\\ntar xvfz elasticsearch-8.11.0-linux-x86_64.tar.gz\\ncd elasticsearch-8.11.0/bin\\n./elasticsearch","breadcrumbs":"Monitoring & Alerting Setup » 1. Elasticsearch Setup","id":"3090","title":"1. Elasticsearch Setup"},"3091":{"body":"# /etc/filebeat/filebeat.yml\\nfilebeat.inputs: - type: log enabled: true paths: - /var/log/provisioning/*.log fields: service: provisioning-platform environment: production output.elasticsearch: hosts: [\\"localhost:9200\\"] username: \\"elastic\\" password: \\"changeme\\" logging.level: info\\nlogging.to_files: true\\nlogging.files: path: /var/log/filebeat","breadcrumbs":"Monitoring & Alerting Setup » 2. Filebeat Configuration","id":"3091","title":"2. Filebeat Configuration"},"3092":{"body":"# Access at http://localhost:5601\\n# Create index pattern: provisioning-*\\n# Create visualizations for:\\n# - Error rate over time\\n# - Service availability\\n# - Performance metrics\\n# - Request volume","breadcrumbs":"Monitoring & Alerting Setup » 3. Kibana Dashboard","id":"3092","title":"3. Kibana Dashboard"},"3093":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » Monitoring Dashboard Queries","id":"3093","title":"Monitoring Dashboard Queries"},"3094":{"body":"# Service availability (last hour)\\navg(increase(up[1h])) by (job) # Request rate per service\\nsum(rate(http_requests_total[5m])) by (job) # Error rate per service\\nsum(rate(http_requests_total{status=~\\"5..\\"}[5m])) by (job) # Latency percentiles\\nhistogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))\\nhistogram_quantile(0.99, rate(http_request_duration_seconds_bucket[5m])) # Memory usage per service\\ncontainer_memory_usage_bytes / 1024 / 1024 / 1024 # CPU usage per service\\nrate(container_cpu_usage_seconds_total[5m]) * 100 # Disk I/O operations\\nrate(node_disk_io_time_seconds_total[5m]) # Network throughput\\nrate(node_network_transmit_bytes_total[5m]) # Queue depth (Orchestrator)\\norchestrator_queue_depth # Task processing rate\\nrate(orchestrator_tasks_total[5m]) # Task failure rate\\nrate(orchestrator_tasks_failed_total[5m]) # Cache hit ratio\\nrate(service_cache_hits_total[5m]) / (rate(service_cache_hits_total[5m]) + rate(service_cache_misses_total[5m])) # Database connection pool status\\ndatabase_connection_pool_usage{job=\\"orchestrator\\"} # TLS certificate expiration\\n(ssl_certificate_expiry - time()) / 86400","breadcrumbs":"Monitoring & Alerting Setup » Common Prometheus Queries","id":"3094","title":"Common Prometheus Queries"},"3095":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » Alert Testing","id":"3095","title":"Alert Testing"},"3096":{"body":"# Manually fire test alert\\ncurl -X POST http://localhost:9093/api/v1/alerts \\\\ -H \'Content-Type: application/json\' \\\\ -d \'[ { \\"status\\": \\"firing\\", \\"labels\\": { \\"alertname\\": \\"TestAlert\\", \\"severity\\": \\"critical\\" }, \\"annotations\\": { \\"summary\\": \\"This is a test alert\\", \\"description\\": \\"Test alert to verify notification routing\\" } } ]\'","breadcrumbs":"Monitoring & Alerting Setup » 1. Test Alert Firing","id":"3096","title":"1. Test Alert Firing"},"3097":{"body":"# Stop a service to trigger ServiceDown alert\\npkill -9 vault-service # Within 5 minutes, alert should fire\\n# Check AlertManager UI: http://localhost:9093 # Restart service\\ncargo run --release -p vault-service & # Alert should resolve after service is back up","breadcrumbs":"Monitoring & Alerting Setup » 2. Stop Service to Trigger Alert","id":"3097","title":"2. Stop Service to Trigger Alert"},"3098":{"body":"# Generate request load\\nab -n 10000 -c 100 http://localhost:9090/api/v1/health # Monitor error rate in Prometheus\\ncurl -s \'http://localhost:9090/api/v1/query?query=rate(http_requests_total{status=~\\"5..\\"}[5m])\' | jq .","breadcrumbs":"Monitoring & Alerting Setup » 3. Generate Load to Test Error Alerts","id":"3098","title":"3. Generate Load to Test Error Alerts"},"3099":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » Backup & Retention Policies","id":"3099","title":"Backup & Retention Policies"},"31":{"body":"Solves Nushell deep call stack limitations Preserves all business logic REST API for external integration Checkpoint-based state management","breadcrumbs":"Home » 🏗️ Hybrid Orchestrator (v3.0.0)","id":"31","title":"🏗️ Hybrid Orchestrator (v3.0.0)"},"310":{"body":"# Clone the repository\\ngit clone https://github.com/provisioning/provisioning-platform.git\\ncd provisioning-platform # Checkout the latest stable release (optional)\\ngit checkout tags/v3.5.0","breadcrumbs":"Installation Steps » Step 1: Clone the Repository","id":"310","title":"Step 1: Clone the Repository"},"3100":{"body":"#!/bin/bash\\n# scripts/backup-prometheus-data.sh BACKUP_DIR=\\"/backups/prometheus\\"\\nRETENTION_DAYS=30 # Create snapshot\\ncurl -X POST http://localhost:9090/api/v1/admin/tsdb/snapshot # Backup snapshot\\nSNAPSHOT=$(ls -t /var/lib/prometheus/snapshots | head -1)\\ntar -czf \\"$BACKUP_DIR/prometheus-$SNAPSHOT.tar.gz\\" \\\\ \\"/var/lib/prometheus/snapshots/$SNAPSHOT\\" # Upload to S3\\naws s3 cp \\"$BACKUP_DIR/prometheus-$SNAPSHOT.tar.gz\\" \\\\ s3://backups/prometheus/ # Clean old backups\\nfind \\"$BACKUP_DIR\\" -mtime +$RETENTION_DAYS -delete","breadcrumbs":"Monitoring & Alerting Setup » 1. Prometheus Data Backup","id":"3100","title":"1. Prometheus Data Backup"},"3101":{"body":"# Keep metrics for 15 days\\n/opt/prometheus/prometheus \\\\ --storage.tsdb.retention.time=15d \\\\ --storage.tsdb.retention.size=50 GB","breadcrumbs":"Monitoring & Alerting Setup » 2. Prometheus Retention Configuration","id":"3101","title":"2. Prometheus Retention Configuration"},"3102":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » Maintenance & Troubleshooting","id":"3102","title":"Maintenance & Troubleshooting"},"3103":{"body":"Prometheus Won\'t Scrape Service # Check configuration\\n/opt/prometheus/promtool check config /etc/prometheus/prometheus.yml # Verify service is accessible\\ncurl http://localhost:8200/metrics # Check Prometheus targets\\ncurl -s http://localhost:9090/api/v1/targets | jq \'.data.activeTargets[] | select(.job==\\"vault-service\\")\' # Check scrape error\\ncurl -s http://localhost:9090/api/v1/targets | jq \'.data.activeTargets[] | .lastError\' AlertManager Not Sending Notifications # Verify AlertManager config\\n/opt/alertmanager/amtool config routes # Test webhook\\ncurl -X POST http://localhost:3012/ -d \'{\\"test\\": \\"alert\\"}\' # Check AlertManager logs\\njournalctl -u alertmanager -n 100 -f # Verify notification channels configured\\ncurl -s http://localhost:9093/api/v1/receivers High Memory Usage # Reduce Prometheus retention\\nprometheus --storage.tsdb.retention.time=7d --storage.tsdb.max-block-duration=2h # Disable unused scrape jobs\\n# Edit prometheus.yml and remove unused jobs # Monitor memory\\nps aux | grep prometheus | grep -v grep","breadcrumbs":"Monitoring & Alerting Setup » Common Issues","id":"3103","title":"Common Issues"},"3104":{"body":"Prometheus installed and running AlertManager installed and running Grafana installed and configured Prometheus scraping all 8 services Alert rules deployed and validated Notification channels configured (Slack, email, PagerDuty) AlertManager webhooks tested Grafana dashboards created Log aggregation stack deployed (optional) Backup scripts configured Retention policies set Health checks configured Team notified of alerting setup Runbooks created for common alerts Alert testing procedure documented","breadcrumbs":"Monitoring & Alerting Setup » Production Deployment Checklist","id":"3104","title":"Production Deployment Checklist"},"3105":{"body":"# Prometheus\\ncurl http://localhost:9090/api/v1/targets # List scrape targets\\ncurl \'http://localhost:9090/api/v1/query?query=up\' # Query metric\\ncurl -X POST http://localhost:9090/-/reload # Reload config # AlertManager\\ncurl http://localhost:9093/api/v1/alerts # List active alerts\\ncurl http://localhost:9093/api/v1/receivers # List receivers\\ncurl http://localhost:9093/api/v2/status # Check status # Grafana\\ncurl -u admin:admin http://localhost:3000/api/datasources # List data sources\\ncurl -u admin:admin http://localhost:3000/api/dashboards # List dashboards # Validation\\npromtool check config /etc/prometheus/prometheus.yml\\npromtool check rules /etc/prometheus/rules/platform-alerts.yml\\namtool config routes","breadcrumbs":"Monitoring & Alerting Setup » Quick Commands Reference","id":"3105","title":"Quick Commands Reference"},"3106":{"body":"","breadcrumbs":"Monitoring & Alerting Setup » Documentation & Runbooks","id":"3106","title":"Documentation & Runbooks"},"3107":{"body":"# Service Down Alert ## Detection\\nAlert fires when service is unreachable for 5+ minutes ## Immediate Actions\\n1. Check service is running: pgrep -f service-name\\n2. Check service port: ss -tlnp | grep 8200\\n3. Check service logs: tail -100 /var/log/provisioning/service.log ## Diagnosis\\n1. Service crashed: look for panic/error in logs\\n2. Port conflict: lsof -i :8200\\n3. Configuration issue: validate config file\\n4. Dependency down: check database/cache connectivity ## Remediation\\n1. Restart service: pkill service && cargo run --release -p service &\\n2. Check health: curl http://localhost:8200/health\\n3. Verify dependencies: curl http://localhost:5432/health ## Escalation\\nIf service doesn\'t recover after restart, escalate to on-call engineer","breadcrumbs":"Monitoring & Alerting Setup » Sample Runbook: Service Down","id":"3107","title":"Sample Runbook: Service Down"},"3108":{"body":"Prometheus Documentation AlertManager Documentation Grafana Documentation Platform Deployment Guide Service Management Guide Last Updated : 2026-01-05 Version : 1.0.0 Status : Production Ready ✅","breadcrumbs":"Monitoring & Alerting Setup » Resources","id":"3108","title":"Resources"},"3109":{"body":"Version : 1.0.0 Date : 2025-10-06 Author : CoreDNS Integration Agent","breadcrumbs":"CoreDNS Guide » CoreDNS Integration Guide","id":"3109","title":"CoreDNS Integration Guide"},"311":{"body":"The platform uses multiple Nushell plugins for enhanced functionality.","breadcrumbs":"Installation Steps » Step 2: Install Nushell Plugins","id":"311","title":"Step 2: Install Nushell Plugins"},"3110":{"body":"Overview Installation Configuration CLI Commands Zone Management Record Management Docker Deployment Integration Troubleshooting Advanced Topics","breadcrumbs":"CoreDNS Guide » Table of Contents","id":"3110","title":"Table of Contents"},"3111":{"body":"The CoreDNS integration provides comprehensive DNS management capabilities for the provisioning system. It supports: Local DNS service - Run CoreDNS as binary or Docker container Dynamic DNS updates - Automatic registration of infrastructure changes Multi-zone support - Manage multiple DNS zones Provider integration - Seamless integration with orchestrator REST API - Programmatic DNS management Docker deployment - Containerized CoreDNS with docker-compose","breadcrumbs":"CoreDNS Guide » Overview","id":"3111","title":"Overview"},"3112":{"body":"✅ Automatic Server Registration - Servers automatically registered in DNS on creation ✅ Zone File Management - Create, update, and manage zone files programmatically ✅ Multiple Deployment Modes - Binary, Docker, remote, or hybrid ✅ Health Monitoring - Built-in health checks and metrics ✅ CLI Interface - Comprehensive command-line tools ✅ API Integration - REST API for external integration","breadcrumbs":"CoreDNS Guide » Key Features","id":"3112","title":"Key Features"},"3113":{"body":"","breadcrumbs":"CoreDNS Guide » Installation","id":"3113","title":"Installation"},"3114":{"body":"Nushell 0.107+ - For CLI and scripts Docker (optional) - For containerized deployment dig (optional) - For DNS queries","breadcrumbs":"CoreDNS Guide » Prerequisites","id":"3114","title":"Prerequisites"},"3115":{"body":"# Install latest version\\nprovisioning dns install # Install specific version\\nprovisioning dns install 1.11.1 # Check mode\\nprovisioning dns install --check The binary will be installed to ~/.provisioning/bin/coredns.","breadcrumbs":"CoreDNS Guide » Install CoreDNS Binary","id":"3115","title":"Install CoreDNS Binary"},"3116":{"body":"# Check CoreDNS version\\n~/.provisioning/bin/coredns -version # Verify installation\\nls -lh ~/.provisioning/bin/coredns","breadcrumbs":"CoreDNS Guide » Verify Installation","id":"3116","title":"Verify Installation"},"3117":{"body":"","breadcrumbs":"CoreDNS Guide » Configuration","id":"3117","title":"Configuration"},"3118":{"body":"Add CoreDNS configuration to your infrastructure config: # In workspace/infra/{name}/config.ncl\\nlet coredns_config = { mode = \\"local\\", local = { enabled = true, deployment_type = \\"binary\\", # or \\"docker\\" binary_path = \\"~/.provisioning/bin/coredns\\", config_path = \\"~/.provisioning/coredns/Corefile\\", zones_path = \\"~/.provisioning/coredns/zones\\", port = 5353, auto_start = true, zones = [\\"provisioning.local\\", \\"workspace.local\\"], }, dynamic_updates = { enabled = true, api_endpoint = \\"http://localhost:9090/dns\\", auto_register_servers = true, auto_unregister_servers = true, ttl = 300, }, upstream = [\\"8.8.8.8\\", \\"1.1.1.1\\"], default_ttl = 3600, enable_logging = true, enable_metrics = true, metrics_port = 9153,\\n} in\\ncoredns_config","breadcrumbs":"CoreDNS Guide » Nickel Configuration Schema","id":"3118","title":"Nickel Configuration Schema"},"3119":{"body":"Local Mode (Binary) Run CoreDNS as a local binary process: let coredns_config = { mode = \\"local\\", local = { deployment_type = \\"binary\\", auto_start = true, },\\n} in\\ncoredns_config Local Mode (Docker) Run CoreDNS in Docker container: let coredns_config = { mode = \\"local\\", local = { deployment_type = \\"docker\\", docker = { image = \\"coredns/coredns:1.11.1\\", container_name = \\"provisioning-coredns\\", restart_policy = \\"unless-stopped\\", }, },\\n} in\\ncoredns_config Remote Mode Connect to external CoreDNS service: let coredns_config = { mode = \\"remote\\", remote = { enabled = true, endpoints = [\\"https://dns1.example.com\\", \\"https://dns2.example.com\\"], zones = [\\"production.local\\"], verify_tls = true, },\\n} in\\ncoredns_config Disabled Mode Disable CoreDNS integration: let coredns_config = { mode = \\"disabled\\",\\n} in\\ncoredns_config","breadcrumbs":"CoreDNS Guide » Configuration Modes","id":"3119","title":"Configuration Modes"},"312":{"body":"# Install from crates.io\\ncargo install nu_plugin_tera # Register with Nushell\\nnu -c \\"plugin add ~/.cargo/bin/nu_plugin_tera; plugin use tera\\"","breadcrumbs":"Installation Steps » Install nu_plugin_tera (Template Rendering)","id":"312","title":"Install nu_plugin_tera (Template Rendering)"},"3120":{"body":"","breadcrumbs":"CoreDNS Guide » CLI Commands","id":"3120","title":"CLI Commands"},"3121":{"body":"# Check status\\nprovisioning dns status # Start service\\nprovisioning dns start # Start in foreground (for debugging)\\nprovisioning dns start --foreground # Stop service\\nprovisioning dns stop # Restart service\\nprovisioning dns restart # Reload configuration (graceful)\\nprovisioning dns reload # View logs\\nprovisioning dns logs # Follow logs\\nprovisioning dns logs --follow # Show last 100 lines\\nprovisioning dns logs --lines 100","breadcrumbs":"CoreDNS Guide » Service Management","id":"3121","title":"Service Management"},"3122":{"body":"# Check health\\nprovisioning dns health # View configuration\\nprovisioning dns config show # Validate configuration\\nprovisioning dns config validate # Generate new Corefile\\nprovisioning dns config generate","breadcrumbs":"CoreDNS Guide » Health & Monitoring","id":"3122","title":"Health & Monitoring"},"3123":{"body":"","breadcrumbs":"CoreDNS Guide » Zone Management","id":"3123","title":"Zone Management"},"3124":{"body":"# List all zones\\nprovisioning dns zone list Output: DNS Zones\\n========= • provisioning.local ✓ • workspace.local ✓","breadcrumbs":"CoreDNS Guide » List Zones","id":"3124","title":"List Zones"},"3125":{"body":"# Create new zone\\nprovisioning dns zone create myapp.local # Check mode\\nprovisioning dns zone create myapp.local --check","breadcrumbs":"CoreDNS Guide » Create Zone","id":"3125","title":"Create Zone"},"3126":{"body":"# Show all records in zone\\nprovisioning dns zone show provisioning.local # JSON format\\nprovisioning dns zone show provisioning.local --format json # YAML format\\nprovisioning dns zone show provisioning.local --format yaml","breadcrumbs":"CoreDNS Guide » Show Zone Details","id":"3126","title":"Show Zone Details"},"3127":{"body":"# Delete zone (with confirmation)\\nprovisioning dns zone delete myapp.local # Force deletion (skip confirmation)\\nprovisioning dns zone delete myapp.local --force # Check mode\\nprovisioning dns zone delete myapp.local --check","breadcrumbs":"CoreDNS Guide » Delete Zone","id":"3127","title":"Delete Zone"},"3128":{"body":"","breadcrumbs":"CoreDNS Guide » Record Management","id":"3128","title":"Record Management"},"3129":{"body":"A Record (IPv4) provisioning dns record add server-01 A 10.0.1.10 # With custom TTL\\nprovisioning dns record add server-01 A 10.0.1.10 --ttl 600 # With comment\\nprovisioning dns record add server-01 A 10.0.1.10 --comment \\"Web server\\" # Different zone\\nprovisioning dns record add server-01 A 10.0.1.10 --zone myapp.local AAAA Record (IPv6) provisioning dns record add server-01 AAAA 2001:db8::1 CNAME Record provisioning dns record add web CNAME server-01.provisioning.local MX Record provisioning dns record add @ MX mail.example.com --priority 10 TXT Record provisioning dns record add @ TXT \\"v=spf1 mx -all\\"","breadcrumbs":"CoreDNS Guide » Add Records","id":"3129","title":"Add Records"},"313":{"body":"# Start Nushell\\nnu # List installed plugins\\nplugin list # Expected output should include:\\n# - tera","breadcrumbs":"Installation Steps » Verify Plugin Installation","id":"313","title":"Verify Plugin Installation"},"3130":{"body":"# Remove record\\nprovisioning dns record remove server-01 # Different zone\\nprovisioning dns record remove server-01 --zone myapp.local # Check mode\\nprovisioning dns record remove server-01 --check","breadcrumbs":"CoreDNS Guide » Remove Records","id":"3130","title":"Remove Records"},"3131":{"body":"# Update record value\\nprovisioning dns record update server-01 A 10.0.1.20 # With new TTL\\nprovisioning dns record update server-01 A 10.0.1.20 --ttl 1800","breadcrumbs":"CoreDNS Guide » Update Records","id":"3131","title":"Update Records"},"3132":{"body":"# List all records in zone\\nprovisioning dns record list # Different zone\\nprovisioning dns record list --zone myapp.local # JSON format\\nprovisioning dns record list --format json # YAML format\\nprovisioning dns record list --format yaml Example Output: DNS Records - Zone: provisioning.local ╭───┬──────────────┬──────┬─────────────┬─────╮\\n│ # │ name │ type │ value │ ttl │\\n├───┼──────────────┼──────┼─────────────┼─────┤\\n│ 0 │ server-01 │ A │ 10.0.1.10 │ 300 │\\n│ 1 │ server-02 │ A │ 10.0.1.11 │ 300 │\\n│ 2 │ db-01 │ A │ 10.0.2.10 │ 300 │\\n│ 3 │ web │ CNAME│ server-01 │ 300 │\\n╰───┴──────────────┴──────┴─────────────┴─────╯","breadcrumbs":"CoreDNS Guide » List Records","id":"3132","title":"List Records"},"3133":{"body":"","breadcrumbs":"CoreDNS Guide » Docker Deployment","id":"3133","title":"Docker Deployment"},"3134":{"body":"Ensure Docker and docker-compose are installed: docker --version\\ndocker-compose --version","breadcrumbs":"CoreDNS Guide » Prerequisites","id":"3134","title":"Prerequisites"},"3135":{"body":"# Start CoreDNS container\\nprovisioning dns docker start # Check mode\\nprovisioning dns docker start --check","breadcrumbs":"CoreDNS Guide » Start CoreDNS in Docker","id":"3135","title":"Start CoreDNS in Docker"},"3136":{"body":"# Check status\\nprovisioning dns docker status # View logs\\nprovisioning dns docker logs # Follow logs\\nprovisioning dns docker logs --follow # Restart container\\nprovisioning dns docker restart # Stop container\\nprovisioning dns docker stop # Check health\\nprovisioning dns docker health","breadcrumbs":"CoreDNS Guide » Manage Docker Container","id":"3136","title":"Manage Docker Container"},"3137":{"body":"# Pull latest image\\nprovisioning dns docker pull # Pull specific version\\nprovisioning dns docker pull --version 1.11.1 # Update and restart\\nprovisioning dns docker update","breadcrumbs":"CoreDNS Guide » Update Docker Image","id":"3137","title":"Update Docker Image"},"3138":{"body":"# Remove container (with confirmation)\\nprovisioning dns docker remove # Remove with volumes\\nprovisioning dns docker remove --volumes # Force remove (skip confirmation)\\nprovisioning dns docker remove --force # Check mode\\nprovisioning dns docker remove --check","breadcrumbs":"CoreDNS Guide » Remove Container","id":"3138","title":"Remove Container"},"3139":{"body":"# Show docker-compose config\\nprovisioning dns docker config","breadcrumbs":"CoreDNS Guide » View Configuration","id":"3139","title":"View Configuration"},"314":{"body":"Make the provisioning command available globally: # Option 1: Symlink to /usr/local/bin (recommended)\\nsudo ln -s \\"$(pwd)/provisioning/core/cli/provisioning\\" /usr/local/bin/provisioning # Option 2: Add to PATH in your shell profile\\necho \'export PATH=\\"$PATH:\'\\"$(pwd)\\"\'/provisioning/core/cli\\"\' >> ~/.bashrc # or ~/.zshrc\\nsource ~/.bashrc # or ~/.zshrc # Verify installation\\nprovisioning --version","breadcrumbs":"Installation Steps » Step 3: Add CLI to PATH","id":"314","title":"Step 3: Add CLI to PATH"},"3140":{"body":"","breadcrumbs":"CoreDNS Guide » Integration","id":"3140","title":"Integration"},"3141":{"body":"When dynamic DNS is enabled, servers are automatically registered: # Create server (automatically registers in DNS)\\nprovisioning server create web-01 --infra myapp # Server gets DNS record: web-01.provisioning.local -> ","breadcrumbs":"CoreDNS Guide » Automatic Server Registration","id":"3141","title":"Automatic Server Registration"},"3142":{"body":"use lib_provisioning/coredns/integration.nu * # Register server\\nregister-server-in-dns \\"web-01\\" \\"10.0.1.10\\" # Unregister server\\nunregister-server-from-dns \\"web-01\\" # Bulk register\\nbulk-register-servers [ {hostname: \\"web-01\\", ip: \\"10.0.1.10\\"} {hostname: \\"web-02\\", ip: \\"10.0.1.11\\"} {hostname: \\"db-01\\", ip: \\"10.0.2.10\\"}\\n]","breadcrumbs":"CoreDNS Guide » Manual Registration","id":"3142","title":"Manual Registration"},"3143":{"body":"# Sync all servers in infrastructure with DNS\\nprovisioning dns sync myapp # Check mode\\nprovisioning dns sync myapp --check","breadcrumbs":"CoreDNS Guide » Sync Infrastructure with DNS","id":"3143","title":"Sync Infrastructure with DNS"},"3144":{"body":"use lib_provisioning/coredns/integration.nu * # Register service\\nregister-service-in-dns \\"api\\" \\"10.0.1.10\\" # Unregister service\\nunregister-service-from-dns \\"api\\"","breadcrumbs":"CoreDNS Guide » Service Registration","id":"3144","title":"Service Registration"},"3145":{"body":"","breadcrumbs":"CoreDNS Guide » Query DNS","id":"3145","title":"Query DNS"},"3146":{"body":"# Query A record\\nprovisioning dns query server-01 # Query specific type\\nprovisioning dns query server-01 --type AAAA # Query different server\\nprovisioning dns query server-01 --server 8.8.8.8 --port 53 # Query from local CoreDNS\\nprovisioning dns query server-01 --server 127.0.0.1 --port 5353","breadcrumbs":"CoreDNS Guide » Using CLI","id":"3146","title":"Using CLI"},"3147":{"body":"# Query from local CoreDNS\\ndig @127.0.0.1 -p 5353 server-01.provisioning.local # Query CNAME\\ndig @127.0.0.1 -p 5353 web.provisioning.local CNAME # Query MX\\ndig @127.0.0.1 -p 5353 example.com MX","breadcrumbs":"CoreDNS Guide » Using dig","id":"3147","title":"Using dig"},"3148":{"body":"","breadcrumbs":"CoreDNS Guide » Troubleshooting","id":"3148","title":"Troubleshooting"},"3149":{"body":"Symptoms: dns start fails or service doesn\'t respond Solutions: Check if port is in use: lsof -i :5353\\nnetstat -an | grep 5353 Validate Corefile: provisioning dns config validate Check logs: provisioning dns logs\\ntail -f ~/.provisioning/coredns/coredns.log Verify binary exists: ls -lh ~/.provisioning/bin/coredns\\nprovisioning dns install","breadcrumbs":"CoreDNS Guide » CoreDNS Not Starting","id":"3149","title":"CoreDNS Not Starting"},"315":{"body":"Generate keys for encrypting sensitive configuration: # Create Age key directory\\nmkdir -p ~/.config/provisioning/age # Generate private key\\nage-keygen -o ~/.config/provisioning/age/private_key.txt # Extract public key\\nage-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisioning/age/public_key.txt # Secure the keys\\nchmod 600 ~/.config/provisioning/age/private_key.txt\\nchmod 644 ~/.config/provisioning/age/public_key.txt","breadcrumbs":"Installation Steps » Step 4: Generate Age Encryption Keys","id":"315","title":"Step 4: Generate Age Encryption Keys"},"3150":{"body":"Symptoms: dig returns SERVFAIL or timeout Solutions: Check CoreDNS is running: provisioning dns status\\nprovisioning dns health Verify zone file exists: ls -lh ~/.provisioning/coredns/zones/\\ncat ~/.provisioning/coredns/zones/provisioning.local.zone Test with dig: dig @127.0.0.1 -p 5353 provisioning.local SOA Check firewall: # macOS\\nsudo pfctl -sr | grep 5353 # Linux\\nsudo iptables -L -n | grep 5353","breadcrumbs":"CoreDNS Guide » DNS Queries Not Working","id":"3150","title":"DNS Queries Not Working"},"3151":{"body":"Symptoms: dns config validate shows errors Solutions: Backup zone file: cp ~/.provisioning/coredns/zones/provisioning.local.zone \\\\ ~/.provisioning/coredns/zones/provisioning.local.zone.backup Regenerate zone: provisioning dns zone create provisioning.local --force Check syntax manually: cat ~/.provisioning/coredns/zones/provisioning.local.zone Increment serial: Edit zone file manually Increase serial number in SOA record","breadcrumbs":"CoreDNS Guide » Zone File Validation Errors","id":"3151","title":"Zone File Validation Errors"},"3152":{"body":"Symptoms: Docker container won\'t start or crashes Solutions: Check Docker logs: provisioning dns docker logs\\ndocker logs provisioning-coredns Verify volumes exist: ls -lh ~/.provisioning/coredns/ Check container status: provisioning dns docker status\\ndocker ps -a | grep coredns Recreate container: provisioning dns docker stop\\nprovisioning dns docker remove --volumes\\nprovisioning dns docker start","breadcrumbs":"CoreDNS Guide » Docker Container Issues","id":"3152","title":"Docker Container Issues"},"3153":{"body":"Symptoms: Servers not auto-registered in DNS Solutions: Check if enabled: provisioning dns config show | grep -A 5 dynamic_updates Verify orchestrator running: curl http://localhost:9090/health Check logs for errors: provisioning dns logs | grep -i error Test manual registration: use lib_provisioning/coredns/integration.nu *\\nregister-server-in-dns \\"test-server\\" \\"10.0.0.1\\"","breadcrumbs":"CoreDNS Guide » Dynamic Updates Not Working","id":"3153","title":"Dynamic Updates Not Working"},"3154":{"body":"","breadcrumbs":"CoreDNS Guide » Advanced Topics","id":"3154","title":"Advanced Topics"},"3155":{"body":"Add custom plugins to Corefile: use lib_provisioning/coredns/corefile.nu * # Add plugin to zone\\nadd-corefile-plugin \\\\ \\"~/.provisioning/coredns/Corefile\\" \\\\ \\"provisioning.local\\" \\\\ \\"cache 30\\"","breadcrumbs":"CoreDNS Guide » Custom Corefile Plugins","id":"3155","title":"Custom Corefile Plugins"},"3156":{"body":"# Backup configuration\\ntar czf coredns-backup.tar.gz ~/.provisioning/coredns/ # Restore configuration\\ntar xzf coredns-backup.tar.gz -C ~/","breadcrumbs":"CoreDNS Guide » Backup and Restore","id":"3156","title":"Backup and Restore"},"3157":{"body":"use lib_provisioning/coredns/zones.nu * # Backup zone\\nbackup-zone-file \\"provisioning.local\\" # Creates: ~/.provisioning/coredns/zones/provisioning.local.zone.YYYYMMDD-HHMMSS.bak","breadcrumbs":"CoreDNS Guide » Zone File Backup","id":"3157","title":"Zone File Backup"},"3158":{"body":"CoreDNS exposes Prometheus metrics on port 9153: # View metrics\\ncurl http://localhost:9153/metrics # Common metrics:\\n# - coredns_dns_request_duration_seconds\\n# - coredns_dns_requests_total\\n# - coredns_dns_responses_total","breadcrumbs":"CoreDNS Guide » Metrics and Monitoring","id":"3158","title":"Metrics and Monitoring"},"3159":{"body":"coredns_config: CoreDNSConfig = { local = { zones = [ \\"provisioning.local\\", \\"workspace.local\\", \\"dev.local\\", \\"staging.local\\", \\"prod.local\\" ] }\\n}","breadcrumbs":"CoreDNS Guide » Multi-Zone Setup","id":"3159","title":"Multi-Zone Setup"},"316":{"body":"Set up basic environment variables: # Create environment file\\ncat > ~/.provisioning/env << \'ENVEOF\'\\n# Provisioning Environment Configuration\\nexport PROVISIONING_ENV=dev\\nexport PROVISIONING_PATH=$(pwd)\\nexport PROVISIONING_KAGE=~/.config/provisioning/age\\nENVEOF # Source the environment\\nsource ~/.provisioning/env # Add to shell profile for persistence\\necho \'source ~/.provisioning/env\' >> ~/.bashrc # or ~/.zshrc","breadcrumbs":"Installation Steps » Step 5: Configure Environment","id":"316","title":"Step 5: Configure Environment"},"3160":{"body":"Configure different zones for internal/external: coredns_config: CoreDNSConfig = { local = { zones = [\\"internal.local\\"] port = 5353 } remote = { zones = [\\"external.com\\"] endpoints = [\\"https://dns.external.com\\"] }\\n}","breadcrumbs":"CoreDNS Guide » Split-Horizon DNS","id":"3160","title":"Split-Horizon DNS"},"3161":{"body":"","breadcrumbs":"CoreDNS Guide » Configuration Reference","id":"3161","title":"Configuration Reference"},"3162":{"body":"Field Type Default Description mode \\"local\\" | \\"remote\\" | \\"hybrid\\" | \\"disabled\\" \\"local\\" Deployment mode local LocalCoreDNS? - Local config (required for local mode) remote RemoteCoreDNS? - Remote config (required for remote mode) dynamic_updates DynamicDNS - Dynamic DNS configuration upstream [str] [\\"8.8.8.8\\", \\"1.1.1.1\\"] Upstream DNS servers default_ttl int 300 Default TTL (seconds) enable_logging bool True Enable query logging enable_metrics bool True Enable Prometheus metrics metrics_port int 9153 Metrics port","breadcrumbs":"CoreDNS Guide » CoreDNSConfig Fields","id":"3162","title":"CoreDNSConfig Fields"},"3163":{"body":"Field Type Default Description enabled bool True Enable local CoreDNS deployment_type \\"binary\\" | \\"docker\\" \\"binary\\" How to deploy binary_path str \\"~/.provisioning/bin/coredns\\" Path to binary config_path str \\"~/.provisioning/coredns/Corefile\\" Corefile path zones_path str \\"~/.provisioning/coredns/zones\\" Zones directory port int 5353 DNS listening port auto_start bool True Auto-start on boot zones [str] [\\"provisioning.local\\"] Managed zones","breadcrumbs":"CoreDNS Guide » LocalCoreDNS Fields","id":"3163","title":"LocalCoreDNS Fields"},"3164":{"body":"Field Type Default Description enabled bool True Enable dynamic updates api_endpoint str \\"http://localhost:9090/dns\\" Orchestrator API auto_register_servers bool True Auto-register on create auto_unregister_servers bool True Auto-unregister on delete ttl int 300 TTL for dynamic records update_strategy \\"immediate\\" | \\"batched\\" | \\"scheduled\\" \\"immediate\\" Update strategy","breadcrumbs":"CoreDNS Guide » DynamicDNS Fields","id":"3164","title":"DynamicDNS Fields"},"3165":{"body":"","breadcrumbs":"CoreDNS Guide » Examples","id":"3165","title":"Examples"},"3166":{"body":"# 1. Install CoreDNS\\nprovisioning dns install # 2. Generate configuration\\nprovisioning dns config generate # 3. Start service\\nprovisioning dns start # 4. Create custom zone\\nprovisioning dns zone create myapp.local # 5. Add DNS records\\nprovisioning dns record add web-01 A 10.0.1.10\\nprovisioning dns record add web-02 A 10.0.1.11\\nprovisioning dns record add api CNAME web-01.myapp.local --zone myapp.local # 6. Query records\\nprovisioning dns query web-01 --server 127.0.0.1 --port 5353 # 7. Check status\\nprovisioning dns status\\nprovisioning dns health","breadcrumbs":"CoreDNS Guide » Complete Setup Example","id":"3166","title":"Complete Setup Example"},"3167":{"body":"# 1. Start CoreDNS in Docker\\nprovisioning dns docker start # 2. Check status\\nprovisioning dns docker status # 3. View logs\\nprovisioning dns docker logs --follow # 4. Add records (container must be running)\\nprovisioning dns record add server-01 A 10.0.1.10 # 5. Query\\ndig @127.0.0.1 -p 5353 server-01.provisioning.local # 6. Stop\\nprovisioning dns docker stop","breadcrumbs":"CoreDNS Guide » Docker Deployment Example","id":"3167","title":"Docker Deployment Example"},"3168":{"body":"Use TTL wisely - Lower TTL (300s) for frequently changing records, higher (3600s) for stable Enable logging - Essential for troubleshooting Regular backups - Backup zone files before major changes Validate before reload - Always run dns config validate before reloading Monitor metrics - Track DNS query rates and error rates Use comments - Add comments to records for documentation Separate zones - Use different zones for different environments (dev, staging, prod)","breadcrumbs":"CoreDNS Guide » Best Practices","id":"3168","title":"Best Practices"},"3169":{"body":"Architecture Documentation API Reference Orchestrator Integration Nickel Schema Reference","breadcrumbs":"CoreDNS Guide » See Also","id":"3169","title":"See Also"},"317":{"body":"Create your first workspace: # Initialize a new workspace\\nprovisioning workspace init my-first-workspace # Expected output:\\n# ✓ Workspace \'my-first-workspace\' created successfully\\n# ✓ Configuration template generated\\n# ✓ Workspace activated # Verify workspace\\nprovisioning workspace list","breadcrumbs":"Installation Steps » Step 6: Initialize Workspace","id":"317","title":"Step 6: Initialize Workspace"},"3170":{"body":"Quick command reference for CoreDNS DNS management","breadcrumbs":"CoreDNS Guide » Quick Reference","id":"3170","title":"Quick Reference"},"3171":{"body":"# Install CoreDNS binary\\nprovisioning dns install # Install specific version\\nprovisioning dns install 1.11.1","breadcrumbs":"CoreDNS Guide » Installation","id":"3171","title":"Installation"},"3172":{"body":"# Status\\nprovisioning dns status # Start\\nprovisioning dns start # Stop\\nprovisioning dns stop # Restart\\nprovisioning dns restart # Reload (graceful)\\nprovisioning dns reload # Logs\\nprovisioning dns logs\\nprovisioning dns logs --follow\\nprovisioning dns logs --lines 100 # Health\\nprovisioning dns health","breadcrumbs":"CoreDNS Guide » Service Management","id":"3172","title":"Service Management"},"3173":{"body":"# List zones\\nprovisioning dns zone list # Create zone\\nprovisioning dns zone create myapp.local # Show zone records\\nprovisioning dns zone show provisioning.local\\nprovisioning dns zone show provisioning.local --format json # Delete zone\\nprovisioning dns zone delete myapp.local\\nprovisioning dns zone delete myapp.local --force","breadcrumbs":"CoreDNS Guide » Zone Management","id":"3173","title":"Zone Management"},"3174":{"body":"# Add A record\\nprovisioning dns record add server-01 A 10.0.1.10 # Add with custom TTL\\nprovisioning dns record add server-01 A 10.0.1.10 --ttl 600 # Add with comment\\nprovisioning dns record add server-01 A 10.0.1.10 --comment \\"Web server\\" # Add to specific zone\\nprovisioning dns record add server-01 A 10.0.1.10 --zone myapp.local # Add CNAME\\nprovisioning dns record add web CNAME server-01.provisioning.local # Add MX\\nprovisioning dns record add @ MX mail.example.com --priority 10 # Add TXT\\nprovisioning dns record add @ TXT \\"v=spf1 mx -all\\" # Remove record\\nprovisioning dns record remove server-01\\nprovisioning dns record remove server-01 --zone myapp.local # Update record\\nprovisioning dns record update server-01 A 10.0.1.20 # List records\\nprovisioning dns record list\\nprovisioning dns record list --zone myapp.local\\nprovisioning dns record list --format json","breadcrumbs":"CoreDNS Guide » Record Management","id":"3174","title":"Record Management"},"3175":{"body":"# Query A record\\nprovisioning dns query server-01 # Query CNAME\\nprovisioning dns query web --type CNAME # Query from local CoreDNS\\nprovisioning dns query server-01 --server 127.0.0.1 --port 5353 # Using dig\\ndig @127.0.0.1 -p 5353 server-01.provisioning.local\\ndig @127.0.0.1 -p 5353 provisioning.local SOA","breadcrumbs":"CoreDNS Guide » DNS Queries","id":"3175","title":"DNS Queries"},"3176":{"body":"# Show configuration\\nprovisioning dns config show # Validate configuration\\nprovisioning dns config validate # Generate Corefile\\nprovisioning dns config generate","breadcrumbs":"CoreDNS Guide » Configuration","id":"3176","title":"Configuration"},"3177":{"body":"# Start Docker container\\nprovisioning dns docker start # Status\\nprovisioning dns docker status # Logs\\nprovisioning dns docker logs\\nprovisioning dns docker logs --follow # Restart\\nprovisioning dns docker restart # Stop\\nprovisioning dns docker stop # Health\\nprovisioning dns docker health # Remove\\nprovisioning dns docker remove\\nprovisioning dns docker remove --volumes\\nprovisioning dns docker remove --force # Pull image\\nprovisioning dns docker pull\\nprovisioning dns docker pull --version 1.11.1 # Update\\nprovisioning dns docker update # Show config\\nprovisioning dns docker config","breadcrumbs":"CoreDNS Guide » Docker Deployment","id":"3177","title":"Docker Deployment"},"3178":{"body":"Initial Setup # 1. Install\\nprovisioning dns install # 2. Start\\nprovisioning dns start # 3. Verify\\nprovisioning dns status\\nprovisioning dns health Add Server # Add DNS record for new server\\nprovisioning dns record add web-01 A 10.0.1.10 # Verify\\nprovisioning dns query web-01 Create Custom Zone # 1. Create zone\\nprovisioning dns zone create myapp.local # 2. Add records\\nprovisioning dns record add web-01 A 10.0.1.10 --zone myapp.local\\nprovisioning dns record add api CNAME web-01.myapp.local --zone myapp.local # 3. List records\\nprovisioning dns record list --zone myapp.local # 4. Query\\ndig @127.0.0.1 -p 5353 web-01.myapp.local Docker Setup # 1. Start container\\nprovisioning dns docker start # 2. Check status\\nprovisioning dns docker status # 3. Add records\\nprovisioning dns record add server-01 A 10.0.1.10 # 4. Query\\ndig @127.0.0.1 -p 5353 server-01.provisioning.local","breadcrumbs":"CoreDNS Guide » Common Workflows","id":"3178","title":"Common Workflows"},"3179":{"body":"# Check if CoreDNS is running\\nprovisioning dns status\\nps aux | grep coredns # Check port usage\\nlsof -i :5353\\nnetstat -an | grep 5353 # View logs\\nprovisioning dns logs\\ntail -f ~/.provisioning/coredns/coredns.log # Validate configuration\\nprovisioning dns config validate # Test DNS query\\ndig @127.0.0.1 -p 5353 provisioning.local SOA # Restart service\\nprovisioning dns restart # For Docker\\nprovisioning dns docker logs\\nprovisioning dns docker health\\ndocker ps -a | grep coredns","breadcrumbs":"CoreDNS Guide » Troubleshooting","id":"3179","title":"Troubleshooting"},"318":{"body":"Run the installation verification: # Check system configuration\\nprovisioning validate config # Check all dependencies\\nprovisioning env # View detailed environment\\nprovisioning allenv Expected output should show: ✅ All core dependencies installed ✅ Age keys configured ✅ Workspace initialized ✅ Configuration valid","breadcrumbs":"Installation Steps » Step 7: Validate Installation","id":"318","title":"Step 7: Validate Installation"},"3180":{"body":"# Binary\\n~/.provisioning/bin/coredns # Corefile\\n~/.provisioning/coredns/Corefile # Zone files\\n~/.provisioning/coredns/zones/ # Logs\\n~/.provisioning/coredns/coredns.log # PID file\\n~/.provisioning/coredns/coredns.pid # Docker compose\\nprovisioning/config/coredns/docker-compose.yml","breadcrumbs":"CoreDNS Guide » File Locations","id":"3180","title":"File Locations"},"3181":{"body":"import provisioning.coredns as dns coredns_config: dns.CoreDNSConfig = { mode = \\"local\\" local = { enabled = True deployment_type = \\"binary\\" # or \\"docker\\" port = 5353 zones = [\\"provisioning.local\\", \\"myapp.local\\"] } dynamic_updates = { enabled = True auto_register_servers = True } upstream = [\\"8.8.8.8\\", \\"1.1.1.1\\"]\\n}","breadcrumbs":"CoreDNS Guide » Configuration Example","id":"3181","title":"Configuration Example"},"3182":{"body":"# None required - configuration via Nickel","breadcrumbs":"CoreDNS Guide » Environment Variables","id":"3182","title":"Environment Variables"},"3183":{"body":"Setting Default Port 5353 Zones [\\"provisioning.local\\"] Upstream [\\"8.8.8.8\\", \\"1.1.1.1\\"] TTL 300 Deployment binary Auto-start true Logging enabled Metrics enabled Metrics Port 9153","breadcrumbs":"CoreDNS Guide » Default Values","id":"3183","title":"Default Values"},"3184":{"body":"Complete Guide - Full documentation Implementation Summary - Technical details Nickel Schema - Configuration schema Last Updated : 2025-10-06 Version : 1.0.0","breadcrumbs":"CoreDNS Guide » See Also","id":"3184","title":"See Also"},"3185":{"body":"Status : ✅ PRODUCTION READY Version : 1.0.0 Last Verified : 2025-12-09","breadcrumbs":"Production Readiness Checklist » Production Readiness Checklist","id":"3185","title":"Production Readiness Checklist"},"3186":{"body":"The Provisioning Setup System is production-ready for enterprise deployment. All components have been tested, validated, and verified to meet production standards.","breadcrumbs":"Production Readiness Checklist » Executive Summary","id":"3186","title":"Executive Summary"},"3187":{"body":"✅ Code Quality : 100% Nushell 0.109 compliant ✅ Test Coverage : 33/33 tests passing (100% pass rate) ✅ Security : Enterprise-grade security controls ✅ Performance : Sub-second response times ✅ Documentation : Comprehensive user and admin guides ✅ Reliability : Graceful error handling and fallbacks","breadcrumbs":"Production Readiness Checklist » Quality Metrics","id":"3187","title":"Quality Metrics"},"3188":{"body":"","breadcrumbs":"Production Readiness Checklist » Pre-Deployment Verification","id":"3188","title":"Pre-Deployment Verification"},"3189":{"body":"Nushell 0.109.0 or higher bash shell available One deployment tool (Docker/Kubernetes/SSH/systemd) 2+ CPU cores (4+ recommended) 4+ GB RAM (8+ recommended) Network connectivity (optional for offline mode)","breadcrumbs":"Production Readiness Checklist » 1. System Requirements ✅","id":"3189","title":"1. System Requirements ✅"},"319":{"body":"If you plan to use platform services (orchestrator, control center, etc.): # Build platform services\\ncd provisioning/platform # Build orchestrator\\ncd orchestrator\\ncargo build --release\\ncd .. # Build control center\\ncd control-center\\ncargo build --release\\ncd .. # Build KMS service\\ncd kms-service\\ncargo build --release\\ncd .. # Verify builds\\nls */target/release/","breadcrumbs":"Installation Steps » Optional: Install Platform Services","id":"319","title":"Optional: Install Platform Services"},"3190":{"body":"All 9 modules passing syntax validation 46 total issues identified and resolved Nushell 0.109 compatibility verified Code style guidelines followed No hardcoded credentials or secrets","breadcrumbs":"Production Readiness Checklist » 2. Code Quality ✅","id":"3190","title":"2. Code Quality ✅"},"3191":{"body":"Unit tests: 33/33 passing Integration tests: All passing E2E tests: All passing Health check: Operational Deployment validation: Working","breadcrumbs":"Production Readiness Checklist » 3. Testing ✅","id":"3191","title":"3. Testing ✅"},"3192":{"body":"Configuration encryption ready Credential management secure No sensitive data in logs GDPR-compliant audit logging Role-based access control (RBAC) ready","breadcrumbs":"Production Readiness Checklist » 4. Security ✅","id":"3192","title":"4. Security ✅"},"3193":{"body":"User Quick Start Guide Comprehensive Setup Guide Installation Guide Troubleshooting Guide API Documentation","breadcrumbs":"Production Readiness Checklist » 5. Documentation ✅","id":"3193","title":"5. Documentation ✅"},"3194":{"body":"Installation script tested Health check script operational Configuration validation working Backup/restore functionality verified Migration path available","breadcrumbs":"Production Readiness Checklist » 6. Deployment Readiness ✅","id":"3194","title":"6. Deployment Readiness ✅"},"3195":{"body":"","breadcrumbs":"Production Readiness Checklist » Pre-Production Checklist","id":"3195","title":"Pre-Production Checklist"},"3196":{"body":"Team trained on provisioning basics Admin team trained on configuration management Support team trained on troubleshooting Operations team ready for deployment Security team reviewed security controls","breadcrumbs":"Production Readiness Checklist » Team Preparation","id":"3196","title":"Team Preparation"},"3197":{"body":"Target deployment environment prepared Network connectivity verified Required tools installed and tested Backup systems in place Monitoring configured","breadcrumbs":"Production Readiness Checklist » Infrastructure Preparation","id":"3197","title":"Infrastructure Preparation"},"3198":{"body":"Provider credentials securely stored Network configuration planned Workspace structure defined Deployment strategy documented Rollback plan prepared","breadcrumbs":"Production Readiness Checklist » Configuration Preparation","id":"3198","title":"Configuration Preparation"},"3199":{"body":"System installed on staging environment All capabilities tested Health checks passing Full deployment scenario tested Failover procedures tested","breadcrumbs":"Production Readiness Checklist » Testing in Production-Like Environment","id":"3199","title":"Testing in Production-Like Environment"},"32":{"body":"Migrated from ENV to config-driven Hierarchical configuration loading Variable interpolation True IaC without hardcoded fallbacks","breadcrumbs":"Home » ⚙️ Configuration System (v2.0.0)","id":"32","title":"⚙️ Configuration System (v2.0.0)"},"320":{"body":"Use the interactive installer for a guided setup: # Build the installer\\ncd provisioning/platform/installer\\ncargo build --release # Run interactive installer\\n./target/release/provisioning-installer # Or headless installation\\n./target/release/provisioning-installer --headless --mode solo --yes","breadcrumbs":"Installation Steps » Optional: Install Platform with Installer","id":"320","title":"Optional: Install Platform with Installer"},"3200":{"body":"","breadcrumbs":"Production Readiness Checklist » Deployment Steps","id":"3200","title":"Deployment Steps"},"3201":{"body":"# 1. Run installation script\\n./scripts/install-provisioning.sh # 2. Verify installation\\nprovisioning -v # 3. Run health check\\nnu scripts/health-check.nu","breadcrumbs":"Production Readiness Checklist » Phase 1: Installation (30 minutes)","id":"3201","title":"Phase 1: Installation (30 minutes)"},"3202":{"body":"# 1. Run setup wizard\\nprovisioning setup system --interactive # 2. Validate configuration\\nprovisioning setup validate # 3. Test health\\nprovisioning platform health","breadcrumbs":"Production Readiness Checklist » Phase 2: Initial Configuration (15 minutes)","id":"3202","title":"Phase 2: Initial Configuration (15 minutes)"},"3203":{"body":"# 1. Create production workspace\\nprovisioning setup workspace production # 2. Configure providers\\nprovisioning setup provider upcloud --config config.toml # 3. Validate workspace\\nprovisioning setup validate","breadcrumbs":"Production Readiness Checklist » Phase 3: Workspace Setup (10 minutes)","id":"3203","title":"Phase 3: Workspace Setup (10 minutes)"},"3204":{"body":"# 1. Run comprehensive health check\\nprovisioning setup validate --verbose # 2. Test deployment (dry-run)\\nprovisioning server create --check # 3. Verify no errors\\n# Review output and confirm readiness","breadcrumbs":"Production Readiness Checklist » Phase 4: Verification (10 minutes)","id":"3204","title":"Phase 4: Verification (10 minutes)"},"3205":{"body":"","breadcrumbs":"Production Readiness Checklist » Post-Deployment Verification","id":"3205","title":"Post-Deployment Verification"},"3206":{"body":"All services running and healthy Configuration loaded correctly First test deployment successful Monitoring and logging working Backup system operational","breadcrumbs":"Production Readiness Checklist » Immediate (Within 1 hour)","id":"3206","title":"Immediate (Within 1 hour)"},"3207":{"body":"Run health checks daily Monitor error logs Verify backup operations Check workspace synchronization Validate credentials refresh","breadcrumbs":"Production Readiness Checklist » Daily (First week)","id":"3207","title":"Daily (First week)"},"3208":{"body":"Run comprehensive validation Test backup/restore procedures Review audit logs Performance analysis Security review","breadcrumbs":"Production Readiness Checklist » Weekly (First month)","id":"3208","title":"Weekly (First month)"},"3209":{"body":"Weekly health checks Monthly comprehensive validation Quarterly security review Annual disaster recovery test","breadcrumbs":"Production Readiness Checklist » Ongoing (Production)","id":"3209","title":"Ongoing (Production)"},"321":{"body":"","breadcrumbs":"Installation Steps » Troubleshooting","id":"321","title":"Troubleshooting"},"3210":{"body":"","breadcrumbs":"Production Readiness Checklist » Troubleshooting Reference","id":"3210","title":"Troubleshooting Reference"},"3211":{"body":"Solution : # Check Nushell installation\\nnu --version # Run with debug\\nprovisioning -x setup system --interactive","breadcrumbs":"Production Readiness Checklist » Issue: Setup wizard won\'t start","id":"3211","title":"Issue: Setup wizard won\'t start"},"3212":{"body":"Solution : # Check configuration\\nprovisioning setup validate --verbose # View configuration paths\\nprovisioning info paths # Reset and reconfigure\\nprovisioning setup reset --confirm\\nprovisioning setup system --interactive","breadcrumbs":"Production Readiness Checklist » Issue: Configuration validation fails","id":"3212","title":"Issue: Configuration validation fails"},"3213":{"body":"Solution : # Run detailed health check\\nnu scripts/health-check.nu # Check specific service\\nprovisioning platform status # Restart services if needed\\nprovisioning platform restart","breadcrumbs":"Production Readiness Checklist » Issue: Health check shows warnings","id":"3213","title":"Issue: Health check shows warnings"},"3214":{"body":"Solution : # Dry-run to see what would happen\\nprovisioning server create --check # Check logs\\nprovisioning logs tail -f # Verify provider credentials\\nprovisioning setup validate provider upcloud","breadcrumbs":"Production Readiness Checklist » Issue: Deployment fails","id":"3214","title":"Issue: Deployment fails"},"3215":{"body":"Expected performance on modern hardware (4+ cores, 8+ GB RAM): Operation Expected Time Maximum Time Setup system 2-5 seconds 10 seconds Health check < 3 seconds 5 seconds Configuration validation < 500 ms 1 second Server creation < 30 seconds 60 seconds Workspace switch < 100 ms 500 ms","breadcrumbs":"Production Readiness Checklist » Performance Baselines","id":"3215","title":"Performance Baselines"},"3216":{"body":"","breadcrumbs":"Production Readiness Checklist » Support and Escalation","id":"3216","title":"Support and Escalation"},"3217":{"body":"Review troubleshooting guide Check system health Review logs Restart services if needed","breadcrumbs":"Production Readiness Checklist » Level 1 Support (Team)","id":"3217","title":"Level 1 Support (Team)"},"3218":{"body":"Review configuration Analyze performance metrics Check resource constraints Plan optimization","breadcrumbs":"Production Readiness Checklist » Level 2 Support (Engineering)","id":"3218","title":"Level 2 Support (Engineering)"},"3219":{"body":"Code-level debugging Feature requests Bug fixes Architecture changes","breadcrumbs":"Production Readiness Checklist » Level 3 Support (Development)","id":"3219","title":"Level 3 Support (Development)"},"322":{"body":"If plugins aren\'t recognized: # Rebuild plugin registry\\nnu -c \\"plugin list; plugin use tera\\"","breadcrumbs":"Installation Steps » Nushell Plugin Not Found","id":"322","title":"Nushell Plugin Not Found"},"3220":{"body":"If issues occur post-deployment: # 1. Take backup of current configuration\\nprovisioning setup backup --path rollback-$(date +%Y%m%d-%H%M%S).tar.gz # 2. Stop running deployments\\nprovisioning workflow stop --all # 3. Restore from previous backup\\nprovisioning setup restore --path # 4. Verify restoration\\nprovisioning setup validate --verbose # 5. Run health check\\nnu scripts/health-check.nu","breadcrumbs":"Production Readiness Checklist » Rollback Procedure","id":"3220","title":"Rollback Procedure"},"3221":{"body":"System is production-ready when: ✅ All tests passing ✅ Health checks show no critical issues ✅ Configuration validates successfully ✅ Team trained and ready ✅ Documentation complete ✅ Backup and recovery tested ✅ Monitoring configured ✅ Support procedures established","breadcrumbs":"Production Readiness Checklist » Success Criteria","id":"3221","title":"Success Criteria"},"3222":{"body":"Technical Lead : System validated and tested Operations : Infrastructure ready and monitored Security : Security controls reviewed and approved Management : Deployment approved for production Verification Date : 2025-12-09 Status : ✅ APPROVED FOR PRODUCTION DEPLOYMENT Next Review : 2025-12-16 (Weekly)","breadcrumbs":"Production Readiness Checklist » Sign-Off","id":"3222","title":"Sign-Off"},"3223":{"body":"Version : 1.0.0 Date : 2025-10-08 Audience : Platform Administrators, SREs, Security Team Training Duration : 45-60 minutes Certification : Required annually","breadcrumbs":"Break Glass Training Guide » Break-Glass Emergency Access - Training Guide","id":"3223","title":"Break-Glass Emergency Access - Training Guide"},"3224":{"body":"Break-glass is an emergency access procedure that allows authorized personnel to bypass normal security controls during critical incidents (for example, production outages, security breaches, data loss).","breadcrumbs":"Break Glass Training Guide » 🚨 What is Break-Glass","id":"3224","title":"🚨 What is Break-Glass"},"3225":{"body":"Last Resort Only : Use only when normal access is insufficient Multi-Party Approval : Requires 2+ approvers from different teams Time-Limited : Maximum 4 hours, auto-revokes Enhanced Audit : 7-year retention, immutable logs Real-Time Alerts : Security team notified immediately","breadcrumbs":"Break Glass Training Guide » Key Principles","id":"3225","title":"Key Principles"},"3226":{"body":"When to Use Break-Glass When NOT to Use Roles & Responsibilities Break-Glass Workflow Using the System Examples Auditing & Compliance Post-Incident Review FAQ Emergency Contacts","breadcrumbs":"Break Glass Training Guide » 📋 Table of Contents","id":"3226","title":"📋 Table of Contents"},"3227":{"body":"","breadcrumbs":"Break Glass Training Guide » When to Use Break-Glass","id":"3227","title":"When to Use Break-Glass"},"3228":{"body":"Scenario Example Urgency Production Outage Database cluster unresponsive, affecting all users Critical Security Incident Active breach detected, need immediate containment Critical Data Loss Accidental deletion of critical data, need restore High System Failure Infrastructure failure requiring emergency fixes High Locked Out Normal admin accounts compromised, need recovery High","breadcrumbs":"Break Glass Training Guide » ✅ Valid Emergency Scenarios","id":"3228","title":"✅ Valid Emergency Scenarios"},"3229":{"body":"Use break-glass if ALL apply: Production systems affected OR security incident Normal access insufficient OR unavailable Immediate action required (cannot wait for approval process) Clear justification for emergency access Incident properly documented","breadcrumbs":"Break Glass Training Guide » Criteria Checklist","id":"3229","title":"Criteria Checklist"},"323":{"body":"If you encounter permission errors: # Ensure proper ownership\\nsudo chown -R $USER:$USER ~/.config/provisioning # Check PATH\\necho $PATH | grep provisioning","breadcrumbs":"Installation Steps » Permission Denied","id":"323","title":"Permission Denied"},"3230":{"body":"","breadcrumbs":"Break Glass Training Guide » When NOT to Use","id":"3230","title":"When NOT to Use"},"3231":{"body":"Scenario Why Not Alternative Forgot password Not an emergency Use password reset Routine maintenance Can be scheduled Use normal change process Convenience Normal process \\"too slow\\" Follow standard approval Deadline pressure Business pressure ≠ emergency Plan ahead Testing Want to test emergency access Use dev environment","breadcrumbs":"Break Glass Training Guide » ❌ Invalid Scenarios (Do NOT Use Break-Glass)","id":"3231","title":"❌ Invalid Scenarios (Do NOT Use Break-Glass)"},"3232":{"body":"Immediate suspension of break-glass privileges Security team investigation Disciplinary action (up to termination) All actions audited and reviewed","breadcrumbs":"Break Glass Training Guide » Consequences of Misuse","id":"3232","title":"Consequences of Misuse"},"3233":{"body":"","breadcrumbs":"Break Glass Training Guide » Roles & Responsibilities","id":"3233","title":"Roles & Responsibilities"},"3234":{"body":"Who : Platform Admin, SRE on-call, Security Officer Responsibilities : Assess if situation warrants emergency access Provide clear justification and reason Document incident timeline Use access only for stated purpose Revoke access immediately after resolution","breadcrumbs":"Break Glass Training Guide » Requester","id":"3234","title":"Requester"},"3235":{"body":"Who : 2+ from different teams (Security, Platform, Engineering Leadership) Responsibilities : Verify emergency is genuine Assess risk of granting access Review requester\'s justification Monitor usage during active session Participate in post-incident review","breadcrumbs":"Break Glass Training Guide » Approvers","id":"3235","title":"Approvers"},"3236":{"body":"Who : Security Operations team Responsibilities : Monitor all break-glass activations (real-time) Review audit logs during session Alert on suspicious activity Lead post-incident review Update policies based on learnings","breadcrumbs":"Break Glass Training Guide » Security Team","id":"3236","title":"Security Team"},"3237":{"body":"","breadcrumbs":"Break Glass Training Guide » Break-Glass Workflow","id":"3237","title":"Break-Glass Workflow"},"3238":{"body":"┌─────────────────────────────────────────────────────────┐\\n│ 1. Requester submits emergency access request │\\n│ - Reason: \\"Production database cluster down\\" │\\n│ - Justification: \\"Need direct SSH to diagnose\\" │\\n│ - Duration: 2 hours │\\n│ - Resources: [\\"database/*\\"] │\\n└─────────────────────────────────────────────────────────┘ ↓\\n┌─────────────────────────────────────────────────────────┐\\n│ 2. System creates request ID: BG-20251008-001 │\\n│ - Sends notifications to approver pool │\\n│ - Starts approval timeout (1 hour) │\\n└─────────────────────────────────────────────────────────┘","breadcrumbs":"Break Glass Training Guide » Phase 1: Request (5 minutes)","id":"3238","title":"Phase 1: Request (5 minutes)"},"3239":{"body":"┌─────────────────────────────────────────────────────────┐\\n│ 3. First approver reviews request │\\n│ - Verifies emergency is real │\\n│ - Checks requester\'s justification │\\n│ - Approves with reason │\\n└─────────────────────────────────────────────────────────┘ ↓\\n┌─────────────────────────────────────────────────────────┐\\n│ 4. Second approver (different team) reviews │\\n│ - Independent verification │\\n│ - Approves with reason │\\n└─────────────────────────────────────────────────────────┘ ↓\\n┌─────────────────────────────────────────────────────────┐\\n│ 5. System validates approvals │\\n│ - ✓ Min 2 approvers │\\n│ - ✓ Different teams │\\n│ - ✓ Within approval window │\\n│ - Status → APPROVED │\\n└─────────────────────────────────────────────────────────┘","breadcrumbs":"Break Glass Training Guide » Phase 2: Approval (10-15 minutes)","id":"3239","title":"Phase 2: Approval (10-15 minutes)"},"324":{"body":"If encryption fails: # Verify keys exist\\nls -la ~/.config/provisioning/age/ # Regenerate if needed\\nage-keygen -o ~/.config/provisioning/age/private_key.txt","breadcrumbs":"Installation Steps » Age Keys Not Found","id":"324","title":"Age Keys Not Found"},"3240":{"body":"┌─────────────────────────────────────────────────────────┐\\n│ 6. Requester activates approved session │\\n│ - Receives emergency JWT token │\\n│ - Token valid for 2 hours (or requested duration) │\\n│ - All actions logged with session ID │\\n└─────────────────────────────────────────────────────────┘ ↓\\n┌─────────────────────────────────────────────────────────┐\\n│ 7. Security team notified │\\n│ - Real-time alert: \\"Break-glass activated\\" │\\n│ - Monitoring dashboard shows active session │\\n└─────────────────────────────────────────────────────────┘","breadcrumbs":"Break Glass Training Guide » Phase 3: Activation (1-2 minutes)","id":"3240","title":"Phase 3: Activation (1-2 minutes)"},"3241":{"body":"┌─────────────────────────────────────────────────────────┐\\n│ 8. Requester performs emergency actions │\\n│ - Uses emergency token for access │\\n│ - Every action audited │\\n│ - Security team monitors in real-time │\\n└─────────────────────────────────────────────────────────┘ ↓\\n┌─────────────────────────────────────────────────────────┐\\n│ 9. Background monitoring │\\n│ - Checks for suspicious activity │\\n│ - Enforces inactivity timeout (30 min) │\\n│ - Alerts on unusual patterns │\\n└─────────────────────────────────────────────────────────┘","breadcrumbs":"Break Glass Training Guide » Phase 4: Usage (Variable)","id":"3241","title":"Phase 4: Usage (Variable)"},"3242":{"body":"┌─────────────────────────────────────────────────────────┐\\n│ 10. Session ends (one of): │\\n│ - Manual revocation by requester │\\n│ - Expiration (max 4 hours) │\\n│ - Inactivity timeout (30 minutes) │\\n│ - Security team revocation │\\n└─────────────────────────────────────────────────────────┘ ↓\\n┌─────────────────────────────────────────────────────────┐\\n│ 11. System audit │\\n│ - All actions logged (7-year retention) │\\n│ - Incident report generated │\\n│ - Post-incident review scheduled │\\n└─────────────────────────────────────────────────────────┘","breadcrumbs":"Break Glass Training Guide » Phase 5: Revocation (Immediate)","id":"3242","title":"Phase 5: Revocation (Immediate)"},"3243":{"body":"","breadcrumbs":"Break Glass Training Guide » Using the System","id":"3243","title":"Using the System"},"3244":{"body":"1. Request Emergency Access provisioning break-glass request \\\\ \\"Production database cluster unresponsive\\" \\\\ --justification \\"Need direct SSH access to diagnose PostgreSQL failure. \\\\ Monitoring shows cluster down. Application offline affecting 10,000+ users.\\" \\\\ --resources \'[\\"database/*\\", \\"server/db-*\\"]\' \\\\ --duration 2hr # Output:\\n# ✓ Break-glass request created\\n# Request ID: BG-20251008-001\\n# Status: Pending Approval\\n# Approvers needed: 2\\n# Expires: 2025-10-08 11:30:00 (1 hour)\\n#\\n# Notifications sent to:\\n# - security-team@example.com\\n# - platform-admin@example.com 2. Approve Request (Approver) # First approver (Security team)\\nprovisioning break-glass approve BG-20251008-001 \\\\ --reason \\"Emergency verified via incident INC-2025-234. Database cluster confirmed down, affecting production.\\" # Output:\\n# ✓ Approval granted\\n# Approver: alice@example.com (Security Team)\\n# Approvals: 1/2\\n# Status: Pending (need 1 more approval) # Second approver (Platform team)\\nprovisioning break-glass approve BG-20251008-001 \\\\ --reason \\"Confirmed with monitoring. PostgreSQL master node unreachable. Emergency access justified.\\" # Output:\\n# ✓ Approval granted\\n# Approver: bob@example.com (Platform Team)\\n# Approvals: 2/2\\n# Status: APPROVED\\n#\\n# Requester can now activate session 3. Activate Session provisioning break-glass activate BG-20251008-001 # Output:\\n# ✓ Emergency session activated\\n# Session ID: BGS-20251008-001\\n# Token: eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9...\\n# Expires: 2025-10-08 12:30:00 (2 hours)\\n# Max inactivity: 30 minutes\\n#\\n# ⚠️ WARNING ⚠️\\n# - All actions are logged and monitored\\n# - Security team has been notified\\n# - Session will auto-revoke after 2 hours\\n# - Use ONLY for stated emergency purpose\\n#\\n# Export token:\\nexport EMERGENCY_TOKEN=\\"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9...\\" 4. Use Emergency Access # SSH to database server\\nprovisioning ssh connect db-master-01 \\\\ --token $EMERGENCY_TOKEN # Execute emergency commands\\nsudo systemctl status postgresql\\nsudo tail -f /var/log/postgresql/postgresql.log # Diagnose issue...\\n# Fix issue... 5. Revoke Session # When done, immediately revoke\\nprovisioning break-glass revoke BGS-20251008-001 \\\\ --reason \\"Database cluster restored. PostgreSQL master node restarted successfully. All services online.\\" # Output:\\n# ✓ Emergency session revoked\\n# Duration: 47 minutes\\n# Actions performed: 23\\n# Audit log: /var/log/provisioning/break-glass/BGS-20251008-001.json\\n#\\n# Post-incident review scheduled: 2025-10-09 10:00am","breadcrumbs":"Break Glass Training Guide » CLI Commands","id":"3244","title":"CLI Commands"},"3245":{"body":"Request Flow Navigate : Control Center → Security → Break-Glass Click : \\"Request Emergency Access\\" Fill Form : Reason: \\"Production database cluster down\\" Justification: (detailed description) Duration: 2 hours Resources: Select from dropdown or wildcard Submit : Request sent to approvers Approver Flow Receive : Email/Slack notification Navigate : Control Center → Break-Glass → Pending Requests Review : Request details, reason, justification Decision : Approve or Deny Reason : Provide approval/denial reason Monitor Active Sessions Navigate : Control Center → Security → Break-Glass → Active Sessions View : Real-time dashboard of active sessions Who, What, When, How long Actions performed (live) Inactivity timer Revoke : Emergency revoke button (if needed)","breadcrumbs":"Break Glass Training Guide » Web UI (Control Center)","id":"3245","title":"Web UI (Control Center)"},"3246":{"body":"","breadcrumbs":"Break Glass Training Guide » Examples","id":"3246","title":"Examples"},"3247":{"body":"Scenario : PostgreSQL cluster unresponsive, affecting all users Request : provisioning break-glass request \\\\ \\"Production PostgreSQL cluster completely unresponsive\\" \\\\ --justification \\"Database cluster (3 nodes) not responding. \\\\ All services offline, 10,000+ users affected. Need SSH to diagnose. \\\\ Monitoring shows all nodes down. Last state: replication failure during backup.\\" \\\\ --resources \'[\\"database/*\\", \\"server/db-prod-*\\"]\' \\\\ --duration 2hr Approval 1 (Security): \\"Verified incident INC-2025-234. Database monitoring confirms cluster down. Application completely offline. Emergency justified.\\" Approval 2 (Platform): \\"Confirmed. PostgreSQL master and replicas unreachable. On-call SRE needs immediate access. Approved.\\" Actions Taken : SSH to db-prod-01, db-prod-02, db-prod-03 Check PostgreSQL status: systemctl status postgresql Review logs: /var/log/postgresql/ Diagnose: Disk full on master node Fix: Clear old WAL files, restart PostgreSQL Verify: Cluster restored, replication working Revoke access Outcome : Cluster restored in 47 minutes. Root cause: Backup retention not working.","breadcrumbs":"Break Glass Training Guide » Example 1: Production Database Outage","id":"3247","title":"Example 1: Production Database Outage"},"3248":{"body":"Scenario : Suspicious activity detected, need immediate containment Request : provisioning break-glass request \\\\ \\"Active security breach detected - need immediate containment\\" \\\\ --justification \\"IDS alerts show unauthorized access from IP 203.0.113.42 to API. \\\\ Multiple failed sudo attempts. Isolate affected servers and investigate. \\\\ Potential data exfiltration in progress.\\" \\\\ --resources \'[\\"server/api-prod-*\\", \\"firewall/*\\", \\"network/*\\"]\' \\\\ --duration 4hr Approval 1 (Security): \\"Security incident SI-2025-089 confirmed. IDS shows sustained attack from external IP. Immediate containment required. Approved.\\" Approval 2 (Engineering Director): \\"Concur with security assessment. Production impact acceptable vs risk of data breach. Approved.\\" Actions Taken : Firewall block on 203.0.113.42 Isolate affected API servers Snapshot servers for forensics Review access logs Identify compromised service account Rotate credentials Restore from clean backup Re-enable servers with patched vulnerability Outcome : Breach contained in 3h 15 min. No data loss. Vulnerability patched across fleet.","breadcrumbs":"Break Glass Training Guide » Example 2: Security Incident","id":"3248","title":"Example 2: Security Incident"},"3249":{"body":"Scenario : Critical production data accidentally deleted Request : provisioning break-glass request \\\\ \\"Critical customer data accidentally deleted from production\\" \\\\ --justification \\"Database migration script ran against production instead of staging. \\\\ 50,000+ customer records deleted. Need immediate restore from backup. \\\\ Normal restore requires 4-6 hours for approval. Time-critical window.\\" \\\\ --resources \'[\\"database/customers\\", \\"backup/*\\"]\' \\\\ --duration 3hr Approval 1 (Platform): \\"Verified data deletion in production database. 50,284 records deleted at 10:42am. Backup available from 10:00am (42 minutes ago). Time-critical restore needed. Approved.\\" Approval 2 (Security): \\"Risk assessment: Restore from trusted backup less risky than data loss. Emergency justified. Ensure post-incident review of deployment process. Approved.\\" Actions Taken : Stop application writes to affected tables Identify latest good backup (10:00am) Restore deleted records from backup Verify data integrity Compare record counts Re-enable application writes Notify affected users (if any noticed) Outcome : Data restored in 1h 38 min. Only 42 minutes of data lost (from backup to deletion). Zero customer impact.","breadcrumbs":"Break Glass Training Guide » Example 3: Accidental Data Deletion","id":"3249","title":"Example 3: Accidental Data Deletion"},"325":{"body":"Once installation is complete, proceed to: → First Deployment","breadcrumbs":"Installation Steps » Next Steps","id":"325","title":"Next Steps"},"3250":{"body":"","breadcrumbs":"Break Glass Training Guide » Auditing & Compliance","id":"3250","title":"Auditing & Compliance"},"3251":{"body":"Every break-glass session logs: Request Details : Requester identity Reason and justification Requested resources Requested duration Timestamp Approval Process : Each approver identity Approval/denial reason Approval timestamp Team affiliation Session Activity : Activation timestamp Every action performed Resources accessed Commands executed Inactivity periods Revocation : Revocation reason Who revoked (system or manual) Total duration Final status","breadcrumbs":"Break Glass Training Guide » What is Logged","id":"3251","title":"What is Logged"},"3252":{"body":"Break-glass logs : 7 years (immutable) Cannot be deleted : Only anonymized for GDPR Exported to SIEM : Real-time","breadcrumbs":"Break Glass Training Guide » Retention","id":"3252","title":"Retention"},"3253":{"body":"# Generate break-glass usage report\\nprovisioning break-glass audit \\\\ --from \\"2025-01-01\\" \\\\ --to \\"2025-12-31\\" \\\\ --format pdf \\\\ --output break-glass-2025-report.pdf # Report includes:\\n# - Total break-glass activations\\n# - Average duration\\n# - Most common reasons\\n# - Approval times\\n# - Incidents resolved\\n# - Misuse incidents (if any)","breadcrumbs":"Break Glass Training Guide » Compliance Reports","id":"3253","title":"Compliance Reports"},"3254":{"body":"","breadcrumbs":"Break Glass Training Guide » Post-Incident Review","id":"3254","title":"Post-Incident Review"},"3255":{"body":"Required attendees : Requester Approvers Security team Incident commander Agenda : Timeline Review : What happened, when Actions Taken : What was done with emergency access Outcome : Was issue resolved? Any side effects? Process : Did break-glass work as intended? Lessons Learned : What can be improved?","breadcrumbs":"Break Glass Training Guide » Within 24 Hours","id":"3255","title":"Within 24 Hours"},"3256":{"body":"Was break-glass appropriate for this incident? Were approvals granted timely? Was access used only for stated purpose? Were any security policies violated? Could incident be prevented in future? Do we need policy updates? Do we need system changes?","breadcrumbs":"Break Glass Training Guide » Review Checklist","id":"3256","title":"Review Checklist"},"3257":{"body":"Incident Report : # Break-Glass Incident Report: BG-20251008-001 **Incident**: Production database cluster outage\\n**Duration**: 47 minutes\\n**Impact**: 10,000+ users, complete service outage ## Timeline\\n- 10:15: Incident detected\\n- 10:17: Break-glass requested\\n- 10:25: Approved (2/2)\\n- 10:27: Activated\\n- 11:02: Database restored\\n- 11:04: Session revoked ## Actions Taken\\n1. SSH access to database servers\\n2. Diagnosed disk full issue\\n3. Cleared old WAL files\\n4. Restarted PostgreSQL\\n5. Verified replication ## Root Cause\\nBackup retention job failed silently for 2 weeks, causing WAL files to accumulate until disk full. ## Prevention\\n- ✅ Add disk space monitoring alerts\\n- ✅ Fix backup retention job\\n- ✅ Test recovery procedures\\n- ✅ Implement WAL archiving to S3 ## Break-Glass Assessment\\n- ✓ Appropriate use\\n- ✓ Timely approvals\\n- ✓ No policy violations\\n- ✓ Access revoked promptly","breadcrumbs":"Break Glass Training Guide » Output","id":"3257","title":"Output"},"3258":{"body":"","breadcrumbs":"Break Glass Training Guide » FAQ","id":"3258","title":"FAQ"},"3259":{"body":"A : Typically 15-20 minutes: 5 min: Request submission 10 min: Approvals (2 people) 2 min: Activation In extreme emergencies, approvers can be on standby.","breadcrumbs":"Break Glass Training Guide » Q: How quickly can break-glass be activated","id":"3259","title":"Q: How quickly can break-glass be activated"},"326":{"body":"Detailed Installation Guide Workspace Management Troubleshooting Guide","breadcrumbs":"Installation Steps » Additional Resources","id":"326","title":"Additional Resources"},"3260":{"body":"A : No. Break-glass is for emergencies only. Schedule maintenance through normal change process.","breadcrumbs":"Break Glass Training Guide » Q: Can I use break-glass for scheduled maintenance","id":"3260","title":"Q: Can I use break-glass for scheduled maintenance"},"3261":{"body":"A : System requires 2 approvers from different teams. If unavailable: Escalate to on-call manager Contact security team directly Use emergency contact list","breadcrumbs":"Break Glass Training Guide » Q: What if I can\'t get 2 approvers","id":"3261","title":"Q: What if I can\'t get 2 approvers"},"3262":{"body":"A : No. System enforces team diversity to prevent collusion.","breadcrumbs":"Break Glass Training Guide » Q: Can approvers be from the same team","id":"3262","title":"Q: Can approvers be from the same team"},"3263":{"body":"A : Security team can revoke for: Suspicious activity Policy violation Incident resolved Misuse detected You\'ll receive immediate notification. Contact security team for details.","breadcrumbs":"Break Glass Training Guide » Q: What if security team revokes my session","id":"3263","title":"Q: What if security team revokes my session"},"3264":{"body":"A : No. Maximum duration is 4 hours. If you need more time, submit a new request with updated justification.","breadcrumbs":"Break Glass Training Guide » Q: Can I extend an active session","id":"3264","title":"Q: Can I extend an active session"},"3265":{"body":"A : Session auto-revokes after: Maximum duration (4 hours), OR Inactivity timeout (30 minutes) Always manually revoke when done.","breadcrumbs":"Break Glass Training Guide » Q: What happens if I forget to revoke","id":"3265","title":"Q: What happens if I forget to revoke"},"3266":{"body":"A : Yes. Security team monitors in real-time: Session activation alerts Action logging Suspicious activity detection Compliance verification","breadcrumbs":"Break Glass Training Guide » Q: Is break-glass monitored","id":"3266","title":"Q: Is break-glass monitored"},"3267":{"body":"A : Yes, in development environment only : PROVISIONING_ENV=dev provisioning break-glass request \\"Test emergency access procedure\\" Never practice in staging or production.","breadcrumbs":"Break Glass Training Guide » Q: Can I practice break-glass","id":"3267","title":"Q: Can I practice break-glass"},"3268":{"body":"","breadcrumbs":"Break Glass Training Guide » Emergency Contacts","id":"3268","title":"Emergency Contacts"},"3269":{"body":"Role Contact Response Time Security On-Call +1-555-SECURITY 5 minutes Platform On-Call +1-555-PLATFORM 5 minutes Engineering Director +1-555-ENG-DIR 15 minutes","breadcrumbs":"Break Glass Training Guide » During Incident","id":"3269","title":"During Incident"},"327":{"body":"This guide walks you through deploying your first infrastructure using the Provisioning Platform.","breadcrumbs":"First Deployment » First Deployment","id":"327","title":"First Deployment"},"3270":{"body":"L1 : On-call SRE L2 : Platform team lead L3 : Engineering manager L4 : Director of Engineering L5 : CTO","breadcrumbs":"Break Glass Training Guide » Escalation Path","id":"3270","title":"Escalation Path"},"3271":{"body":"Incident Slack : #incidents Security Slack : #security-alerts Email : security-team@example.com PagerDuty : Break-glass policy","breadcrumbs":"Break Glass Training Guide » Communication Channels","id":"3271","title":"Communication Channels"},"3272":{"body":"I certify that I have : Read and understood this training guide Understand when to use (and not use) break-glass Know the approval workflow Can use the CLI commands Understand auditing and compliance requirements Will follow post-incident review process Signature : _________________________ Date : _________________________ Next Training Due : _________________________ (1 year) Version : 1.0.0 Maintained By : Security Team Last Updated : 2025-10-08 Next Review : 2026-10-08","breadcrumbs":"Break Glass Training Guide » Training Certification","id":"3272","title":"Training Certification"},"3273":{"body":"Version : 1.0.0 Date : 2025-10-08 Audience : Platform Administrators, Security Teams Prerequisites : Understanding of Cedar policy language, Provisioning platform architecture","breadcrumbs":"Cedar Policies Production Guide » Cedar Policies Production Guide","id":"3273","title":"Cedar Policies Production Guide"},"3274":{"body":"Introduction Cedar Policy Basics Production Policy Strategy Policy Templates Policy Development Workflow Testing Policies Deployment Monitoring & Auditing Troubleshooting Best Practices","breadcrumbs":"Cedar Policies Production Guide » Table of Contents","id":"3274","title":"Table of Contents"},"3275":{"body":"Cedar policies control who can do what in the Provisioning platform. This guide helps you create, test, and deploy production-ready Cedar policies that balance security with operational efficiency.","breadcrumbs":"Cedar Policies Production Guide » Introduction","id":"3275","title":"Introduction"},"3276":{"body":"Fine-grained : Control access at resource + action level Context-aware : Decisions based on MFA, IP, time, approvals Auditable : Every decision is logged with policy ID Hot-reload : Update policies without restarting services Type-safe : Schema validation prevents errors","breadcrumbs":"Cedar Policies Production Guide » Why Cedar","id":"3276","title":"Why Cedar"},"3277":{"body":"","breadcrumbs":"Cedar Policies Production Guide » Cedar Policy Basics","id":"3277","title":"Cedar Policy Basics"},"3278":{"body":"permit ( principal, # Who (user, team, role) action, # What (create, delete, deploy) resource # Where (server, cluster, environment)\\n) when { condition # Context (MFA, IP, time)\\n};","breadcrumbs":"Cedar Policies Production Guide » Core Concepts","id":"3278","title":"Core Concepts"},"3279":{"body":"Type Examples Description User User::\\"alice\\" Individual users Team Team::\\"platform-admin\\" User groups Role Role::\\"Admin\\" Permission levels Resource Server::\\"web-01\\" Infrastructure resources Environment Environment::\\"production\\" Deployment targets","breadcrumbs":"Cedar Policies Production Guide » Entities","id":"3279","title":"Entities"},"328":{"body":"In this chapter, you\'ll: Configure a simple infrastructure Create your first server Install a task service (Kubernetes) Verify the deployment Estimated time: 10-15 minutes","breadcrumbs":"First Deployment » Overview","id":"328","title":"Overview"},"3280":{"body":"Category Actions Read read, list Write create, update, delete Deploy deploy, rollback Admin ssh, execute, admin","breadcrumbs":"Cedar Policies Production Guide » Actions","id":"3280","title":"Actions"},"3281":{"body":"","breadcrumbs":"Cedar Policies Production Guide » Production Policy Strategy","id":"3281","title":"Production Policy Strategy"},"3282":{"body":"Level 1: Development (Permissive) // Developers have full access to dev environment\\npermit ( principal in Team::\\"developers\\", action, resource in Environment::\\"development\\"\\n); Level 2: Staging (MFA Required) // All operations require MFA\\npermit ( principal in Team::\\"developers\\", action, resource in Environment::\\"staging\\"\\n) when { context.mfa_verified == true\\n}; Level 3: Production (MFA + Approval) // Deployments require MFA + approval\\npermit ( principal in Team::\\"platform-admin\\", action in [Action::\\"deploy\\", Action::\\"delete\\"], resource in Environment::\\"production\\"\\n) when { context.mfa_verified == true && context has approval_id && context.approval_id.startsWith(\\"APPROVAL-\\")\\n}; Level 4: Critical (Break-Glass Only) // Only emergency access\\npermit ( principal, action, resource in Resource::\\"production-database\\"\\n) when { context.emergency_access == true && context.session_approved == true\\n};","breadcrumbs":"Cedar Policies Production Guide » Security Levels","id":"3282","title":"Security Levels"},"3283":{"body":"","breadcrumbs":"Cedar Policies Production Guide » Policy Templates","id":"3283","title":"Policy Templates"},"3284":{"body":"// Admin: Full access\\npermit ( principal in Role::\\"Admin\\", action, resource\\n); // Operator: Server management + read clusters\\npermit ( principal in Role::\\"Operator\\", action in [ Action::\\"create\\", Action::\\"update\\", Action::\\"delete\\" ], resource is Server\\n); permit ( principal in Role::\\"Operator\\", action in [Action::\\"read\\", Action::\\"list\\"], resource is Cluster\\n); // Viewer: Read-only everywhere\\npermit ( principal in Role::\\"Viewer\\", action in [Action::\\"read\\", Action::\\"list\\"], resource\\n); // Auditor: Read audit logs only\\npermit ( principal in Role::\\"Auditor\\", action in [Action::\\"read\\", Action::\\"list\\"], resource is AuditLog\\n);","breadcrumbs":"Cedar Policies Production Guide » 1. Role-Based Access Control (RBAC)","id":"3284","title":"1. Role-Based Access Control (RBAC)"},"3285":{"body":"// Platform team: Infrastructure management\\npermit ( principal in Team::\\"platform\\", action in [ Action::\\"create\\", Action::\\"update\\", Action::\\"delete\\", Action::\\"deploy\\" ], resource in [Server, Cluster, Taskserv]\\n); // Security team: Access control + audit\\npermit ( principal in Team::\\"security\\", action, resource in [User, Role, AuditLog, BreakGlass]\\n); // DevOps team: Application deployments\\npermit ( principal in Team::\\"devops\\", action == Action::\\"deploy\\", resource in Environment::\\"production\\"\\n) when { context.mfa_verified == true && context.has_approval == true\\n};","breadcrumbs":"Cedar Policies Production Guide » 2. Team-Based Policies","id":"3285","title":"2. Team-Based Policies"},"3286":{"body":"// Deployments only during business hours\\npermit ( principal, action == Action::\\"deploy\\", resource in Environment::\\"production\\"\\n) when { context.time.hour >= 9 && context.time.hour <= 17 && context.time.weekday in [\\"Monday\\", \\"Tuesday\\", \\"Wednesday\\", \\"Thursday\\", \\"Friday\\"]\\n}; // Maintenance window\\npermit ( principal in Team::\\"platform\\", action, resource\\n) when { context.maintenance_window == true\\n};","breadcrumbs":"Cedar Policies Production Guide » 3. Time-Based Restrictions","id":"3286","title":"3. Time-Based Restrictions"},"3287":{"body":"// Production access only from office network\\npermit ( principal, action, resource in Environment::\\"production\\"\\n) when { context.ip_address.isInRange(\\"10.0.0.0/8\\") || context.ip_address.isInRange(\\"192.168.1.0/24\\")\\n}; // VPN access for remote work\\npermit ( principal, action, resource in Environment::\\"production\\"\\n) when { context.vpn_connected == true && context.mfa_verified == true\\n};","breadcrumbs":"Cedar Policies Production Guide » 4. IP-Based Restrictions","id":"3287","title":"4. IP-Based Restrictions"},"3288":{"body":"// Database servers: Extra protection\\nforbid ( principal, action == Action::\\"delete\\", resource in Resource::\\"database-*\\"\\n) unless { context.emergency_access == true\\n}; // Critical clusters: Require multiple approvals\\npermit ( principal, action in [Action::\\"update\\", Action::\\"delete\\"], resource in Resource::\\"k8s-production-*\\"\\n) when { context.approval_count >= 2 && context.mfa_verified == true\\n};","breadcrumbs":"Cedar Policies Production Guide » 5. Resource-Specific Policies","id":"3288","title":"5. Resource-Specific Policies"},"3289":{"body":"// Users can manage their own MFA devices\\npermit ( principal, action in [Action::\\"create\\", Action::\\"delete\\"], resource is MfaDevice\\n) when { resource.owner == principal\\n}; // Users can view their own audit logs\\npermit ( principal, action == Action::\\"read\\", resource is AuditLog\\n) when { resource.user_id == principal.id\\n};","breadcrumbs":"Cedar Policies Production Guide » 6. Self-Service Policies","id":"3289","title":"6. Self-Service Policies"},"329":{"body":"Create a basic infrastructure configuration: # Generate infrastructure template\\nprovisioning generate infra --new my-infra # This creates: workspace/infra/my-infra/\\n# - config.toml (infrastructure settings)\\n# - settings.ncl (Nickel configuration)","breadcrumbs":"First Deployment » Step 1: Configure Infrastructure","id":"329","title":"Step 1: Configure Infrastructure"},"3290":{"body":"","breadcrumbs":"Cedar Policies Production Guide » Policy Development Workflow","id":"3290","title":"Policy Development Workflow"},"3291":{"body":"Document : Who needs access? (roles, teams, individuals) To what resources? (servers, clusters, environments) What actions? (read, write, deploy, delete) Under what conditions? (MFA, IP, time, approvals) Example Requirements Document : # Requirement: Production Deployment **Who**: DevOps team members\\n**What**: Deploy applications to production\\n**When**: Business hours (9am-5pm Mon-Fri)\\n**Conditions**:\\n- MFA verified\\n- Change request approved\\n- From office network or VPN","breadcrumbs":"Cedar Policies Production Guide » Step 1: Define Requirements","id":"3291","title":"Step 1: Define Requirements"},"3292":{"body":"@id(\\"prod-deploy-devops\\")\\n@description(\\"DevOps can deploy to production during business hours with approval\\")\\npermit ( principal in Team::\\"devops\\", action == Action::\\"deploy\\", resource in Environment::\\"production\\"\\n) when { context.mfa_verified == true && context has approval_id && context.time.hour >= 9 && context.time.hour <= 17 && context.time.weekday in [\\"Monday\\", \\"Tuesday\\", \\"Wednesday\\", \\"Thursday\\", \\"Friday\\"] && (context.ip_address.isInRange(\\"10.0.0.0/8\\") || context.vpn_connected == true)\\n};","breadcrumbs":"Cedar Policies Production Guide » Step 2: Write Policy","id":"3292","title":"Step 2: Write Policy"},"3293":{"body":"# Use Cedar CLI to validate\\ncedar validate \\\\ --policies provisioning/config/cedar-policies/production.cedar \\\\ --schema provisioning/config/cedar-policies/schema.cedar # Expected output: ✓ Policy is valid","breadcrumbs":"Cedar Policies Production Guide » Step 3: Validate Syntax","id":"3293","title":"Step 3: Validate Syntax"},"3294":{"body":"# Deploy to development environment first\\ncp production.cedar provisioning/config/cedar-policies/development.cedar # Restart orchestrator to load new policies\\nsystemctl restart provisioning-orchestrator # Test with real requests\\nprovisioning server create test-server --check","breadcrumbs":"Cedar Policies Production Guide » Step 4: Test in Development","id":"3294","title":"Step 4: Test in Development"},"3295":{"body":"Review Checklist : Policy syntax valid Policy ID unique Description clear Conditions appropriate for security level Tested in development Reviewed by security team Documented in change log","breadcrumbs":"Cedar Policies Production Guide » Step 5: Review & Approve","id":"3295","title":"Step 5: Review & Approve"},"3296":{"body":"# Backup current policies\\ncp provisioning/config/cedar-policies/production.cedar \\\\ provisioning/config/cedar-policies/production.cedar.backup.$(date +%Y%m%d) # Deploy new policy\\ncp new-production.cedar provisioning/config/cedar-policies/production.cedar # Hot reload (no restart needed)\\nprovisioning cedar reload # Verify loaded\\nprovisioning cedar list","breadcrumbs":"Cedar Policies Production Guide » Step 6: Deploy to Production","id":"3296","title":"Step 6: Deploy to Production"},"3297":{"body":"","breadcrumbs":"Cedar Policies Production Guide » Testing Policies","id":"3297","title":"Testing Policies"},"3298":{"body":"Create test cases for each policy: # tests/cedar/prod-deploy-devops.yaml\\npolicy_id: prod-deploy-devops test_cases: - name: \\"DevOps can deploy with approval and MFA\\" principal: { type: \\"Team\\", id: \\"devops\\" } action: \\"deploy\\" resource: { type: \\"Environment\\", id: \\"production\\" } context: mfa_verified: true approval_id: \\"APPROVAL-123\\" time: { hour: 10, weekday: \\"Monday\\" } ip_address: \\"10.0.1.5\\" expected: Allow - name: \\"DevOps cannot deploy without MFA\\" principal: { type: \\"Team\\", id: \\"devops\\" } action: \\"deploy\\" resource: { type: \\"Environment\\", id: \\"production\\" } context: mfa_verified: false approval_id: \\"APPROVAL-123\\" time: { hour: 10, weekday: \\"Monday\\" } expected: Deny - name: \\"DevOps cannot deploy outside business hours\\" principal: { type: \\"Team\\", id: \\"devops\\" } action: \\"deploy\\" resource: { type: \\"Environment\\", id: \\"production\\" } context: mfa_verified: true approval_id: \\"APPROVAL-123\\" time: { hour: 22, weekday: \\"Monday\\" } expected: Deny Run tests: provisioning cedar test tests/cedar/","breadcrumbs":"Cedar Policies Production Guide » Unit Testing","id":"3298","title":"Unit Testing"},"3299":{"body":"Test with real API calls: # Setup test user\\nexport TEST_USER=\\"alice\\"\\nexport TEST_TOKEN=$(provisioning login --user $TEST_USER --output token) # Test allowed action\\ncurl -H \\"Authorization: Bearer $TEST_TOKEN\\" \\\\ http://localhost:9090/api/v1/servers \\\\ -X POST -d \'{\\"name\\": \\"test-server\\"}\' # Expected: 200 OK # Test denied action (without MFA)\\ncurl -H \\"Authorization: Bearer $TEST_TOKEN\\" \\\\ http://localhost:9090/api/v1/servers/prod-server-01 \\\\ -X DELETE # Expected: 403 Forbidden (MFA required)","breadcrumbs":"Cedar Policies Production Guide » Integration Testing","id":"3299","title":"Integration Testing"},"33":{"body":"84% reduction in main file size Domain-driven handlers 80+ shortcuts Bi-directional help system","breadcrumbs":"Home » 🎯 Modular CLI (v3.2.0)","id":"33","title":"🎯 Modular CLI (v3.2.0)"},"330":{"body":"Edit the generated configuration: # Edit with your preferred editor\\n$EDITOR workspace/infra/my-infra/settings.ncl Example configuration: import provisioning.settings as cfg # Infrastructure settings\\ninfra_settings = cfg.InfraSettings { name = \\"my-infra\\" provider = \\"local\\" # Start with local provider environment = \\"development\\"\\n} # Server configuration\\nservers = [ { hostname = \\"dev-server-01\\" cores = 2 memory = 4096 # MB disk = 50 # GB }\\n]","breadcrumbs":"First Deployment » Step 2: Edit Configuration","id":"330","title":"Step 2: Edit Configuration"},"3300":{"body":"Verify policy evaluation performance: # Generate load\\nprovisioning cedar bench \\\\ --policies production.cedar \\\\ --requests 10000 \\\\ --concurrency 100 # Expected: <10 ms per evaluation","breadcrumbs":"Cedar Policies Production Guide » Load Testing","id":"3300","title":"Load Testing"},"3301":{"body":"","breadcrumbs":"Cedar Policies Production Guide » Deployment","id":"3301","title":"Deployment"},"3302":{"body":"#!/bin/bash\\n# deploy-policies.sh ENVIRONMENT=$1 # dev, staging, prod # Validate policies\\ncedar validate \\\\ --policies provisioning/config/cedar-policies/$ENVIRONMENT.cedar \\\\ --schema provisioning/config/cedar-policies/schema.cedar if [ $? -ne 0 ]; then echo \\"❌ Policy validation failed\\" exit 1\\nfi # Backup current policies\\nBACKUP_DIR=\\"provisioning/config/cedar-policies/backups/$ENVIRONMENT\\"\\nmkdir -p $BACKUP_DIR\\ncp provisioning/config/cedar-policies/$ENVIRONMENT.cedar \\\\ $BACKUP_DIR/$ENVIRONMENT.cedar.$(date +%Y%m%d-%H%M%S) # Deploy new policies\\nscp provisioning/config/cedar-policies/$ENVIRONMENT.cedar \\\\ $ENVIRONMENT-orchestrator:/etc/provisioning/cedar-policies/production.cedar # Hot reload on remote\\nssh $ENVIRONMENT-orchestrator \\"provisioning cedar reload\\" echo \\"✅ Policies deployed to $ENVIRONMENT\\"","breadcrumbs":"Cedar Policies Production Guide » Development → Staging → Production","id":"3302","title":"Development → Staging → Production"},"3303":{"body":"# List backups\\nls -ltr provisioning/config/cedar-policies/backups/production/ # Restore previous version\\ncp provisioning/config/cedar-policies/backups/production/production.cedar.20251008-143000 \\\\ provisioning/config/cedar-policies/production.cedar # Reload\\nprovisioning cedar reload # Verify\\nprovisioning cedar list","breadcrumbs":"Cedar Policies Production Guide » Rollback Procedure","id":"3303","title":"Rollback Procedure"},"3304":{"body":"","breadcrumbs":"Cedar Policies Production Guide » Monitoring & Auditing","id":"3304","title":"Monitoring & Auditing"},"3305":{"body":"# Query denied requests (last 24 hours)\\nprovisioning audit query \\\\ --action authorization_denied \\\\ --from \\"24h\\" \\\\ --out table # Expected output:\\n# ┌─────────┬────────┬──────────┬────────┬────────────────┐\\n# │ Time │ User │ Action │ Resour │ Reason │\\n# ├─────────┼────────┼──────────┼────────┼────────────────┤\\n# │ 10:15am │ bob │ deploy │ prod │ MFA not verif │\\n# │ 11:30am │ alice │ delete │ db-01 │ No approval │\\n# └─────────┴────────┴──────────┴────────┴────────────────┘","breadcrumbs":"Cedar Policies Production Guide » Monitor Authorization Decisions","id":"3305","title":"Monitor Authorization Decisions"},"3306":{"body":"# alerts/cedar-policies.yaml\\nalerts: - name: \\"High Denial Rate\\" query: \\"authorization_denied\\" threshold: 10 window: \\"5m\\" action: \\"notify:security-team\\" - name: \\"Policy Bypass Attempt\\" query: \\"action:deploy AND result:denied\\" user: \\"critical-users\\" action: \\"page:oncall\\"","breadcrumbs":"Cedar Policies Production Guide » Alert on Suspicious Activity","id":"3306","title":"Alert on Suspicious Activity"},"3307":{"body":"# Which policies are most used?\\nprovisioning cedar stats --top 10 # Example output:\\n# Policy ID | Uses | Allows | Denies\\n# ---------------------- | ------- | -------- | -------\\n# prod-deploy-devops | 1,234 | 1,100 | 134\\n# admin-full-access | 892 | 892 | 0\\n# viewer-read-only | 5,421 | 5,421 | 0","breadcrumbs":"Cedar Policies Production Guide » Policy Usage Statistics","id":"3307","title":"Policy Usage Statistics"},"3308":{"body":"","breadcrumbs":"Cedar Policies Production Guide » Troubleshooting","id":"3308","title":"Troubleshooting"},"3309":{"body":"Symptom : Policy changes not taking effect Solutions : Verify hot reload: provisioning cedar reload\\nprovisioning cedar list # Should show updated timestamp Check orchestrator logs: journalctl -u provisioning-orchestrator -f | grep cedar Restart orchestrator: systemctl restart provisioning-orchestrator","breadcrumbs":"Cedar Policies Production Guide » Policy Not Applying","id":"3309","title":"Policy Not Applying"},"331":{"body":"First, run in check mode to see what would happen: # Check mode - no actual changes\\nprovisioning server create --infra my-infra --check # Expected output:\\n# ✓ Validation passed\\n# ⚠ Check mode: No changes will be made\\n# # Would create:\\n# - Server: dev-server-01 (2 cores, 4 GB RAM, 50 GB disk)","breadcrumbs":"First Deployment » Step 3: Create Server (Check Mode)","id":"331","title":"Step 3: Create Server (Check Mode)"},"3310":{"body":"Symptom : User denied access when policy should allow Debug : # Enable debug mode\\nexport PROVISIONING_DEBUG=1 # View authorization decision\\nprovisioning audit query \\\\ --user alice \\\\ --action deploy \\\\ --from \\"1h\\" \\\\ --out json | jq \'.authorization\' # Shows which policy evaluated, context used, reason for denial","breadcrumbs":"Cedar Policies Production Guide » Unexpected Denials","id":"3310","title":"Unexpected Denials"},"3311":{"body":"Symptom : Multiple policies match, unclear which applies Resolution : Cedar uses deny-override : If any forbid matches, request denied Use @priority annotations (higher number = higher priority) Make policies more specific to avoid conflicts @priority(100)\\npermit ( principal in Role::\\"Admin\\", action, resource\\n); @priority(50)\\nforbid ( principal, action == Action::\\"delete\\", resource is Database\\n); // Admin can do anything EXCEPT delete databases","breadcrumbs":"Cedar Policies Production Guide » Policy Conflicts","id":"3311","title":"Policy Conflicts"},"3312":{"body":"","breadcrumbs":"Cedar Policies Production Guide » Best Practices","id":"3312","title":"Best Practices"},"3313":{"body":"// ❌ BAD: Too permissive initially\\npermit (principal, action, resource); // ✅ GOOD: Explicit allow, expand as needed\\npermit ( principal in Role::\\"Admin\\", action in [Action::\\"read\\", Action::\\"list\\"], resource\\n);","breadcrumbs":"Cedar Policies Production Guide » 1. Start Restrictive, Loosen Gradually","id":"3313","title":"1. Start Restrictive, Loosen Gradually"},"3314":{"body":"@id(\\"prod-deploy-mfa\\")\\n@description(\\"Production deployments require MFA verification\\")\\n@owner(\\"platform-team\\")\\n@reviewed(\\"2025-10-08\\")\\n@expires(\\"2026-10-08\\")\\npermit ( principal in Team::\\"platform-admin\\", action == Action::\\"deploy\\", resource in Environment::\\"production\\"\\n) when { context.mfa_verified == true\\n};","breadcrumbs":"Cedar Policies Production Guide » 2. Use Annotations","id":"3314","title":"2. Use Annotations"},"3315":{"body":"Give users minimum permissions needed: // ❌ BAD: Overly broad\\npermit (principal in Team::\\"developers\\", action, resource); // ✅ GOOD: Specific permissions\\npermit ( principal in Team::\\"developers\\", action in [Action::\\"read\\", Action::\\"create\\", Action::\\"update\\"], resource in Environment::\\"development\\"\\n);","breadcrumbs":"Cedar Policies Production Guide » 3. Principle of Least Privilege","id":"3315","title":"3. Principle of Least Privilege"},"3316":{"body":"// Context required for this policy:\\n// - mfa_verified: boolean (from JWT claims)\\n// - approval_id: string (from request header)\\n// - ip_address: IpAddr (from connection)\\npermit ( principal in Role::\\"Operator\\", action == Action::\\"deploy\\", resource in Environment::\\"production\\"\\n) when { context.mfa_verified == true && context has approval_id && context.ip_address.isInRange(\\"10.0.0.0/8\\")\\n};","breadcrumbs":"Cedar Policies Production Guide » 4. Document Context Requirements","id":"3316","title":"4. Document Context Requirements"},"3317":{"body":"File organization : cedar-policies/\\n├── schema.cedar # Entity/action definitions\\n├── rbac.cedar # Role-based policies\\n├── teams.cedar # Team-based policies\\n├── time-restrictions.cedar # Time-based policies\\n├── ip-restrictions.cedar # Network-based policies\\n├── production.cedar # Production-specific\\n└── development.cedar # Development-specific","breadcrumbs":"Cedar Policies Production Guide » 5. Separate Policies by Concern","id":"3317","title":"5. Separate Policies by Concern"},"3318":{"body":"# Git commit each policy change\\ngit add provisioning/config/cedar-policies/production.cedar\\ngit commit -m \\"feat(cedar): Add MFA requirement for prod deployments - Require MFA for all production deployments\\n- Applies to devops and platform-admin teams\\n- Effective 2025-10-08 Policy ID: prod-deploy-mfa\\nReviewed by: security-team\\nTicket: SEC-1234\\" git push","breadcrumbs":"Cedar Policies Production Guide » 6. Version Control","id":"3318","title":"6. Version Control"},"3319":{"body":"Quarterly review : Remove unused policies Tighten overly permissive policies Update for new resources/actions Verify team memberships current Test break-glass procedures","breadcrumbs":"Cedar Policies Production Guide » 7. Regular Policy Audits","id":"3319","title":"7. Regular Policy Audits"},"332":{"body":"If check mode looks good, create the server: # Create server\\nprovisioning server create --infra my-infra # Expected output:\\n# ✓ Creating server: dev-server-01\\n# ✓ Server created successfully\\n# ✓ IP Address: 192.168.1.100\\n# ✓ SSH access: ssh user@192.168.1.100","breadcrumbs":"First Deployment » Step 4: Create Server (Real)","id":"332","title":"Step 4: Create Server (Real)"},"3320":{"body":"","breadcrumbs":"Cedar Policies Production Guide » Quick Reference","id":"3320","title":"Quick Reference"},"3321":{"body":"# Allow all\\npermit (principal, action, resource); # Deny all\\nforbid (principal, action, resource); # Role-based\\npermit (principal in Role::\\"Admin\\", action, resource); # Team-based\\npermit (principal in Team::\\"platform\\", action, resource); # Resource-based\\npermit (principal, action, resource in Environment::\\"production\\"); # Action-based\\npermit (principal, action in [Action::\\"read\\", Action::\\"list\\"], resource); # Condition-based\\npermit (principal, action, resource) when { context.mfa_verified == true }; # Complex\\npermit ( principal in Team::\\"devops\\", action == Action::\\"deploy\\", resource in Environment::\\"production\\"\\n) when { context.mfa_verified == true && context has approval_id && context.time.hour >= 9 && context.time.hour <= 17\\n};","breadcrumbs":"Cedar Policies Production Guide » Common Policy Patterns","id":"3321","title":"Common Policy Patterns"},"3322":{"body":"# Validate policies\\nprovisioning cedar validate # Reload policies (hot reload)\\nprovisioning cedar reload # List active policies\\nprovisioning cedar list # Test policies\\nprovisioning cedar test tests/ # Query denials\\nprovisioning audit query --action authorization_denied # Policy statistics\\nprovisioning cedar stats","breadcrumbs":"Cedar Policies Production Guide » Useful Commands","id":"3322","title":"Useful Commands"},"3323":{"body":"Documentation : docs/architecture/CEDAR_AUTHORIZATION_IMPLEMENTATION.md Policy Examples : provisioning/config/cedar-policies/ Issues : Report to platform-team Emergency : Use break-glass procedure Version : 1.0.0 Maintained By : Platform Team Last Updated : 2025-10-08","breadcrumbs":"Cedar Policies Production Guide » Support","id":"3323","title":"Support"},"3324":{"body":"Document Version : 1.0.0 Last Updated : 2025-10-08 Target Audience : Platform Administrators, Security Team Prerequisites : Control Center deployed, admin user created","breadcrumbs":"MFA Admin Setup Guide » MFA Admin Setup Guide - Production Operations Manual","id":"3324","title":"MFA Admin Setup Guide - Production Operations Manual"},"3325":{"body":"Overview MFA Requirements Admin Enrollment Process TOTP Setup (Authenticator Apps) WebAuthn Setup (Hardware Keys) Enforcing MFA via Cedar Policies Backup Codes Management Recovery Procedures Troubleshooting Best Practices Audit and Compliance","breadcrumbs":"MFA Admin Setup Guide » 📋 Table of Contents","id":"3325","title":"📋 Table of Contents"},"3326":{"body":"","breadcrumbs":"MFA Admin Setup Guide » Overview","id":"3326","title":"Overview"},"3327":{"body":"Multi-Factor Authentication (MFA) adds a second layer of security beyond passwords. Admins must provide: Something they know : Password Something they have : TOTP code (authenticator app) or WebAuthn device (YubiKey, Touch ID)","breadcrumbs":"MFA Admin Setup Guide » What is MFA","id":"3327","title":"What is MFA"},"3328":{"body":"Administrators have elevated privileges including: Server creation/deletion Production deployments Secret management User management Break-glass approval MFA protects against : Password compromise (phishing, leaks, brute force) Unauthorized access to critical systems Compliance violations (SOC2, ISO 27001)","breadcrumbs":"MFA Admin Setup Guide » Why MFA for Admins","id":"3328","title":"Why MFA for Admins"},"3329":{"body":"Method Type Examples Recommended For TOTP Software Google Authenticator, Authy, 1Password All admins (primary) WebAuthn/FIDO2 Hardware YubiKey, Touch ID, Windows Hello High-security admins Backup Codes One-time 10 single-use codes Emergency recovery","breadcrumbs":"MFA Admin Setup Guide » MFA Methods Supported","id":"3329","title":"MFA Methods Supported"},"333":{"body":"Check server status: # List all servers\\nprovisioning server list # Get detailed server info\\nprovisioning server info dev-server-01 # SSH to server (optional)\\nprovisioning server ssh dev-server-01","breadcrumbs":"First Deployment » Step 5: Verify Server","id":"333","title":"Step 5: Verify Server"},"3330":{"body":"","breadcrumbs":"MFA Admin Setup Guide » MFA Requirements","id":"3330","title":"MFA Requirements"},"3331":{"body":"All administrators MUST enable MFA for: Production environment access Server creation/deletion operations Deployment to production clusters Secret access (KMS, dynamic secrets) Break-glass approval User management operations","breadcrumbs":"MFA Admin Setup Guide » Mandatory MFA Enforcement","id":"3331","title":"Mandatory MFA Enforcement"},"3332":{"body":"Development : MFA optional (not recommended) Staging : MFA recommended, not enforced Production : MFA mandatory (enforced by Cedar policies)","breadcrumbs":"MFA Admin Setup Guide » Grace Period","id":"3332","title":"Grace Period"},"3333":{"body":"Week 1-2: Pilot Program ├─ Platform admins enable MFA ├─ Document issues and refine process └─ Create training materials Week 3-4: Full Deployment ├─ All admins enable MFA ├─ Cedar policies enforce MFA for production └─ Monitor compliance Week 5+: Maintenance ├─ Regular MFA device audits ├─ Backup code rotation └─ User support for MFA issues","breadcrumbs":"MFA Admin Setup Guide » Timeline for Rollout","id":"3333","title":"Timeline for Rollout"},"3334":{"body":"","breadcrumbs":"MFA Admin Setup Guide » Admin Enrollment Process","id":"3334","title":"Admin Enrollment Process"},"3335":{"body":"# Login with username/password\\nprovisioning login --user admin@example.com --workspace production # Response (partial token, MFA not yet verified):\\n{ \\"status\\": \\"mfa_required\\", \\"partial_token\\": \\"eyJhbGci...\\", # Limited access token \\"message\\": \\"MFA enrollment required for production access\\"\\n} Partial token limitations : Cannot access production resources Can only access MFA enrollment endpoints Expires in 15 minutes","breadcrumbs":"MFA Admin Setup Guide » Step 1: Initial Login (Password Only)","id":"3335","title":"Step 1: Initial Login (Password Only)"},"3336":{"body":"# Check available MFA methods\\nprovisioning mfa methods # Output:\\nAvailable MFA Methods: • TOTP (Authenticator apps) - Recommended for all users • WebAuthn (Hardware keys) - Recommended for high-security roles • Backup Codes - Emergency recovery only # Check current MFA status\\nprovisioning mfa status # Output:\\nMFA Status: TOTP: Not enrolled WebAuthn: Not enrolled Backup Codes: Not generated MFA Required: Yes (production workspace)","breadcrumbs":"MFA Admin Setup Guide » Step 2: Choose MFA Method","id":"3336","title":"Step 2: Choose MFA Method"},"3337":{"body":"Choose one or both methods (TOTP + WebAuthn recommended): TOTP Setup WebAuthn Setup","breadcrumbs":"MFA Admin Setup Guide » Step 3: Enroll MFA Device","id":"3337","title":"Step 3: Enroll MFA Device"},"3338":{"body":"After enrollment, login again with MFA: # Login (returns partial token)\\nprovisioning login --user admin@example.com --workspace production # Verify MFA code (returns full access token)\\nprovisioning mfa verify 123456 # Response:\\n{ \\"status\\": \\"authenticated\\", \\"access_token\\": \\"eyJhbGci...\\", # Full access token (15 min) \\"refresh_token\\": \\"eyJhbGci...\\", # Refresh token (7 days) \\"mfa_verified\\": true, \\"expires_in\\": 900\\n}","breadcrumbs":"MFA Admin Setup Guide » Step 4: Verify and Activate","id":"3338","title":"Step 4: Verify and Activate"},"3339":{"body":"","breadcrumbs":"MFA Admin Setup Guide » TOTP Setup (Authenticator Apps)","id":"3339","title":"TOTP Setup (Authenticator Apps)"},"334":{"body":"Install a task service on the server: # Check mode first\\nprovisioning taskserv create kubernetes --infra my-infra --check # Expected output:\\n# ✓ Validation passed\\n# ⚠ Check mode: No changes will be made\\n#\\n# Would install:\\n# - Kubernetes v1.28.0\\n# - Required dependencies: containerd, etcd\\n# - On servers: dev-server-01","breadcrumbs":"First Deployment » Step 6: Install Kubernetes (Check Mode)","id":"334","title":"Step 6: Install Kubernetes (Check Mode)"},"3340":{"body":"App Platform Notes Google Authenticator iOS, Android Simple, widely used Authy iOS, Android, Desktop Cloud backup, multi-device 1Password All platforms Integrated with password manager Microsoft Authenticator iOS, Android Enterprise integration Bitwarden All platforms Open source","breadcrumbs":"MFA Admin Setup Guide » Supported Authenticator Apps","id":"3340","title":"Supported Authenticator Apps"},"3341":{"body":"1. Initiate TOTP Enrollment provisioning mfa totp enroll Output : ╔════════════════════════════════════════════════════════════╗\\n║ TOTP ENROLLMENT ║\\n╚════════════════════════════════════════════════════════════╝ Scan this QR code with your authenticator app: █████████████████████████████████\\n█████████████████████████████████\\n████ ▄▄▄▄▄ █▀ █▀▀██ ▄▄▄▄▄ ████\\n████ █ █ █▀▄ ▀ ▄█ █ █ ████\\n████ █▄▄▄█ █ ▀▀ ▀▀█ █▄▄▄█ ████\\n████▄▄▄▄▄▄▄█ █▀█ ▀ █▄▄▄▄▄▄████\\n█████████████████████████████████\\n█████████████████████████████████ Manual entry (if QR code doesn\'t work): Secret: JBSWY3DPEHPK3PXP Account: admin@example.com Issuer: Provisioning Platform TOTP Configuration: Algorithm: SHA1 Digits: 6 Period: 30 seconds 2. Add to Authenticator App Option A: Scan QR Code (Recommended) Open authenticator app (Google Authenticator, Authy, etc.) Tap \\"+\\" or \\"Add Account\\" Select \\"Scan QR Code\\" Point camera at QR code displayed in terminal Account added automatically Option B: Manual Entry Open authenticator app Tap \\"+\\" or \\"Add Account\\" Select \\"Enter a setup key\\" or \\"Manual entry\\" Enter: Account name : admin@example.com Key : JBSWY3DPEHPK3PXP (secret shown above) Type of key : Time-based Save account 3. Verify TOTP Code # Get current code from authenticator app (6 digits, changes every 30s)\\n# Example code: 123456 provisioning mfa totp verify 123456 Success Response : ✓ TOTP verified successfully! Backup Codes (SAVE THESE SECURELY): 1. A3B9-C2D7-E1F4 2. G8H5-J6K3-L9M2 3. N4P7-Q1R8-S5T2 4. U6V3-W9X1-Y7Z4 5. A2B8-C5D1-E9F3 6. G7H4-J2K6-L8M1 7. N3P9-Q5R2-S7T4 8. U1V6-W3X8-Y2Z5 9. A9B4-C7D2-E5F1 10. G3H8-J1K5-L6M9 ⚠ Store backup codes in a secure location (password manager, encrypted file)\\n⚠ Each code can only be used once\\n⚠ These codes allow access if you lose your authenticator device TOTP enrollment complete. MFA is now active for your account. 4. Save Backup Codes Critical : Store backup codes in a secure location: # Copy backup codes to password manager or encrypted file\\n# NEVER store in plaintext, email, or cloud storage # Example: Store in encrypted file\\nprovisioning mfa backup-codes --save-encrypted ~/secure/mfa-backup-codes.enc # Or display again (requires existing MFA verification)\\nprovisioning mfa backup-codes --show 5. Test TOTP Login # Logout to test full login flow\\nprovisioning logout # Login with password (returns partial token)\\nprovisioning login --user admin@example.com --workspace production # Get current TOTP code from authenticator app\\n# Verify with TOTP code (returns full access token)\\nprovisioning mfa verify 654321 # ✓ Full access granted","breadcrumbs":"MFA Admin Setup Guide » Step-by-Step TOTP Enrollment","id":"3341","title":"Step-by-Step TOTP Enrollment"},"3342":{"body":"","breadcrumbs":"MFA Admin Setup Guide » WebAuthn Setup (Hardware Keys)","id":"3342","title":"WebAuthn Setup (Hardware Keys)"},"3343":{"body":"Device Type Examples Security Level USB Security Keys YubiKey 5, SoloKey, Titan Key Highest NFC Keys YubiKey 5 NFC, Google Titan High (mobile compatible) Biometric Touch ID (macOS), Windows Hello, Face ID High (convenience) Platform Authenticators Built-in laptop/phone biometrics Medium-High","breadcrumbs":"MFA Admin Setup Guide » Supported WebAuthn Devices","id":"3343","title":"Supported WebAuthn Devices"},"3344":{"body":"1. Check WebAuthn Support # Verify WebAuthn support on your system\\nprovisioning mfa webauthn check # Output:\\nWebAuthn Support: ✓ Browser: Chrome 120.0 (WebAuthn supported) ✓ Platform: macOS 14.0 (Touch ID available) ✓ USB: YubiKey 5 NFC detected 2. Initiate WebAuthn Registration provisioning mfa webauthn register --device-name \\"YubiKey-Admin-Primary\\" Output : ╔════════════════════════════════════════════════════════════╗\\n║ WEBAUTHN DEVICE REGISTRATION ║\\n╚════════════════════════════════════════════════════════════╝ Device Name: YubiKey-Admin-Primary\\nRelying Party: provisioning.example.com ⚠ Please insert your security key and touch it when it blinks Waiting for device interaction... 3. Complete Device Registration For USB Security Keys (YubiKey, SoloKey) : Insert USB key into computer Terminal shows \\"Touch your security key\\" Touch the gold/silver contact on the key (it will blink) Registration completes For Touch ID (macOS) : Terminal shows \\"Touch ID prompt will appear\\" Touch ID dialog appears on screen Place finger on Touch ID sensor Registration completes For Windows Hello : Terminal shows \\"Windows Hello prompt\\" Windows Hello biometric prompt appears Complete biometric scan (fingerprint/face) Registration completes Success Response : ✓ WebAuthn device registered successfully! Device Details: Name: YubiKey-Admin-Primary Type: USB Security Key AAGUID: 2fc0579f-8113-47ea-b116-bb5a8 d9202a Credential ID: kZj8C3bx... Registered: 2025-10-08T14:32:10Z You can now use this device for authentication. 4. Register Additional Devices (Optional) Best Practice : Register 2+ WebAuthn devices (primary + backup) # Register backup YubiKey\\nprovisioning mfa webauthn register --device-name \\"YubiKey-Admin-Backup\\" # Register Touch ID (for convenience on personal laptop)\\nprovisioning mfa webauthn register --device-name \\"MacBook-TouchID\\" 5. List Registered Devices provisioning mfa webauthn list # Output:\\nRegistered WebAuthn Devices: 1. YubiKey-Admin-Primary (USB Security Key) Registered: 2025-10-08T14:32:10Z Last Used: 2025-10-08T14:32:10Z 2. YubiKey-Admin-Backup (USB Security Key) Registered: 2025-10-08T14:35:22Z Last Used: Never 3. MacBook-TouchID (Platform Authenticator) Registered: 2025-10-08T14:40:15Z Last Used: 2025-10-08T15:20:05Z Total: 3 devices 6. Test WebAuthn Login # Logout to test\\nprovisioning logout # Login with password (partial token)\\nprovisioning login --user admin@example.com --workspace production # Authenticate with WebAuthn\\nprovisioning mfa webauthn verify # Output:\\n⚠ Insert and touch your security key\\n[Touch YubiKey when it blinks] ✓ WebAuthn verification successful\\n✓ Full access granted","breadcrumbs":"MFA Admin Setup Guide » Step-by-Step WebAuthn Enrollment","id":"3344","title":"Step-by-Step WebAuthn Enrollment"},"3345":{"body":"","breadcrumbs":"MFA Admin Setup Guide » Enforcing MFA via Cedar Policies","id":"3345","title":"Enforcing MFA via Cedar Policies"},"3346":{"body":"Location : provisioning/config/cedar-policies/production.cedar // Production operations require MFA verification\\npermit ( principal, action in [ Action::\\"server:create\\", Action::\\"server:delete\\", Action::\\"cluster:deploy\\", Action::\\"secret:read\\", Action::\\"user:manage\\" ], resource in Environment::\\"production\\"\\n) when { // MFA MUST be verified context.mfa_verified == true\\n}; // Admin role requires MFA for ALL production actions\\npermit ( principal in Role::\\"Admin\\", action, resource in Environment::\\"production\\"\\n) when { context.mfa_verified == true\\n}; // Break-glass approval requires MFA\\npermit ( principal, action == Action::\\"break_glass:approve\\", resource\\n) when { context.mfa_verified == true && principal.role in [Role::\\"Admin\\", Role::\\"SecurityLead\\"]\\n};","breadcrumbs":"MFA Admin Setup Guide » Production MFA Enforcement Policy","id":"3346","title":"Production MFA Enforcement Policy"},"3347":{"body":"Location : provisioning/config/cedar-policies/development.cedar // Development: MFA recommended but not enforced\\npermit ( principal, action, resource in Environment::\\"dev\\"\\n) when { // MFA not required for dev, but logged if missing true\\n}; // Staging: MFA recommended for destructive operations\\npermit ( principal, action in [Action::\\"server:delete\\", Action::\\"cluster:delete\\"], resource in Environment::\\"staging\\"\\n) when { // Allow without MFA but log warning context.mfa_verified == true || context has mfa_warning_acknowledged\\n};","breadcrumbs":"MFA Admin Setup Guide » Development/Staging Policies (MFA Recommended, Not Required)","id":"3347","title":"Development/Staging Policies (MFA Recommended, Not Required)"},"3348":{"body":"# Validate Cedar policies\\nprovisioning cedar validate --policies config/cedar-policies/ # Test policies with sample requests\\nprovisioning cedar test --policies config/cedar-policies/ \\\\ --test-file tests/cedar-test-cases.yaml # Deploy to production (requires MFA + approval)\\nprovisioning cedar deploy production --policies config/cedar-policies/production.cedar # Verify policy is active\\nprovisioning cedar status production","breadcrumbs":"MFA Admin Setup Guide » Policy Deployment","id":"3348","title":"Policy Deployment"},"3349":{"body":"# Test 1: Production access WITHOUT MFA (should fail)\\nprovisioning login --user admin@example.com --workspace production\\nprovisioning server create web-01 --plan medium --check # Expected: Authorization denied (MFA not verified) # Test 2: Production access WITH MFA (should succeed)\\nprovisioning login --user admin@example.com --workspace production\\nprovisioning mfa verify 123456\\nprovisioning server create web-01 --plan medium --check # Expected: Server creation initiated","breadcrumbs":"MFA Admin Setup Guide » Testing MFA Enforcement","id":"3349","title":"Testing MFA Enforcement"},"335":{"body":"Proceed with installation: # Install Kubernetes\\nprovisioning taskserv create kubernetes --infra my-infra --wait # This will:\\n# 1. Check dependencies\\n# 2. Install containerd\\n# 3. Install etcd\\n# 4. Install Kubernetes\\n# 5. Configure and start services # Monitor progress\\nprovisioning workflow monitor ","breadcrumbs":"First Deployment » Step 7: Install Kubernetes (Real)","id":"335","title":"Step 7: Install Kubernetes (Real)"},"3350":{"body":"","breadcrumbs":"MFA Admin Setup Guide » Backup Codes Management","id":"3350","title":"Backup Codes Management"},"3351":{"body":"Backup codes are automatically generated during first MFA enrollment: # View existing backup codes (requires MFA verification)\\nprovisioning mfa backup-codes --show # Regenerate backup codes (invalidates old ones)\\nprovisioning mfa backup-codes --regenerate # Output:\\n⚠ WARNING: Regenerating backup codes will invalidate all existing codes.\\nContinue? (yes/no): yes New Backup Codes: 1. X7Y2-Z9A4-B6C1 2. D3E8-F5G2-H9J4 3. K6L1-M7N3-P8Q2 4. R4S9-T6U1-V3W7 5. X2Y5-Z8A3-B9C4 6. D7E1-F4G6-H2J8 7. K5L9-M3N6-P1Q4 8. R8S2-T5U7-V9W3 9. X4Y6-Z1A8-B3C5 10. D9E2-F7G4-H6J1 ✓ Backup codes regenerated successfully\\n⚠ Save these codes in a secure location","breadcrumbs":"MFA Admin Setup Guide » Generating Backup Codes","id":"3351","title":"Generating Backup Codes"},"3352":{"body":"When to use backup codes : Lost authenticator device (phone stolen, broken) WebAuthn key not available (traveling, left at office) Authenticator app not working (time sync issue) Login with backup code : # Login (partial token)\\nprovisioning login --user admin@example.com --workspace production # Use backup code instead of TOTP/WebAuthn\\nprovisioning mfa verify-backup X7Y2-Z9A4-B6C1 # Output:\\n✓ Backup code verified\\n⚠ Backup code consumed (9 remaining)\\n⚠ Enroll a new MFA device as soon as possible\\n✓ Full access granted (temporary)","breadcrumbs":"MFA Admin Setup Guide » Using Backup Codes","id":"3352","title":"Using Backup Codes"},"3353":{"body":"✅ DO : Store in password manager (1Password, Bitwarden, LastPass) Print and store in physical safe Encrypt and store in secure cloud storage (with encryption key stored separately) Share with trusted IT team member (encrypted) ❌ DON\'T : Email to yourself Store in plaintext file on laptop Save in browser notes/bookmarks Share via Slack/Teams/unencrypted chat Screenshot and save to Photos Example: Encrypted Storage : # Encrypt backup codes with Age\\nprovisioning mfa backup-codes --export | \\\\ age -p -o ~/secure/mfa-backup-codes.age # Decrypt when needed\\nage -d ~/secure/mfa-backup-codes.age","breadcrumbs":"MFA Admin Setup Guide » Backup Code Storage Best Practices","id":"3353","title":"Backup Code Storage Best Practices"},"3354":{"body":"","breadcrumbs":"MFA Admin Setup Guide » Recovery Procedures","id":"3354","title":"Recovery Procedures"},"3355":{"body":"Situation : Phone stolen/broken, authenticator app not accessible Recovery Steps : # Step 1: Use backup code to login\\nprovisioning login --user admin@example.com --workspace production\\nprovisioning mfa verify-backup X7Y2-Z9A4-B6C1 # Step 2: Remove old TOTP enrollment\\nprovisioning mfa totp unenroll # Step 3: Enroll new TOTP device\\nprovisioning mfa totp enroll\\n# [Scan QR code with new phone/authenticator app]\\nprovisioning mfa totp verify 654321 # Step 4: Generate new backup codes\\nprovisioning mfa backup-codes --regenerate","breadcrumbs":"MFA Admin Setup Guide » Scenario 1: Lost Authenticator Device (TOTP)","id":"3355","title":"Scenario 1: Lost Authenticator Device (TOTP)"},"3356":{"body":"Situation : YubiKey lost, stolen, or damaged Recovery Steps : # Step 1: Login with alternative method (TOTP or backup code)\\nprovisioning login --user admin@example.com --workspace production\\nprovisioning mfa verify 123456 # TOTP from authenticator app # Step 2: List registered WebAuthn devices\\nprovisioning mfa webauthn list # Step 3: Remove lost device\\nprovisioning mfa webauthn remove \\"YubiKey-Admin-Primary\\" # Output:\\n⚠ Remove WebAuthn device \\"YubiKey-Admin-Primary\\"?\\nThis cannot be undone. (yes/no): yes ✓ Device removed # Step 4: Register new WebAuthn device\\nprovisioning mfa webauthn register --device-name \\"YubiKey-Admin-Replacement\\"","breadcrumbs":"MFA Admin Setup Guide » Scenario 2: Lost WebAuthn Key (YubiKey)","id":"3356","title":"Scenario 2: Lost WebAuthn Key (YubiKey)"},"3357":{"body":"Situation : Lost phone (TOTP), lost YubiKey, no backup codes Recovery Steps (Requires Admin Assistance): # User contacts Security Team / Platform Admin # Admin performs MFA reset (requires 2+ admin approval)\\nprovisioning admin mfa-reset admin@example.com \\\\ --reason \\"Employee lost all MFA devices (phone + YubiKey)\\" \\\\ --ticket SUPPORT-12345 # Output:\\n⚠ MFA Reset Request Created Reset Request ID: MFA-RESET-20251008-001\\nUser: admin@example.com\\nReason: Employee lost all MFA devices (phone + YubiKey)\\nTicket: SUPPORT-12345 Required Approvals: 2\\nApprovers: 0/2 # Two other admins approve (with their own MFA)\\nprovisioning admin mfa-reset approve MFA-RESET-20251008-001 \\\\ --reason \\"Verified via video call + employee badge\\" # After 2 approvals, MFA is reset\\n✓ MFA reset approved (2/2 approvals)\\n✓ User admin@example.com can now re-enroll MFA devices # User re-enrolls TOTP and WebAuthn\\nprovisioning mfa totp enroll\\nprovisioning mfa webauthn register --device-name \\"YubiKey-New\\"","breadcrumbs":"MFA Admin Setup Guide » Scenario 3: All MFA Methods Lost","id":"3357","title":"Scenario 3: All MFA Methods Lost"},"3358":{"body":"Situation : Used 9 out of 10 backup codes Recovery Steps : # Login with last backup code\\nprovisioning login --user admin@example.com --workspace production\\nprovisioning mfa verify-backup D9E2-F7G4-H6J1 # Output:\\n⚠ WARNING: This is your LAST backup code!\\n✓ Backup code verified\\n⚠ Regenerate backup codes immediately! # Immediately regenerate backup codes\\nprovisioning mfa backup-codes --regenerate # Save new codes securely","breadcrumbs":"MFA Admin Setup Guide » Scenario 4: Backup Codes Depleted","id":"3358","title":"Scenario 4: Backup Codes Depleted"},"3359":{"body":"","breadcrumbs":"MFA Admin Setup Guide » Troubleshooting","id":"3359","title":"Troubleshooting"},"336":{"body":"Check that Kubernetes is running: # List installed task services\\nprovisioning taskserv list --infra my-infra # Check Kubernetes status\\nprovisioning server ssh dev-server-01\\nkubectl get nodes # On the server\\nexit # Or remotely\\nprovisioning server exec dev-server-01 -- kubectl get nodes","breadcrumbs":"First Deployment » Step 8: Verify Installation","id":"336","title":"Step 8: Verify Installation"},"3360":{"body":"Symptoms : provisioning mfa verify 123456\\n✗ Error: Invalid TOTP code Possible Causes : Time sync issue (most common) Wrong secret key entered during enrollment Code expired (30-second window) Solutions : # Check time sync (device clock must be accurate)\\n# macOS:\\nsudo sntp -sS time.apple.com # Linux:\\nsudo ntpdate pool.ntp.org # Verify TOTP configuration\\nprovisioning mfa totp status # Output:\\nTOTP Configuration: Algorithm: SHA1 Digits: 6 Period: 30 seconds Time Window: ±1 period (90 seconds total) # Check system time vs NTP\\ndate && curl -s http://worldtimeapi.org/api/ip | grep datetime # If time is off by >30 seconds, sync time and retry","breadcrumbs":"MFA Admin Setup Guide » Issue 1: \\"Invalid TOTP code\\" Error","id":"3360","title":"Issue 1: \\"Invalid TOTP code\\" Error"},"3361":{"body":"Symptoms : provisioning mfa webauthn register\\n✗ Error: No WebAuthn authenticator detected Solutions : # Check USB connection (for hardware keys)\\n# macOS:\\nsystem_profiler SPUSBDataType | grep -i yubikey # Linux:\\nlsusb | grep -i yubico # Check browser WebAuthn support\\nprovisioning mfa webauthn check # Try different USB port (USB-A vs USB-C) # For Touch ID: Ensure finger is enrolled in System Preferences\\n# For Windows Hello: Ensure biometrics are configured in Settings","breadcrumbs":"MFA Admin Setup Guide » Issue 2: WebAuthn Not Detected","id":"3361","title":"Issue 2: WebAuthn Not Detected"},"3362":{"body":"Symptoms : provisioning server create web-01\\n✗ Error: Authorization denied (MFA verification required) Cause : Access token expired (15 min) or MFA verification not in token claims Solution : # Check token expiration\\nprovisioning auth status # Output:\\nAuthentication Status: Logged in: Yes User: admin@example.com Access Token: Expired (issued 16 minutes ago) MFA Verified: Yes (but token expired) # Re-authenticate (will prompt for MFA again)\\nprovisioning login --user admin@example.com --workspace production\\nprovisioning mfa verify 654321 # Verify MFA claim in token\\nprovisioning auth decode-token # Output (JWT claims):\\n{ \\"sub\\": \\"admin@example.com\\", \\"role\\": \\"Admin\\", \\"mfa_verified\\": true, # ← Must be true \\"mfa_method\\": \\"totp\\", \\"iat\\": 1696766400, \\"exp\\": 1696767300\\n}","breadcrumbs":"MFA Admin Setup Guide » Issue 3: \\"MFA Required\\" Despite Verification","id":"3362","title":"Issue 3: \\"MFA Required\\" Despite Verification"},"3363":{"body":"Symptoms : QR code appears garbled or doesn\'t display in terminal Solutions : # Use manual entry instead\\nprovisioning mfa totp enroll --manual # Output (no QR code):\\nManual TOTP Setup: Secret: JBSWY3DPEHPK3PXP Account: admin@example.com Issuer: Provisioning Platform Enter this secret manually in your authenticator app. # Or export QR code to image file\\nprovisioning mfa totp enroll --qr-image ~/mfa-qr.png\\nopen ~/mfa-qr.png # View in image viewer","breadcrumbs":"MFA Admin Setup Guide » Issue 4: QR Code Not Displaying","id":"3363","title":"Issue 4: QR Code Not Displaying"},"3364":{"body":"Symptoms : provisioning mfa verify-backup X7Y2-Z9A4-B6C1\\n✗ Error: Invalid or already used backup code Possible Causes : Code already used (single-use only) Backup codes regenerated (old codes invalidated) Typo in code entry Solutions : # Check backup code status (requires alternative login method)\\nprovisioning mfa backup-codes --status # Output:\\nBackup Codes Status: Total Generated: 10 Used: 3 Remaining: 7 Last Used: 2025-10-05T10:15:30Z # Contact admin for MFA reset if all codes used\\n# Or use alternative MFA method (TOTP, WebAuthn)","breadcrumbs":"MFA Admin Setup Guide » Issue 5: Backup Code Not Working","id":"3364","title":"Issue 5: Backup Code Not Working"},"3365":{"body":"","breadcrumbs":"MFA Admin Setup Guide » Best Practices","id":"3365","title":"Best Practices"},"3366":{"body":"1. Use Multiple MFA Methods ✅ Recommended Setup : Primary : TOTP (Google Authenticator, Authy) Backup : WebAuthn (YubiKey or Touch ID) Emergency : Backup codes (stored securely) # Enroll all three\\nprovisioning mfa totp enroll\\nprovisioning mfa webauthn register --device-name \\"YubiKey-Primary\\"\\nprovisioning mfa backup-codes --save-encrypted ~/secure/codes.enc 2. Secure Backup Code Storage # Store in password manager (1Password example)\\nprovisioning mfa backup-codes --show | \\\\ op item create --category \\"Secure Note\\" \\\\ --title \\"Provisioning MFA Backup Codes\\" \\\\ --vault \\"Work\\" # Or encrypted file\\nprovisioning mfa backup-codes --export | \\\\ age -p -o ~/secure/mfa-backup-codes.age 3. Regular Device Audits # Monthly: Review registered devices\\nprovisioning mfa devices --all # Remove unused/old devices\\nprovisioning mfa webauthn remove \\"Old-YubiKey\\"\\nprovisioning mfa totp remove \\"Old-Phone\\" 4. Test Recovery Procedures # Quarterly: Test backup code login\\nprovisioning logout\\nprovisioning login --user admin@example.com --workspace dev\\nprovisioning mfa verify-backup [test-code] # Verify backup codes are accessible\\ncat ~/secure/mfa-backup-codes.enc | age -d","breadcrumbs":"MFA Admin Setup Guide » For Individual Admins","id":"3366","title":"For Individual Admins"},"3367":{"body":"1. MFA Enrollment Verification # Generate MFA enrollment report\\nprovisioning admin mfa-report --format csv > mfa-enrollment.csv # Output (CSV):\\n# User,MFA_Enabled,TOTP,WebAuthn,Backup_Codes,Last_MFA_Login,Role\\n# admin@example.com,Yes,Yes,Yes,10,2025-10-08T14:00:00Z,Admin\\n# dev@example.com,No,No,No,0,Never,Developer 2. Enforce MFA Deadlines # Set MFA enrollment deadline\\nprovisioning admin mfa-deadline set 2025-11-01 \\\\ --roles Admin,Developer \\\\ --environment production # Send reminder emails\\nprovisioning admin mfa-remind \\\\ --users-without-mfa \\\\ --template \\"MFA enrollment required by Nov 1\\" 3. Monitor MFA Usage # Audit: Find production logins without MFA\\nprovisioning audit query \\\\ --action \\"auth:login\\" \\\\ --filter \'mfa_verified == false && environment == \\"production\\"\' \\\\ --since 7d # Alert on repeated MFA failures\\nprovisioning monitoring alert create \\\\ --name \\"MFA Brute Force\\" \\\\ --condition \\"mfa_failures > 5 in 5 min\\" \\\\ --action \\"notify security-team\\" 4. MFA Reset Policy MFA Reset Requirements : User verification (video call + ID check) Support ticket created (incident tracking) 2+ admin approvals (different teams) Time-limited reset window (24 hours) Mandatory re-enrollment before production access # MFA reset workflow\\nprovisioning admin mfa-reset create user@example.com \\\\ --reason \\"Lost all devices\\" \\\\ --ticket SUPPORT-12345 \\\\ --expires-in 24h # Requires 2 approvals\\nprovisioning admin mfa-reset approve MFA-RESET-001","breadcrumbs":"MFA Admin Setup Guide » For Security Teams","id":"3367","title":"For Security Teams"},"3368":{"body":"1. Cedar Policy Best Practices // Require MFA for high-risk actions\\npermit ( principal, action in [ Action::\\"server:delete\\", Action::\\"cluster:delete\\", Action::\\"secret:delete\\", Action::\\"user:delete\\" ], resource\\n) when { context.mfa_verified == true && context.mfa_age_seconds < 300 // MFA verified within last 5 minutes\\n}; 2. MFA Grace Periods (For Rollout) # Development: No MFA required\\nexport PROVISIONING_MFA_REQUIRED=false # Staging: MFA recommended (warnings only)\\nexport PROVISIONING_MFA_REQUIRED=warn # Production: MFA mandatory (strict enforcement)\\nexport PROVISIONING_MFA_REQUIRED=true 3. Backup Admin Account Emergency Admin (break-glass scenario): Separate admin account with MFA enrollment Credentials stored in physical safe Only used when primary admins locked out Requires incident report after use # Create emergency admin\\nprovisioning admin create emergency-admin@example.com \\\\ --role EmergencyAdmin \\\\ --mfa-required true \\\\ --max-concurrent-sessions 1 # Print backup codes and store in safe\\nprovisioning mfa backup-codes --show --user emergency-admin@example.com > emergency-codes.txt\\n# [Print and store in physical safe]","breadcrumbs":"MFA Admin Setup Guide » For Platform Admins","id":"3368","title":"For Platform Admins"},"3369":{"body":"","breadcrumbs":"MFA Admin Setup Guide » Audit and Compliance","id":"3369","title":"Audit and Compliance"},"337":{"body":"","breadcrumbs":"First Deployment » Common Deployment Patterns","id":"337","title":"Common Deployment Patterns"},"3370":{"body":"All MFA events are logged to the audit system: # View MFA enrollment events\\nprovisioning audit query \\\\ --action-type \\"mfa:*\\" \\\\ --since 30d # Output (JSON):\\n[ { \\"timestamp\\": \\"2025-10-08T14:32:10Z\\", \\"action\\": \\"mfa:totp:enroll\\", \\"user\\": \\"admin@example.com\\", \\"result\\": \\"success\\", \\"device_type\\": \\"totp\\", \\"ip_address\\": \\"203.0.113.42\\" }, { \\"timestamp\\": \\"2025-10-08T14:35:22Z\\", \\"action\\": \\"mfa:webauthn:register\\", \\"user\\": \\"admin@example.com\\", \\"result\\": \\"success\\", \\"device_name\\": \\"YubiKey-Admin-Primary\\", \\"ip_address\\": \\"203.0.113.42\\" }\\n]","breadcrumbs":"MFA Admin Setup Guide » MFA Audit Logging","id":"3370","title":"MFA Audit Logging"},"3371":{"body":"SOC2 Compliance (Access Control) # Generate SOC2 access control report\\nprovisioning compliance report soc2 \\\\ --control \\"CC6.1\\" \\\\ --period \\"2025-Q3\\" # Output:\\nSOC2 Trust Service Criteria - CC6.1 (Logical Access) MFA Enforcement: ✓ MFA enabled for 100% of production admins (15/15) ✓ MFA verified for 98.7% of production logins (2,453/2,485) ✓ MFA policies enforced via Cedar authorization ✓ Failed MFA attempts logged and monitored Evidence: - Cedar policy: production.cedar (lines 15-25) - Audit logs: mfa-verification-logs-2025-q3.json - Enrollment report: mfa-enrollment-status.csv ISO 27001 Compliance (A.9.4.2 - Secure Log-on) # ISO 27001 A.9.4.2 compliance report\\nprovisioning compliance report iso27001 \\\\ --control \\"A.9.4.2\\" \\\\ --format pdf \\\\ --output iso27001-a942-mfa-report.pdf # Report Sections:\\n# 1. MFA Implementation Details\\n# 2. Enrollment Procedures\\n# 3. Audit Trail\\n# 4. Policy Enforcement\\n# 5. Recovery Procedures GDPR Compliance (MFA Data Handling) # GDPR data subject request (MFA data export)\\nprovisioning compliance gdpr export admin@example.com \\\\ --include mfa # Output (JSON):\\n{ \\"user\\": \\"admin@example.com\\", \\"mfa_data\\": { \\"totp_enrolled\\": true, \\"totp_enrollment_date\\": \\"2025-10-08T14:32:10Z\\", \\"webauthn_devices\\": [ { \\"name\\": \\"YubiKey-Admin-Primary\\", \\"registered\\": \\"2025-10-08T14:35:22Z\\", \\"last_used\\": \\"2025-10-08T16:20:05Z\\" } ], \\"backup_codes_remaining\\": 7, \\"mfa_login_history\\": [...] # Last 90 days }\\n} # GDPR deletion (MFA data removal after account deletion)\\nprovisioning compliance gdpr delete admin@example.com --include-mfa","breadcrumbs":"MFA Admin Setup Guide » Compliance Reports","id":"3371","title":"Compliance Reports"},"3372":{"body":"# Generate MFA metrics\\nprovisioning admin mfa-metrics --period 30d # Output:\\nMFA Metrics (Last 30 Days) Enrollment: Total Users: 42 MFA Enabled: 38 (90.5%) TOTP Only: 22 (57.9%) WebAuthn Only: 3 (7.9%) Both TOTP + WebAuthn: 13 (34.2%) No MFA: 4 (9.5%) ⚠ Authentication: Total Logins: 3,847 MFA Verified: 3,802 (98.8%) MFA Failed: 45 (1.2%) Backup Code Used: 7 (0.2%) Devices: TOTP Devices: 35 WebAuthn Devices: 47 Backup Codes Remaining (avg): 8.3 Incidents: MFA Resets: 2 Lost Devices: 3 Lockouts: 1","breadcrumbs":"MFA Admin Setup Guide » MFA Metrics Dashboard","id":"3372","title":"MFA Metrics Dashboard"},"3373":{"body":"","breadcrumbs":"MFA Admin Setup Guide » Quick Reference Card","id":"3373","title":"Quick Reference Card"},"3374":{"body":"# Login with MFA\\nprovisioning login --user admin@example.com --workspace production\\nprovisioning mfa verify 123456 # Check MFA status\\nprovisioning mfa status # View registered devices\\nprovisioning mfa devices","breadcrumbs":"MFA Admin Setup Guide » Daily Admin Operations","id":"3374","title":"Daily Admin Operations"},"3375":{"body":"# TOTP\\nprovisioning mfa totp enroll # Enroll TOTP\\nprovisioning mfa totp verify 123456 # Verify TOTP code\\nprovisioning mfa totp unenroll # Remove TOTP # WebAuthn\\nprovisioning mfa webauthn register --device-name \\"YubiKey\\" # Register key\\nprovisioning mfa webauthn list # List devices\\nprovisioning mfa webauthn remove \\"YubiKey\\" # Remove device # Backup Codes\\nprovisioning mfa backup-codes --show # View codes\\nprovisioning mfa backup-codes --regenerate # Generate new codes\\nprovisioning mfa verify-backup X7Y2-Z9A4-B6C1 # Use backup code","breadcrumbs":"MFA Admin Setup Guide » MFA Management","id":"3375","title":"MFA Management"},"3376":{"body":"# Lost device recovery (use backup code)\\nprovisioning login --user admin@example.com\\nprovisioning mfa verify-backup [code]\\nprovisioning mfa totp enroll # Re-enroll new device # MFA reset (admin only)\\nprovisioning admin mfa-reset user@example.com --reason \\"Lost all devices\\" # Check MFA compliance\\nprovisioning admin mfa-report","breadcrumbs":"MFA Admin Setup Guide » Emergency Procedures","id":"3376","title":"Emergency Procedures"},"3377":{"body":"","breadcrumbs":"MFA Admin Setup Guide » Summary Checklist","id":"3377","title":"Summary Checklist"},"3378":{"body":"Complete initial login with password Enroll TOTP (Google Authenticator, Authy) Verify TOTP code successfully Save backup codes in password manager Register WebAuthn device (YubiKey or Touch ID) Test full login flow with MFA Store backup codes in secure location Verify production access works with MFA","breadcrumbs":"MFA Admin Setup Guide » For New Admins","id":"3378","title":"For New Admins"},"3379":{"body":"Deploy Cedar MFA enforcement policies Verify 100% admin MFA enrollment Configure MFA audit logging Setup MFA compliance reports (SOC2, ISO 27001) Document MFA reset procedures Train admins on MFA usage Create emergency admin account (break-glass) Schedule quarterly MFA audits","breadcrumbs":"MFA Admin Setup Guide » For Security Team","id":"3379","title":"For Security Team"},"338":{"body":"Create multiple servers at once: servers = [ {hostname = \\"web-01\\", cores = 2, memory = 4096}, {hostname = \\"web-02\\", cores = 2, memory = 4096}, {hostname = \\"db-01\\", cores = 4, memory = 8192}\\n] provisioning server create --infra my-infra --servers web-01,web-02,db-01","breadcrumbs":"First Deployment » Pattern 1: Multiple Servers","id":"338","title":"Pattern 1: Multiple Servers"},"3380":{"body":"Configure MFA settings in config/mfa.toml Deploy Cedar policies with MFA requirements Setup monitoring for MFA failures Configure alerts for MFA bypass attempts Document MFA architecture in ADR Test MFA enforcement in all environments Verify audit logs capture MFA events Create runbooks for MFA incidents","breadcrumbs":"MFA Admin Setup Guide » For Platform Team","id":"3380","title":"For Platform Team"},"3381":{"body":"","breadcrumbs":"MFA Admin Setup Guide » Support and Resources","id":"3381","title":"Support and Resources"},"3382":{"body":"MFA Implementation : /docs/architecture/MFA_IMPLEMENTATION_SUMMARY.md Cedar Policies : /docs/operations/CEDAR_POLICIES_PRODUCTION_GUIDE.md Break-Glass : /docs/operations/BREAK_GLASS_TRAINING_GUIDE.md Audit Logging : /docs/architecture/AUDIT_LOGGING_IMPLEMENTATION.md","breadcrumbs":"MFA Admin Setup Guide » Documentation","id":"3382","title":"Documentation"},"3383":{"body":"MFA Config : provisioning/config/mfa.toml Cedar Policies : provisioning/config/cedar-policies/production.cedar Control Center : provisioning/platform/control-center/config.toml","breadcrumbs":"MFA Admin Setup Guide » Configuration Files","id":"3383","title":"Configuration Files"},"3384":{"body":"provisioning mfa help # MFA command help\\nprovisioning mfa totp --help # TOTP-specific help\\nprovisioning mfa webauthn --help # WebAuthn-specific help","breadcrumbs":"MFA Admin Setup Guide » CLI Help","id":"3384","title":"CLI Help"},"3385":{"body":"Security Team : security@example.com Platform Team : platform@example.com Support Ticket : https://support.example.com Document Status : ✅ Complete Review Date : 2025-11-08 Maintained By : Security Team, Platform Team","breadcrumbs":"MFA Admin Setup Guide » Contact","id":"3385","title":"Contact"},"3386":{"body":"A Rust-based orchestrator service that coordinates infrastructure provisioning workflows with pluggable storage backends and comprehensive migration tools. Source : provisioning/platform/orchestrator/","breadcrumbs":"Orchestrator » Provisioning Orchestrator","id":"3386","title":"Provisioning Orchestrator"},"3387":{"body":"The orchestrator implements a hybrid multi-storage approach: Rust Orchestrator : Handles coordination, queuing, and parallel execution Nushell Scripts : Execute the actual provisioning logic Pluggable Storage : Multiple storage backends with seamless migration REST API : HTTP interface for workflow submission and monitoring","breadcrumbs":"Orchestrator » Architecture","id":"3387","title":"Architecture"},"3388":{"body":"Multi-Storage Backends : Filesystem, SurrealDB Embedded, and SurrealDB Server options Task Queue : Priority-based task scheduling with retry logic Seamless Migration : Move data between storage backends with zero downtime Feature Flags : Compile-time backend selection for minimal dependencies Parallel Execution : Multiple tasks can run concurrently Status Tracking : Real-time task status and progress monitoring Advanced Features : Authentication, audit logging, and metrics (SurrealDB) Nushell Integration : Seamless execution of existing provisioning scripts RESTful API : HTTP endpoints for workflow management Test Environment Service : Automated containerized testing for taskservs, servers, and clusters Multi-Node Support : Test complex topologies including Kubernetes and etcd clusters Docker Integration : Automated container lifecycle management via Docker API","breadcrumbs":"Orchestrator » Key Features","id":"3388","title":"Key Features"},"3389":{"body":"","breadcrumbs":"Orchestrator » Quick Start","id":"3389","title":"Quick Start"},"339":{"body":"Install multiple services on one server: provisioning taskserv create kubernetes,cilium,postgres --infra my-infra --servers web-01","breadcrumbs":"First Deployment » Pattern 2: Server with Multiple Task Services","id":"339","title":"Pattern 2: Server with Multiple Task Services"},"3390":{"body":"Default Build (Filesystem Only) : cd provisioning/platform/orchestrator\\ncargo build --release\\ncargo run -- --port 8080 --data-dir ./data With SurrealDB Support : cargo build --release --features surrealdb # Run with SurrealDB embedded\\ncargo run --features surrealdb -- --storage-type surrealdb-embedded --data-dir ./data # Run with SurrealDB server\\ncargo run --features surrealdb -- --storage-type surrealdb-server \\\\ --surrealdb-url ws://localhost:8000 \\\\ --surrealdb-username admin --surrealdb-password secret","breadcrumbs":"Orchestrator » Build and Run","id":"3390","title":"Build and Run"},"3391":{"body":"curl -X POST http://localhost:8080/workflows/servers/create \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"infra\\": \\"production\\", \\"settings\\": \\"./settings.yaml\\", \\"servers\\": [\\"web-01\\", \\"web-02\\"], \\"check_mode\\": false, \\"wait\\": true }\'","breadcrumbs":"Orchestrator » Submit Workflow","id":"3391","title":"Submit Workflow"},"3392":{"body":"","breadcrumbs":"Orchestrator » API Endpoints","id":"3392","title":"API Endpoints"},"3393":{"body":"GET /health - Service health status GET /tasks - List all tasks GET /tasks/{id} - Get specific task status","breadcrumbs":"Orchestrator » Core Endpoints","id":"3393","title":"Core Endpoints"},"3394":{"body":"POST /workflows/servers/create - Submit server creation workflow POST /workflows/taskserv/create - Submit taskserv creation workflow POST /workflows/cluster/create - Submit cluster creation workflow","breadcrumbs":"Orchestrator » Workflow Endpoints","id":"3394","title":"Workflow Endpoints"},"3395":{"body":"POST /test/environments/create - Create test environment GET /test/environments - List all test environments GET /test/environments/{id} - Get environment details POST /test/environments/{id}/run - Run tests in environment DELETE /test/environments/{id} - Cleanup test environment GET /test/environments/{id}/logs - Get environment logs","breadcrumbs":"Orchestrator » Test Environment Endpoints","id":"3395","title":"Test Environment Endpoints"},"3396":{"body":"The orchestrator includes a comprehensive test environment service for automated containerized testing.","breadcrumbs":"Orchestrator » Test Environment Service","id":"3396","title":"Test Environment Service"},"3397":{"body":"1. Single Taskserv Test individual taskserv in isolated container. 2. Server Simulation Test complete server configurations with multiple taskservs. 3. Cluster Topology Test multi-node cluster configurations (Kubernetes, etcd, etc.).","breadcrumbs":"Orchestrator » Test Environment Types","id":"3397","title":"Test Environment Types"},"3398":{"body":"# Quick test\\nprovisioning test quick kubernetes # Single taskserv test\\nprovisioning test env single postgres --auto-start --auto-cleanup # Server simulation\\nprovisioning test env server web-01 [containerd kubernetes cilium] --auto-start # Cluster from template\\nprovisioning test topology load kubernetes_3node | test env cluster kubernetes","breadcrumbs":"Orchestrator » Nushell CLI Integration","id":"3398","title":"Nushell CLI Integration"},"3399":{"body":"Predefined multi-node cluster topologies: kubernetes_3node : 3-node HA Kubernetes cluster kubernetes_single : All-in-one Kubernetes node etcd_cluster : 3-member etcd cluster containerd_test : Standalone containerd testing postgres_redis : Database stack testing","breadcrumbs":"Orchestrator » Topology Templates","id":"3399","title":"Topology Templates"},"34":{"body":"Automated containerized testing Multi-node cluster topologies CI/CD integration ready Template-based configurations","breadcrumbs":"Home » 🧪 Test Environment Service (v3.4.0)","id":"34","title":"🧪 Test Environment Service (v3.4.0)"},"340":{"body":"Deploy a complete cluster configuration: provisioning cluster create buildkit --infra my-infra","breadcrumbs":"First Deployment » Pattern 3: Complete Cluster","id":"340","title":"Pattern 3: Complete Cluster"},"3400":{"body":"Feature Filesystem SurrealDB Embedded SurrealDB Server Dependencies None Local database Remote server Auth/RBAC Basic Advanced Advanced Real-time No Yes Yes Scalability Limited Medium High Complexity Low Medium High Best For Development Production Distributed","breadcrumbs":"Orchestrator » Storage Backends","id":"3400","title":"Storage Backends"},"3401":{"body":"User Guide : Test Environment Guide Architecture : Orchestrator Architecture Feature Summary : Orchestrator Features","breadcrumbs":"Orchestrator » Related Documentation","id":"3401","title":"Related Documentation"},"3402":{"body":"","breadcrumbs":"Orchestrator System » Hybrid Orchestrator Architecture (v3.0.0)","id":"3402","title":"Hybrid Orchestrator Architecture (v3.0.0)"},"3403":{"body":"A production-ready hybrid Rust/Nushell orchestrator has been implemented to solve deep call stack limitations while preserving all Nushell business logic.","breadcrumbs":"Orchestrator System » 🚀 Orchestrator Implementation Completed (2025-09-25)","id":"3403","title":"🚀 Orchestrator Implementation Completed (2025-09-25)"},"3404":{"body":"Rust Orchestrator : High-performance coordination layer with REST API Nushell Business Logic : All existing scripts preserved and enhanced File-based Persistence : Reliable task queue using lightweight file storage Priority Processing : Intelligent task scheduling with retry logic Deep Call Stack Solution : Eliminates template.nu:71 \\"Type not supported\\" errors","breadcrumbs":"Orchestrator System » Architecture Overview","id":"3404","title":"Architecture Overview"},"3405":{"body":"# Start orchestrator in background\\ncd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background --provisioning-path \\"/usr/local/bin/provisioning\\" # Check orchestrator status\\n./scripts/start-orchestrator.nu --check # Stop orchestrator\\n./scripts/start-orchestrator.nu --stop # View logs\\ntail -f ./data/orchestrator.log","breadcrumbs":"Orchestrator System » Orchestrator Management","id":"3405","title":"Orchestrator Management"},"3406":{"body":"The orchestrator provides comprehensive workflow management:","breadcrumbs":"Orchestrator System » Workflow System","id":"3406","title":"Workflow System"},"3407":{"body":"# Submit server creation workflow\\nnu -c \\"use core/nulib/workflows/server_create.nu *; server_create_workflow \'wuji\' \'\' [] --check\\" # Traditional orchestrated server creation\\nprovisioning servers create --orchestrated --check","breadcrumbs":"Orchestrator System » Server Workflows","id":"3407","title":"Server Workflows"},"3408":{"body":"# Create taskserv workflow\\nnu -c \\"use core/nulib/workflows/taskserv.nu *; taskserv create \'kubernetes\' \'wuji\' --check\\" # Other taskserv operations\\nnu -c \\"use core/nulib/workflows/taskserv.nu *; taskserv delete \'kubernetes\' \'wuji\' --check\\"\\nnu -c \\"use core/nulib/workflows/taskserv.nu *; taskserv generate \'kubernetes\' \'wuji\'\\"\\nnu -c \\"use core/nulib/workflows/taskserv.nu *; taskserv check-updates\\"","breadcrumbs":"Orchestrator System » Taskserv Workflows","id":"3408","title":"Taskserv Workflows"},"3409":{"body":"# Create cluster workflow\\nnu -c \\"use core/nulib/workflows/cluster.nu *; cluster create \'buildkit\' \'wuji\' --check\\" # Delete cluster workflow\\nnu -c \\"use core/nulib/workflows/cluster.nu *; cluster delete \'buildkit\' \'wuji\' --check\\"","breadcrumbs":"Orchestrator System » Cluster Workflows","id":"3409","title":"Cluster Workflows"},"341":{"body":"The typical deployment workflow: # 1. Initialize workspace\\nprovisioning workspace init production # 2. Generate infrastructure\\nprovisioning generate infra --new prod-infra # 3. Configure (edit settings.ncl)\\n$EDITOR workspace/infra/prod-infra/settings.ncl # 4. Validate configuration\\nprovisioning validate config --infra prod-infra # 5. Create servers (check mode)\\nprovisioning server create --infra prod-infra --check # 6. Create servers (real)\\nprovisioning server create --infra prod-infra # 7. Install task services\\nprovisioning taskserv create kubernetes --infra prod-infra --wait # 8. Deploy cluster (if needed)\\nprovisioning cluster create my-cluster --infra prod-infra # 9. Verify\\nprovisioning server list\\nprovisioning taskserv list","breadcrumbs":"First Deployment » Deployment Workflow","id":"341","title":"Deployment Workflow"},"3410":{"body":"# List all workflows\\nnu -c \\"use core/nulib/workflows/management.nu *; workflow list\\" # Get workflow statistics\\nnu -c \\"use core/nulib/workflows/management.nu *; workflow stats\\" # Monitor workflow in real-time\\nnu -c \\"use core/nulib/workflows/management.nu *; workflow monitor \\" # Check orchestrator health\\nnu -c \\"use core/nulib/workflows/management.nu *; workflow orchestrator\\" # Get specific workflow status\\nnu -c \\"use core/nulib/workflows/management.nu *; workflow status \\"","breadcrumbs":"Orchestrator System » Workflow Management","id":"3410","title":"Workflow Management"},"3411":{"body":"The orchestrator exposes HTTP endpoints for external integration: Health : GET http://localhost:9090/v1/health List Tasks : GET http://localhost:9090/v1/tasks Task Status : GET http://localhost:9090/v1/tasks/{id} Server Workflow : POST http://localhost:9090/v1/workflows/servers/create Taskserv Workflow : POST http://localhost:9090/v1/workflows/taskserv/create Cluster Workflow : POST http://localhost:9090/v1/workflows/cluster/create","breadcrumbs":"Orchestrator System » REST API Endpoints","id":"3411","title":"REST API Endpoints"},"3412":{"body":"A comprehensive Cedar policy engine implementation with advanced security features, compliance checking, and anomaly detection. Source : provisioning/platform/control-center/","breadcrumbs":"Control Center » Control Center - Cedar Policy Engine","id":"3412","title":"Control Center - Cedar Policy Engine"},"3413":{"body":"","breadcrumbs":"Control Center » Key Features","id":"3413","title":"Key Features"},"3414":{"body":"Policy Evaluation : High-performance policy evaluation with context injection Versioning : Complete policy versioning with rollback capabilities Templates : Configuration-driven policy templates with variable substitution Validation : Comprehensive policy validation with syntax and semantic checking","breadcrumbs":"Control Center » Cedar Policy Engine","id":"3414","title":"Cedar Policy Engine"},"3415":{"body":"JWT Authentication : Secure token-based authentication Multi-Factor Authentication : MFA support for sensitive operations Role-Based Access Control : Flexible RBAC with policy integration Session Management : Secure session handling with timeouts","breadcrumbs":"Control Center » Security & Authentication","id":"3415","title":"Security & Authentication"},"3416":{"body":"SOC2 Type II : Complete SOC2 compliance validation HIPAA : Healthcare data protection compliance Audit Trail : Comprehensive audit logging and reporting Impact Analysis : Policy change impact assessment","breadcrumbs":"Control Center » Compliance Framework","id":"3416","title":"Compliance Framework"},"3417":{"body":"Statistical Analysis : Multiple statistical methods (Z-Score, IQR, Isolation Forest) Real-time Detection : Continuous monitoring of policy evaluations Alert Management : Configurable alerting through multiple channels Baseline Learning : Adaptive baseline calculation for improved accuracy","breadcrumbs":"Control Center » Anomaly Detection","id":"3417","title":"Anomaly Detection"},"3418":{"body":"SurrealDB Integration : High-performance graph database backend Policy Storage : Versioned policy storage with metadata Metrics Storage : Policy evaluation metrics and analytics Compliance Records : Complete compliance audit trails","breadcrumbs":"Control Center » Storage & Persistence","id":"3418","title":"Storage & Persistence"},"3419":{"body":"","breadcrumbs":"Control Center » Quick Start","id":"3419","title":"Quick Start"},"342":{"body":"","breadcrumbs":"First Deployment » Troubleshooting","id":"342","title":"Troubleshooting"},"3420":{"body":"cd provisioning/platform/control-center\\ncargo build --release","breadcrumbs":"Control Center » Installation","id":"3420","title":"Installation"},"3421":{"body":"Copy and edit the configuration: cp config.toml.example config.toml Configuration example: [database]\\nurl = \\"surreal://localhost:8000\\"\\nusername = \\"root\\"\\npassword = \\"your-password\\" [auth]\\njwt_secret = \\"your-super-secret-key\\"\\nrequire_mfa = true [compliance.soc2]\\nenabled = true [anomaly]\\nenabled = true\\ndetection_threshold = 2.5","breadcrumbs":"Control Center » Configuration","id":"3421","title":"Configuration"},"3422":{"body":"./target/release/control-center server --port 8080","breadcrumbs":"Control Center » Start Server","id":"3422","title":"Start Server"},"3423":{"body":"curl -X POST http://localhost:8080/policies/evaluate \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"principal\\": {\\"id\\": \\"user123\\", \\"roles\\": [\\"Developer\\"]}, \\"action\\": {\\"id\\": \\"access\\"}, \\"resource\\": {\\"id\\": \\"sensitive-db\\", \\"classification\\": \\"confidential\\"}, \\"context\\": {\\"mfa_enabled\\": true, \\"location\\": \\"US\\"} }\'","breadcrumbs":"Control Center » Test Policy Evaluation","id":"3423","title":"Test Policy Evaluation"},"3424":{"body":"","breadcrumbs":"Control Center » Policy Examples","id":"3424","title":"Policy Examples"},"3425":{"body":"permit( principal, action == Action::\\"access\\", resource\\n) when { resource has classification && resource.classification in [\\"sensitive\\", \\"confidential\\"] && principal has mfa_enabled && principal.mfa_enabled == true\\n};","breadcrumbs":"Control Center » Multi-Factor Authentication Policy","id":"3425","title":"Multi-Factor Authentication Policy"},"3426":{"body":"permit( principal, action in [Action::\\"deploy\\", Action::\\"modify\\", Action::\\"delete\\"], resource\\n) when { resource has environment && resource.environment == \\"production\\" && principal has approval && principal.approval.approved_by in [\\"ProductionAdmin\\", \\"SRE\\"]\\n};","breadcrumbs":"Control Center » Production Approval Policy","id":"3426","title":"Production Approval Policy"},"3427":{"body":"permit( principal, action, resource\\n) when { context has geo && context.geo has country && context.geo.country in [\\"US\\", \\"CA\\", \\"GB\\", \\"DE\\"]\\n};","breadcrumbs":"Control Center » Geographic Restrictions","id":"3427","title":"Geographic Restrictions"},"3428":{"body":"","breadcrumbs":"Control Center » CLI Commands","id":"3428","title":"CLI Commands"},"3429":{"body":"# Validate policies\\ncontrol-center policy validate policies/ # Test policy with test data\\ncontrol-center policy test policies/mfa.cedar tests/data/mfa_test.json # Analyze policy impact\\ncontrol-center policy impact policies/new_policy.cedar","breadcrumbs":"Control Center » Policy Management","id":"3429","title":"Policy Management"},"343":{"body":"# Check logs\\nprovisioning server logs dev-server-01 # Try with debug mode\\nprovisioning --debug server create --infra my-infra","breadcrumbs":"First Deployment » Server Creation Fails","id":"343","title":"Server Creation Fails"},"3430":{"body":"# Check SOC2 compliance\\ncontrol-center compliance soc2 # Check HIPAA compliance\\ncontrol-center compliance hipaa # Generate compliance report\\ncontrol-center compliance report --format html","breadcrumbs":"Control Center » Compliance Checking","id":"3430","title":"Compliance Checking"},"3431":{"body":"","breadcrumbs":"Control Center » API Endpoints","id":"3431","title":"API Endpoints"},"3432":{"body":"POST /policies/evaluate - Evaluate policy decision GET /policies - List all policies POST /policies - Create new policy PUT /policies/{id} - Update policy DELETE /policies/{id} - Delete policy","breadcrumbs":"Control Center » Policy Evaluation","id":"3432","title":"Policy Evaluation"},"3433":{"body":"GET /policies/{id}/versions - List policy versions GET /policies/{id}/versions/{version} - Get specific version POST /policies/{id}/rollback/{version} - Rollback to version","breadcrumbs":"Control Center » Policy Versions","id":"3433","title":"Policy Versions"},"3434":{"body":"GET /compliance/soc2 - SOC2 compliance check GET /compliance/hipaa - HIPAA compliance check GET /compliance/report - Generate compliance report","breadcrumbs":"Control Center » Compliance","id":"3434","title":"Compliance"},"3435":{"body":"GET /anomalies - List detected anomalies GET /anomalies/{id} - Get anomaly details POST /anomalies/detect - Trigger anomaly detection","breadcrumbs":"Control Center » Anomaly Detection","id":"3435","title":"Anomaly Detection"},"3436":{"body":"","breadcrumbs":"Control Center » Architecture","id":"3436","title":"Architecture"},"3437":{"body":"Policy Engine (src/policies/engine.rs) Cedar policy evaluation Context injection Caching and optimization Storage Layer (src/storage/) SurrealDB integration Policy versioning Metrics storage Compliance Framework (src/compliance/) SOC2 checker HIPAA validator Report generation Anomaly Detection (src/anomaly/) Statistical analysis Real-time monitoring Alert management Authentication (src/auth.rs) JWT token management Password hashing Session handling","breadcrumbs":"Control Center » Core Components","id":"3437","title":"Core Components"},"3438":{"body":"The system follows PAP (Project Architecture Principles) with: No hardcoded values : All behavior controlled via configuration Dynamic loading : Policies and rules loaded from configuration Template-based : Policy generation through templates Environment-aware : Different configs for dev/test/prod","breadcrumbs":"Control Center » Configuration-Driven Design","id":"3438","title":"Configuration-Driven Design"},"3439":{"body":"","breadcrumbs":"Control Center » Deployment","id":"3439","title":"Deployment"},"344":{"body":"# Check task service logs\\nprovisioning taskserv logs kubernetes # Retry installation\\nprovisioning taskserv create kubernetes --infra my-infra --force","breadcrumbs":"First Deployment » Task Service Installation Fails","id":"344","title":"Task Service Installation Fails"},"3440":{"body":"FROM rust:1.75 as builder\\nWORKDIR /app\\nCOPY . .\\nRUN cargo build --release FROM debian:bookworm-slim\\nRUN apt-get update && apt-get install -y ca-certificates\\nCOPY --from=builder /app/target/release/control-center /usr/local/bin/\\nEXPOSE 8080\\nCMD [\\"control-center\\", \\"server\\"]","breadcrumbs":"Control Center » Docker","id":"3440","title":"Docker"},"3441":{"body":"apiVersion: apps/v1\\nkind: Deployment\\nmetadata: name: control-center\\nspec: replicas: 3 template: spec: containers: - name: control-center image: control-center:latest ports: - containerPort: 8080 env: - name: DATABASE_URL value: \\"surreal://surrealdb:8000\\"","breadcrumbs":"Control Center » Kubernetes","id":"3441","title":"Kubernetes"},"3442":{"body":"Architecture : Cedar Authorization User Guide : Authentication Layer","breadcrumbs":"Control Center » Related Documentation","id":"3442","title":"Related Documentation"},"3443":{"body":"Interactive Ratatui-based installer for the Provisioning Platform with Nushell fallback for automation. Source : provisioning/platform/installer/ Status : COMPLETE - All 7 UI screens implemented (1,480 lines)","breadcrumbs":"Installer » Provisioning Platform Installer","id":"3443","title":"Provisioning Platform Installer"},"3444":{"body":"Rich Interactive TUI : Beautiful Ratatui interface with real-time feedback Headless Mode : Automation-friendly with Nushell scripts One-Click Deploy : Single command to deploy entire platform Platform Agnostic : Supports Docker, Podman, Kubernetes, OrbStack Live Progress : Real-time deployment progress and logs Health Checks : Automatic service health verification","breadcrumbs":"Installer » Features","id":"3444","title":"Features"},"3445":{"body":"cd provisioning/platform/installer\\ncargo build --release\\ncargo install --path .","breadcrumbs":"Installer » Installation","id":"3445","title":"Installation"},"3446":{"body":"","breadcrumbs":"Installer » Usage","id":"3446","title":"Usage"},"3447":{"body":"provisioning-installer The TUI guides you through: Platform detection (Docker, Podman, K8s, OrbStack) Deployment mode selection (Solo, Multi-User, CI/CD, Enterprise) Service selection (check/uncheck services) Configuration (domain, ports, secrets) Live deployment with progress tracking Success screen with access URLs","breadcrumbs":"Installer » Interactive TUI (Default)","id":"3447","title":"Interactive TUI (Default)"},"3448":{"body":"# Quick deploy with auto-detection\\nprovisioning-installer --headless --mode solo --yes # Fully specified\\nprovisioning-installer \\\\ --headless \\\\ --platform orbstack \\\\ --mode solo \\\\ --services orchestrator,control-center,coredns \\\\ --domain localhost \\\\ --yes # Use existing config file\\nprovisioning-installer --headless --config my-deployment.toml --yes","breadcrumbs":"Installer » Headless Mode (Automation)","id":"3448","title":"Headless Mode (Automation)"},"3449":{"body":"# Generate config without deploying\\nprovisioning-installer --config-only # Deploy later with generated config\\nprovisioning-installer --headless --config ~/.provisioning/installer-config.toml --yes","breadcrumbs":"Installer » Configuration Generation","id":"3449","title":"Configuration Generation"},"345":{"body":"# Verify SSH key\\nls -la ~/.ssh/ # Test SSH manually\\nssh -v user@ # Use provisioning SSH helper\\nprovisioning server ssh dev-server-01 --debug","breadcrumbs":"First Deployment » SSH Connection Issues","id":"345","title":"SSH Connection Issues"},"3450":{"body":"","breadcrumbs":"Installer » Deployment Platforms","id":"3450","title":"Deployment Platforms"},"3451":{"body":"provisioning-installer --platform docker --mode solo Requirements : Docker 20.10+, docker-compose 2.0+","breadcrumbs":"Installer » Docker Compose","id":"3451","title":"Docker Compose"},"3452":{"body":"provisioning-installer --platform orbstack --mode solo Requirements : OrbStack installed, 4 GB RAM, 2 CPU cores","breadcrumbs":"Installer » OrbStack (macOS)","id":"3452","title":"OrbStack (macOS)"},"3453":{"body":"provisioning-installer --platform podman --mode solo Requirements : Podman 4.0+, systemd","breadcrumbs":"Installer » Podman (Rootless)","id":"3453","title":"Podman (Rootless)"},"3454":{"body":"provisioning-installer --platform kubernetes --mode enterprise Requirements : kubectl configured, Helm 3.0+","breadcrumbs":"Installer » Kubernetes","id":"3454","title":"Kubernetes"},"3455":{"body":"","breadcrumbs":"Installer » Deployment Modes","id":"3455","title":"Deployment Modes"},"3456":{"body":"Services : 5 core services Resources : 2 CPU cores, 4 GB RAM, 20 GB disk Use case : Single developer, local testing","breadcrumbs":"Installer » Solo Mode (Development)","id":"3456","title":"Solo Mode (Development)"},"3457":{"body":"Services : 7 services Resources : 4 CPU cores, 8 GB RAM, 50 GB disk Use case : Team collaboration, shared infrastructure","breadcrumbs":"Installer » Multi-User Mode (Team)","id":"3457","title":"Multi-User Mode (Team)"},"3458":{"body":"Services : 8-10 services Resources : 8 CPU cores, 16 GB RAM, 100 GB disk Use case : Automated pipelines, webhooks","breadcrumbs":"Installer » CI/CD Mode (Automation)","id":"3458","title":"CI/CD Mode (Automation)"},"3459":{"body":"Services : 15+ services Resources : 16 CPU cores, 32 GB RAM, 500 GB disk Use case : Production deployments, full observability","breadcrumbs":"Installer » Enterprise Mode (Production)","id":"3459","title":"Enterprise Mode (Production)"},"346":{"body":"Now that you\'ve completed your first deployment: → Verification - Verify your deployment is working correctly","breadcrumbs":"First Deployment » Next Steps","id":"346","title":"Next Steps"},"3460":{"body":"provisioning-installer [OPTIONS] OPTIONS: --headless Run in headless mode (no TUI) --mode Deployment mode [solo|multi-user|cicd|enterprise] --platform Target platform [docker|podman|kubernetes|orbstack] --services Comma-separated list of services --domain Domain/hostname (default: localhost) --yes, -y Skip confirmation prompts --config-only Generate config without deploying --config Use existing config file -h, --help Print help -V, --version Print version","breadcrumbs":"Installer » CLI Options","id":"3460","title":"CLI Options"},"3461":{"body":"","breadcrumbs":"Installer » CI/CD Integration","id":"3461","title":"CI/CD Integration"},"3462":{"body":"deploy_platform: stage: deploy script: - provisioning-installer --headless --mode cicd --platform kubernetes --yes only: - main","breadcrumbs":"Installer » GitLab CI","id":"3462","title":"GitLab CI"},"3463":{"body":"- name: Deploy Provisioning Platform run: | provisioning-installer --headless --mode cicd --platform docker --yes","breadcrumbs":"Installer » GitHub Actions","id":"3463","title":"GitHub Actions"},"3464":{"body":"If the Rust binary is unavailable: cd provisioning/platform/installer/scripts\\nnu deploy.nu --mode solo --platform orbstack --yes","breadcrumbs":"Installer » Nushell Scripts (Fallback)","id":"3464","title":"Nushell Scripts (Fallback)"},"3465":{"body":"Deployment Guide : Platform Deployment Architecture : Platform Overview","breadcrumbs":"Installer » Related Documentation","id":"3465","title":"Related Documentation"},"3466":{"body":"","breadcrumbs":"Installer System » Provisioning Platform Installer (v3.5.0)","id":"3466","title":"Provisioning Platform Installer (v3.5.0)"},"3467":{"body":"A comprehensive installer system supporting interactive, headless, and unattended deployment modes with automatic configuration management via TOML and MCP integration.","breadcrumbs":"Installer System » 🚀 Flexible Installation and Configuration System","id":"3467","title":"🚀 Flexible Installation and Configuration System"},"3468":{"body":"","breadcrumbs":"Installer System » Installation Modes","id":"3468","title":"Installation Modes"},"3469":{"body":"Beautiful terminal user interface with step-by-step guidance. provisioning-installer Features : 7 interactive screens with progress tracking Real-time validation and error feedback Visual feedback for each configuration step Beautiful formatting with color and styling Nushell fallback for unsupported terminals Screens : Welcome and prerequisites check Deployment mode selection Infrastructure provider selection Configuration details Resource allocation (CPU, memory) Security settings Review and confirm","breadcrumbs":"Installer System » 1. Interactive TUI Mode","id":"3469","title":"1. Interactive TUI Mode"},"347":{"body":"Complete Deployment Guide Infrastructure Management Troubleshooting Guide","breadcrumbs":"First Deployment » Additional Resources","id":"347","title":"Additional Resources"},"3470":{"body":"CLI-only installation without interactive prompts, suitable for scripting. provisioning-installer --headless --mode solo --yes Features : Fully automated CLI options All settings via command-line flags No user interaction required Perfect for CI/CD pipelines Verbose output with progress tracking Common Usage : # Solo deployment\\nprovisioning-installer --headless --mode solo --provider upcloud --yes # Multi-user deployment\\nprovisioning-installer --headless --mode multiuser --cpu 4 --memory 8192 --yes # CI/CD mode\\nprovisioning-installer --headless --mode cicd --config ci-config.toml --yes","breadcrumbs":"Installer System » 2. Headless Mode","id":"3470","title":"2. Headless Mode"},"3471":{"body":"Zero-interaction mode using pre-defined configuration files, ideal for infrastructure automation. provisioning-installer --unattended --config config.toml Features : Load all settings from TOML file Complete automation for GitOps workflows No user interaction or prompts Suitable for production deployments Comprehensive logging and audit trails","breadcrumbs":"Installer System » 3. Unattended Mode","id":"3471","title":"3. Unattended Mode"},"3472":{"body":"Each mode configures resource allocation and features appropriately: Mode CPUs Memory Use Case Solo 2 4 GB Single user development MultiUser 4 8 GB Team development, testing CICD 8 16 GB CI/CD pipelines, testing Enterprise 16 32 GB Production deployment","breadcrumbs":"Installer System » Deployment Modes","id":"3472","title":"Deployment Modes"},"3473":{"body":"","breadcrumbs":"Installer System » Configuration System","id":"3473","title":"Configuration System"},"3474":{"body":"Define installation parameters in TOML format for unattended mode: [installation]\\nmode = \\"solo\\" # solo, multiuser, cicd, enterprise\\nprovider = \\"upcloud\\" # upcloud, aws, etc. [resources]\\ncpu = 2000 # millicores\\nmemory = 4096 # MB\\ndisk = 50 # GB [security]\\nenable_mfa = true\\nenable_audit = true\\ntls_enabled = true [mcp]\\nenabled = true\\nendpoint = \\"http://localhost:9090\\"","breadcrumbs":"Installer System » TOML Configuration","id":"3474","title":"TOML Configuration"},"3475":{"body":"Settings are loaded in this order (highest priority wins): CLI Arguments - Direct command-line flags Environment Variables - PROVISIONING_* variables Configuration File - TOML file specified via --config MCP Integration - AI-powered intelligent defaults Built-in Defaults - System defaults","breadcrumbs":"Installer System » Configuration Loading Priority","id":"3475","title":"Configuration Loading Priority"},"3476":{"body":"Model Context Protocol integration provides intelligent configuration: 7 AI-Powered Settings Tools : Resource recommendation engine Provider selection helper Security policy suggester Performance optimizer Compliance checker Network configuration advisor Monitoring setup assistant # Use MCP for intelligent config suggestion\\nprovisioning-installer --unattended --mcp-suggest > config.toml","breadcrumbs":"Installer System » MCP Integration","id":"3476","title":"MCP Integration"},"3477":{"body":"","breadcrumbs":"Installer System » Deployment Automation","id":"3477","title":"Deployment Automation"},"3478":{"body":"Complete deployment automation scripts for popular container runtimes: # Docker deployment\\n./provisioning/platform/installer/deploy/docker.nu --config config.toml # Podman deployment\\n./provisioning/platform/installer/deploy/podman.nu --config config.toml # Kubernetes deployment\\n./provisioning/platform/installer/deploy/kubernetes.nu --config config.toml # OrbStack deployment\\n./provisioning/platform/installer/deploy/orbstack.nu --config config.toml","breadcrumbs":"Installer System » Nushell Scripts","id":"3478","title":"Nushell Scripts"},"3479":{"body":"Infrastructure components can query MCP and install themselves: # Taskservs auto-install with dependencies\\ntaskserv install-self kubernetes\\ntaskserv install-self prometheus\\ntaskserv install-self cilium","breadcrumbs":"Installer System » Self-Installation","id":"3479","title":"Self-Installation"},"348":{"body":"This guide helps you verify that your Provisioning Platform deployment is working correctly.","breadcrumbs":"Verification » Verification","id":"348","title":"Verification"},"3480":{"body":"# Show interactive installer\\nprovisioning-installer # Show help\\nprovisioning-installer --help # Show available modes\\nprovisioning-installer --list-modes # Show available providers\\nprovisioning-installer --list-providers # List available templates\\nprovisioning-installer --list-templates # Validate configuration file\\nprovisioning-installer --validate --config config.toml # Dry-run (check without installing)\\nprovisioning-installer --config config.toml --check # Full unattended installation\\nprovisioning-installer --unattended --config config.toml # Headless with specific settings\\nprovisioning-installer --headless --mode solo --provider upcloud --cpu 2 --memory 4096 --yes","breadcrumbs":"Installer System » Command Reference","id":"3480","title":"Command Reference"},"3481":{"body":"","breadcrumbs":"Installer System » Integration Examples","id":"3481","title":"Integration Examples"},"3482":{"body":"# Define in Git\\ncat > infrastructure/installer.toml << EOF\\n[installation]\\nmode = \\"multiuser\\"\\nprovider = \\"upcloud\\" [resources]\\ncpu = 4\\nmemory = 8192\\nEOF # Deploy via CI/CD\\nprovisioning-installer --unattended --config infrastructure/installer.toml","breadcrumbs":"Installer System » GitOps Workflow","id":"3482","title":"GitOps Workflow"},"3483":{"body":"# Call installer as part of Terraform provisioning\\nresource \\"null_resource\\" \\"provisioning_installer\\" { provisioner \\"local-exec\\" { command = \\"provisioning-installer --unattended --config ${var.config_file}\\" }\\n}","breadcrumbs":"Installer System » Terraform Integration","id":"3483","title":"Terraform Integration"},"3484":{"body":"- name: Run provisioning installer shell: provisioning-installer --unattended --config /tmp/config.toml vars: ansible_python_interpreter: /usr/bin/python3","breadcrumbs":"Installer System » Ansible Integration","id":"3484","title":"Ansible Integration"},"3485":{"body":"Pre-built templates available in provisioning/config/installer-templates/: solo-dev.toml - Single developer setup team-test.toml - Team testing environment cicd-pipeline.toml - CI/CD integration enterprise-prod.toml - Production deployment kubernetes-ha.toml - High-availability Kubernetes multicloud.toml - Multi-provider setup","breadcrumbs":"Installer System » Configuration Templates","id":"3485","title":"Configuration Templates"},"3486":{"body":"User Guide : user/provisioning-installer-guide.md Deployment Guide : operations/installer-deployment-guide.md Configuration Guide : infrastructure/installer-configuration-guide.md","breadcrumbs":"Installer System » Documentation","id":"3486","title":"Documentation"},"3487":{"body":"# Show installer help\\nprovisioning-installer --help # Show detailed documentation\\nprovisioning help installer # Validate your configuration\\nprovisioning-installer --validate --config your-config.toml # Get configuration suggestions from MCP\\nprovisioning-installer --config-suggest","breadcrumbs":"Installer System » Help and Support","id":"3487","title":"Help and Support"},"3488":{"body":"If Ratatui TUI is not available, the installer automatically falls back to: Interactive Nushell prompt system Same functionality, text-based interface Full feature parity with TUI version","breadcrumbs":"Installer System » Nushell Fallback","id":"3488","title":"Nushell Fallback"},"3489":{"body":"A comprehensive REST API server for remote provisioning operations, enabling thin clients and CI/CD pipeline integration. Source : provisioning/platform/provisioning-server/","breadcrumbs":"Provisioning Server » Provisioning API Server","id":"3489","title":"Provisioning API Server"},"349":{"body":"After completing your first deployment, verify: System configuration Server accessibility Task service health Platform services (if installed)","breadcrumbs":"Verification » Overview","id":"349","title":"Overview"},"3490":{"body":"Comprehensive REST API : Complete provisioning operations via HTTP JWT Authentication : Secure token-based authentication RBAC System : Role-based access control (Admin, Operator, Developer, Viewer) Async Operations : Long-running tasks with status tracking Nushell Integration : Direct execution of provisioning CLI commands Audit Logging : Complete operation tracking for compliance Metrics : Prometheus-compatible metrics endpoint CORS Support : Configurable cross-origin resource sharing Health Checks : Built-in health and readiness endpoints","breadcrumbs":"Provisioning Server » Features","id":"3490","title":"Features"},"3491":{"body":"┌─────────────────┐\\n│ REST Client │\\n│ (curl, CI/CD) │\\n└────────┬────────┘ │ HTTPS/JWT ▼\\n┌─────────────────┐\\n│ API Gateway │\\n│ - Routes │\\n│ - Auth │\\n│ - RBAC │\\n└────────┬────────┘ │ ▼\\n┌─────────────────┐\\n│ Async Task Mgr │\\n│ - Queue │\\n│ - Status │\\n└────────┬────────┘ │ ▼\\n┌─────────────────┐\\n│ Nushell Exec │\\n│ - CLI wrapper │\\n│ - Timeout │\\n└─────────────────┘","breadcrumbs":"Provisioning Server » Architecture","id":"3491","title":"Architecture"},"3492":{"body":"cd provisioning/platform/provisioning-server\\ncargo build --release","breadcrumbs":"Provisioning Server » Installation","id":"3492","title":"Installation"},"3493":{"body":"Create config.toml: [server]\\nhost = \\"0.0.0.0\\"\\nport = 8083\\ncors_enabled = true [auth]\\njwt_secret = \\"your-secret-key-here\\"\\ntoken_expiry_hours = 24\\nrefresh_token_expiry_hours = 168 [provisioning]\\ncli_path = \\"/usr/local/bin/provisioning\\"\\ntimeout_seconds = 300\\nmax_concurrent_operations = 10 [logging]\\nlevel = \\"info\\"\\njson_format = false","breadcrumbs":"Provisioning Server » Configuration","id":"3493","title":"Configuration"},"3494":{"body":"","breadcrumbs":"Provisioning Server » Usage","id":"3494","title":"Usage"},"3495":{"body":"# Using config file\\nprovisioning-server --config config.toml # Custom settings\\nprovisioning-server \\\\ --host 0.0.0.0 \\\\ --port 8083 \\\\ --jwt-secret \\"my-secret\\" \\\\ --cli-path \\"/usr/local/bin/provisioning\\" \\\\ --log-level debug","breadcrumbs":"Provisioning Server » Starting the Server","id":"3495","title":"Starting the Server"},"3496":{"body":"Login curl -X POST http://localhost:8083/v1/auth/login \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"username\\": \\"admin\\", \\"password\\": \\"admin123\\" }\' Response: { \\"token\\": \\"eyJhbGc...\\", \\"refresh_token\\": \\"eyJhbGc...\\", \\"expires_in\\": 86400\\n} Using Token export TOKEN=\\"eyJhbGc...\\" curl -X GET http://localhost:8083/v1/servers \\\\ -H \\"Authorization: Bearer $TOKEN\\"","breadcrumbs":"Provisioning Server » Authentication","id":"3496","title":"Authentication"},"3497":{"body":"","breadcrumbs":"Provisioning Server » API Endpoints","id":"3497","title":"API Endpoints"},"3498":{"body":"POST /v1/auth/login - User login POST /v1/auth/refresh - Refresh access token","breadcrumbs":"Provisioning Server » Authentication","id":"3498","title":"Authentication"},"3499":{"body":"GET /v1/servers - List all servers POST /v1/servers/create - Create new server DELETE /v1/servers/{id} - Delete server GET /v1/servers/{id}/status - Get server status","breadcrumbs":"Provisioning Server » Servers","id":"3499","title":"Servers"},"35":{"body":"Centralized workspace management Single-command workspace switching Active workspace tracking User preference system","breadcrumbs":"Home » 🔄 Workspace Switching (v2.0.5)","id":"35","title":"🔄 Workspace Switching (v2.0.5)"},"350":{"body":"Check that all configuration is valid: # Validate all configuration\\nprovisioning validate config # Expected output:\\n# ✓ Configuration valid\\n# ✓ No errors found\\n# ✓ All required fields present # Check environment variables\\nprovisioning env # View complete configuration\\nprovisioning allenv","breadcrumbs":"Verification » Step 1: Verify Configuration","id":"350","title":"Step 1: Verify Configuration"},"3500":{"body":"GET /v1/taskservs - List all taskservs POST /v1/taskservs/create - Create taskserv DELETE /v1/taskservs/{id} - Delete taskserv GET /v1/taskservs/{id}/status - Get taskserv status","breadcrumbs":"Provisioning Server » Taskservs","id":"3500","title":"Taskservs"},"3501":{"body":"POST /v1/workflows/submit - Submit workflow GET /v1/workflows/{id} - Get workflow details GET /v1/workflows/{id}/status - Get workflow status POST /v1/workflows/{id}/cancel - Cancel workflow","breadcrumbs":"Provisioning Server » Workflows","id":"3501","title":"Workflows"},"3502":{"body":"GET /v1/operations - List all operations GET /v1/operations/{id} - Get operation status POST /v1/operations/{id}/cancel - Cancel operation","breadcrumbs":"Provisioning Server » Operations","id":"3502","title":"Operations"},"3503":{"body":"GET /health - Health check (no auth required) GET /v1/version - Version information GET /v1/metrics - Prometheus metrics","breadcrumbs":"Provisioning Server » System","id":"3503","title":"System"},"3504":{"body":"","breadcrumbs":"Provisioning Server » RBAC Roles","id":"3504","title":"RBAC Roles"},"3505":{"body":"Full system access including all operations, workspace management, and system administration.","breadcrumbs":"Provisioning Server » Admin Role","id":"3505","title":"Admin Role"},"3506":{"body":"Infrastructure operations including create/delete servers, taskservs, clusters, and workflow management.","breadcrumbs":"Provisioning Server » Operator Role","id":"3506","title":"Operator Role"},"3507":{"body":"Read access plus SSH to servers, view workflows and operations.","breadcrumbs":"Provisioning Server » Developer Role","id":"3507","title":"Developer Role"},"3508":{"body":"Read-only access to all resources and status information.","breadcrumbs":"Provisioning Server » Viewer Role","id":"3508","title":"Viewer Role"},"3509":{"body":"Change Default Credentials : Update all default usernames/passwords Use Strong JWT Secret : Generate secure random string (32+ characters) Enable TLS : Use HTTPS in production Restrict CORS : Configure specific allowed origins Enable mTLS : For client certificate authentication Regular Token Rotation : Implement token refresh strategy Audit Logging : Enable audit logs for compliance","breadcrumbs":"Provisioning Server » Security Best Practices","id":"3509","title":"Security Best Practices"},"351":{"body":"Check that servers are accessible and healthy: # List all servers\\nprovisioning server list # Expected output:\\n# ┌───────────────┬──────────┬───────┬────────┬──────────────┬──────────┐\\n# │ Hostname │ Provider │ Cores │ Memory │ IP Address │ Status │\\n# ├───────────────┼──────────┼───────┼────────┼──────────────┼──────────┤\\n# │ dev-server-01 │ local │ 2 │ 4096 │ 192.168.1.100│ running │\\n# └───────────────┴──────────┴───────┴────────┴──────────────┴──────────┘ # Check server details\\nprovisioning server info dev-server-01 # Test SSH connectivity\\nprovisioning server ssh dev-server-01 -- echo \\"SSH working\\"","breadcrumbs":"Verification » Step 2: Verify Servers","id":"351","title":"Step 2: Verify Servers"},"3510":{"body":"","breadcrumbs":"Provisioning Server » CI/CD Integration","id":"3510","title":"CI/CD Integration"},"3511":{"body":"- name: Deploy Infrastructure run: | TOKEN=$(curl -X POST https://api.example.com/v1/auth/login \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{\\"username\\":\\"${{ secrets.API_USER }}\\",\\"password\\":\\"${{ secrets.API_PASS }}\\"}\' \\\\ | jq -r \'.token\') curl -X POST https://api.example.com/v1/servers/create \\\\ -H \\"Authorization: Bearer $TOKEN\\" \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{\\"workspace\\": \\"production\\", \\"provider\\": \\"upcloud\\", \\"plan\\": \\"2xCPU-4 GB\\"}\'","breadcrumbs":"Provisioning Server » GitHub Actions","id":"3511","title":"GitHub Actions"},"3512":{"body":"API Reference : REST API Documentation Architecture : API Gateway Integration","breadcrumbs":"Provisioning Server » Related Documentation","id":"3512","title":"Related Documentation"},"3513":{"body":"This comprehensive guide covers creating, managing, and maintaining infrastructure using Infrastructure Automation.","breadcrumbs":"Infrastructure Management » Infrastructure Management Guide","id":"3513","title":"Infrastructure Management Guide"},"3514":{"body":"Infrastructure lifecycle management Server provisioning and management Task service installation and configuration Cluster deployment and orchestration Scaling and optimization strategies Monitoring and maintenance procedures Cost management and optimization","breadcrumbs":"Infrastructure Management » What You\'ll Learn","id":"3514","title":"What You\'ll Learn"},"3515":{"body":"","breadcrumbs":"Infrastructure Management » Infrastructure Concepts","id":"3515","title":"Infrastructure Concepts"},"3516":{"body":"Component Description Examples Servers Virtual machines or containers Web servers, databases, workers Task Services Software installed on servers Kubernetes, Docker, databases Clusters Groups of related services Web clusters, database clusters Networks Connectivity between resources VPCs, subnets, load balancers Storage Persistent data storage Block storage, object storage","breadcrumbs":"Infrastructure Management » Infrastructure Components","id":"3516","title":"Infrastructure Components"},"3517":{"body":"Plan → Create → Deploy → Monitor → Scale → Update → Retire Each phase has specific commands and considerations.","breadcrumbs":"Infrastructure Management » Infrastructure Lifecycle","id":"3517","title":"Infrastructure Lifecycle"},"3518":{"body":"","breadcrumbs":"Infrastructure Management » Server Management","id":"3518","title":"Server Management"},"3519":{"body":"Servers are defined in Nickel configuration files: # Example server configuration\\nimport models.server servers: [ server.Server { name = \\"web-01\\" provider = \\"aws\\" # aws, upcloud, local plan = \\"t3.medium\\" # Instance type/plan os = \\"ubuntu-22.04\\" # Operating system zone = \\"us-west-2a\\" # Availability zone # Network configuration vpc = \\"main\\" subnet = \\"web\\" security_groups = [\\"web\\", \\"ssh\\"] # Storage configuration storage = { root_size = \\"50 GB\\" additional = [ {name = \\"data\\", size = \\"100 GB\\", type = \\"gp3\\"} ] } # Task services to install taskservs = [ \\"containerd\\", \\"kubernetes\\", \\"monitoring\\" ] # Tags for organization tags = { environment = \\"production\\" team = \\"platform\\" cost_center = \\"engineering\\" } }\\n]","breadcrumbs":"Infrastructure Management » Understanding Server Configuration","id":"3519","title":"Understanding Server Configuration"},"352":{"body":"Check installed task services: # List task services\\nprovisioning taskserv list # Expected output:\\n# ┌────────────┬─────────┬────────────────┬──────────┐\\n# │ Name │ Version │ Server │ Status │\\n# ├────────────┼─────────┼────────────────┼──────────┤\\n# │ containerd │ 1.7.0 │ dev-server-01 │ running │\\n# │ etcd │ 3.5.0 │ dev-server-01 │ running │\\n# │ kubernetes │ 1.28.0 │ dev-server-01 │ running │\\n# └────────────┴─────────┴────────────────┴──────────┘ # Check specific task service\\nprovisioning taskserv status kubernetes # View task service logs\\nprovisioning taskserv logs kubernetes --tail 50","breadcrumbs":"Verification » Step 3: Verify Task Services","id":"352","title":"Step 3: Verify Task Services"},"3520":{"body":"Creating Servers # Plan server creation (dry run)\\nprovisioning server create --infra my-infra --check # Create servers\\nprovisioning server create --infra my-infra # Create with specific parameters\\nprovisioning server create --infra my-infra --wait --yes # Create single server type\\nprovisioning server create web --infra my-infra Managing Existing Servers # List all servers\\nprovisioning server list --infra my-infra # Show detailed server information\\nprovisioning show servers --infra my-infra # Show specific server\\nprovisioning show servers web-01 --infra my-infra # Get server status\\nprovisioning server status web-01 --infra my-infra Server Operations # Start/stop servers\\nprovisioning server start web-01 --infra my-infra\\nprovisioning server stop web-01 --infra my-infra # Restart servers\\nprovisioning server restart web-01 --infra my-infra # Resize server\\nprovisioning server resize web-01 --plan t3.large --infra my-infra # Update server configuration\\nprovisioning server update web-01 --infra my-infra SSH Access # SSH to server\\nprovisioning server ssh web-01 --infra my-infra # SSH with specific user\\nprovisioning server ssh web-01 --user admin --infra my-infra # Execute command on server\\nprovisioning server exec web-01 \\"systemctl status kubernetes\\" --infra my-infra # Copy files to/from server\\nprovisioning server copy local-file.txt web-01:/tmp/ --infra my-infra\\nprovisioning server copy web-01:/var/log/app.log ./logs/ --infra my-infra Server Deletion # Plan server deletion (dry run)\\nprovisioning server delete --infra my-infra --check # Delete specific server\\nprovisioning server delete web-01 --infra my-infra # Delete with confirmation\\nprovisioning server delete web-01 --infra my-infra --yes # Delete but keep storage\\nprovisioning server delete web-01 --infra my-infra --keepstorage","breadcrumbs":"Infrastructure Management » Server Lifecycle Commands","id":"3520","title":"Server Lifecycle Commands"},"3521":{"body":"","breadcrumbs":"Infrastructure Management » Task Service Management","id":"3521","title":"Task Service Management"},"3522":{"body":"Task services are software components installed on servers: Container Runtimes : containerd, cri-o, docker Orchestration : kubernetes, nomad Networking : cilium, calico, haproxy Storage : rook-ceph, longhorn, nfs Databases : postgresql, mysql, mongodb Monitoring : prometheus, grafana, alertmanager","breadcrumbs":"Infrastructure Management » Understanding Task Services","id":"3522","title":"Understanding Task Services"},"3523":{"body":"# Task service configuration example\\ntaskservs: { kubernetes: { version = \\"1.28\\" network_plugin = \\"cilium\\" ingress_controller = \\"nginx\\" storage_class = \\"gp3\\" # Cluster configuration cluster = { name = \\"production\\" pod_cidr = \\"10.244.0.0/16\\" service_cidr = \\"10.96.0.0/12\\" } # Node configuration nodes = { control_plane = [\\"master-01\\", \\"master-02\\", \\"master-03\\"] workers = [\\"worker-01\\", \\"worker-02\\", \\"worker-03\\"] } } postgresql: { version = \\"15\\" port = 5432 max_connections = 200 shared_buffers = \\"256 MB\\" # High availability replication = { enabled = true replicas = 2 sync_mode = \\"synchronous\\" } # Backup configuration backup = { enabled = true schedule = \\"0 2 * * *\\" # Daily at 2 AM retention = \\"30d\\" } }\\n}","breadcrumbs":"Infrastructure Management » Task Service Configuration","id":"3523","title":"Task Service Configuration"},"3524":{"body":"Installing Services # Install single service\\nprovisioning taskserv create kubernetes --infra my-infra # Install multiple services\\nprovisioning taskserv create containerd kubernetes cilium --infra my-infra # Install with specific version\\nprovisioning taskserv create kubernetes --version 1.28 --infra my-infra # Install on specific servers\\nprovisioning taskserv create postgresql --servers db-01,db-02 --infra my-infra Managing Services # List available services\\nprovisioning taskserv list # List installed services\\nprovisioning taskserv list --infra my-infra --installed # Show service details\\nprovisioning taskserv show kubernetes --infra my-infra # Check service status\\nprovisioning taskserv status kubernetes --infra my-infra # Check service health\\nprovisioning taskserv health kubernetes --infra my-infra Service Operations # Start/stop services\\nprovisioning taskserv start kubernetes --infra my-infra\\nprovisioning taskserv stop kubernetes --infra my-infra # Restart services\\nprovisioning taskserv restart kubernetes --infra my-infra # Update services\\nprovisioning taskserv update kubernetes --infra my-infra # Configure services\\nprovisioning taskserv configure kubernetes --config cluster.yaml --infra my-infra Service Removal # Remove service\\nprovisioning taskserv delete kubernetes --infra my-infra # Remove with data cleanup\\nprovisioning taskserv delete postgresql --cleanup-data --infra my-infra # Remove from specific servers\\nprovisioning taskserv delete kubernetes --servers worker-03 --infra my-infra","breadcrumbs":"Infrastructure Management » Task Service Commands","id":"3524","title":"Task Service Commands"},"3525":{"body":"# Check for updates\\nprovisioning taskserv check-updates --infra my-infra # Check specific service updates\\nprovisioning taskserv check-updates kubernetes --infra my-infra # Show available versions\\nprovisioning taskserv versions kubernetes # Upgrade to latest version\\nprovisioning taskserv upgrade kubernetes --infra my-infra # Upgrade to specific version\\nprovisioning taskserv upgrade kubernetes --version 1.29 --infra my-infra","breadcrumbs":"Infrastructure Management » Version Management","id":"3525","title":"Version Management"},"3526":{"body":"","breadcrumbs":"Infrastructure Management » Cluster Management","id":"3526","title":"Cluster Management"},"3527":{"body":"Clusters are collections of services that work together to provide functionality: # Cluster configuration example\\nclusters: { web_cluster: { name = \\"web-application\\" description = \\"Web application cluster\\" # Services in the cluster services = [ { name = \\"nginx\\" replicas = 3 image = \\"nginx:1.24\\" ports = [80, 443] } { name = \\"app\\" replicas = 5 image = \\"myapp:latest\\" ports = [8080] } ] # Load balancer configuration load_balancer = { type = \\"application\\" health_check = \\"/health\\" ssl_cert = \\"wildcard.example.com\\" } # Auto-scaling auto_scaling = { min_replicas = 2 max_replicas = 10 target_cpu = 70 target_memory = 80 } }\\n}","breadcrumbs":"Infrastructure Management » Understanding Clusters","id":"3527","title":"Understanding Clusters"},"3528":{"body":"Creating Clusters # Create cluster\\nprovisioning cluster create web-cluster --infra my-infra # Create with specific configuration\\nprovisioning cluster create web-cluster --config cluster.yaml --infra my-infra # Create and deploy\\nprovisioning cluster create web-cluster --deploy --infra my-infra Managing Clusters # List available clusters\\nprovisioning cluster list # List deployed clusters\\nprovisioning cluster list --infra my-infra --deployed # Show cluster details\\nprovisioning cluster show web-cluster --infra my-infra # Get cluster status\\nprovisioning cluster status web-cluster --infra my-infra Cluster Operations # Deploy cluster\\nprovisioning cluster deploy web-cluster --infra my-infra # Scale cluster\\nprovisioning cluster scale web-cluster --replicas 10 --infra my-infra # Update cluster\\nprovisioning cluster update web-cluster --infra my-infra # Rolling update\\nprovisioning cluster update web-cluster --rolling --infra my-infra Cluster Deletion # Delete cluster\\nprovisioning cluster delete web-cluster --infra my-infra # Delete with data cleanup\\nprovisioning cluster delete web-cluster --cleanup --infra my-infra","breadcrumbs":"Infrastructure Management » Cluster Commands","id":"3528","title":"Cluster Commands"},"3529":{"body":"","breadcrumbs":"Infrastructure Management » Network Management","id":"3529","title":"Network Management"},"353":{"body":"If you installed Kubernetes, verify it\'s working: # Check Kubernetes nodes\\nprovisioning server ssh dev-server-01 -- kubectl get nodes # Expected output:\\n# NAME STATUS ROLES AGE VERSION\\n# dev-server-01 Ready control-plane 10m v1.28.0 # Check Kubernetes pods\\nprovisioning server ssh dev-server-01 -- kubectl get pods -A # All pods should be Running or Completed","breadcrumbs":"Verification » Step 4: Verify Kubernetes (If Installed)","id":"353","title":"Step 4: Verify Kubernetes (If Installed)"},"3530":{"body":"# Network configuration\\nnetwork: { vpc = { cidr = \\"10.0.0.0/16\\" enable_dns = true enable_dhcp = true } subnets = [ { name = \\"web\\" cidr = \\"10.0.1.0/24\\" zone = \\"us-west-2a\\" public = true } { name = \\"app\\" cidr = \\"10.0.2.0/24\\" zone = \\"us-west-2b\\" public = false } { name = \\"data\\" cidr = \\"10.0.3.0/24\\" zone = \\"us-west-2c\\" public = false } ] security_groups = [ { name = \\"web\\" rules = [ {protocol = \\"tcp\\", port = 80, source = \\"0.0.0.0/0\\"} {protocol = \\"tcp\\", port = 443, source = \\"0.0.0.0/0\\"} ] } { name = \\"app\\" rules = [ {protocol = \\"tcp\\", port = 8080, source = \\"10.0.1.0/24\\"} ] } ] load_balancers = [ { name = \\"web-lb\\" type = \\"application\\" scheme = \\"internet-facing\\" subnets = [\\"web\\"] targets = [\\"web-01\\", \\"web-02\\"] } ]\\n}","breadcrumbs":"Infrastructure Management » Network Configuration","id":"3530","title":"Network Configuration"},"3531":{"body":"# Show network configuration\\nprovisioning network show --infra my-infra # Create network resources\\nprovisioning network create --infra my-infra # Update network configuration\\nprovisioning network update --infra my-infra # Test network connectivity\\nprovisioning network test --infra my-infra","breadcrumbs":"Infrastructure Management » Network Commands","id":"3531","title":"Network Commands"},"3532":{"body":"","breadcrumbs":"Infrastructure Management » Storage Management","id":"3532","title":"Storage Management"},"3533":{"body":"# Storage configuration\\nstorage: { # Block storage volumes = [ { name = \\"app-data\\" size = \\"100 GB\\" type = \\"gp3\\" encrypted = true } ] # Object storage buckets = [ { name = \\"app-assets\\" region = \\"us-west-2\\" versioning = true encryption = \\"AES256\\" } ] # Backup configuration backup = { schedule = \\"0 1 * * *\\" # Daily at 1 AM retention = { daily = 7 weekly = 4 monthly = 12 } }\\n}","breadcrumbs":"Infrastructure Management » Storage Configuration","id":"3533","title":"Storage Configuration"},"3534":{"body":"# Create storage resources\\nprovisioning storage create --infra my-infra # List storage\\nprovisioning storage list --infra my-infra # Backup data\\nprovisioning storage backup --infra my-infra # Restore from backup\\nprovisioning storage restore --backup latest --infra my-infra","breadcrumbs":"Infrastructure Management » Storage Commands","id":"3534","title":"Storage Commands"},"3535":{"body":"","breadcrumbs":"Infrastructure Management » Monitoring and Observability","id":"3535","title":"Monitoring and Observability"},"3536":{"body":"# Install monitoring stack\\nprovisioning taskserv create prometheus --infra my-infra\\nprovisioning taskserv create grafana --infra my-infra\\nprovisioning taskserv create alertmanager --infra my-infra # Configure monitoring\\nprovisioning taskserv configure prometheus --config monitoring.yaml --infra my-infra","breadcrumbs":"Infrastructure Management » Monitoring Setup","id":"3536","title":"Monitoring Setup"},"3537":{"body":"# Check overall infrastructure health\\nprovisioning health check --infra my-infra # Check specific components\\nprovisioning health check servers --infra my-infra\\nprovisioning health check taskservs --infra my-infra\\nprovisioning health check clusters --infra my-infra # Continuous monitoring\\nprovisioning health monitor --infra my-infra --watch","breadcrumbs":"Infrastructure Management » Health Checks","id":"3537","title":"Health Checks"},"3538":{"body":"# Get infrastructure metrics\\nprovisioning metrics get --infra my-infra # Set up alerts\\nprovisioning alerts create --config alerts.yaml --infra my-infra # List active alerts\\nprovisioning alerts list --infra my-infra","breadcrumbs":"Infrastructure Management » Metrics and Alerting","id":"3538","title":"Metrics and Alerting"},"3539":{"body":"","breadcrumbs":"Infrastructure Management » Cost Management","id":"3539","title":"Cost Management"},"354":{"body":"If you installed platform services:","breadcrumbs":"Verification » Step 5: Verify Platform Services (Optional)","id":"354","title":"Step 5: Verify Platform Services (Optional)"},"3540":{"body":"# Show current costs\\nprovisioning cost show --infra my-infra # Cost breakdown by component\\nprovisioning cost breakdown --infra my-infra # Cost trends\\nprovisioning cost trends --period 30d --infra my-infra # Set cost alerts\\nprovisioning cost alert --threshold 1000 --infra my-infra","breadcrumbs":"Infrastructure Management » Cost Monitoring","id":"3540","title":"Cost Monitoring"},"3541":{"body":"# Analyze cost optimization opportunities\\nprovisioning cost optimize --infra my-infra # Show unused resources\\nprovisioning cost unused --infra my-infra # Right-size recommendations\\nprovisioning cost recommendations --infra my-infra","breadcrumbs":"Infrastructure Management » Cost Optimization","id":"3541","title":"Cost Optimization"},"3542":{"body":"","breadcrumbs":"Infrastructure Management » Scaling Strategies","id":"3542","title":"Scaling Strategies"},"3543":{"body":"# Scale servers\\nprovisioning server scale --count 5 --infra my-infra # Scale specific service\\nprovisioning taskserv scale kubernetes --nodes 3 --infra my-infra # Scale cluster\\nprovisioning cluster scale web-cluster --replicas 10 --infra my-infra","breadcrumbs":"Infrastructure Management » Manual Scaling","id":"3543","title":"Manual Scaling"},"3544":{"body":"# Auto-scaling configuration\\nauto_scaling: { servers = { min_count = 2 max_count = 10 # Scaling metrics cpu_threshold = 70 memory_threshold = 80 # Scaling behavior scale_up_cooldown = \\"5m\\" scale_down_cooldown = \\"10m\\" } clusters = { web_cluster = { min_replicas = 3 max_replicas = 20 metrics = [ {type = \\"cpu\\", target = 70} {type = \\"memory\\", target = 80} {type = \\"requests\\", target = 1000} ] } }\\n}","breadcrumbs":"Infrastructure Management » Auto-scaling Configuration","id":"3544","title":"Auto-scaling Configuration"},"3545":{"body":"","breadcrumbs":"Infrastructure Management » Disaster Recovery","id":"3545","title":"Disaster Recovery"},"3546":{"body":"# Full infrastructure backup\\nprovisioning backup create --type full --infra my-infra # Incremental backup\\nprovisioning backup create --type incremental --infra my-infra # Schedule automated backups\\nprovisioning backup schedule --daily --time \\"02:00\\" --infra my-infra","breadcrumbs":"Infrastructure Management » Backup Strategies","id":"3546","title":"Backup Strategies"},"3547":{"body":"# List available backups\\nprovisioning backup list --infra my-infra # Restore infrastructure\\nprovisioning restore --backup latest --infra my-infra # Partial restore\\nprovisioning restore --backup latest --components servers --infra my-infra # Test restore (dry run)\\nprovisioning restore --backup latest --test --infra my-infra","breadcrumbs":"Infrastructure Management » Recovery Procedures","id":"3547","title":"Recovery Procedures"},"3548":{"body":"","breadcrumbs":"Infrastructure Management » Advanced Infrastructure Patterns","id":"3548","title":"Advanced Infrastructure Patterns"},"3549":{"body":"# Multi-region configuration\\nregions: { primary = { name = \\"us-west-2\\" servers = [\\"web-01\\", \\"web-02\\", \\"db-01\\"] availability_zones = [\\"us-west-2a\\", \\"us-west-2b\\"] } secondary = { name = \\"us-east-1\\" servers = [\\"web-03\\", \\"web-04\\", \\"db-02\\"] availability_zones = [\\"us-east-1a\\", \\"us-east-1b\\"] } # Cross-region replication replication = { database = { primary = \\"us-west-2\\" replicas = [\\"us-east-1\\"] sync_mode = \\"async\\" } storage = { sync_schedule = \\"*/15 * * * *\\" # Every 15 minutes } }\\n}","breadcrumbs":"Infrastructure Management » Multi-Region Deployment","id":"3549","title":"Multi-Region Deployment"},"355":{"body":"# Check orchestrator health\\ncurl http://localhost:8080/health # Expected:\\n# {\\"status\\":\\"healthy\\",\\"version\\":\\"0.1.0\\"} # List tasks\\ncurl http://localhost:8080/tasks","breadcrumbs":"Verification » Orchestrator","id":"355","title":"Orchestrator"},"3550":{"body":"# Create green environment\\nprovisioning generate infra --from production --name production-green # Deploy to green\\nprovisioning server create --infra production-green\\nprovisioning taskserv create --infra production-green\\nprovisioning cluster deploy --infra production-green # Switch traffic to green\\nprovisioning network switch --from production --to production-green # Decommission blue\\nprovisioning server delete --infra production --yes","breadcrumbs":"Infrastructure Management » Blue-Green Deployment","id":"3550","title":"Blue-Green Deployment"},"3551":{"body":"# Create canary environment\\nprovisioning cluster create web-cluster-canary --replicas 1 --infra my-infra # Route small percentage of traffic\\nprovisioning network route --target web-cluster-canary --weight 10 --infra my-infra # Monitor canary metrics\\nprovisioning metrics monitor web-cluster-canary --infra my-infra # Promote or rollback\\nprovisioning cluster promote web-cluster-canary --infra my-infra\\n# or\\nprovisioning cluster rollback web-cluster-canary --infra my-infra","breadcrumbs":"Infrastructure Management » Canary Deployment","id":"3551","title":"Canary Deployment"},"3552":{"body":"","breadcrumbs":"Infrastructure Management » Troubleshooting Infrastructure","id":"3552","title":"Troubleshooting Infrastructure"},"3553":{"body":"Server Creation Failures # Check provider status\\nprovisioning provider status aws # Validate server configuration\\nprovisioning server validate web-01 --infra my-infra # Check quota limits\\nprovisioning provider quota --infra my-infra # Debug server creation\\nprovisioning --debug server create web-01 --infra my-infra Service Installation Failures # Check service prerequisites\\nprovisioning taskserv check kubernetes --infra my-infra # Validate service configuration\\nprovisioning taskserv validate kubernetes --infra my-infra # Check service logs\\nprovisioning taskserv logs kubernetes --infra my-infra # Debug service installation\\nprovisioning --debug taskserv create kubernetes --infra my-infra Network Connectivity Issues # Test network connectivity\\nprovisioning network test --infra my-infra # Check security groups\\nprovisioning network security-groups --infra my-infra # Trace network path\\nprovisioning network trace --from web-01 --to db-01 --infra my-infra","breadcrumbs":"Infrastructure Management » Common Issues","id":"3553","title":"Common Issues"},"3554":{"body":"# Analyze performance bottlenecks\\nprovisioning performance analyze --infra my-infra # Get performance recommendations\\nprovisioning performance recommendations --infra my-infra # Monitor resource utilization\\nprovisioning performance monitor --infra my-infra --duration 1h","breadcrumbs":"Infrastructure Management » Performance Optimization","id":"3554","title":"Performance Optimization"},"3555":{"body":"The provisioning system includes a comprehensive Test Environment Service for automated testing of infrastructure components before deployment.","breadcrumbs":"Infrastructure Management » Testing Infrastructure","id":"3555","title":"Testing Infrastructure"},"3556":{"body":"Testing infrastructure before production deployment helps: Validate taskserv configurations before installing on production servers Test integration between multiple taskservs Verify cluster topologies (Kubernetes, etcd, etc.) before deployment Catch configuration errors early in the development cycle Ensure compatibility between components","breadcrumbs":"Infrastructure Management » Why Test Infrastructure","id":"3556","title":"Why Test Infrastructure"},"3557":{"body":"1. Single Taskserv Testing Test individual taskservs in isolated containers: # Quick test (create, run, cleanup automatically)\\nprovisioning test quick kubernetes # Single taskserv with custom resources\\nprovisioning test env single postgres \\\\ --cpu 2000 \\\\ --memory 4096 \\\\ --auto-start \\\\ --auto-cleanup # Test with specific infrastructure context\\nprovisioning test env single redis --infra my-infra 2. Server Simulation Test complete server configurations with multiple taskservs: # Simulate web server with multiple taskservs\\nprovisioning test env server web-01 [containerd kubernetes cilium] \\\\ --auto-start # Simulate database server\\nprovisioning test env server db-01 [postgres redis] \\\\ --infra prod-stack \\\\ --auto-start 3. Multi-Node Cluster Testing Test complex cluster topologies before production deployment: # Test 3-node Kubernetes cluster\\nprovisioning test topology load kubernetes_3node | \\\\ test env cluster kubernetes --auto-start # Test etcd cluster\\nprovisioning test topology load etcd_cluster | \\\\ test env cluster etcd --auto-start # Test single-node Kubernetes\\nprovisioning test topology load kubernetes_single | \\\\ test env cluster kubernetes --auto-start","breadcrumbs":"Infrastructure Management » Test Environment Types","id":"3557","title":"Test Environment Types"},"3558":{"body":"# List all test environments\\nprovisioning test env list # Check environment status\\nprovisioning test env status # View environment logs\\nprovisioning test env logs # Cleanup environment when done\\nprovisioning test env cleanup ","breadcrumbs":"Infrastructure Management » Managing Test Environments","id":"3558","title":"Managing Test Environments"},"3559":{"body":"Pre-configured multi-node cluster templates: Template Description Use Case kubernetes_3node 3-node HA K8s cluster Production-like K8s testing kubernetes_single All-in-one K8s node Development K8s testing etcd_cluster 3-member etcd cluster Distributed consensus testing containerd_test Standalone containerd Container runtime testing postgres_redis Database stack Database integration testing","breadcrumbs":"Infrastructure Management » Available Topology Templates","id":"3559","title":"Available Topology Templates"},"356":{"body":"# Check control center health\\ncurl http://localhost:9090/health # Test policy evaluation\\ncurl -X POST http://localhost:9090/policies/evaluate \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{\\"principal\\":{\\"id\\":\\"test\\"},\\"action\\":{\\"id\\":\\"read\\"},\\"resource\\":{\\"id\\":\\"test\\"}}\'","breadcrumbs":"Verification » Control Center","id":"356","title":"Control Center"},"3560":{"body":"Typical testing workflow: # 1. Test new taskserv before deploying\\nprovisioning test quick kubernetes # 2. If successful, test server configuration\\nprovisioning test env server k8s-node [containerd kubernetes cilium] \\\\ --auto-start # 3. Test complete cluster topology\\nprovisioning test topology load kubernetes_3node | \\\\ test env cluster kubernetes --auto-start # 4. Deploy to production\\nprovisioning server create --infra production\\nprovisioning taskserv create kubernetes --infra production","breadcrumbs":"Infrastructure Management » Test Environment Workflow","id":"3560","title":"Test Environment Workflow"},"3561":{"body":"Integrate infrastructure testing into CI/CD pipelines: # GitLab CI example\\ntest-infrastructure: stage: test script: # Start orchestrator - ./scripts/start-orchestrator.nu --background # Test critical taskservs - provisioning test quick kubernetes - provisioning test quick postgres - provisioning test quick redis # Test cluster topology - provisioning test topology load kubernetes_3node | test env cluster kubernetes --auto-start artifacts: when: on_failure paths: - test-logs/","breadcrumbs":"Infrastructure Management » CI/CD Integration","id":"3561","title":"CI/CD Integration"},"3562":{"body":"Test environments require: Docker Running : Test environments use Docker containers docker ps # Should work without errors Orchestrator Running : The orchestrator manages test containers cd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background","breadcrumbs":"Infrastructure Management » Prerequisites","id":"3562","title":"Prerequisites"},"3563":{"body":"Custom Topology Testing Create custom topology configurations: # custom-topology.toml\\n[my_cluster]\\nname = \\"Custom Test Cluster\\"\\ncluster_type = \\"custom\\" [[my_cluster.nodes]]\\nname = \\"node-01\\"\\nrole = \\"primary\\"\\ntaskservs = [\\"postgres\\", \\"redis\\"]\\n[my_cluster.nodes.resources]\\ncpu_millicores = 2000\\nmemory_mb = 4096 [[my_cluster.nodes]]\\nname = \\"node-02\\"\\nrole = \\"replica\\"\\ntaskservs = [\\"postgres\\"]\\n[my_cluster.nodes.resources]\\ncpu_millicores = 1000\\nmemory_mb = 2048 Load and test custom topology: provisioning test env cluster custom-app custom-topology.toml --auto-start Integration Testing Test taskserv dependencies: # Test Kubernetes dependencies in order\\nprovisioning test quick containerd\\nprovisioning test quick etcd\\nprovisioning test quick kubernetes\\nprovisioning test quick cilium # Test complete stack\\nprovisioning test env server k8s-stack \\\\ [containerd etcd kubernetes cilium] \\\\ --auto-start","breadcrumbs":"Infrastructure Management » Advanced Testing","id":"3563","title":"Advanced Testing"},"3564":{"body":"For complete test environment documentation: Test Environment Guide : docs/user/test-environment-guide.md Detailed Usage : docs/user/test-environment-usage.md Orchestrator README : provisioning/platform/orchestrator/README.md","breadcrumbs":"Infrastructure Management » Documentation","id":"3564","title":"Documentation"},"3565":{"body":"","breadcrumbs":"Infrastructure Management » Best Practices","id":"3565","title":"Best Practices"},"3566":{"body":"Principle of Least Privilege : Grant minimal necessary access Defense in Depth : Multiple layers of security High Availability : Design for failure resilience Scalability : Plan for growth from the start","breadcrumbs":"Infrastructure Management » 1. Infrastructure Design","id":"3566","title":"1. Infrastructure Design"},"3567":{"body":"# Always validate before applying changes\\nprovisioning validate config --infra my-infra # Use check mode for dry runs\\nprovisioning server create --check --infra my-infra # Monitor continuously\\nprovisioning health monitor --infra my-infra # Regular backups\\nprovisioning backup schedule --daily --infra my-infra","breadcrumbs":"Infrastructure Management » 2. Operational Excellence","id":"3567","title":"2. Operational Excellence"},"3568":{"body":"# Regular security updates\\nprovisioning taskserv update --security-only --infra my-infra # Encrypt sensitive data\\nprovisioning sops settings.ncl --infra my-infra # Audit access\\nprovisioning audit logs --infra my-infra","breadcrumbs":"Infrastructure Management » 3. Security","id":"3568","title":"3. Security"},"3569":{"body":"# Regular cost reviews\\nprovisioning cost analyze --infra my-infra # Right-size resources\\nprovisioning cost optimize --apply --infra my-infra # Use reserved instances for predictable workloads\\nprovisioning server reserve --infra my-infra","breadcrumbs":"Infrastructure Management » 4. Cost Optimization","id":"3569","title":"4. Cost Optimization"},"357":{"body":"# Check KMS health\\ncurl http://localhost:8082/api/v1/kms/health # Test encryption\\necho \\"test\\" | provisioning kms encrypt","breadcrumbs":"Verification » KMS Service","id":"357","title":"KMS Service"},"3570":{"body":"Now that you understand infrastructure management: Learn about extensions : Extension Development Guide Master configuration : Configuration Guide Explore advanced examples : Examples and Tutorials Set up monitoring and alerting Implement automated scaling Plan disaster recovery procedures You now have the knowledge to build and manage robust, scalable cloud infrastructure!","breadcrumbs":"Infrastructure Management » Next Steps","id":"3570","title":"Next Steps"},"3571":{"body":"","breadcrumbs":"Infrastructure from Code Guide » Infrastructure-from-Code (IaC) Guide","id":"3571","title":"Infrastructure-from-Code (IaC) Guide"},"3572":{"body":"The Infrastructure-from-Code system automatically detects technologies in your project and infers infrastructure requirements based on organization-specific rules. It consists of three main commands: detect : Scan a project and identify technologies complete : Analyze gaps and recommend infrastructure components ifc : Full-pipeline orchestration (workflow)","breadcrumbs":"Infrastructure from Code Guide » Overview","id":"3572","title":"Overview"},"3573":{"body":"","breadcrumbs":"Infrastructure from Code Guide » Quick Start","id":"3573","title":"Quick Start"},"3574":{"body":"Scan a project directory for detected technologies: provisioning detect /path/to/project --out json Output Example: { \\"detections\\": [ {\\"technology\\": \\"nodejs\\", \\"confidence\\": 0.95}, {\\"technology\\": \\"postgres\\", \\"confidence\\": 0.92} ], \\"overall_confidence\\": 0.93\\n}","breadcrumbs":"Infrastructure from Code Guide » 1. Detect Technologies in Your Project","id":"3574","title":"1. Detect Technologies in Your Project"},"3575":{"body":"Get a completeness assessment and recommendations: provisioning complete /path/to/project --out json Output Example: { \\"completeness\\": 1.0, \\"changes_needed\\": 2, \\"is_safe\\": true, \\"change_summary\\": \\"+ Adding: postgres-backup, pg-monitoring\\"\\n}","breadcrumbs":"Infrastructure from Code Guide » 2. Analyze Infrastructure Gaps","id":"3575","title":"2. Analyze Infrastructure Gaps"},"3576":{"body":"Orchestrate detection → completion → assessment pipeline: provisioning ifc /path/to/project --org default Output: ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\\n🔄 Infrastructure-from-Code Workflow\\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ STEP 1: Technology Detection\\n────────────────────────────\\n✓ Detected 2 technologies STEP 2: Infrastructure Completion\\n─────────────────────────────────\\n✓ Completeness: 1% ✅ Workflow Complete","breadcrumbs":"Infrastructure from Code Guide » 3. Run Full Workflow","id":"3576","title":"3. Run Full Workflow"},"3577":{"body":"","breadcrumbs":"Infrastructure from Code Guide » Command Reference","id":"3577","title":"Command Reference"},"3578":{"body":"Scan and detect technologies in a project. Usage: provisioning detect [PATH] [OPTIONS] Arguments: PATH: Project directory to analyze (default: current directory) Options: -o, --out TEXT: Output format - text, json, yaml (default: text) -C, --high-confidence-only: Only show detections with confidence > 0.8 --pretty: Pretty-print JSON/YAML output -x, --debug: Enable debug output Examples: # Detect with default text output\\nprovisioning detect /path/to/project # Get JSON output for parsing\\nprovisioning detect /path/to/project --out json | jq \'.detections\' # Show only high-confidence detections\\nprovisioning detect /path/to/project --high-confidence-only # Pretty-printed YAML output\\nprovisioning detect /path/to/project --out yaml --pretty","breadcrumbs":"Infrastructure from Code Guide » detect","id":"3578","title":"detect"},"3579":{"body":"Analyze infrastructure completeness and recommend changes. Usage: provisioning complete [PATH] [OPTIONS] Arguments: PATH: Project directory to analyze (default: current directory) Options: -o, --out TEXT: Output format - text, json, yaml (default: text) -c, --check: Check mode (report only, no changes) --pretty: Pretty-print JSON/YAML output -x, --debug: Enable debug output Examples: # Analyze completeness\\nprovisioning complete /path/to/project # Get detailed JSON report\\nprovisioning complete /path/to/project --out json # Check mode (dry-run, no changes)\\nprovisioning complete /path/to/project --check","breadcrumbs":"Infrastructure from Code Guide » complete","id":"3579","title":"complete"},"358":{"body":"Run comprehensive health checks: # Check all components\\nprovisioning health check # Expected output:\\n# ✓ Configuration: OK\\n# ✓ Servers: 1/1 healthy\\n# ✓ Task Services: 3/3 running\\n# ✓ Platform Services: 3/3 healthy\\n# ✓ Network Connectivity: OK\\n# ✓ Encryption Keys: OK","breadcrumbs":"Verification » Step 6: Run Health Checks","id":"358","title":"Step 6: Run Health Checks"},"3580":{"body":"Run the full Infrastructure-from-Code pipeline. Usage: provisioning ifc [PATH] [OPTIONS] Arguments: PATH: Project directory to process (default: current directory) Options: --org TEXT: Organization name for rule loading (default: default) -o, --out TEXT: Output format - text, json (default: text) --apply: Apply recommendations (future feature) -v, --verbose: Verbose output with timing --pretty: Pretty-print output -x, --debug: Enable debug output Examples: # Run workflow with default rules\\nprovisioning ifc /path/to/project # Run with organization-specific rules\\nprovisioning ifc /path/to/project --org acme-corp # Verbose output with timing\\nprovisioning ifc /path/to/project --verbose # JSON output for automation\\nprovisioning ifc /path/to/project --out json","breadcrumbs":"Infrastructure from Code Guide » ifc (workflow)","id":"3580","title":"ifc (workflow)"},"3581":{"body":"Customize how infrastructure is inferred for your organization.","breadcrumbs":"Infrastructure from Code Guide » Organization-Specific Inference Rules","id":"3581","title":"Organization-Specific Inference Rules"},"3582":{"body":"An inference rule tells the system: \\"If we detect technology X, we should recommend taskservice Y.\\" Rule Structure: version: \\"1.0.0\\"\\norganization: \\"your-org\\"\\nrules: - name: \\"rule-name\\" technology: [\\"detected-tech\\"] infers: \\"required-taskserv\\" confidence: 0.85 reason: \\"Why this taskserv is needed\\" required: true","breadcrumbs":"Infrastructure from Code Guide » Understanding Inference Rules","id":"3582","title":"Understanding Inference Rules"},"3583":{"body":"Create an organization-specific rules file: # ACME Corporation rules\\ncat > $PROVISIONING/config/inference-rules/acme-corp.yaml << \'EOF\'\\nversion: \\"1.0.0\\"\\norganization: \\"acme-corp\\"\\ndescription: \\"ACME Corporation infrastructure standards\\" rules: - name: \\"nodejs-to-redis\\" technology: [\\"nodejs\\", \\"express\\"] infers: \\"redis\\" confidence: 0.85 reason: \\"Node.js applications need caching\\" required: false - name: \\"postgres-to-backup\\" technology: [\\"postgres\\"] infers: \\"postgres-backup\\" confidence: 0.95 reason: \\"All databases require backup strategy\\" required: true - name: \\"all-services-monitoring\\" technology: [\\"nodejs\\", \\"python\\", \\"postgres\\"] infers: \\"monitoring\\" confidence: 0.90 reason: \\"ACME requires monitoring on production services\\" required: true\\nEOF Then use them: provisioning ifc /path/to/project --org acme-corp","breadcrumbs":"Infrastructure from Code Guide » Creating Custom Rules","id":"3583","title":"Creating Custom Rules"},"3584":{"body":"If no organization rules are found, the system uses sensible defaults: Node.js + Express → Redis (caching) Node.js → Nginx (reverse proxy) Database → Backup (data protection) Docker → Kubernetes (orchestration) Python → Gunicorn (WSGI server) PostgreSQL → Monitoring (production safety)","breadcrumbs":"Infrastructure from Code Guide » Default Rules","id":"3584","title":"Default Rules"},"3585":{"body":"","breadcrumbs":"Infrastructure from Code Guide » Output Formats","id":"3585","title":"Output Formats"},"3586":{"body":"Human-readable format with visual indicators: STEP 1: Technology Detection\\n────────────────────────────\\n✓ Detected 2 technologies STEP 2: Infrastructure Completion\\n─────────────────────────────────\\n✓ Completeness: 1%","breadcrumbs":"Infrastructure from Code Guide » Text Output (Default)","id":"3586","title":"Text Output (Default)"},"3587":{"body":"Structured format for automation and parsing: provisioning detect /path/to/project --out json | jq \'.detections[0]\' Output: { \\"technology\\": \\"nodejs\\", \\"confidence\\": 0.8333333134651184, \\"evidence_count\\": 1\\n}","breadcrumbs":"Infrastructure from Code Guide » JSON Output","id":"3587","title":"JSON Output"},"3588":{"body":"Alternative structured format: provisioning detect /path/to/project --out yaml","breadcrumbs":"Infrastructure from Code Guide » YAML Output","id":"3588","title":"YAML Output"},"3589":{"body":"","breadcrumbs":"Infrastructure from Code Guide » Practical Examples","id":"3589","title":"Practical Examples"},"359":{"body":"If you used workflows: # List all workflows\\nprovisioning workflow list # Check specific workflow\\nprovisioning workflow status # View workflow stats\\nprovisioning workflow stats","breadcrumbs":"Verification » Step 7: Verify Workflows","id":"359","title":"Step 7: Verify Workflows"},"3590":{"body":"# Step 1: Detect\\n$ provisioning detect my-app\\n✓ Detected: nodejs, express, postgres, docker # Step 2: Complete\\n$ provisioning complete my-app\\n✓ Changes needed: 3 - redis (caching) - nginx (reverse proxy) - pg-backup (database backup) # Step 3: Full workflow\\n$ provisioning ifc my-app --org acme-corp","breadcrumbs":"Infrastructure from Code Guide » Example 1: Node.js + PostgreSQL Project","id":"3590","title":"Example 1: Node.js + PostgreSQL Project"},"3591":{"body":"$ provisioning detect django-app --out json\\n{ \\"detections\\": [ {\\"technology\\": \\"python\\", \\"confidence\\": 0.95}, {\\"technology\\": \\"django\\", \\"confidence\\": 0.92} ]\\n} # Inferred requirements (with gunicorn, monitoring, backup)","breadcrumbs":"Infrastructure from Code Guide » Example 2: Python Django Project","id":"3591","title":"Example 2: Python Django Project"},"3592":{"body":"$ provisioning ifc microservices/ --org mycompany --verbose\\n🔍 Processing microservices/ - service-a: nodejs + postgres - service-b: python + redis - service-c: go + mongodb ✓ Detected common patterns\\n✓ Applied 12 inference rules\\n✓ Generated deployment plan","breadcrumbs":"Infrastructure from Code Guide » Example 3: Microservices Architecture","id":"3592","title":"Example 3: Microservices Architecture"},"3593":{"body":"","breadcrumbs":"Infrastructure from Code Guide » Integration with Automation","id":"3593","title":"Integration with Automation"},"3594":{"body":"#!/bin/bash\\n# Check infrastructure completeness in CI/CD PROJECT_PATH=${1:-.}\\nCOMPLETENESS=$(provisioning complete $PROJECT_PATH --out json | jq \'.completeness\') if (( $(echo \\"$COMPLETENESS < 0.9\\" | bc -l) )); then echo \\"❌ Infrastructure completeness too low: $COMPLETENESS\\" exit 1\\nfi echo \\"✅ Infrastructure is complete: $COMPLETENESS\\"","breadcrumbs":"Infrastructure from Code Guide » CI/CD Pipeline Example","id":"3594","title":"CI/CD Pipeline Example"},"3595":{"body":"# Generate JSON for infrastructure config\\nprovisioning detect /path/to/project --out json > infra-report.json # Use in your config processing\\ncat infra-report.json | jq \'.detections[]\' | while read -r tech; do echo \\"Processing technology: $tech\\"\\ndone","breadcrumbs":"Infrastructure from Code Guide » Configuration as Code Integration","id":"3595","title":"Configuration as Code Integration"},"3596":{"body":"","breadcrumbs":"Infrastructure from Code Guide » Troubleshooting","id":"3596","title":"Troubleshooting"},"3597":{"body":"Solution: Ensure the provisioning project is properly built: cd $PROVISIONING/platform\\ncargo build --release --bin provisioning-detector","breadcrumbs":"Infrastructure from Code Guide » \\"Detector binary not found\\"","id":"3597","title":"\\"Detector binary not found\\""},"3598":{"body":"Check: Project path is correct: provisioning detect /actual/path Project contains recognizable technologies (package.json, Dockerfile, requirements.txt, etc.) Use --debug flag for more details: provisioning detect /path --debug","breadcrumbs":"Infrastructure from Code Guide » No technologies detected","id":"3598","title":"No technologies detected"},"3599":{"body":"Check: Rules file exists: $PROVISIONING/config/inference-rules/{org}.yaml Organization name is correct: provisioning ifc /path --org myorg Verify rules structure with: cat $PROVISIONING/config/inference-rules/myorg.yaml","breadcrumbs":"Infrastructure from Code Guide » Organization rules not being applied","id":"3599","title":"Organization rules not being applied"},"36":{"body":"Component Technology Purpose Core CLI Nushell 0.107.1 Shell and scripting Configuration Nickel 1.0.0+ Type-safe IaC Orchestrator Rust High-performance coordination Templates Jinja2 (nu_plugin_tera) Code generation Secrets SOPS 3.10.2 + Age 1.2.1 Encryption Distribution OCI (skopeo/crane/oras) Artifact management","breadcrumbs":"Home » Technology Stack","id":"36","title":"Technology Stack"},"360":{"body":"","breadcrumbs":"Verification » Common Verification Checks","id":"360","title":"Common Verification Checks"},"3600":{"body":"","breadcrumbs":"Infrastructure from Code Guide » Advanced Usage","id":"3600","title":"Advanced Usage"},"3601":{"body":"Generate a template for a new organization: # Template will be created with proper structure\\nprovisioning rules create --org neworg","breadcrumbs":"Infrastructure from Code Guide » Custom Rule Template","id":"3601","title":"Custom Rule Template"},"3602":{"body":"# Check for syntax errors\\nprovisioning rules validate /path/to/rules.yaml","breadcrumbs":"Infrastructure from Code Guide » Validate Rule Files","id":"3602","title":"Validate Rule Files"},"3603":{"body":"Export as Rust code for embedding: provisioning rules export myorg --format rust > rules.rs","breadcrumbs":"Infrastructure from Code Guide » Export Rules for Integration","id":"3603","title":"Export Rules for Integration"},"3604":{"body":"Organize by Organization : Keep separate rules for different organizations High Confidence First : Start with rules you\'re confident about (confidence > 0.8) Document Reasons : Always fill in the reason field for maintainability Test Locally : Run on sample projects before applying organization-wide Version Control : Commit inference rules to version control Review Changes : Always inspect recommendations with --check first","breadcrumbs":"Infrastructure from Code Guide » Best Practices","id":"3604","title":"Best Practices"},"3605":{"body":"# View available taskservs that can be inferred\\nprovisioning taskserv list # Create inferred infrastructure\\nprovisioning taskserv create {inferred-name} # View current configuration\\nprovisioning env | grep PROVISIONING","breadcrumbs":"Infrastructure from Code Guide » Related Commands","id":"3605","title":"Related Commands"},"3606":{"body":"Full CLI Help : provisioning help Specific Command Help : provisioning help detect Configuration Guide : See CONFIG_ENCRYPTION_GUIDE.md Task Services : See SERVICE_MANAGEMENT_GUIDE.md","breadcrumbs":"Infrastructure from Code Guide » Support and Documentation","id":"3606","title":"Support and Documentation"},"3607":{"body":"","breadcrumbs":"Infrastructure from Code Guide » Quick Reference","id":"3607","title":"Quick Reference"},"3608":{"body":"# 1. Detect technologies\\nprovisioning detect /path/to/project # 2. Analyze infrastructure gaps\\nprovisioning complete /path/to/project # 3. Run full workflow (detect + complete)\\nprovisioning ifc /path/to/project --org myorg","breadcrumbs":"Infrastructure from Code Guide » 3-Step Workflow","id":"3608","title":"3-Step Workflow"},"3609":{"body":"Task Command Detect technologies provisioning detect /path Get JSON output provisioning detect /path --out json Check completeness provisioning complete /path Dry-run (check mode) provisioning complete /path --check Full workflow provisioning ifc /path --org myorg Verbose output provisioning ifc /path --verbose Debug mode provisioning detect /path --debug","breadcrumbs":"Infrastructure from Code Guide » Common Commands","id":"3609","title":"Common Commands"},"361":{"body":"# Test DNS resolution\\ndig @localhost test.provisioning.local # Check CoreDNS status\\nprovisioning server ssh dev-server-01 -- systemctl status coredns","breadcrumbs":"Verification » DNS Resolution (If CoreDNS Installed)","id":"361","title":"DNS Resolution (If CoreDNS Installed)"},"3610":{"body":"# Text (human-readable)\\nprovisioning detect /path --out text # JSON (for automation)\\nprovisioning detect /path --out json | jq \'.detections\' # YAML (for configuration)\\nprovisioning detect /path --out yaml","breadcrumbs":"Infrastructure from Code Guide » Output Formats","id":"3610","title":"Output Formats"},"3611":{"body":"Use Organization Rules provisioning ifc /path --org acme-corp Create Rules File mkdir -p $PROVISIONING/config/inference-rules\\ncat > $PROVISIONING/config/inference-rules/myorg.yaml << \'EOF\'\\nversion: \\"1.0.0\\"\\norganization: \\"myorg\\"\\nrules: - name: \\"nodejs-to-redis\\" technology: [\\"nodejs\\"] infers: \\"redis\\" confidence: 0.85 reason: \\"Caching layer\\" required: false\\nEOF","breadcrumbs":"Infrastructure from Code Guide » Organization Rules","id":"3611","title":"Organization Rules"},"3612":{"body":"$ provisioning detect myapp\\n✓ Detected: nodejs, postgres $ provisioning complete myapp\\n✓ Changes: +redis, +nginx, +pg-backup $ provisioning ifc myapp --org default\\n✓ Detection: 2 technologies\\n✓ Completion: recommended changes\\n✅ Workflow complete","breadcrumbs":"Infrastructure from Code Guide » Example: Node.js + PostgreSQL","id":"3612","title":"Example: Node.js + PostgreSQL"},"3613":{"body":"#!/bin/bash\\n# Check infrastructure is complete before deploy\\nCOMPLETENESS=$(provisioning complete . --out json | jq \'.completeness\') if (( $(echo \\"$COMPLETENESS < 0.9\\" | bc -l) )); then echo \\"Infrastructure incomplete: $COMPLETENESS\\" exit 1\\nfi","breadcrumbs":"Infrastructure from Code Guide » CI/CD Integration","id":"3613","title":"CI/CD Integration"},"3614":{"body":"Detect Output { \\"detections\\": [ {\\"technology\\": \\"nodejs\\", \\"confidence\\": 0.95}, {\\"technology\\": \\"postgres\\", \\"confidence\\": 0.92} ], \\"overall_confidence\\": 0.93\\n} Complete Output { \\"completeness\\": 1.0, \\"changes_needed\\": 2, \\"is_safe\\": true, \\"change_summary\\": \\"+ redis, + monitoring\\"\\n}","breadcrumbs":"Infrastructure from Code Guide » JSON Output Examples","id":"3614","title":"JSON Output Examples"},"3615":{"body":"Flag Short Purpose --out TEXT -o Output format: text, json, yaml --debug -x Enable debug output --pretty Pretty-print JSON/YAML --check -c Dry-run (detect/complete) --org TEXT Organization name (ifc) --verbose -v Verbose output (ifc) --apply Apply changes (ifc, future)","breadcrumbs":"Infrastructure from Code Guide » Flag Reference","id":"3615","title":"Flag Reference"},"3616":{"body":"Issue Solution \\"Detector binary not found\\" cd $PROVISIONING/platform && cargo build --release No technologies detected Check file types (.py, .js, go.mod, package.json, etc.) Organization rules not found Verify file exists: $PROVISIONING/config/inference-rules/{org}.yaml Invalid path error Use absolute path: provisioning detect /full/path","breadcrumbs":"Infrastructure from Code Guide » Troubleshooting","id":"3616","title":"Troubleshooting"},"3617":{"body":"Variable Purpose $PROVISIONING Path to provisioning root $PROVISIONING_ORG Default organization (optional)","breadcrumbs":"Infrastructure from Code Guide » Environment Variables","id":"3617","title":"Environment Variables"},"3618":{"body":"Node.js + Express → Redis (caching) Node.js → Nginx (reverse proxy) Database → Backup (data protection) Docker → Kubernetes (orchestration) Python → Gunicorn (WSGI) PostgreSQL → Monitoring (production)","breadcrumbs":"Infrastructure from Code Guide » Default Inference Rules","id":"3618","title":"Default Inference Rules"},"3619":{"body":"# Add to shell config\\nalias detect=\'provisioning detect\'\\nalias complete=\'provisioning complete\'\\nalias ifc=\'provisioning ifc\' # Usage\\ndetect /my/project\\ncomplete /my/project\\nifc /my/project --org myorg","breadcrumbs":"Infrastructure from Code Guide » Useful Aliases","id":"3619","title":"Useful Aliases"},"362":{"body":"# Test server-to-server connectivity\\nprovisioning server ssh dev-server-01 -- ping -c 3 dev-server-02 # Check firewall rules\\nprovisioning server ssh dev-server-01 -- sudo iptables -L","breadcrumbs":"Verification » Network Connectivity","id":"362","title":"Network Connectivity"},"3620":{"body":"Parse JSON in bash: provisioning detect . --out json | \\\\ jq \'.detections[] | .technology\' | \\\\ sort | uniq Watch for changes: watch -n 5 \'provisioning complete . --out json | jq \\".completeness\\"\' Generate reports: provisioning detect . --out yaml > detection-report.yaml\\nprovisioning complete . --out yaml > completion-report.yaml Validate all organizations: for org in $PROVISIONING/config/inference-rules/*.yaml; do org_name=$(basename \\"$org\\" .yaml) echo \\"Testing $org_name...\\" provisioning ifc . --org \\"$org_name\\" --check\\ndone","breadcrumbs":"Infrastructure from Code Guide » Tips & Tricks","id":"3620","title":"Tips & Tricks"},"3621":{"body":"Full guide: docs/user/INFRASTRUCTURE_FROM_CODE_GUIDE.md Inference rules: docs/user/INFRASTRUCTURE_FROM_CODE_GUIDE.md#organization-specific-inference-rules Service management: docs/user/SERVICE_MANAGEMENT_QUICKREF.md Configuration: docs/user/CONFIG_ENCRYPTION_QUICKREF.md","breadcrumbs":"Infrastructure from Code Guide » Related Guides","id":"3621","title":"Related Guides"},"3622":{"body":"","breadcrumbs":"Batch Workflow System » Batch Workflow System (v3.1.0 - TOKEN-OPTIMIZED ARCHITECTURE)","id":"3622","title":"Batch Workflow System (v3.1.0 - TOKEN-OPTIMIZED ARCHITECTURE)"},"3623":{"body":"A comprehensive batch workflow system has been implemented using 10 token-optimized agents achieving 85-90% token efficiency over monolithic approaches. The system enables provider-agnostic batch operations with mixed provider support (UpCloud + AWS + local).","breadcrumbs":"Batch Workflow System » 🚀 Batch Workflow System Completed (2025-09-25)","id":"3623","title":"🚀 Batch Workflow System Completed (2025-09-25)"},"3624":{"body":"Provider-Agnostic Design : Single workflows supporting multiple cloud providers Nickel Schema Integration : Type-safe workflow definitions with comprehensive validation Dependency Resolution : Topological sorting with soft/hard dependency support State Management : Checkpoint-based recovery with rollback capabilities Real-time Monitoring : Live workflow progress tracking and health monitoring Token Optimization : 85-90% efficiency using parallel specialized agents","breadcrumbs":"Batch Workflow System » Key Achievements","id":"3624","title":"Key Achievements"},"3625":{"body":"# Submit batch workflow from Nickel definition\\nnu -c \\"use core/nulib/workflows/batch.nu *; batch submit workflows/example_batch.ncl\\" # Monitor batch workflow progress\\nnu -c \\"use core/nulib/workflows/batch.nu *; batch monitor \\" # List batch workflows with filtering\\nnu -c \\"use core/nulib/workflows/batch.nu *; batch list --status Running\\" # Get detailed batch status\\nnu -c \\"use core/nulib/workflows/batch.nu *; batch status \\" # Initiate rollback for failed workflow\\nnu -c \\"use core/nulib/workflows/batch.nu *; batch rollback \\" # Show batch workflow statistics\\nnu -c \\"use core/nulib/workflows/batch.nu *; batch stats\\"","breadcrumbs":"Batch Workflow System » Batch Workflow Commands","id":"3625","title":"Batch Workflow Commands"},"3626":{"body":"Batch workflows are defined using Nickel configuration in schemas/workflows.ncl: # Example batch workflow with mixed providers\\n{ batch_workflow = { name = \\"multi_cloud_deployment\\", version = \\"1.0.0\\", storage_backend = \\"surrealdb\\", # or \\"filesystem\\" parallel_limit = 5, rollback_enabled = true, operations = [ { id = \\"upcloud_servers\\", type = \\"server_batch\\", provider = \\"upcloud\\", dependencies = [], server_configs = [ { name = \\"web-01\\", plan = \\"1xCPU-2 GB\\", zone = \\"de-fra1\\" }, { name = \\"web-02\\", plan = \\"1xCPU-2 GB\\", zone = \\"us-nyc1\\" } ] }, { id = \\"aws_taskservs\\", type = \\"taskserv_batch\\", provider = \\"aws\\", dependencies = [\\"upcloud_servers\\"], taskservs = [\\"kubernetes\\", \\"cilium\\", \\"containerd\\"] } ] }\\n}","breadcrumbs":"Batch Workflow System » Nickel Workflow Schema","id":"3626","title":"Nickel Workflow Schema"},"3627":{"body":"Extended orchestrator API for batch workflow management: Submit Batch : POST http://localhost:9090/v1/workflows/batch/submit Batch Status : GET http://localhost:9090/v1/workflows/batch/{id} List Batches : GET http://localhost:9090/v1/workflows/batch Monitor Progress : GET http://localhost:9090/v1/workflows/batch/{id}/progress Initiate Rollback : POST http://localhost:9090/v1/workflows/batch/{id}/rollback Batch Statistics : GET http://localhost:9090/v1/workflows/batch/stats","breadcrumbs":"Batch Workflow System » REST API Endpoints (Batch Operations)","id":"3627","title":"REST API Endpoints (Batch Operations)"},"3628":{"body":"Provider Agnostic : Mix UpCloud, AWS, and local providers in single workflows Type Safety : Nickel schema validation prevents runtime errors Dependency Management : Automatic resolution with failure handling State Recovery : Checkpoint-based recovery from any failure point Real-time Monitoring : Live progress tracking with detailed status","breadcrumbs":"Batch Workflow System » System Benefits","id":"3628","title":"System Benefits"},"3629":{"body":"This document provides practical examples of orchestrating complex deployments and operations across multiple cloud providers using the batch workflow system.","breadcrumbs":"Batch Workflow Multi-Provider Examples » Multi-Provider Batch Workflow Examples","id":"3629","title":"Multi-Provider Batch Workflow Examples"},"363":{"body":"# Check disk usage\\nprovisioning server ssh dev-server-01 -- df -h # Check memory usage\\nprovisioning server ssh dev-server-01 -- free -h # Check CPU usage\\nprovisioning server ssh dev-server-01 -- top -bn1 | head -20","breadcrumbs":"Verification » Storage and Resources","id":"363","title":"Storage and Resources"},"3630":{"body":"Overview Workflow 1: Coordinated Multi-Provider Deployment Workflow 2: Multi-Provider Disaster Recovery Failover Workflow 3: Cost Optimization Workload Migration Workflow 4: Multi-Region Database Replication Best Practices Troubleshooting","breadcrumbs":"Batch Workflow Multi-Provider Examples » Table of Contents","id":"3630","title":"Table of Contents"},"3631":{"body":"The batch workflow system enables declarative orchestration of operations across multiple providers with: Dependency Tracking : Define what must complete before what Error Handling : Automatic rollback on failure Idempotency : Safe to re-run workflows Status Tracking : Real-time progress monitoring Recovery Checkpoints : Resume from failure points","breadcrumbs":"Batch Workflow Multi-Provider Examples » Overview","id":"3631","title":"Overview"},"3632":{"body":"Use Case : Deploy web application across DigitalOcean, AWS, and Hetzner with proper sequencing and dependencies. Workflow Characteristics : Database created first (dependencies) Backup storage ready before compute Web servers scale once database ready Health checks before considering complete","breadcrumbs":"Batch Workflow Multi-Provider Examples » Workflow 1: Coordinated Multi-Provider Deployment","id":"3632","title":"Workflow 1: Coordinated Multi-Provider Deployment"},"3633":{"body":"# file: workflows/multi-provider-deployment.yml name: multi-provider-app-deployment\\nversion: \\"1.0\\"\\ndescription: \\"Deploy web app across three cloud providers\\" parameters: do_region: \\"nyc3\\" aws_region: \\"us-east-1\\" hetzner_location: \\"nbg1\\" web_server_count: 3 phases: # Phase 1: Create backup storage first (independent) - name: \\"provision-backup-storage\\" provider: \\"hetzner\\" description: \\"Create backup storage volume in Hetzner\\" operations: - id: \\"create-backup-volume\\" action: \\"create-volume\\" config: name: \\"webapp-backups\\" size: 500 location: \\"{{ hetzner_location }}\\" format: \\"ext4\\" tags: [\\"storage\\", \\"backup\\"] on_failure: \\"alert\\" on_success: \\"proceed\\" # Phase 2: Create database (independent, but must complete before app) - name: \\"provision-database\\" provider: \\"aws\\" description: \\"Create managed PostgreSQL database\\" depends_on: [] # Can run in parallel with Phase 1 operations: - id: \\"create-rds-instance\\" action: \\"create-db-instance\\" config: identifier: \\"webapp-db\\" engine: \\"postgres\\" engine_version: \\"14.6\\" instance_class: \\"db.t3.medium\\" allocated_storage: 100 multi_az: true backup_retention_days: 30 tags: [\\"database\\", \\"primary\\"] - id: \\"create-security-group\\" action: \\"create-security-group\\" config: name: \\"webapp-db-sg\\" description: \\"Security group for RDS\\" depends_on: [\\"create-rds-instance\\"] - id: \\"configure-db-access\\" action: \\"authorize-security-group\\" config: group_id: \\"{{ create-security-group.id }}\\" protocol: \\"tcp\\" port: 5432 cidr: \\"10.0.0.0/8\\" depends_on: [\\"create-security-group\\"] timeout: 60 # Phase 3: Create web tier (depends on database being ready) - name: \\"provision-web-tier\\" provider: \\"digitalocean\\" description: \\"Create web servers and load balancer\\" depends_on: [\\"provision-database\\"] # Wait for database operations: - id: \\"create-droplets\\" action: \\"create-droplet\\" config: name: \\"web-server\\" size: \\"s-2vcpu-4gb\\" region: \\"{{ do_region }}\\" image: \\"ubuntu-22-04-x64\\" count: \\"{{ web_server_count }}\\" backups: true monitoring: true tags: [\\"web\\", \\"production\\"] timeout: 300 retry: max_attempts: 3 backoff: exponential - id: \\"create-firewall\\" action: \\"create-firewall\\" config: name: \\"web-firewall\\" inbound_rules: - protocol: \\"tcp\\" ports: \\"22\\" sources: [\\"0.0.0.0/0\\"] - protocol: \\"tcp\\" ports: \\"80\\" sources: [\\"0.0.0.0/0\\"] - protocol: \\"tcp\\" ports: \\"443\\" sources: [\\"0.0.0.0/0\\"] depends_on: [\\"create-droplets\\"] - id: \\"create-load-balancer\\" action: \\"create-load-balancer\\" config: name: \\"web-lb\\" algorithm: \\"round_robin\\" region: \\"{{ do_region }}\\" forwarding_rules: - entry_protocol: \\"http\\" entry_port: 80 target_protocol: \\"http\\" target_port: 80 - entry_protocol: \\"https\\" entry_port: 443 target_protocol: \\"http\\" target_port: 80 health_check: protocol: \\"http\\" port: 80 path: \\"/health\\" interval: 10 depends_on: [\\"create-droplets\\"] # Phase 4: Network configuration (depends on all resources) - name: \\"configure-networking\\" description: \\"Setup VPN tunnels and security between providers\\" depends_on: [\\"provision-web-tier\\"] operations: - id: \\"setup-vpn-tunnel-do-aws\\" action: \\"create-vpn-tunnel\\" config: source_provider: \\"digitalocean\\" destination_provider: \\"aws\\" protocol: \\"ipsec\\" encryption: \\"aes-256\\" timeout: 120 - id: \\"setup-vpn-tunnel-aws-hetzner\\" action: \\"create-vpn-tunnel\\" config: source_provider: \\"aws\\" destination_provider: \\"hetzner\\" protocol: \\"ipsec\\" encryption: \\"aes-256\\" # Phase 5: Validation and verification - name: \\"verify-deployment\\" description: \\"Verify all resources are operational\\" depends_on: [\\"configure-networking\\"] operations: - id: \\"health-check-droplets\\" action: \\"run-health-check\\" config: targets: \\"{{ create-droplets.ips }}\\" endpoint: \\"/health\\" expected_status: 200 timeout: 30 timeout: 300 - id: \\"health-check-database\\" action: \\"verify-database\\" config: host: \\"{{ create-rds-instance.endpoint }}\\" port: 5432 database: \\"postgres\\" timeout: 30 - id: \\"health-check-backup\\" action: \\"verify-volume\\" config: volume_id: \\"{{ create-backup-volume.id }}\\" status: \\"available\\" # Rollback strategy: if any phase fails\\nrollback: strategy: \\"automatic\\" on_phase_failure: \\"rollback-previous-phases\\" preserve_data: true # Notifications\\nnotifications: on_start: \\"slack:#deployments\\" on_phase_complete: \\"slack:#deployments\\" on_failure: \\"slack:#alerts\\" on_success: \\"slack:#deployments\\" # Validation checks\\npre_flight: - check: \\"credentials\\" description: \\"Verify all provider credentials\\" - check: \\"quotas\\" description: \\"Verify sufficient quotas in each provider\\" - check: \\"dependencies\\" description: \\"Verify all dependencies are available\\"","breadcrumbs":"Batch Workflow Multi-Provider Examples » Workflow Definition","id":"3633","title":"Workflow Definition"},"3634":{"body":"┌─────────────────────────────────────────────────────────┐\\n│ Start Deployment │\\n└──────────────────┬──────────────────────────────────────┘ │ ┌──────────┴──────────┐ │ │ ▼ ▼ ┌─────────────┐ ┌──────────────────┐ │ Hetzner │ │ AWS │ │ Backup │ │ Database │ │ (Phase 1) │ │ (Phase 2) │ └──────┬──────┘ └────────┬─────────┘ │ │ │ Ready │ Ready └────────┬───────────┘ │ ▼ ┌──────────────────┐ │ DigitalOcean │ │ Web Tier │ │ (Phase 3) │ │ - Droplets │ │ - Firewall │ │ - Load Balancer │ └────────┬─────────┘ │ ▼ ┌──────────────────┐ │ Network Setup │ │ (Phase 4) │ │ - VPN Tunnels │ └────────┬─────────┘ │ ▼ ┌──────────────────┐ │ Verification │ │ (Phase 5) │ │ - Health Checks │ └────────┬─────────┘ │ ▼ ┌──────────────────┐ │ Deployment OK │ │ (Ready to use) │ └──────────────────┘","breadcrumbs":"Batch Workflow Multi-Provider Examples » Execution Flow","id":"3634","title":"Execution Flow"},"3635":{"body":"Use Case : Automated failover from primary provider (DigitalOcean) to backup provider (Hetzner) on detection of failure. Workflow Characteristics : Continuous health monitoring Automatic failover trigger Database promotion DNS update Verification before considering complete","breadcrumbs":"Batch Workflow Multi-Provider Examples » Workflow 2: Multi-Provider Disaster Recovery Failover","id":"3635","title":"Workflow 2: Multi-Provider Disaster Recovery Failover"},"3636":{"body":"# file: workflows/multi-provider-dr-failover.yml name: multi-provider-dr-failover\\nversion: \\"1.0\\"\\ndescription: \\"Automated failover from DigitalOcean to Hetzner\\" parameters: primary_provider: \\"digitalocean\\" backup_provider: \\"hetzner\\" dns_provider: \\"aws\\" health_check_threshold: 3 phases: # Phase 1: Monitor primary provider - name: \\"monitor-primary\\" description: \\"Continuous health monitoring of primary\\" operations: - id: \\"health-check-primary\\" action: \\"run-health-check\\" config: provider: \\"{{ primary_provider }}\\" resources: [\\"web-servers\\", \\"load-balancer\\"] checks: - type: \\"http\\" endpoint: \\"/health\\" expected_status: 200 - type: \\"database\\" host: \\"db.primary.example.com\\" query: \\"SELECT 1\\" - type: \\"connectivity\\" test: \\"ping\\" interval: 30 # Check every 30 seconds timeout: 300 - id: \\"aggregate-health\\" action: \\"aggregate-metrics\\" config: source: \\"{{ health-check-primary.results }}\\" failure_threshold: 3 # 3 consecutive failures trigger failover # Phase 2: Trigger failover (conditional on failure) - name: \\"trigger-failover\\" description: \\"Activate disaster recovery if primary fails\\" depends_on: [\\"monitor-primary\\"] condition: \\"{{ aggregate-health.status }} == \'FAILED\'\\" operations: - id: \\"alert-on-failure\\" action: \\"send-notification\\" config: type: \\"critical\\" message: \\"Primary provider ({{ primary_provider }}) has failed. Initiating failover...\\" recipients: [\\"ops-team@example.com\\", \\"slack:#alerts\\"] - id: \\"enable-backup-infrastructure\\" action: \\"scale-up\\" config: provider: \\"{{ backup_provider }}\\" target: \\"warm-standby-servers\\" desired_count: 3 instance_type: \\"cx31\\" timeout: 300 retry: max_attempts: 3 - id: \\"promote-database-replica\\" action: \\"promote-read-replica\\" config: provider: \\"aws\\" replica_identifier: \\"backup-db-replica\\" to_master: true timeout: 600 # Allow time for promotion # Phase 3: Network failover - name: \\"network-failover\\" description: \\"Switch traffic to backup provider\\" depends_on: [\\"trigger-failover\\"] operations: - id: \\"update-load-balancer\\" action: \\"reconfigure-load-balancer\\" config: provider: \\"{{ dns_provider }}\\" record: \\"api.example.com\\" old_backend: \\"do-lb-{{ primary_provider }}\\" new_backend: \\"hz-lb-{{ backup_provider }}\\" - id: \\"update-dns\\" action: \\"update-dns-record\\" config: provider: \\"route53\\" record: \\"example.com\\" old_value: \\"do-lb-ip\\" new_value: \\"hz-lb-ip\\" ttl: 60 - id: \\"update-cdn\\" action: \\"update-cdn-origin\\" config: cdn_provider: \\"cloudfront\\" distribution_id: \\"E123456789ABCDEF\\" new_origin: \\"backup-lb.hetzner.com\\" # Phase 4: Verify failover - name: \\"verify-failover\\" description: \\"Verify backup provider is operational\\" depends_on: [\\"network-failover\\"] operations: - id: \\"health-check-backup\\" action: \\"run-health-check\\" config: provider: \\"{{ backup_provider }}\\" resources: [\\"backup-servers\\"] endpoint: \\"/health\\" expected_status: 200 timeout: 30 timeout: 300 - id: \\"verify-database\\" action: \\"verify-database\\" config: provider: \\"aws\\" database: \\"backup-db-promoted\\" query: \\"SELECT COUNT(*) FROM users\\" expected_rows: \\"> 0\\" - id: \\"verify-traffic\\" action: \\"verify-traffic-flow\\" config: endpoint: \\"https://example.com\\" expected_response_time: \\"< 500 ms\\" expected_status: 200 # Phase 5: Activate backup fully - name: \\"activate-backup\\" description: \\"Run at full capacity on backup provider\\" depends_on: [\\"verify-failover\\"] operations: - id: \\"scale-to-production\\" action: \\"scale-up\\" config: provider: \\"{{ backup_provider }}\\" target: \\"all-backup-servers\\" desired_count: 6 timeout: 600 - id: \\"configure-persistence\\" action: \\"enable-persistence\\" config: provider: \\"{{ backup_provider }}\\" resources: [\\"backup-servers\\"] persistence_type: \\"volume\\" # Recovery strategy for primary restoration\\nrecovery: description: \\"Restore primary provider when recovered\\" phases: - name: \\"detect-primary-recovery\\" operation: \\"health-check\\" target: \\"primary-provider\\" success_criteria: \\"3 consecutive successful checks\\" - name: \\"resync-data\\" operation: \\"database-resync\\" direction: \\"backup-to-primary\\" timeout: 3600 - name: \\"failback\\" operation: \\"switch-traffic\\" target: \\"primary-provider\\" verification: \\"100% traffic restored\\" # Notifications\\nnotifications: on_failover_start: \\"pagerduty:critical\\" on_failover_complete: \\"slack:#ops\\" on_failover_failed: [\\"pagerduty:critical\\", \\"email:cto@example.com\\"] on_recovery_start: \\"slack:#ops\\" on_recovery_complete: \\"slack:#ops\\"","breadcrumbs":"Batch Workflow Multi-Provider Examples » Workflow Definition","id":"3636","title":"Workflow Definition"},"3637":{"body":"Time Event\\n────────────────────────────────────────────────────\\n00:00 Health check detects failure (3 consecutive failures)\\n00:01 Alert sent to ops team\\n00:02 Backup infrastructure scaled to 3 servers\\n00:05 Database replica promoted to master\\n00:10 DNS updated (TTL=60s, propagation ~2 minutes)\\n00:12 Load balancer reconfigured\\n00:15 Traffic verified flowing through backup\\n00:20 Backup scaled to full production capacity (6 servers)\\n00:25 Fully operational on backup provider Total RTO: 25 minutes (including DNS propagation)\\nData loss (RPO): < 5 minutes (database replication lag)","breadcrumbs":"Batch Workflow Multi-Provider Examples » Failover Timeline","id":"3637","title":"Failover Timeline"},"3638":{"body":"Use Case : Migrate running workloads to cheaper provider (DigitalOcean to Hetzner) for cost reduction. Workflow Characteristics : Parallel deployment on target provider Gradual traffic migration Rollback capability Cost tracking","breadcrumbs":"Batch Workflow Multi-Provider Examples » Workflow 3: Cost Optimization Workload Migration","id":"3638","title":"Workflow 3: Cost Optimization Workload Migration"},"3639":{"body":"# file: workflows/cost-optimization-migration.yml name: cost-optimization-migration\\nversion: \\"1.0\\"\\ndescription: \\"Migrate workload from DigitalOcean to Hetzner for cost savings\\" parameters: source_provider: \\"digitalocean\\" target_provider: \\"hetzner\\" migration_speed: \\"gradual\\" # or \\"aggressive\\" traffic_split: [10, 25, 50, 75, 100] # Gradual percentages phases: # Phase 1: Create target infrastructure - name: \\"create-target-infrastructure\\" description: \\"Deploy identical workload on Hetzner\\" operations: - id: \\"provision-servers\\" action: \\"create-server\\" config: provider: \\"{{ target_provider }}\\" name: \\"migration-app\\" server_type: \\"cpx21\\" # Better price/performance than DO count: 3 timeout: 300 # Phase 2: Verify target is ready - name: \\"verify-target\\" description: \\"Health checks on target infrastructure\\" depends_on: [\\"create-target-infrastructure\\"] operations: - id: \\"health-check\\" action: \\"run-health-check\\" config: provider: \\"{{ target_provider }}\\" endpoint: \\"/health\\" timeout: 300 # Phase 3: Gradual traffic migration - name: \\"migrate-traffic\\" description: \\"Gradually shift traffic to target provider\\" depends_on: [\\"verify-target\\"] operations: - id: \\"set-traffic-10\\" action: \\"set-traffic-split\\" config: source: \\"{{ source_provider }}\\" target: \\"{{ target_provider }}\\" percentage: 10 duration: 300 - id: \\"verify-10\\" action: \\"verify-traffic-flow\\" config: target_percentage: 10 error_rate_threshold: 0.1 - id: \\"set-traffic-25\\" action: \\"set-traffic-split\\" config: percentage: 25 duration: 600 - id: \\"set-traffic-50\\" action: \\"set-traffic-split\\" config: percentage: 50 duration: 900 - id: \\"set-traffic-75\\" action: \\"set-traffic-split\\" config: percentage: 75 duration: 900 - id: \\"set-traffic-100\\" action: \\"set-traffic-split\\" config: percentage: 100 duration: 600 # Phase 4: Cleanup source - name: \\"cleanup-source\\" description: \\"Remove old infrastructure from source provider\\" depends_on: [\\"migrate-traffic\\"] operations: - id: \\"verify-final\\" action: \\"run-health-check\\" config: provider: \\"{{ target_provider }}\\" duration: 3600 # Monitor for 1 hour - id: \\"decommission-source\\" action: \\"delete-resources\\" config: provider: \\"{{ source_provider }}\\" resources: [\\"droplets\\", \\"load-balancer\\"] preserve_backups: true # Cost tracking\\ncost_tracking: before: provider: \\"{{ source_provider }}\\" estimated_monthly: \\"$72\\" after: provider: \\"{{ target_provider }}\\" estimated_monthly: \\"$42\\" savings: monthly: \\"$30\\" annual: \\"$360\\" percentage: \\"42%\\"","breadcrumbs":"Batch Workflow Multi-Provider Examples » Workflow Definition","id":"3639","title":"Workflow Definition"},"364":{"body":"","breadcrumbs":"Verification » Troubleshooting Failed Verifications","id":"364","title":"Troubleshooting Failed Verifications"},"3640":{"body":"Use Case : Setup database replication across multiple providers and regions for disaster recovery. Workflow Characteristics : Create primary database Setup read replicas in other providers Configure replication Monitor lag","breadcrumbs":"Batch Workflow Multi-Provider Examples » Workflow 4: Multi-Region Database Replication","id":"3640","title":"Workflow 4: Multi-Region Database Replication"},"3641":{"body":"# file: workflows/multi-region-replication.yml name: multi-region-replication\\nversion: \\"1.0\\"\\ndescription: \\"Setup database replication across providers\\" phases: # Primary database - name: \\"create-primary\\" provider: \\"aws\\" operations: - id: \\"create-rds\\" action: \\"create-db-instance\\" config: identifier: \\"app-db-primary\\" engine: \\"postgres\\" instance_class: \\"db.t3.medium\\" region: \\"us-east-1\\" # Secondary replica - name: \\"create-secondary-replica\\" depends_on: [\\"create-primary\\"] provider: \\"aws\\" operations: - id: \\"create-replica\\" action: \\"create-read-replica\\" config: source: \\"app-db-primary\\" region: \\"eu-west-1\\" identifier: \\"app-db-secondary\\" # Tertiary replica in different provider - name: \\"create-tertiary-replica\\" depends_on: [\\"create-primary\\"] operations: - id: \\"setup-replication\\" action: \\"setup-external-replication\\" config: source_provider: \\"aws\\" source_db: \\"app-db-primary\\" target_provider: \\"hetzner\\" replication_slot: \\"hetzner_replica\\" replication_type: \\"logical\\" # Monitor replication - name: \\"monitor-replication\\" depends_on: [\\"create-tertiary-replica\\"] operations: - id: \\"check-lag\\" action: \\"monitor-replication-lag\\" config: replicas: - name: \\"secondary\\" warning_threshold: 300 critical_threshold: 600 - name: \\"tertiary\\" warning_threshold: 1000 critical_threshold: 2000 interval: 60","breadcrumbs":"Batch Workflow Multi-Provider Examples » Workflow Definition","id":"3641","title":"Workflow Definition"},"3642":{"body":"","breadcrumbs":"Batch Workflow Multi-Provider Examples » Best Practices","id":"3642","title":"Best Practices"},"3643":{"body":"Define Clear Dependencies : Explicitly state what must happen before what Use Idempotent Operations : Workflows should be safe to re-run Set Realistic Timeouts : Account for cloud provider delays Plan for Failures : Define rollback strategies Test Workflows : Run in staging before production","breadcrumbs":"Batch Workflow Multi-Provider Examples » 1. Workflow Design","id":"3643","title":"1. Workflow Design"},"3644":{"body":"Parallel Execution : Run independent phases in parallel for speed Checkpoints : Add verification at each phase Progressive Deployment : Use gradual traffic shifting Monitoring Integration : Track metrics during workflow Notifications : Alert team at key points","breadcrumbs":"Batch Workflow Multi-Provider Examples » 2. Orchestration","id":"3644","title":"2. Orchestration"},"3645":{"body":"Calculate ROI : Track cost savings from optimizations Monitor Resource Usage : Watch for over-provisioning Implement Cleanup : Remove old resources after migration Review Regularly : Reassess provider choices","breadcrumbs":"Batch Workflow Multi-Provider Examples » 3. Cost Management","id":"3645","title":"3. Cost Management"},"3646":{"body":"","breadcrumbs":"Batch Workflow Multi-Provider Examples » Troubleshooting","id":"3646","title":"Troubleshooting"},"3647":{"body":"Diagnosis : provisioning workflow status workflow-id --verbose Solution : Increase timeout if legitimate long operation Check provider logs for actual status Manually intervene if necessary Use --skip-phase to skip problematic phase","breadcrumbs":"Batch Workflow Multi-Provider Examples » Issue: Workflow Stuck in Phase","id":"3647","title":"Issue: Workflow Stuck in Phase"},"3648":{"body":"Diagnosis : provisioning workflow rollback workflow-id --dry-run Solution : Review what resources were created Manually delete resources if needed Fix root cause of failure Re-run workflow","breadcrumbs":"Batch Workflow Multi-Provider Examples » Issue: Rollback Failed","id":"3648","title":"Issue: Rollback Failed"},"3649":{"body":"Diagnosis : provisioning database verify-consistency Solution : Check replication lag before failover Manually resync if necessary Use backup to restore consistency Run validation queries","breadcrumbs":"Batch Workflow Multi-Provider Examples » Issue: Data Inconsistency After Failover","id":"3649","title":"Issue: Data Inconsistency After Failover"},"365":{"body":"# View detailed error\\nprovisioning validate config --verbose # Check specific infrastructure\\nprovisioning validate config --infra my-infra","breadcrumbs":"Verification » Configuration Validation Failed","id":"365","title":"Configuration Validation Failed"},"3650":{"body":"Batch workflows enable complex multi-provider orchestration with: Coordinated deployment across providers Automated failover and recovery Gradual workload migration Cost optimization Disaster recovery Start with simple workflows and gradually add complexity as you gain confidence.","breadcrumbs":"Batch Workflow Multi-Provider Examples » Summary","id":"3650","title":"Summary"},"3651":{"body":"","breadcrumbs":"CLI Architecture » Modular CLI Architecture (v3.2.0 - MAJOR REFACTORING)","id":"3651","title":"Modular CLI Architecture (v3.2.0 - MAJOR REFACTORING)"},"3652":{"body":"A comprehensive CLI refactoring transforming the monolithic 1,329-line script into a modular, maintainable architecture with domain-driven design.","breadcrumbs":"CLI Architecture » 🚀 CLI Refactoring Completed (2025-09-30)","id":"3652","title":"🚀 CLI Refactoring Completed (2025-09-30)"},"3653":{"body":"Main File Reduction : 1,329 lines → 211 lines (84% reduction) Domain Handlers : 7 focused modules (infrastructure, orchestration, development, workspace, configuration, utilities, generation) Code Duplication : 50+ instances eliminated through centralized flag handling Command Registry : 80+ shortcuts for improved user experience Bi-directional Help : provisioning help ws = provisioning ws help Test Coverage : Comprehensive test suite with 6 test groups","breadcrumbs":"CLI Architecture » Architecture Improvements","id":"3653","title":"Architecture Improvements"},"3654":{"body":"","breadcrumbs":"CLI Architecture » Command Shortcuts Reference","id":"3654","title":"Command Shortcuts Reference"},"3655":{"body":"[Full docs: provisioning help infra] s → server (create, delete, list, ssh, price) t, task → taskserv (create, delete, list, generate, check-updates) cl → cluster (create, delete, list) i, infras → infra (list, validate)","breadcrumbs":"CLI Architecture » Infrastructure","id":"3655","title":"Infrastructure"},"3656":{"body":"[Full docs: provisioning help orch] wf, flow → workflow (list, status, monitor, stats, cleanup) bat → batch (submit, list, status, monitor, rollback, cancel, stats) orch → orchestrator (start, stop, status, health, logs)","breadcrumbs":"CLI Architecture » Orchestration","id":"3656","title":"Orchestration"},"3657":{"body":"[Full docs: provisioning help dev] mod → module (discover, load, list, unload, sync-nickel) lyr → layer (explain, show, test, stats) version (check, show, updates, apply, taskserv) pack (core, provider, list, clean)","breadcrumbs":"CLI Architecture » Development","id":"3657","title":"Development"},"3658":{"body":"[Full docs: provisioning help ws] ws → workspace (init, create, validate, info, list, migrate) tpl, tmpl → template (list, types, show, apply, validate)","breadcrumbs":"CLI Architecture » Workspace","id":"3658","title":"Workspace"},"3659":{"body":"[Full docs: provisioning help config] e → env (show environment variables) val → validate (validate configuration) st, config → setup (setup wizard) show (show configuration details) init (initialize infrastructure) allenv (show all config and environment)","breadcrumbs":"CLI Architecture » Configuration","id":"3659","title":"Configuration"},"366":{"body":"# Check server logs\\nprovisioning server logs dev-server-01 # Try debug mode\\nprovisioning --debug server ssh dev-server-01","breadcrumbs":"Verification » Server Unreachable","id":"366","title":"Server Unreachable"},"3660":{"body":"l, ls, list → list (list resources) ssh (SSH operations) sops (edit encrypted files) cache (cache management) providers (provider operations) nu (start Nushell session with provisioning library) qr (QR code generation) nuinfo (Nushell information) plugin, plugins (plugin management)","breadcrumbs":"CLI Architecture » Utilities","id":"3660","title":"Utilities"},"3661":{"body":"[Full docs: provisioning generate help] g, gen → generate (server, taskserv, cluster, infra, new)","breadcrumbs":"CLI Architecture » Generation","id":"3661","title":"Generation"},"3662":{"body":"c → create (create resources) d → delete (delete resources) u → update (update resources) price, cost, costs → price (show pricing) cst, csts → create-server-task (create server with taskservs)","breadcrumbs":"CLI Architecture » Special Commands","id":"3662","title":"Special Commands"},"3663":{"body":"The help system works in both directions: # All these work identically:\\nprovisioning help workspace\\nprovisioning workspace help\\nprovisioning ws help\\nprovisioning help ws # Same for all categories:\\nprovisioning help infra = provisioning infra help\\nprovisioning help orch = provisioning orch help\\nprovisioning help dev = provisioning dev help\\nprovisioning help ws = provisioning ws help\\nprovisioning help plat = provisioning plat help\\nprovisioning help concept = provisioning concept help","breadcrumbs":"CLI Architecture » Bi-directional Help System","id":"3663","title":"Bi-directional Help System"},"3664":{"body":"File Structure: provisioning/core/nulib/\\n├── provisioning (211 lines) - Main entry point\\n├── main_provisioning/\\n│ ├── flags.nu (139 lines) - Centralized flag handling\\n│ ├── dispatcher.nu (264 lines) - Command routing\\n│ ├── help_system.nu - Categorized help\\n│ └── commands/ - Domain-focused handlers\\n│ ├── infrastructure.nu (117 lines)\\n│ ├── orchestration.nu (64 lines)\\n│ ├── development.nu (72 lines)\\n│ ├── workspace.nu (56 lines)\\n│ ├── generation.nu (78 lines)\\n│ ├── utilities.nu (157 lines)\\n│ └── configuration.nu (316 lines) For Developers: Adding commands : Update appropriate domain handler in commands/ Adding shortcuts : Update command registry in dispatcher.nu Flag changes : Modify centralized functions in flags.nu Testing : Run nu tests/test_provisioning_refactor.nu See ADR-006: CLI Refactoring for complete refactoring details.","breadcrumbs":"CLI Architecture » CLI Internal Architecture","id":"3664","title":"CLI Internal Architecture"},"3665":{"body":"","breadcrumbs":"Configuration System » Configuration System (v2.0.0)","id":"3665","title":"Configuration System (v2.0.0)"},"3666":{"body":"The system has been migrated from ENV-based to config-driven architecture. 65+ files migrated across entire codebase 200+ ENV variables replaced with 476 config accessors 16 token-efficient agents used for systematic migration 92% token efficiency achieved vs monolithic approach","breadcrumbs":"Configuration System » ⚠️ Migration Completed (2025-09-23)","id":"3666","title":"⚠️ Migration Completed (2025-09-23)"},"3667":{"body":"Primary Config : config.defaults.toml (system defaults) User Config : config.user.toml (user preferences) Environment Configs : config.{dev,test,prod}.toml.example Hierarchical Loading : defaults → user → project → infra → env → runtime Interpolation : {{paths.base}}, {{env.HOME}}, {{now.date}}, {{git.branch}}","breadcrumbs":"Configuration System » Configuration Files","id":"3667","title":"Configuration Files"},"3668":{"body":"provisioning validate config - Validate configuration provisioning env - Show environment variables provisioning allenv - Show all config and environment PROVISIONING_ENV=prod provisioning - Use specific environment","breadcrumbs":"Configuration System » Essential Commands","id":"3668","title":"Essential Commands"},"3669":{"body":"See ADR-010: Configuration Format Strategy for complete rationale and design patterns.","breadcrumbs":"Configuration System » Configuration Architecture","id":"3669","title":"Configuration Architecture"},"367":{"body":"# Check service logs\\nprovisioning taskserv logs kubernetes # Restart service\\nprovisioning taskserv restart kubernetes --infra my-infra","breadcrumbs":"Verification » Task Service Not Running","id":"367","title":"Task Service Not Running"},"3670":{"body":"When loading configuration, precedence is (highest to lowest): Runtime Arguments - CLI flags and direct user input Environment Variables - PROVISIONING_* overrides User Configuration - ~/.config/provisioning/user_config.yaml Infrastructure Configuration - Nickel schemas, extensions, provider configs System Defaults - provisioning/config/config.defaults.toml","breadcrumbs":"Configuration System » Configuration Loading Hierarchy (Priority)","id":"3670","title":"Configuration Loading Hierarchy (Priority)"},"3671":{"body":"For new configuration : Infrastructure/schemas → Use Nickel (type-safe, schema-validated) Application settings → Use TOML (hierarchical, supports interpolation) Kubernetes/CI-CD → Use YAML (standard, ecosystem-compatible) For existing workspace configs : Nickel is the primary configuration language All new workspaces use Nickel exclusively","breadcrumbs":"Configuration System » File Type Guidelines","id":"3671","title":"File Type Guidelines"},"3672":{"body":"Complete command-line reference for Infrastructure Automation. This guide covers all commands, options, and usage patterns.","breadcrumbs":"CLI Reference » CLI Reference","id":"3672","title":"CLI Reference"},"3673":{"body":"Complete command syntax and options All available commands and subcommands Usage examples and patterns Scripting and automation Integration with other tools Advanced command combinations","breadcrumbs":"CLI Reference » What You\'ll Learn","id":"3673","title":"What You\'ll Learn"},"3674":{"body":"All provisioning commands follow this structure: provisioning [global-options] [subcommand] [command-options] [arguments]","breadcrumbs":"CLI Reference » Command Structure","id":"3674","title":"Command Structure"},"3675":{"body":"These options can be used with any command: Option Short Description Example --infra -i Specify infrastructure --infra production --environment Environment override --environment prod --check -c Dry run mode --check --debug -x Enable debug output --debug --yes -y Auto-confirm actions --yes --wait -w Wait for completion --wait --out Output format --out json --help -h Show help --help","breadcrumbs":"CLI Reference » Global Options","id":"3675","title":"Global Options"},"3676":{"body":"Format Description Use Case text Human-readable text Terminal viewing json JSON format Scripting, APIs yaml YAML format Configuration files toml TOML format Settings files table Tabular format Reports, lists","breadcrumbs":"CLI Reference » Output Formats","id":"3676","title":"Output Formats"},"3677":{"body":"","breadcrumbs":"CLI Reference » Core Commands","id":"3677","title":"Core Commands"},"3678":{"body":"Display help information for the system or specific commands. # General help\\nprovisioning help # Command-specific help\\nprovisioning help server\\nprovisioning help taskserv\\nprovisioning help cluster # Show all available commands\\nprovisioning help --all # Show help for subcommand\\nprovisioning server help create Options: --all - Show all available commands --detailed - Show detailed help with examples","breadcrumbs":"CLI Reference » help - Show Help Information","id":"3678","title":"help - Show Help Information"},"3679":{"body":"Display version information for the system and dependencies. # Basic version\\nprovisioning version\\nprovisioning --version\\nprovisioning -V # Detailed version with dependencies\\nprovisioning version --verbose # Show version info with title\\nprovisioning --info\\nprovisioning -I Options: --verbose - Show detailed version information --dependencies - Include dependency versions","breadcrumbs":"CLI Reference » version - Show Version Information","id":"3679","title":"version - Show Version Information"},"368":{"body":"# Check service status\\nprovisioning platform status orchestrator # View service logs\\nprovisioning platform logs orchestrator --tail 100 # Restart service\\nprovisioning platform restart orchestrator","breadcrumbs":"Verification » Platform Service Down","id":"368","title":"Platform Service Down"},"3680":{"body":"Display current environment configuration and settings. # Show environment variables\\nprovisioning env # Show all environment and configuration\\nprovisioning allenv # Show specific environment\\nprovisioning env --environment prod # Export environment\\nprovisioning env --export Output includes: Configuration file locations Environment variables Provider settings Path configurations","breadcrumbs":"CLI Reference » env - Environment Information","id":"3680","title":"env - Environment Information"},"3681":{"body":"","breadcrumbs":"CLI Reference » Server Management Commands","id":"3681","title":"Server Management Commands"},"3682":{"body":"Create new server instances based on configuration. # Create all servers in infrastructure\\nprovisioning server create --infra my-infra # Dry run (check mode)\\nprovisioning server create --infra my-infra --check # Create with confirmation\\nprovisioning server create --infra my-infra --yes # Create and wait for completion\\nprovisioning server create --infra my-infra --wait # Create specific server\\nprovisioning server create web-01 --infra my-infra # Create with custom settings\\nprovisioning server create --infra my-infra --settings custom.ncl Options: --check, -c - Dry run mode (show what would be created) --yes, -y - Auto-confirm creation --wait, -w - Wait for servers to be fully ready --settings, -s - Custom settings file --template, -t - Use specific template","breadcrumbs":"CLI Reference » server create - Create Servers","id":"3682","title":"server create - Create Servers"},"3683":{"body":"Remove server instances and associated resources. # Delete all servers\\nprovisioning server delete --infra my-infra # Delete with confirmation\\nprovisioning server delete --infra my-infra --yes # Delete but keep storage\\nprovisioning server delete --infra my-infra --keepstorage # Delete specific server\\nprovisioning server delete web-01 --infra my-infra # Dry run deletion\\nprovisioning server delete --infra my-infra --check Options: --yes, -y - Auto-confirm deletion --keepstorage - Preserve storage volumes --force - Force deletion even if servers are running","breadcrumbs":"CLI Reference » server delete - Delete Servers","id":"3683","title":"server delete - Delete Servers"},"3684":{"body":"Display information about servers. # List all servers\\nprovisioning server list --infra my-infra # List with detailed information\\nprovisioning server list --infra my-infra --detailed # List in specific format\\nprovisioning server list --infra my-infra --out json # List servers across all infrastructures\\nprovisioning server list --all # Filter by status\\nprovisioning server list --infra my-infra --status running Options: --detailed - Show detailed server information --status - Filter by server status --all - Show servers from all infrastructures","breadcrumbs":"CLI Reference » server list - List Servers","id":"3684","title":"server list - List Servers"},"3685":{"body":"Connect to servers via SSH. # SSH to server\\nprovisioning server ssh web-01 --infra my-infra # SSH with specific user\\nprovisioning server ssh web-01 --user admin --infra my-infra # SSH with custom key\\nprovisioning server ssh web-01 --key ~/.ssh/custom_key --infra my-infra # Execute single command\\nprovisioning server ssh web-01 --command \\"systemctl status nginx\\" --infra my-infra Options: --user - SSH username (default from configuration) --key - SSH private key file --command - Execute command and exit --port - SSH port (default: 22)","breadcrumbs":"CLI Reference » server ssh - SSH Access","id":"3685","title":"server ssh - SSH Access"},"3686":{"body":"Display pricing information for servers. # Show costs for all servers\\nprovisioning server price --infra my-infra # Show detailed cost breakdown\\nprovisioning server price --infra my-infra --detailed # Show monthly estimates\\nprovisioning server price --infra my-infra --monthly # Cost comparison between providers\\nprovisioning server price --infra my-infra --compare Options: --detailed - Detailed cost breakdown --monthly - Monthly cost estimates --compare - Compare costs across providers","breadcrumbs":"CLI Reference » server price - Cost Information","id":"3686","title":"server price - Cost Information"},"3687":{"body":"","breadcrumbs":"CLI Reference » Task Service Commands","id":"3687","title":"Task Service Commands"},"3688":{"body":"Install and configure task services on servers. # Install service on all eligible servers\\nprovisioning taskserv create kubernetes --infra my-infra # Install with check mode\\nprovisioning taskserv create kubernetes --infra my-infra --check # Install specific version\\nprovisioning taskserv create kubernetes --version 1.28 --infra my-infra # Install on specific servers\\nprovisioning taskserv create postgresql --servers db-01,db-02 --infra my-infra # Install with custom configuration\\nprovisioning taskserv create kubernetes --config k8s-config.yaml --infra my-infra Options: --version - Specific version to install --config - Custom configuration file --servers - Target specific servers --force - Force installation even if conflicts exist","breadcrumbs":"CLI Reference » taskserv create - Install Services","id":"3688","title":"taskserv create - Install Services"},"3689":{"body":"Remove task services from servers. # Remove service\\nprovisioning taskserv delete kubernetes --infra my-infra # Remove with data cleanup\\nprovisioning taskserv delete postgresql --cleanup-data --infra my-infra # Remove from specific servers\\nprovisioning taskserv delete nginx --servers web-01,web-02 --infra my-infra # Dry run removal\\nprovisioning taskserv delete kubernetes --infra my-infra --check Options: --cleanup-data - Remove associated data --servers - Target specific servers --force - Force removal","breadcrumbs":"CLI Reference » taskserv delete - Remove Services","id":"3689","title":"taskserv delete - Remove Services"},"369":{"body":"","breadcrumbs":"Verification » Performance Verification","id":"369","title":"Performance Verification"},"3690":{"body":"Display available and installed task services. # List all available services\\nprovisioning taskserv list # List installed services\\nprovisioning taskserv list --infra my-infra --installed # List by category\\nprovisioning taskserv list --category database # List with versions\\nprovisioning taskserv list --versions # Search services\\nprovisioning taskserv list --search kubernetes Options: --installed - Show only installed services --category - Filter by service category --versions - Include version information --search - Search by name or description","breadcrumbs":"CLI Reference » taskserv list - List Services","id":"3690","title":"taskserv list - List Services"},"3691":{"body":"Generate configuration files for task services. # Generate configuration\\nprovisioning taskserv generate kubernetes --infra my-infra # Generate with custom template\\nprovisioning taskserv generate kubernetes --template custom --infra my-infra # Generate for specific servers\\nprovisioning taskserv generate nginx --servers web-01,web-02 --infra my-infra # Generate and save to file\\nprovisioning taskserv generate postgresql --output db-config.yaml --infra my-infra Options: --template - Use specific template --output - Save to specific file --servers - Target specific servers","breadcrumbs":"CLI Reference » taskserv generate - Generate Configurations","id":"3691","title":"taskserv generate - Generate Configurations"},"3692":{"body":"Check for and manage service version updates. # Check updates for all services\\nprovisioning taskserv check-updates --infra my-infra # Check specific service\\nprovisioning taskserv check-updates kubernetes --infra my-infra # Show available versions\\nprovisioning taskserv versions kubernetes # Update to latest version\\nprovisioning taskserv update kubernetes --infra my-infra # Update to specific version\\nprovisioning taskserv update kubernetes --version 1.29 --infra my-infra Options: --version - Target specific version --security-only - Only security updates --dry-run - Show what would be updated","breadcrumbs":"CLI Reference » taskserv check-updates - Version Management","id":"3692","title":"taskserv check-updates - Version Management"},"3693":{"body":"","breadcrumbs":"CLI Reference » Cluster Management Commands","id":"3693","title":"Cluster Management Commands"},"3694":{"body":"Deploy and configure application clusters. # Create cluster\\nprovisioning cluster create web-cluster --infra my-infra # Create with check mode\\nprovisioning cluster create web-cluster --infra my-infra --check # Create with custom configuration\\nprovisioning cluster create web-cluster --config cluster.yaml --infra my-infra # Create and scale immediately\\nprovisioning cluster create web-cluster --replicas 5 --infra my-infra Options: --config - Custom cluster configuration --replicas - Initial replica count --namespace - Kubernetes namespace","breadcrumbs":"CLI Reference » cluster create - Deploy Clusters","id":"3694","title":"cluster create - Deploy Clusters"},"3695":{"body":"Remove application clusters and associated resources. # Delete cluster\\nprovisioning cluster delete web-cluster --infra my-infra # Delete with data cleanup\\nprovisioning cluster delete web-cluster --cleanup --infra my-infra # Force delete\\nprovisioning cluster delete web-cluster --force --infra my-infra Options: --cleanup - Remove associated data --force - Force deletion --keep-volumes - Preserve persistent volumes","breadcrumbs":"CLI Reference » cluster delete - Remove Clusters","id":"3695","title":"cluster delete - Remove Clusters"},"3696":{"body":"Display information about deployed clusters. # List all clusters\\nprovisioning cluster list --infra my-infra # List with status\\nprovisioning cluster list --infra my-infra --status # List across all infrastructures\\nprovisioning cluster list --all # Filter by namespace\\nprovisioning cluster list --namespace production --infra my-infra Options: --status - Include status information --all - Show clusters from all infrastructures --namespace - Filter by namespace","breadcrumbs":"CLI Reference » cluster list - List Clusters","id":"3696","title":"cluster list - List Clusters"},"3697":{"body":"Adjust cluster size and resources. # Scale cluster\\nprovisioning cluster scale web-cluster --replicas 10 --infra my-infra # Auto-scale configuration\\nprovisioning cluster scale web-cluster --auto-scale --min 3 --max 20 --infra my-infra # Scale specific component\\nprovisioning cluster scale web-cluster --component api --replicas 5 --infra my-infra Options: --replicas - Target replica count --auto-scale - Enable auto-scaling --min, --max - Auto-scaling limits --component - Scale specific component","breadcrumbs":"CLI Reference » cluster scale - Scale Clusters","id":"3697","title":"cluster scale - Scale Clusters"},"3698":{"body":"","breadcrumbs":"CLI Reference » Infrastructure Commands","id":"3698","title":"Infrastructure Commands"},"3699":{"body":"Generate infrastructure and configuration files. # Generate new infrastructure\\nprovisioning generate infra --new my-infrastructure # Generate from template\\nprovisioning generate infra --template web-app --name my-app # Generate server configurations\\nprovisioning generate server --infra my-infra # Generate task service configurations\\nprovisioning generate taskserv --infra my-infra # Generate cluster configurations\\nprovisioning generate cluster --infra my-infra Subcommands: infra - Infrastructure configurations server - Server configurations taskserv - Task service configurations cluster - Cluster configurations Options: --new - Create new infrastructure --template - Use specific template --name - Name for generated resources --output - Output directory","breadcrumbs":"CLI Reference » generate - Generate Configurations","id":"3699","title":"generate - Generate Configurations"},"37":{"body":"","breadcrumbs":"Home » Support","id":"37","title":"Support"},"370":{"body":"# Measure server response time\\ntime provisioning server info dev-server-01 # Measure task service response time\\ntime provisioning taskserv list # Measure workflow submission time\\ntime provisioning workflow submit test-workflow.ncl","breadcrumbs":"Verification » Response Time Tests","id":"370","title":"Response Time Tests"},"3700":{"body":"Show detailed information about infrastructure components. # Show settings\\nprovisioning show settings --infra my-infra # Show servers\\nprovisioning show servers --infra my-infra # Show specific server\\nprovisioning show servers web-01 --infra my-infra # Show task services\\nprovisioning show taskservs --infra my-infra # Show costs\\nprovisioning show costs --infra my-infra # Show in different format\\nprovisioning show servers --infra my-infra --out json Subcommands: settings - Configuration settings servers - Server information taskservs - Task service information costs - Cost information data - Raw infrastructure data","breadcrumbs":"CLI Reference » show - Display Information","id":"3700","title":"show - Display Information"},"3701":{"body":"List resource types (servers, networks, volumes, etc.). # List providers\\nprovisioning list providers # List task services\\nprovisioning list taskservs # List clusters\\nprovisioning list clusters # List infrastructures\\nprovisioning list infras # List with selection interface\\nprovisioning list servers --select Subcommands: providers - Available providers taskservs - Available task services clusters - Available clusters infras - Available infrastructures servers - Server instances","breadcrumbs":"CLI Reference » list - List Resources","id":"3701","title":"list - List Resources"},"3702":{"body":"Validate configuration files and infrastructure definitions. # Validate configuration\\nprovisioning validate config --infra my-infra # Validate with detailed output\\nprovisioning validate config --detailed --infra my-infra # Validate specific file\\nprovisioning validate config settings.ncl --infra my-infra # Quick validation\\nprovisioning validate quick --infra my-infra # Validate interpolation\\nprovisioning validate interpolation --infra my-infra Subcommands: config - Configuration validation quick - Quick infrastructure validation interpolation - Interpolation pattern validation Options: --detailed - Show detailed validation results --strict - Strict validation mode --rules - Show validation rules","breadcrumbs":"CLI Reference » validate - Validate Configuration","id":"3702","title":"validate - Validate Configuration"},"3703":{"body":"","breadcrumbs":"CLI Reference » Configuration Commands","id":"3703","title":"Configuration Commands"},"3704":{"body":"Initialize user and project configurations. # Initialize user configuration\\nprovisioning init config # Initialize with specific template\\nprovisioning init config dev # Initialize project configuration\\nprovisioning init project # Force overwrite existing\\nprovisioning init config --force Subcommands: config - User configuration project - Project configuration Options: --template - Configuration template --force - Overwrite existing files","breadcrumbs":"CLI Reference » init - Initialize Configuration","id":"3704","title":"init - Initialize Configuration"},"3705":{"body":"Manage configuration templates. # List available templates\\nprovisioning template list # Show template content\\nprovisioning template show dev # Validate templates\\nprovisioning template validate # Create custom template\\nprovisioning template create my-template --from dev Subcommands: list - List available templates show - Display template content validate - Validate templates create - Create custom template","breadcrumbs":"CLI Reference » template - Template Management","id":"3705","title":"template - Template Management"},"3706":{"body":"","breadcrumbs":"CLI Reference » Advanced Commands","id":"3706","title":"Advanced Commands"},"3707":{"body":"Start interactive Nushell session with provisioning library loaded. # Start interactive shell\\nprovisioning nu # Execute specific command\\nprovisioning nu -c \\"use lib_provisioning *; show_env\\" # Start with custom script\\nprovisioning nu --script my-script.nu Options: -c - Execute command and exit --script - Run specific script --load - Load additional modules","breadcrumbs":"CLI Reference » nu - Interactive Shell","id":"3707","title":"nu - Interactive Shell"},"3708":{"body":"Edit encrypted configuration files using SOPS. # Edit encrypted file\\nprovisioning sops settings.ncl --infra my-infra # Encrypt new file\\nprovisioning sops --encrypt new-secrets.ncl --infra my-infra # Decrypt for viewing\\nprovisioning sops --decrypt secrets.ncl --infra my-infra # Rotate keys\\nprovisioning sops --rotate-keys secrets.ncl --infra my-infra Options: --encrypt - Encrypt file --decrypt - Decrypt file --rotate-keys - Rotate encryption keys","breadcrumbs":"CLI Reference » sops - Secret Management","id":"3708","title":"sops - Secret Management"},"3709":{"body":"Manage infrastructure contexts and environments. # Show current context\\nprovisioning context # List available contexts\\nprovisioning context list # Switch context\\nprovisioning context switch production # Create new context\\nprovisioning context create staging --from development # Delete context\\nprovisioning context delete old-context Subcommands: list - List contexts switch - Switch active context create - Create new context delete - Delete context","breadcrumbs":"CLI Reference » context - Context Management","id":"3709","title":"context - Context Management"},"371":{"body":"# Check platform resource usage\\ndocker stats # If using Docker # Check system resources\\nprovisioning system resources","breadcrumbs":"Verification » Resource Usage","id":"371","title":"Resource Usage"},"3710":{"body":"","breadcrumbs":"CLI Reference » Workflow Commands","id":"3710","title":"Workflow Commands"},"3711":{"body":"Manage complex workflows and batch operations. # Submit batch workflow\\nprovisioning workflows batch submit my-workflow.ncl # Monitor workflow progress\\nprovisioning workflows batch monitor workflow-123 # List workflows\\nprovisioning workflows batch list --status running # Get workflow status\\nprovisioning workflows batch status workflow-123 # Rollback failed workflow\\nprovisioning workflows batch rollback workflow-123 Options: --status - Filter by workflow status --follow - Follow workflow progress --timeout - Set timeout for operations","breadcrumbs":"CLI Reference » workflows - Batch Operations","id":"3711","title":"workflows - Batch Operations"},"3712":{"body":"Control the hybrid orchestrator system. # Start orchestrator\\nprovisioning orchestrator start # Check orchestrator status\\nprovisioning orchestrator status # Stop orchestrator\\nprovisioning orchestrator stop # Show orchestrator logs\\nprovisioning orchestrator logs # Health check\\nprovisioning orchestrator health","breadcrumbs":"CLI Reference » orchestrator - Orchestrator Management","id":"3712","title":"orchestrator - Orchestrator Management"},"3713":{"body":"","breadcrumbs":"CLI Reference » Scripting and Automation","id":"3713","title":"Scripting and Automation"},"3714":{"body":"Provisioning uses standard exit codes: 0 - Success 1 - General error 2 - Invalid command or arguments 3 - Configuration error 4 - Permission denied 5 - Resource not found","breadcrumbs":"CLI Reference » Exit Codes","id":"3714","title":"Exit Codes"},"3715":{"body":"Control behavior through environment variables: # Enable debug mode\\nexport PROVISIONING_DEBUG=true # Set environment\\nexport PROVISIONING_ENV=production # Set output format\\nexport PROVISIONING_OUTPUT_FORMAT=json # Disable interactive prompts\\nexport PROVISIONING_NONINTERACTIVE=true","breadcrumbs":"CLI Reference » Environment Variables","id":"3715","title":"Environment Variables"},"3716":{"body":"#!/bin/bash\\n# Example batch script # Set environment\\nexport PROVISIONING_ENV=production\\nexport PROVISIONING_NONINTERACTIVE=true # Validate first\\nif ! provisioning validate config --infra production; then echo \\"Configuration validation failed\\" exit 1\\nfi # Create infrastructure\\nprovisioning server create --infra production --yes --wait # Install services\\nprovisioning taskserv create kubernetes --infra production --yes\\nprovisioning taskserv create postgresql --infra production --yes # Deploy clusters\\nprovisioning cluster create web-app --infra production --yes echo \\"Deployment completed successfully\\"","breadcrumbs":"CLI Reference » Batch Operations","id":"3716","title":"Batch Operations"},"3717":{"body":"# Get server list as JSON\\nservers=$(provisioning server list --infra my-infra --out json) # Process with jq\\necho \\"$servers\\" | jq \'.[] | select(.status == \\"running\\") | .name\' # Use in scripts\\nfor server in $(echo \\"$servers\\" | jq -r \'.[] | select(.status == \\"running\\") | .name\'); do echo \\"Processing server: $server\\" provisioning server ssh \\"$server\\" --command \\"uptime\\" --infra my-infra\\ndone","breadcrumbs":"CLI Reference » JSON Output Processing","id":"3717","title":"JSON Output Processing"},"3718":{"body":"","breadcrumbs":"CLI Reference » Command Chaining and Pipelines","id":"3718","title":"Command Chaining and Pipelines"},"3719":{"body":"# Chain commands with && (stop on failure)\\nprovisioning validate config --infra my-infra && \\\\\\nprovisioning server create --infra my-infra --check && \\\\\\nprovisioning server create --infra my-infra --yes # Chain with || (continue on failure)\\nprovisioning taskserv create kubernetes --infra my-infra || \\\\\\necho \\"Kubernetes installation failed, continuing with other services\\"","breadcrumbs":"CLI Reference » Sequential Operations","id":"3719","title":"Sequential Operations"},"372":{"body":"","breadcrumbs":"Verification » Security Verification","id":"372","title":"Security Verification"},"3720":{"body":"# Full deployment workflow\\ndeploy_infrastructure() { local infra_name=$1 echo \\"Deploying infrastructure: $infra_name\\" # Validate provisioning validate config --infra \\"$infra_name\\" || return 1 # Create servers provisioning server create --infra \\"$infra_name\\" --yes --wait || return 1 # Install base services for service in containerd kubernetes; do provisioning taskserv create \\"$service\\" --infra \\"$infra_name\\" --yes || return 1 done # Deploy applications provisioning cluster create web-app --infra \\"$infra_name\\" --yes || return 1 echo \\"Deployment completed: $infra_name\\"\\n} # Use the function\\ndeploy_infrastructure \\"production\\"","breadcrumbs":"CLI Reference » Complex Workflows","id":"3720","title":"Complex Workflows"},"3721":{"body":"","breadcrumbs":"CLI Reference » Integration with Other Tools","id":"3721","title":"Integration with Other Tools"},"3722":{"body":"# GitLab CI example\\ndeploy: script: - provisioning validate config --infra production - provisioning server create --infra production --check - provisioning server create --infra production --yes --wait - provisioning taskserv create kubernetes --infra production --yes only: - main","breadcrumbs":"CLI Reference » CI/CD Integration","id":"3722","title":"CI/CD Integration"},"3723":{"body":"# Health check script\\n#!/bin/bash # Check infrastructure health\\nif provisioning health check --infra production --out json | jq -e \'.healthy\'; then echo \\"Infrastructure healthy\\" exit 0\\nelse echo \\"Infrastructure unhealthy\\" # Send alert curl -X POST https://alerts.company.com/webhook \\\\ -d \'{\\"message\\": \\"Infrastructure health check failed\\"}\' exit 1\\nfi","breadcrumbs":"CLI Reference » Monitoring Integration","id":"3723","title":"Monitoring Integration"},"3724":{"body":"# Backup script\\n#!/bin/bash DATE=$(date +%Y%m%d_%H%M%S)\\nBACKUP_DIR=\\"/backups/provisioning/$DATE\\" # Create backup directory\\nmkdir -p \\"$BACKUP_DIR\\" # Export configurations\\nprovisioning config export --format yaml > \\"$BACKUP_DIR/config.yaml\\" # Backup infrastructure definitions\\nfor infra in $(provisioning list infras --out json | jq -r \'.[]\'); do provisioning show settings --infra \\"$infra\\" --out yaml > \\"$BACKUP_DIR/$infra.yaml\\"\\ndone echo \\"Backup completed: $BACKUP_DIR\\" This CLI reference provides comprehensive coverage of all provisioning commands. Use it as your primary reference for command syntax, options, and integration patterns.","breadcrumbs":"CLI Reference » Backup Automation","id":"3724","title":"Backup Automation"},"3725":{"body":"This guide covers generating and managing temporary credentials (dynamic secrets) instead of using static secrets. See the Quick Reference section below for fast lookup.","breadcrumbs":"Dynamic Secrets Guide » Dynamic Secrets Guide","id":"3725","title":"Dynamic Secrets Guide"},"3726":{"body":"Quick Start : Generate temporary credentials instead of using static secrets","breadcrumbs":"Dynamic Secrets Guide » Quick Reference","id":"3726","title":"Quick Reference"},"3727":{"body":"Generate AWS Credentials (1 hour) secrets generate aws --role deploy --workspace prod --purpose \\"deployment\\" Generate SSH Key (2 hours) secrets generate ssh --ttl 2 --workspace dev --purpose \\"server access\\" Generate UpCloud Subaccount (2 hours) secrets generate upcloud --workspace staging --purpose \\"testing\\" List Active Secrets secrets list Revoke Secret secrets revoke --reason \\"no longer needed\\" View Statistics secrets stats","breadcrumbs":"Dynamic Secrets Guide » Quick Commands","id":"3727","title":"Quick Commands"},"3728":{"body":"Type TTL Range Renewable Use Case AWS STS 15 min - 12 h ✅ Yes Cloud resource provisioning SSH Keys 10 min - 24 h ❌ No Temporary server access UpCloud 30 min - 8 h ❌ No UpCloud API operations Vault 5 min - 24 h ✅ Yes Any Vault-backed secret","breadcrumbs":"Dynamic Secrets Guide » Secret Types","id":"3728","title":"Secret Types"},"3729":{"body":"Base URL : http://localhost:9090/api/v1/secrets # Generate secret\\nPOST /generate # Get secret\\nGET /{id} # Revoke secret\\nPOST /{id}/revoke # Renew secret\\nPOST /{id}/renew # List secrets\\nGET /list # List expiring\\nGET /expiring # Statistics\\nGET /stats","breadcrumbs":"Dynamic Secrets Guide » REST API Endpoints","id":"3729","title":"REST API Endpoints"},"373":{"body":"# Verify encryption keys\\nls -la ~/.config/provisioning/age/ # Test encryption/decryption\\necho \\"test\\" | provisioning kms encrypt | provisioning kms decrypt","breadcrumbs":"Verification » Encryption","id":"373","title":"Encryption"},"3730":{"body":"# Generate\\nlet creds = secrets generate aws ` --role deploy ` --region us-west-2 ` --workspace prod ` --purpose \\"Deploy servers\\" # Export to environment\\nexport-env { AWS_ACCESS_KEY_ID: ($creds.credentials.access_key_id) AWS_SECRET_ACCESS_KEY: ($creds.credentials.secret_access_key) AWS_SESSION_TOKEN: ($creds.credentials.session_token)\\n} # Use credentials\\nprovisioning server create # Cleanup\\nsecrets revoke ($creds.id) --reason \\"done\\"","breadcrumbs":"Dynamic Secrets Guide » AWS STS Example","id":"3730","title":"AWS STS Example"},"3731":{"body":"# Generate\\nlet key = secrets generate ssh ` --ttl 4 ` --workspace dev ` --purpose \\"Debug issue\\" # Save key\\n$key.credentials.private_key | save ~/.ssh/temp_key\\nchmod 600 ~/.ssh/temp_key # Use key\\nssh -i ~/.ssh/temp_key user@server # Cleanup\\nrm ~/.ssh/temp_key\\nsecrets revoke ($key.id) --reason \\"fixed\\"","breadcrumbs":"Dynamic Secrets Guide » SSH Key Example","id":"3731","title":"SSH Key Example"},"3732":{"body":"File : provisioning/platform/orchestrator/config.defaults.toml [secrets]\\ndefault_ttl_hours = 1\\nmax_ttl_hours = 12\\nauto_revoke_on_expiry = true\\nwarning_threshold_minutes = 5 aws_account_id = \\"123456789012\\"\\naws_default_region = \\"us-east-1\\" upcloud_username = \\"${UPCLOUD_USER}\\"\\nupcloud_password = \\"${UPCLOUD_PASS}\\"","breadcrumbs":"Dynamic Secrets Guide » Configuration","id":"3732","title":"Configuration"},"3733":{"body":"","breadcrumbs":"Dynamic Secrets Guide » Troubleshooting","id":"3733","title":"Troubleshooting"},"3734":{"body":"→ Check service initialization","breadcrumbs":"Dynamic Secrets Guide » \\"Provider not found\\"","id":"3734","title":"\\"Provider not found\\""},"3735":{"body":"→ Reduce TTL or configure higher max","breadcrumbs":"Dynamic Secrets Guide » \\"TTL exceeds maximum\\"","id":"3735","title":"\\"TTL exceeds maximum\\""},"3736":{"body":"→ Generate new secret instead","breadcrumbs":"Dynamic Secrets Guide » \\"Secret not renewable\\"","id":"3736","title":"\\"Secret not renewable\\""},"3737":{"body":"→ Check provider requirements (for example, AWS needs \'role\')","breadcrumbs":"Dynamic Secrets Guide » \\"Missing required parameter\\"","id":"3737","title":"\\"Missing required parameter\\""},"3738":{"body":"✅ No static credentials stored ✅ Automatic expiration (1-12 hours) ✅ Auto-revocation on expiry ✅ Full audit trail ✅ Memory-only storage ✅ TLS in transit","breadcrumbs":"Dynamic Secrets Guide » Security Features","id":"3738","title":"Security Features"},"3739":{"body":"Orchestrator logs : provisioning/platform/orchestrator/data/orchestrator.log Debug secrets : secrets list | where is_expired == true","breadcrumbs":"Dynamic Secrets Guide » Support","id":"3739","title":"Support"},"374":{"body":"# Test login\\nprovisioning login --username admin # Verify token\\nprovisioning whoami # Test MFA (if enabled)\\nprovisioning mfa verify ","breadcrumbs":"Verification » Authentication (If Enabled)","id":"374","title":"Authentication (If Enabled)"},"3740":{"body":"Version : 1.0.0 | Date : 2025-10-06","breadcrumbs":"Mode System Guide » Mode System Quick Reference","id":"3740","title":"Mode System Quick Reference"},"3741":{"body":"# Check current mode\\nprovisioning mode current # List all available modes\\nprovisioning mode list # Switch to a different mode\\nprovisioning mode switch # Validate mode configuration\\nprovisioning mode validate","breadcrumbs":"Mode System Guide » Quick Start","id":"3741","title":"Quick Start"},"3742":{"body":"Mode Use Case Auth Orchestrator OCI Registry solo Local development None Local binary Local Zot (optional) multi-user Team collaboration Token (JWT) Remote Remote Harbor cicd CI/CD pipelines Token (CI injected) Remote Remote Harbor enterprise Production mTLS Kubernetes HA Harbor HA + DR","breadcrumbs":"Mode System Guide » Available Modes","id":"3742","title":"Available Modes"},"3743":{"body":"","breadcrumbs":"Mode System Guide » Mode Comparison","id":"3743","title":"Mode Comparison"},"3744":{"body":"✅ Best for : Individual developers 🔐 Authentication : None 🚀 Services : Local orchestrator only 📦 Extensions : Local filesystem 🔒 Workspace Locking : Disabled 💾 Resource Limits : Unlimited","breadcrumbs":"Mode System Guide » Solo Mode","id":"3744","title":"Solo Mode"},"3745":{"body":"✅ Best for : Development teams (5-20 developers) 🔐 Authentication : Token (JWT, 24h expiry) 🚀 Services : Remote orchestrator, control-center, DNS, git 📦 Extensions : OCI registry (Harbor) 🔒 Workspace Locking : Enabled (Gitea provider) 💾 Resource Limits : 10 servers, 32 cores, 128 GB per user","breadcrumbs":"Mode System Guide » Multi-User Mode","id":"3745","title":"Multi-User Mode"},"3746":{"body":"✅ Best for : Automated pipelines 🔐 Authentication : Token (1h expiry, CI/CD injected) 🚀 Services : Remote orchestrator, DNS, git 📦 Extensions : OCI registry (always pull latest) 🔒 Workspace Locking : Disabled (stateless) 💾 Resource Limits : 5 servers, 16 cores, 64 GB per pipeline","breadcrumbs":"Mode System Guide » CI/CD Mode","id":"3746","title":"CI/CD Mode"},"3747":{"body":"✅ Best for : Large enterprises with strict compliance 🔐 Authentication : mTLS (TLS 1.3) 🚀 Services : All services on Kubernetes (HA) 📦 Extensions : OCI registry (signature verification) 🔒 Workspace Locking : Required (etcd provider) 💾 Resource Limits : 20 servers, 64 cores, 256 GB per user","breadcrumbs":"Mode System Guide » Enterprise Mode","id":"3747","title":"Enterprise Mode"},"3748":{"body":"","breadcrumbs":"Mode System Guide » Common Operations","id":"3748","title":"Common Operations"},"3749":{"body":"provisioning mode init","breadcrumbs":"Mode System Guide » Initialize Mode System","id":"3749","title":"Initialize Mode System"},"375":{"body":"Use this checklist to ensure everything is working: Configuration validation passes All servers are accessible via SSH All servers show \\"running\\" status All task services show \\"running\\" status Kubernetes nodes are \\"Ready\\" (if installed) Kubernetes pods are \\"Running\\" (if installed) Platform services respond to health checks Encryption/decryption works Workflows can be submitted and complete No errors in logs Resource usage is within expected limits","breadcrumbs":"Verification » Verification Checklist","id":"375","title":"Verification Checklist"},"3750":{"body":"provisioning mode current # Output:\\n# mode: solo\\n# configured: true\\n# config_file: ~/.provisioning/config/active-mode.yaml","breadcrumbs":"Mode System Guide » Check Current Mode","id":"3750","title":"Check Current Mode"},"3751":{"body":"provisioning mode list # Output:\\n# ┌───────────────┬───────────────────────────────────┬─────────┐\\n# │ mode │ description │ current │\\n# ├───────────────┼───────────────────────────────────┼─────────┤\\n# │ solo │ Single developer local development │ ● │\\n# │ multi-user │ Team collaboration │ │\\n# │ cicd │ CI/CD pipeline execution │ │\\n# │ enterprise │ Production enterprise deployment │ │\\n# └───────────────┴───────────────────────────────────┴─────────┘","breadcrumbs":"Mode System Guide » List All Modes","id":"3751","title":"List All Modes"},"3752":{"body":"# Switch with confirmation\\nprovisioning mode switch multi-user # Dry run (preview changes)\\nprovisioning mode switch multi-user --dry-run # With validation\\nprovisioning mode switch multi-user --validate","breadcrumbs":"Mode System Guide » Switch Mode","id":"3752","title":"Switch Mode"},"3753":{"body":"# Show current mode\\nprovisioning mode show # Show specific mode\\nprovisioning mode show enterprise","breadcrumbs":"Mode System Guide » Show Mode Details","id":"3753","title":"Show Mode Details"},"3754":{"body":"# Validate current mode\\nprovisioning mode validate # Validate specific mode\\nprovisioning mode validate cicd","breadcrumbs":"Mode System Guide » Validate Mode","id":"3754","title":"Validate Mode"},"3755":{"body":"provisioning mode compare solo multi-user # Output shows differences in:\\n# - Authentication\\n# - Service deployments\\n# - Extension sources\\n# - Workspace locking\\n# - Security settings","breadcrumbs":"Mode System Guide » Compare Modes","id":"3755","title":"Compare Modes"},"3756":{"body":"","breadcrumbs":"Mode System Guide » OCI Registry Management","id":"3756","title":"OCI Registry Management"},"3757":{"body":"# Start local OCI registry\\nprovisioning mode oci-registry start # Check registry status\\nprovisioning mode oci-registry status # View registry logs\\nprovisioning mode oci-registry logs # Stop registry\\nprovisioning mode oci-registry stop Note : OCI registry management only works in solo mode with local deployment.","breadcrumbs":"Mode System Guide » Solo Mode Only","id":"3757","title":"Solo Mode Only"},"3758":{"body":"","breadcrumbs":"Mode System Guide » Mode-Specific Workflows","id":"3758","title":"Mode-Specific Workflows"},"3759":{"body":"# 1. Initialize (defaults to solo)\\nprovisioning workspace init # 2. Start orchestrator\\ncd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background # 3. (Optional) Start OCI registry\\nprovisioning mode oci-registry start # 4. Create infrastructure\\nprovisioning server create web-01 --check\\nprovisioning taskserv create kubernetes # Extensions loaded from local filesystem","breadcrumbs":"Mode System Guide » Solo Mode Workflow","id":"3759","title":"Solo Mode Workflow"},"376":{"body":"Once verification is complete: User Guide - Learn advanced features Quick Reference - Command shortcuts Infrastructure Management - Day-to-day operations Troubleshooting - Common issues and solutions","breadcrumbs":"Verification » Next Steps","id":"376","title":"Next Steps"},"3760":{"body":"# 1. Switch to multi-user mode\\nprovisioning mode switch multi-user # 2. Authenticate\\nprovisioning auth login\\n# Enter JWT token from team admin # 3. Lock workspace\\nprovisioning workspace lock my-infra # 4. Pull extensions from OCI registry\\nprovisioning extension pull upcloud\\nprovisioning extension pull kubernetes # 5. Create infrastructure\\nprovisioning server create web-01 # 6. Unlock workspace\\nprovisioning workspace unlock my-infra","breadcrumbs":"Mode System Guide » Multi-User Mode Workflow","id":"3760","title":"Multi-User Mode Workflow"},"3761":{"body":"# GitLab CI example\\ndeploy: stage: deploy script: # Token injected by CI - export PROVISIONING_MODE=cicd - mkdir -p /var/run/secrets/provisioning - echo \\"$PROVISIONING_TOKEN\\" > /var/run/secrets/provisioning/token # Validate - provisioning validate --all # Test - provisioning test quick kubernetes # Deploy - provisioning server create --check - provisioning server create after_script: - provisioning workspace cleanup","breadcrumbs":"Mode System Guide » CI/CD Mode Workflow","id":"3761","title":"CI/CD Mode Workflow"},"3762":{"body":"# 1. Switch to enterprise mode\\nprovisioning mode switch enterprise # 2. Verify Kubernetes connectivity\\nkubectl get pods -n provisioning-system # 3. Login to Harbor\\ndocker login harbor.enterprise.local # 4. Request workspace (requires approval)\\nprovisioning workspace request prod-deployment\\n# Approval from: platform-team, security-team # 5. After approval, lock workspace\\nprovisioning workspace lock prod-deployment --provider etcd # 6. Pull extensions (with signature verification)\\nprovisioning extension pull upcloud --verify-signature # 7. Deploy infrastructure\\nprovisioning infra create --check\\nprovisioning infra create # 8. Release workspace\\nprovisioning workspace unlock prod-deployment","breadcrumbs":"Mode System Guide » Enterprise Mode Workflow","id":"3762","title":"Enterprise Mode Workflow"},"3763":{"body":"","breadcrumbs":"Mode System Guide » Configuration Files","id":"3763","title":"Configuration Files"},"3764":{"body":"workspace/config/modes/\\n├── solo.yaml # Solo mode configuration\\n├── multi-user.yaml # Multi-user mode configuration\\n├── cicd.yaml # CI/CD mode configuration\\n└── enterprise.yaml # Enterprise mode configuration","breadcrumbs":"Mode System Guide » Mode Templates","id":"3764","title":"Mode Templates"},"3765":{"body":"~/.provisioning/config/active-mode.yaml This file is created/updated when you switch modes.","breadcrumbs":"Mode System Guide » Active Mode Configuration","id":"3765","title":"Active Mode Configuration"},"3766":{"body":"All modes use the following OCI registry namespaces: Namespace Purpose Example *-extensions Extension artifacts provisioning-extensions/upcloud:latest *-schemas Nickel schema artifacts provisioning-schemas/lib:v1.0.0 *-platform Platform service images provisioning-platform/orchestrator:latest *-test Test environment images provisioning-test/ubuntu:22.04 Note : Prefix varies by mode (dev-, provisioning-, cicd-, prod-)","breadcrumbs":"Mode System Guide » OCI Registry Namespaces","id":"3766","title":"OCI Registry Namespaces"},"3767":{"body":"","breadcrumbs":"Mode System Guide » Troubleshooting","id":"3767","title":"Troubleshooting"},"3768":{"body":"# Validate mode first\\nprovisioning mode validate # Check runtime requirements\\nprovisioning mode validate --check-requirements","breadcrumbs":"Mode System Guide » Mode switch fails","id":"3768","title":"Mode switch fails"},"3769":{"body":"# Check if registry binary is installed\\nwhich zot # Install Zot\\n# macOS: brew install project-zot/tap/zot\\n# Linux: Download from https://github.com/project-zot/zot/releases # Check if port 5000 is available\\nlsof -i :5000","breadcrumbs":"Mode System Guide » Cannot start OCI registry (solo mode)","id":"3769","title":"Cannot start OCI registry (solo mode)"},"377":{"body":"Complete From-Scratch Guide Service Management Guide Test Environment Guide Congratulations! You\'ve successfully deployed and verified your first Provisioning Platform infrastructure!","breadcrumbs":"Verification » Additional Resources","id":"377","title":"Additional Resources"},"3770":{"body":"# Check token expiry\\nprovisioning auth status # Re-authenticate\\nprovisioning auth login # For enterprise mTLS, verify certificates\\nls -la /etc/provisioning/certs/\\n# Should contain: client.crt, client.key, ca.crt","breadcrumbs":"Mode System Guide » Authentication fails (multi-user/cicd/enterprise)","id":"3770","title":"Authentication fails (multi-user/cicd/enterprise)"},"3771":{"body":"# Check lock status\\nprovisioning workspace lock-status # Force unlock (use with caution)\\nprovisioning workspace unlock --force # Check lock provider status\\n# Multi-user: Check Gitea connectivity\\ncurl -I https://git.company.local # Enterprise: Check etcd cluster\\netcdctl endpoint health","breadcrumbs":"Mode System Guide » Workspace locking issues (multi-user/enterprise)","id":"3771","title":"Workspace locking issues (multi-user/enterprise)"},"3772":{"body":"# Test registry connectivity\\ncurl https://harbor.company.local/v2/ # Check authentication token\\ncat ~/.provisioning/tokens/oci # Verify network connectivity\\nping harbor.company.local # For Harbor, check credentials\\ndocker login harbor.company.local","breadcrumbs":"Mode System Guide » OCI registry connection fails","id":"3772","title":"OCI registry connection fails"},"3773":{"body":"Variable Purpose Example PROVISIONING_MODE Override active mode export PROVISIONING_MODE=cicd PROVISIONING_WORKSPACE_CONFIG Override config location ~/.provisioning/config PROVISIONING_PROJECT_ROOT Project root directory /opt/project-provisioning","breadcrumbs":"Mode System Guide » Environment Variables","id":"3773","title":"Environment Variables"},"3774":{"body":"","breadcrumbs":"Mode System Guide » Best Practices","id":"3774","title":"Best Practices"},"3775":{"body":"Solo : Individual development, experimentation Multi-User : Team collaboration, shared infrastructure CI/CD : Automated testing and deployment Enterprise : Production deployments, compliance requirements","breadcrumbs":"Mode System Guide » 1. Use Appropriate Mode","id":"3775","title":"1. Use Appropriate Mode"},"3776":{"body":"provisioning mode validate ","breadcrumbs":"Mode System Guide » 2. Validate Before Switching","id":"3776","title":"2. Validate Before Switching"},"3777":{"body":"# Automatic backup created when switching\\nls ~/.provisioning/config/active-mode.yaml.backup","breadcrumbs":"Mode System Guide » 3. Backup Active Configuration","id":"3777","title":"3. Backup Active Configuration"},"3778":{"body":"provisioning server create --check","breadcrumbs":"Mode System Guide » 4. Use Check Mode","id":"3778","title":"4. Use Check Mode"},"3779":{"body":"provisioning workspace lock \\n# ... make changes ...\\nprovisioning workspace unlock ","breadcrumbs":"Mode System Guide » 5. Lock Workspaces in Multi-User/Enterprise","id":"3779","title":"5. Lock Workspaces in Multi-User/Enterprise"},"378":{"body":"After verifying your installation, the next step is to configure the platform services. This guide walks you through setting up your provisioning platform for deployment.","breadcrumbs":"Platform Service Configuration » Platform Service Configuration","id":"378","title":"Platform Service Configuration"},"3780":{"body":"# Don\'t use local extensions in shared modes\\nprovisioning extension pull ","breadcrumbs":"Mode System Guide » 6. Pull Extensions from OCI (Multi-User/CI/CD/Enterprise)","id":"3780","title":"6. Pull Extensions from OCI (Multi-User/CI/CD/Enterprise)"},"3781":{"body":"","breadcrumbs":"Mode System Guide » Security Considerations","id":"3781","title":"Security Considerations"},"3782":{"body":"⚠️ No authentication (local development only) ⚠️ No encryption (sensitive data should use SOPS) ✅ Isolated environment","breadcrumbs":"Mode System Guide » Solo Mode","id":"3782","title":"Solo Mode"},"3783":{"body":"✅ Token-based authentication ✅ TLS in transit ✅ Audit logging ⚠️ No encryption at rest (configure as needed)","breadcrumbs":"Mode System Guide » Multi-User Mode","id":"3783","title":"Multi-User Mode"},"3784":{"body":"✅ Token authentication (short expiry) ✅ Full encryption (at rest + in transit) ✅ KMS for secrets ✅ Vulnerability scanning (critical threshold) ✅ Image signing required","breadcrumbs":"Mode System Guide » CI/CD Mode","id":"3784","title":"CI/CD Mode"},"3785":{"body":"✅ mTLS authentication ✅ Full encryption (at rest + in transit) ✅ KMS for all secrets ✅ Vulnerability scanning (critical threshold) ✅ Image signing + signature verification ✅ Network isolation ✅ Compliance policies (SOC2, ISO27001, HIPAA)","breadcrumbs":"Mode System Guide » Enterprise Mode","id":"3785","title":"Enterprise Mode"},"3786":{"body":"Implementation Summary : MODE_SYSTEM_IMPLEMENTATION_SUMMARY.md Nickel Schemas : provisioning/schemas/modes.ncl, provisioning/schemas/oci_registry.ncl Mode Templates : workspace/config/modes/*.yaml Commands : provisioning/core/nulib/lib_provisioning/mode/ Last Updated : 2025-10-06 | Version : 1.0.0","breadcrumbs":"Mode System Guide » Support and Documentation","id":"3786","title":"Support and Documentation"},"3787":{"body":"This guide covers the unified configuration rendering system in the CLI daemon that supports Nickel and Tera template engines.","breadcrumbs":"Config Rendering Guide » Configuration Rendering Guide","id":"3787","title":"Configuration Rendering Guide"},"3788":{"body":"The CLI daemon (cli-daemon) provides a high-performance REST API for rendering configurations in multiple formats: Nickel : Functional configuration language with lazy evaluation and type safety (primary choice) Tera : Jinja2-compatible template engine (simple templating) All renderers are accessible through a single unified API endpoint with intelligent caching to minimize latency.","breadcrumbs":"Config Rendering Guide » Overview","id":"3788","title":"Overview"},"3789":{"body":"","breadcrumbs":"Config Rendering Guide » Quick Start","id":"3789","title":"Quick Start"},"379":{"body":"Understanding platform services and configuration modes Setting up platform configurations with setup-platform-config.sh Choosing the right deployment mode for your use case Configuring services interactively or with quick mode Running platform services with your configuration","breadcrumbs":"Platform Service Configuration » What You\'ll Learn","id":"379","title":"What You\'ll Learn"},"3790":{"body":"The daemon runs on port 9091 by default: # Start in background\\n./target/release/cli-daemon & # Check it\'s running\\ncurl http://localhost:9091/health","breadcrumbs":"Config Rendering Guide » Starting the Daemon","id":"3790","title":"Starting the Daemon"},"3791":{"body":"curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"language\\": \\"nickel\\", \\"content\\": \\"{ name = \\\\\\"my-server\\\\\\", cpu = 4, memory = 8192 }\\", \\"name\\": \\"server-config\\" }\' Response : { \\"rendered\\": \\"{ name = \\\\\\"my-server\\\\\\", cpu = 4, memory = 8192 }\\", \\"error\\": null, \\"language\\": \\"nickel\\", \\"execution_time_ms\\": 23\\n}","breadcrumbs":"Config Rendering Guide » Simple Nickel Rendering","id":"3791","title":"Simple Nickel Rendering"},"3792":{"body":"","breadcrumbs":"Config Rendering Guide » REST API Reference","id":"3792","title":"REST API Reference"},"3793":{"body":"Render a configuration in any supported language. Request Headers : Content-Type: application/json Request Body : { \\"language\\": \\"nickel|tera\\", \\"content\\": \\"...configuration content...\\", \\"context\\": { \\"key1\\": \\"value1\\", \\"key2\\": 123 }, \\"name\\": \\"optional-config-name\\"\\n} Parameters : Parameter Type Required Description language string Yes One of: nickel, tera content string Yes The configuration or template content to render context object No Variables to pass to the configuration (JSON object) name string No Optional name for logging purposes Response (Success): { \\"rendered\\": \\"...rendered output...\\", \\"error\\": null, \\"language\\": \\"nickel\\", \\"execution_time_ms\\": 23\\n} Response (Error): { \\"rendered\\": null, \\"error\\": \\"Nickel evaluation failed: undefined variable \'name\'\\", \\"language\\": \\"nickel\\", \\"execution_time_ms\\": 18\\n} Status Codes : 200 OK - Rendering completed (check error field in body for evaluation errors) 400 Bad Request - Invalid request format 500 Internal Server Error - Daemon error","breadcrumbs":"Config Rendering Guide » POST /config/render","id":"3793","title":"POST /config/render"},"3794":{"body":"Get rendering statistics across all languages. Response : { \\"total_renders\\": 156, \\"successful_renders\\": 154, \\"failed_renders\\": 2, \\"average_time_ms\\": 28, \\"nickel_renders\\": 104, \\"tera_renders\\": 52, \\"nickel_cache_hits\\": 87, \\"tera_cache_hits\\": 38\\n}","breadcrumbs":"Config Rendering Guide » GET /config/stats","id":"3794","title":"GET /config/stats"},"3795":{"body":"Reset all rendering statistics. Response : { \\"status\\": \\"success\\", \\"message\\": \\"Configuration rendering statistics reset\\"\\n}","breadcrumbs":"Config Rendering Guide » POST /config/stats/reset","id":"3795","title":"POST /config/stats/reset"},"3796":{"body":"","breadcrumbs":"Config Rendering Guide » Nickel Rendering","id":"3796","title":"Nickel Rendering"},"3797":{"body":"curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"language\\": \\"nickel\\", \\"content\\": \\"{ name = \\\\\\"production-server\\\\\\", type = \\\\\\"web\\\\\\", cpu = 4, memory = 8192, disk = 50, tags = { environment = \\\\\\"production\\\\\\", team = \\\\\\"platform\\\\\\" }\\n}\\", \\"name\\": \\"nickel-server-config\\" }\'","breadcrumbs":"Config Rendering Guide » Basic Nickel Configuration","id":"3797","title":"Basic Nickel Configuration"},"3798":{"body":"Nickel excels at evaluating only what\'s needed: curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"language\\": \\"nickel\\", \\"content\\": \\"{ server = { name = \\\\\\"db-01\\\\\\", # Expensive computation - only computed if accessed health_check = std.array.fold (fun acc x => acc + x) 0 [1, 2, 3, 4, 5] }, networking = { dns_servers = [\\\\\\"8.8.8.8\\\\\\", \\\\\\"8.8.4.4\\\\\\"], firewall_rules = [\\\\\\"allow_ssh\\\\\\", \\\\\\"allow_https\\\\\\"] }\\n}\\", \\"context\\": { \\"only_server\\": true } }\'","breadcrumbs":"Config Rendering Guide » Nickel with Lazy Evaluation","id":"3798","title":"Nickel with Lazy Evaluation"},"3799":{"body":"First render (cache miss) : 30-60 ms Cached render (same content) : 1-5 ms Large configs with lazy evaluation : 40-80 ms Advantage : Nickel only computes fields that are actually used in the output","breadcrumbs":"Config Rendering Guide » Expected Nickel Rendering Time","id":"3799","title":"Expected Nickel Rendering Time"},"38":{"body":"Documentation : You\'re reading it! Quick Reference : Run provisioning sc or provisioning guide quickstart Help System : Run provisioning help or provisioning help Interactive Shell : Run provisioning nu for Nushell REPL","breadcrumbs":"Home » Getting Help","id":"38","title":"Getting Help"},"380":{"body":"Before configuring platform services, ensure you have: ✅ Completed Installation Steps ✅ Verified installation with Verification ✅ Nickel 0.10+ (for configuration language) ✅ Nushell 0.109+ (for scripts) ✅ TypeDialog (optional, for interactive configuration)","breadcrumbs":"Platform Service Configuration » Prerequisites","id":"380","title":"Prerequisites"},"3800":{"body":"","breadcrumbs":"Config Rendering Guide » Tera Template Rendering","id":"3800","title":"Tera Template Rendering"},"3801":{"body":"curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"language\\": \\"tera\\", \\"content\\": \\"\\nServer Configuration\\n==================== Name: {{ server_name }}\\nEnvironment: {{ environment | default(value=\\\\\\"development\\\\\\") }}\\nType: {{ server_type }} Assigned Tasks:\\n{% for task in tasks %} - {{ task }}\\n{% endfor %} {% if enable_monitoring %}\\nMonitoring: ENABLED - Prometheus: true - Grafana: true\\n{% else %}\\nMonitoring: DISABLED\\n{% endif %}\\n\\", \\"context\\": { \\"server_name\\": \\"prod-web-01\\", \\"environment\\": \\"production\\", \\"server_type\\": \\"web\\", \\"tasks\\": [\\"kubernetes\\", \\"prometheus\\", \\"cilium\\"], \\"enable_monitoring\\": true }, \\"name\\": \\"server-template\\" }\'","breadcrumbs":"Config Rendering Guide » Basic Tera Template","id":"3801","title":"Basic Tera Template"},"3802":{"body":"Tera supports Jinja2-compatible filters and functions: curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"language\\": \\"tera\\", \\"content\\": \\"\\nConfiguration for {{ environment | upper }}\\nServers: {{ server_count | default(value=1) }}\\nCost estimate: \\\\${{ monthly_cost | round(precision=2) }} {% for server in servers | reverse %}\\n- {{ server.name }}: {{ server.cpu }} CPUs\\n{% endfor %}\\n\\", \\"context\\": { \\"environment\\": \\"production\\", \\"server_count\\": 5, \\"monthly_cost\\": 1234.567, \\"servers\\": [ {\\"name\\": \\"web-01\\", \\"cpu\\": 4}, {\\"name\\": \\"db-01\\", \\"cpu\\": 8}, {\\"name\\": \\"cache-01\\", \\"cpu\\": 2} ] } }\'","breadcrumbs":"Config Rendering Guide » Tera Filters and Functions","id":"3802","title":"Tera Filters and Functions"},"3803":{"body":"Simple templates : 4-10 ms Complex templates with loops : 10-20 ms Always fast (template is pre-compiled)","breadcrumbs":"Config Rendering Guide » Expected Tera Rendering Time","id":"3803","title":"Expected Tera Rendering Time"},"3804":{"body":"","breadcrumbs":"Config Rendering Guide » Performance Characteristics","id":"3804","title":"Performance Characteristics"},"3805":{"body":"All three renderers use LRU (Least Recently Used) caching: Cache Size : 100 entries per renderer Cache Key : SHA256 hash of (content + context) Cache Hit : Typically < 5 ms Cache Miss : Language-dependent (20-60 ms) To maximize cache hits : Render the same config multiple times → hits after first render Use static content when possible → better cache reuse Monitor cache hit ratio via /config/stats","breadcrumbs":"Config Rendering Guide » Caching Strategy","id":"3805","title":"Caching Strategy"},"3806":{"body":"Comparison of rendering times (on commodity hardware): Scenario Nickel Tera Simple config (10 vars) 30 ms 5 ms Medium config (50 vars) 45 ms 8 ms Large config (100+ vars) 50-80 ms 10 ms Cached render 1-5 ms 1-5 ms","breadcrumbs":"Config Rendering Guide » Benchmarks","id":"3806","title":"Benchmarks"},"3807":{"body":"Each renderer keeps 100 cached entries in memory Average config size in cache: ~5 KB Maximum memory per renderer: ~500 KB + overhead","breadcrumbs":"Config Rendering Guide » Memory Usage","id":"3807","title":"Memory Usage"},"3808":{"body":"","breadcrumbs":"Config Rendering Guide » Error Handling","id":"3808","title":"Error Handling"},"3809":{"body":"Nickel Binary Not Found Error Response : { \\"rendered\\": null, \\"error\\": \\"Nickel binary not found in PATH. Install Nickel or set NICKEL_PATH environment variable\\", \\"language\\": \\"nickel\\", \\"execution_time_ms\\": 0\\n} Solution : # Install Nickel\\nnickel version # Or set explicit path\\nexport NICKEL_PATH=/usr/local/bin/nickel Invalid Nickel Syntax Error Response : { \\"rendered\\": null, \\"error\\": \\"Nickel evaluation failed: Type mismatch at line 3: expected String, got Number\\", \\"language\\": \\"nickel\\", \\"execution_time_ms\\": 12\\n} Solution : Verify Nickel syntax. Run nickel typecheck file.ncl directly for better error messages. Missing Context Variable Error Response : { \\"rendered\\": null, \\"error\\": \\"Nickel evaluation failed: undefined variable \'required_var\'\\", \\"language\\": \\"nickel\\", \\"execution_time_ms\\": 8\\n} Solution : Provide required context variables or define fields with default values. Invalid JSON in Context HTTP Status : 400 Bad Request Body : Error message about invalid JSON Solution : Ensure context is valid JSON.","breadcrumbs":"Config Rendering Guide » Common Errors","id":"3809","title":"Common Errors"},"381":{"body":"The provisioning platform consists of 8 core services: Service Purpose Default Mode orchestrator Main orchestration engine Required control-center Web UI and management console Required mcp-server Model Context Protocol integration Optional vault-service Secrets management and encryption Required extension-registry Extension distribution system Required rag Retrieval-Augmented Generation Optional ai-service AI model integration Optional provisioning-daemon Background operations Required","breadcrumbs":"Platform Service Configuration » Platform Services Overview","id":"381","title":"Platform Services Overview"},"3810":{"body":"","breadcrumbs":"Config Rendering Guide » Integration Examples","id":"3810","title":"Integration Examples"},"3811":{"body":"# Render a Nickel config from Nushell\\nlet config = open workspace/config/provisioning.ncl | into string\\nlet response = curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d $\\"{{ language: \\\\\\"nickel\\\\\\", content: $config }}\\" | from json print $response.rendered","breadcrumbs":"Config Rendering Guide » Using with Nushell","id":"3811","title":"Using with Nushell"},"3812":{"body":"import requests\\nimport json def render_config(language, content, context=None, name=None): payload = { \\"language\\": language, \\"content\\": content, \\"context\\": context or {}, \\"name\\": name } response = requests.post( \\"http://localhost:9091/config/render\\", json=payload ) return response.json() # Example usage\\nresult = render_config( \\"nickel\\", \'{name = \\"server\\", cpu = 4}\', {\\"name\\": \\"prod-server\\"}, \\"my-config\\"\\n) if result[\\"error\\"]: print(f\\"Error: {result[\'error\']}\\")\\nelse: print(f\\"Rendered in {result[\'execution_time_ms\']}ms\\") print(result[\\"rendered\\"])","breadcrumbs":"Config Rendering Guide » Using with Python","id":"3812","title":"Using with Python"},"3813":{"body":"#!/bin/bash # Function to render config\\nrender_config() { local language=$1 local content=$2 local name=${3:-\\"unnamed\\"} curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d @- << EOF\\n{ \\"language\\": \\"$language\\", \\"content\\": $(echo \\"$content\\" | jq -Rs .), \\"name\\": \\"$name\\"\\n}\\nEOF\\n} # Usage\\nrender_config \\"nickel\\" \\"{name = \\\\\\"my-server\\\\\\"}\\" \\"server-config\\"","breadcrumbs":"Config Rendering Guide » Using with Curl","id":"3813","title":"Using with Curl"},"3814":{"body":"","breadcrumbs":"Config Rendering Guide » Troubleshooting","id":"3814","title":"Troubleshooting"},"3815":{"body":"Check log level : PROVISIONING_LOG_LEVEL=debug ./target/release/cli-daemon Verify Nushell binary : which nu\\n# or set explicit path\\nNUSHELL_PATH=/usr/local/bin/nu ./target/release/cli-daemon","breadcrumbs":"Config Rendering Guide » Daemon Won\'t Start","id":"3815","title":"Daemon Won\'t Start"},"3816":{"body":"Check cache hit rate : curl http://localhost:9091/config/stats | jq \'.nickel_cache_hits / .nickel_renders\' If low cache hit rate : Rendering same configs repeatedly? Monitor execution time : curl http://localhost:9091/config/render ... | jq \'.execution_time_ms\'","breadcrumbs":"Config Rendering Guide » Very Slow Rendering","id":"3816","title":"Very Slow Rendering"},"3817":{"body":"Set timeout (depends on client): curl --max-time 10 -X POST http://localhost:9091/config/render ... Check daemon logs for stuck processes.","breadcrumbs":"Config Rendering Guide » Rendering Hangs","id":"3817","title":"Rendering Hangs"},"3818":{"body":"Reduce cache size (rebuild with modified config) or restart daemon.","breadcrumbs":"Config Rendering Guide » Out of Memory","id":"3818","title":"Out of Memory"},"3819":{"body":"Choose right language for task : Nickel: Large configs with lazy evaluation, type-safe infrastructure definitions Tera: Simple templating, fastest for rendering Use context variables instead of hardcoding values: \\"context\\": { \\"environment\\": \\"production\\", \\"replica_count\\": 3\\n} Monitor statistics to understand performance: watch -n 1 \'curl -s http://localhost:9091/config/stats | jq\' Cache warming : Pre-render common configs on startup Error handling : Always check error field in response","breadcrumbs":"Config Rendering Guide » Best Practices","id":"3819","title":"Best Practices"},"382":{"body":"Choose a deployment mode based on your needs: Mode Resources Use Case solo 2 CPU, 4 GB RAM Development, testing, local machines multiuser 4 CPU, 8 GB RAM Team staging, team development cicd 8 CPU, 16 GB RAM CI/CD pipelines, automated testing enterprise 16+ CPU, 32+ GB Production, high-availability","breadcrumbs":"Platform Service Configuration » Deployment Modes","id":"382","title":"Deployment Modes"},"3820":{"body":"Nickel User Manual Tera Template Engine CLI Daemon Architecture: provisioning/platform/cli-daemon/README.md","breadcrumbs":"Config Rendering Guide » See Also","id":"3820","title":"See Also"},"3821":{"body":"","breadcrumbs":"Config Rendering Guide » Quick Reference","id":"3821","title":"Quick Reference"},"3822":{"body":"POST http://localhost:9091/config/render","breadcrumbs":"Config Rendering Guide » API Endpoint","id":"3822","title":"API Endpoint"},"3823":{"body":"curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"language\\": \\"nickel|tera\\", \\"content\\": \\"...\\", \\"context\\": {...}, \\"name\\": \\"optional-name\\" }\'","breadcrumbs":"Config Rendering Guide » Request Template","id":"3823","title":"Request Template"},"3824":{"body":"Nickel - Simple Config curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"language\\": \\"nickel\\", \\"content\\": \\"{name = \\\\\\"server\\\\\\", cpu = 4, memory = 8192}\\" }\' Tera - Template with Loops curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"language\\": \\"tera\\", \\"content\\": \\"{% for task in tasks %}{{ task }}\\\\n{% endfor %}\\", \\"context\\": {\\"tasks\\": [\\"kubernetes\\", \\"postgres\\", \\"redis\\"]} }\'","breadcrumbs":"Config Rendering Guide » Quick Examples","id":"3824","title":"Quick Examples"},"3825":{"body":"# Get stats\\ncurl http://localhost:9091/config/stats # Reset stats\\ncurl -X POST http://localhost:9091/config/stats/reset # Watch stats in real-time\\nwatch -n 1 \'curl -s http://localhost:9091/config/stats | jq\'","breadcrumbs":"Config Rendering Guide » Statistics","id":"3825","title":"Statistics"},"3826":{"body":"Language Cold Cached Use Case Nickel 30-60 ms 1-5 ms Type-safe configs, lazy evaluation Tera 5-20 ms 1-5 ms Simple templating","breadcrumbs":"Config Rendering Guide » Performance Guide","id":"3826","title":"Performance Guide"},"3827":{"body":"Code Meaning 200 Success (check error field for evaluation errors) 400 Invalid request 500 Daemon error","breadcrumbs":"Config Rendering Guide » Status Codes","id":"3827","title":"Status Codes"},"3828":{"body":"{ \\"rendered\\": \\"...output or null on error\\", \\"error\\": \\"...error message or null on success\\", \\"language\\": \\"nickel|tera\\", \\"execution_time_ms\\": 23\\n}","breadcrumbs":"Config Rendering Guide » Response Fields","id":"3828","title":"Response Fields"},"3829":{"body":"Nickel { name = \\"server\\", type = \\"web\\", cpu = 4, memory = 8192, tags = { env = \\"prod\\", team = \\"platform\\" }\\n} Pros : Lazy evaluation, functional style, compact Cons : Different paradigm, smaller ecosystem Tera Server: {{ name }}\\nType: {{ type | upper }}\\n{% for tag_name, tag_value in tags %}\\n- {{ tag_name }}: {{ tag_value }}\\n{% endfor %} Pros : Fast, simple, familiar template syntax Cons : No validation, template-only","breadcrumbs":"Config Rendering Guide » Languages Comparison","id":"3829","title":"Languages Comparison"},"383":{"body":"The configuration system is managed by a standalone script that doesn\'t require the main installer: # Navigate to the provisioning directory\\ncd /path/to/project-provisioning # Verify the setup script exists\\nls -la provisioning/scripts/setup-platform-config.sh # Make script executable\\nchmod +x provisioning/scripts/setup-platform-config.sh","breadcrumbs":"Platform Service Configuration » Step 1: Initialize Configuration Script","id":"383","title":"Step 1: Initialize Configuration Script"},"3830":{"body":"How it works : SHA256(content + context) → cached result Cache hit : < 5 ms Cache miss : 20-60 ms (language dependent) Cache size : 100 entries per language Cache stats : curl -s http://localhost:9091/config/stats | jq \'{ nickel_cache_hits: .nickel_cache_hits, nickel_renders: .nickel_renders, nickel_hit_ratio: (.nickel_cache_hits / .nickel_renders * 100)\\n}\'","breadcrumbs":"Config Rendering Guide » Caching","id":"3830","title":"Caching"},"3831":{"body":"Batch Rendering #!/bin/bash\\nfor config in configs/*.ncl; do curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \\"$(jq -n --arg content \\\\\\"$(cat $config)\\\\\\" \\\\ \'{language: \\"nickel\\", content: $content}\')\\"\\ndone Validate Before Rendering # Nickel validation\\nnickel typecheck my-config.ncl # Daemon validation (via first render)\\ncurl ... # catches errors in response Monitor Cache Performance #!/bin/bash\\nwhile true; do STATS=$(curl -s http://localhost:9091/config/stats) HIT_RATIO=$( echo \\"$STATS\\" | jq \'.nickel_cache_hits / .nickel_renders * 100\') echo \\"Cache hit ratio: ${HIT_RATIO}%\\" sleep 5\\ndone","breadcrumbs":"Config Rendering Guide » Common Tasks","id":"3831","title":"Common Tasks"},"3832":{"body":"Missing Binary { \\"error\\": \\"Nickel binary not found. Install Nickel or set NICKEL_PATH\\", \\"rendered\\": null\\n} Fix : export NICKEL_PATH=/path/to/nickel or install Nickel Syntax Error { \\"error\\": \\"Nickel type checking failed: Type mismatch at line 3\\", \\"rendered\\": null\\n} Fix : Check Nickel syntax, run nickel typecheck file.ncl directly","breadcrumbs":"Config Rendering Guide » Error Examples","id":"3832","title":"Error Examples"},"3833":{"body":"Nushell use lib_provisioning let config = open server.ncl | into string\\nlet result = (curl -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d {language: \\"nickel\\", content: $config} | from json) if ($result.error != null) { error $result.error\\n} else { print $result.rendered\\n} Python import requests resp = requests.post(\\"http://localhost:9091/config/render\\", json={ \\"language\\": \\"nickel\\", \\"content\\": \'{name = \\"server\\"}\', \\"context\\": {}\\n})\\nresult = resp.json()\\nprint(result[\\"rendered\\"] if not result[\\"error\\"] else f\\"Error: {result[\'error\']}\\") Bash render() { curl -s -X POST http://localhost:9091/config/render \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \\"$1\\" | jq \'.\'\\n} # Usage\\nrender \'{\\"language\\":\\"nickel\\",\\"content\\":\\"{name = \\\\\\"server\\\\\\"}\\"}\'","breadcrumbs":"Config Rendering Guide » Integration Quick Start","id":"3833","title":"Integration Quick Start"},"3834":{"body":"# Daemon configuration\\nPROVISIONING_LOG_LEVEL=debug # Log level\\nDAEMON_BIND=127.0.0.1:9091 # Bind address\\nNUSHELL_PATH=/usr/local/bin/nu # Nushell binary\\nNICKEL_PATH=/usr/local/bin/nickel # Nickel binary","breadcrumbs":"Config Rendering Guide » Environment Variables","id":"3834","title":"Environment Variables"},"3835":{"body":"# Health check\\ncurl http://localhost:9091/health # Daemon info\\ncurl http://localhost:9091/info # View stats\\ncurl http://localhost:9091/config/stats | jq \'.\' # Pretty print stats\\ncurl -s http://localhost:9091/config/stats | jq \'{ total: .total_renders, success_rate: (.successful_renders / .total_renders * 100), avg_time: .average_time_ms, cache_hit_rate: ((.nickel_cache_hits + .tera_cache_hits) / (.nickel_renders + .tera_renders) * 100)\\n}\'","breadcrumbs":"Config Rendering Guide » Useful Commands","id":"3835","title":"Useful Commands"},"3836":{"body":"Daemon running? curl http://localhost:9091/health Correct content for language? Valid JSON in context? Nickel or Tera binary available? Check log level? PROVISIONING_LOG_LEVEL=debug Cache hit rate? /config/stats Error in response? Check error field","breadcrumbs":"Config Rendering Guide » Troubleshooting Checklist","id":"3836","title":"Troubleshooting Checklist"},"3837":{"body":"This comprehensive guide explains the configuration system of the Infrastructure Automation platform, helping you understand, customize, and manage all configuration aspects.","breadcrumbs":"Configuration » Configuration Guide","id":"3837","title":"Configuration Guide"},"3838":{"body":"Understanding the configuration hierarchy and precedence Working with different configuration file types Configuration interpolation and templating Environment-specific configurations User customization and overrides Validation and troubleshooting Advanced configuration patterns","breadcrumbs":"Configuration » What You\'ll Learn","id":"3838","title":"What You\'ll Learn"},"3839":{"body":"","breadcrumbs":"Configuration » Configuration Architecture","id":"3839","title":"Configuration Architecture"},"384":{"body":"","breadcrumbs":"Platform Service Configuration » Step 2: Choose Configuration Method","id":"384","title":"Step 2: Choose Configuration Method"},"3840":{"body":"The system uses a layered configuration approach with clear precedence rules: Runtime CLI arguments (highest precedence) ↓ (overrides)\\nEnvironment Variables ↓ (overrides)\\nInfrastructure Config (./.provisioning.toml) ↓ (overrides)\\nProject Config (./provisioning.toml) ↓ (overrides)\\nUser Config (~/.config/provisioning/config.toml) ↓ (overrides)\\nSystem Defaults (config.defaults.toml) (lowest precedence)","breadcrumbs":"Configuration » Configuration Hierarchy","id":"3840","title":"Configuration Hierarchy"},"3841":{"body":"File Type Purpose Location Format System Defaults Base system configuration config.defaults.toml TOML User Config Personal preferences ~/.config/provisioning/config.toml TOML Project Config Project-wide settings ./provisioning.toml TOML Infrastructure Config Infra-specific settings ./.provisioning.toml TOML Environment Config Environment overrides config.{env}.toml TOML Infrastructure Definitions Infrastructure as Code main.ncl, *.ncl Nickel","breadcrumbs":"Configuration » Configuration File Types","id":"3841","title":"Configuration File Types"},"3842":{"body":"","breadcrumbs":"Configuration » Understanding Configuration Sections","id":"3842","title":"Understanding Configuration Sections"},"3843":{"body":"[core]\\nversion = \\"1.0.0\\" # System version\\nname = \\"provisioning\\" # System identifier","breadcrumbs":"Configuration » Core System Configuration","id":"3843","title":"Core System Configuration"},"3844":{"body":"The most critical configuration section that defines where everything is located: [paths]\\n# Base directory - all other paths derive from this\\nbase = \\"/usr/local/provisioning\\" # Derived paths (usually don\'t need to change these)\\nkloud = \\"{{paths.base}}/infra\\"\\nproviders = \\"{{paths.base}}/providers\\"\\ntaskservs = \\"{{paths.base}}/taskservs\\"\\nclusters = \\"{{paths.base}}/cluster\\"\\nresources = \\"{{paths.base}}/resources\\"\\ntemplates = \\"{{paths.base}}/templates\\"\\ntools = \\"{{paths.base}}/tools\\"\\ncore = \\"{{paths.base}}/core\\" [paths.files]\\n# Important file locations\\nsettings_file = \\"settings.ncl\\"\\nkeys = \\"{{paths.base}}/keys.yaml\\"\\nrequirements = \\"{{paths.base}}/requirements.yaml\\"","breadcrumbs":"Configuration » Path Configuration","id":"3844","title":"Path Configuration"},"3845":{"body":"[debug]\\nenabled = false # Enable debug mode\\nmetadata = false # Show internal metadata\\ncheck = false # Default to check mode (dry run)\\nremote = false # Enable remote debugging\\nlog_level = \\"info\\" # Logging verbosity\\nno_terminal = false # Disable terminal features","breadcrumbs":"Configuration » Debug and Logging","id":"3845","title":"Debug and Logging"},"3846":{"body":"[output]\\nfile_viewer = \\"less\\" # File viewer command\\nformat = \\"yaml\\" # Default output format (json, yaml, toml, text)","breadcrumbs":"Configuration » Output Configuration","id":"3846","title":"Output Configuration"},"3847":{"body":"[providers]\\ndefault = \\"local\\" # Default provider [providers.aws]\\napi_url = \\"\\" # AWS API endpoint (blank = default)\\nauth = \\"\\" # Authentication method\\ninterface = \\"CLI\\" # Interface type (CLI or API) [providers.upcloud]\\napi_url = \\"https://api.upcloud.com/1.3\\"\\nauth = \\"\\"\\ninterface = \\"CLI\\" [providers.local]\\napi_url = \\"\\"\\nauth = \\"\\"\\ninterface = \\"CLI\\"","breadcrumbs":"Configuration » Provider Configuration","id":"3847","title":"Provider Configuration"},"3848":{"body":"[sops]\\nuse_sops = true # Enable SOPS encryption\\nconfig_path = \\"{{paths.base}}/.sops.yaml\\" # Search paths for Age encryption keys\\nkey_search_paths = [ \\"{{paths.base}}/keys/age.txt\\", \\"~/.config/sops/age/keys.txt\\"\\n]","breadcrumbs":"Configuration » Encryption (SOPS) Configuration","id":"3848","title":"Encryption (SOPS) Configuration"},"3849":{"body":"The system supports powerful interpolation patterns for dynamic configuration values.","breadcrumbs":"Configuration » Configuration Interpolation","id":"3849","title":"Configuration Interpolation"},"385":{"body":"TypeDialog provides an interactive form-based configuration interface available in multiple backends (web, TUI, CLI). Quick Interactive Setup (All Services at Once) # Run interactive setup - prompts for choices\\n./provisioning/scripts/setup-platform-config.sh # Follow the prompts to:\\n# 1. Choose action (TypeDialog, Quick Mode, Clean, List)\\n# 2. Select service (or all services)\\n# 3. Choose deployment mode\\n# 4. Select backend (web, tui, cli) Configure Specific Service with TypeDialog # Configure orchestrator in solo mode with web UI\\n./provisioning/scripts/setup-platform-config.sh \\\\ --service orchestrator \\\\ --mode solo \\\\ --backend web # TypeDialog opens browser → User fills form → Config generated When to use TypeDialog: First-time setup with visual form guidance Updating configuration with validation Multiple services needing coordinated changes Team environments where UI is preferred","breadcrumbs":"Platform Service Configuration » Method A: Interactive TypeDialog Configuration (Recommended)","id":"385","title":"Method A: Interactive TypeDialog Configuration (Recommended)"},"3850":{"body":"Path Interpolation # Reference other path values\\ntemplates = \\"{{paths.base}}/my-templates\\"\\ncustom_path = \\"{{paths.providers}}/custom\\" Environment Variable Interpolation # Access environment variables\\nuser_home = \\"{{env.HOME}}\\"\\ncurrent_user = \\"{{env.USER}}\\"\\ncustom_path = \\"{{env.CUSTOM_PATH || /default/path}}\\" # With fallback Date/Time Interpolation # Dynamic date/time values\\nlog_file = \\"{{paths.base}}/logs/app-{{now.date}}.log\\"\\nbackup_dir = \\"{{paths.base}}/backups/{{now.timestamp}}\\" Git Information Interpolation # Git repository information\\ndeployment_branch = \\"{{git.branch}}\\"\\nversion_tag = \\"{{git.tag}}\\"\\ncommit_hash = \\"{{git.commit}}\\" Cross-Section References # Reference values from other sections\\ndatabase_host = \\"{{providers.aws.database_endpoint}}\\"\\napi_key = \\"{{sops.decrypted_key}}\\"","breadcrumbs":"Configuration » Basic Interpolation Patterns","id":"3850","title":"Basic Interpolation Patterns"},"3851":{"body":"Function Calls # Built-in functions\\nconfig_path = \\"{{path.join(env.HOME, .config, provisioning)}}\\"\\nsafe_name = \\"{{str.lower(str.replace(project.name, \' \', \'-\'))}}\\" Conditional Expressions # Conditional logic\\ndebug_level = \\"{{debug.enabled && \'debug\' || \'info\'}}\\"\\nstorage_path = \\"{{env.STORAGE_PATH || path.join(paths.base, \'storage\')}}\\"","breadcrumbs":"Configuration » Advanced Interpolation","id":"3851","title":"Advanced Interpolation"},"3852":{"body":"[paths]\\nbase = \\"/opt/provisioning\\"\\nworkspace = \\"{{env.HOME}}/provisioning-workspace\\"\\ncurrent_project = \\"{{paths.workspace}}/{{env.PROJECT_NAME || \'default\'}}\\" [deployment]\\nenvironment = \\"{{env.DEPLOY_ENV || \'development\'}}\\"\\ntimestamp = \\"{{now.iso8601}}\\"\\nversion = \\"{{git.tag || git.commit}}\\" [database]\\nconnection_string = \\"postgresql://{{env.DB_USER}}:{{env.DB_PASS}}@{{env.DB_HOST || \'localhost\'}}/{{env.DB_NAME}}\\" [notifications]\\nslack_channel = \\"#{{env.TEAM_NAME || \'general\'}}-notifications\\"\\nemail_subject = \\"Deployment {{deployment.environment}} - {{deployment.timestamp}}\\"","breadcrumbs":"Configuration » Interpolation Examples","id":"3852","title":"Interpolation Examples"},"3853":{"body":"","breadcrumbs":"Configuration » Environment-Specific Configuration","id":"3853","title":"Environment-Specific Configuration"},"3854":{"body":"The system automatically detects the environment using: PROVISIONING_ENV environment variable Git branch patterns (dev, staging, main/master) Directory patterns (development, staging, production) Explicit configuration","breadcrumbs":"Configuration » Environment Detection","id":"3854","title":"Environment Detection"},"3855":{"body":"Create environment-specific configurations: Development Environment (config.dev.toml) [core]\\nname = \\"provisioning-dev\\" [debug]\\nenabled = true\\nlog_level = \\"debug\\"\\nmetadata = true [providers]\\ndefault = \\"local\\" [cache]\\nenabled = false # Disable caching for development [notifications]\\nenabled = false # No notifications in dev Testing Environment (config.test.toml) [core]\\nname = \\"provisioning-test\\" [debug]\\nenabled = true\\ncheck = true # Default to check mode in testing\\nlog_level = \\"info\\" [providers]\\ndefault = \\"local\\" [infrastructure]\\nauto_cleanup = true # Clean up test resources\\nresource_prefix = \\"test-{{git.branch}}-\\" Production Environment (config.prod.toml) [core]\\nname = \\"provisioning-prod\\" [debug]\\nenabled = false\\nlog_level = \\"warn\\" [providers]\\ndefault = \\"aws\\" [security]\\nrequire_approval = true\\naudit_logging = true\\nencrypt_backups = true [notifications]\\nenabled = true\\ncritical_only = true","breadcrumbs":"Configuration » Environment Configuration Files","id":"3855","title":"Environment Configuration Files"},"3856":{"body":"# Set environment for session\\nexport PROVISIONING_ENV=dev\\nprovisioning env # Use environment for single command\\nprovisioning --environment prod server create # Switch environment permanently\\nprovisioning env set prod","breadcrumbs":"Configuration » Environment Switching","id":"3856","title":"Environment Switching"},"3857":{"body":"","breadcrumbs":"Configuration » User Configuration Customization","id":"3857","title":"User Configuration Customization"},"3858":{"body":"# Initialize user configuration from template\\nprovisioning init config # Or copy and customize\\ncp config-examples/config.user.toml ~/.config/provisioning/config.toml","breadcrumbs":"Configuration » Creating Your User Configuration","id":"3858","title":"Creating Your User Configuration"},"3859":{"body":"Developer Setup [paths]\\nbase = \\"/Users/alice/dev/provisioning\\" [debug]\\nenabled = true\\nlog_level = \\"debug\\" [providers]\\ndefault = \\"local\\" [output]\\nformat = \\"json\\"\\nfile_viewer = \\"code\\" [sops]\\nkey_search_paths = [ \\"/Users/alice/.config/sops/age/keys.txt\\"\\n] Operations Engineer Setup [paths]\\nbase = \\"/opt/provisioning\\" [debug]\\nenabled = false\\nlog_level = \\"info\\" [providers]\\ndefault = \\"aws\\" [output]\\nformat = \\"yaml\\" [notifications]\\nenabled = true\\nemail = \\"ops-team@company.com\\" Team Lead Setup [paths]\\nbase = \\"/home/teamlead/provisioning\\" [debug]\\nenabled = true\\nmetadata = true\\nlog_level = \\"info\\" [providers]\\ndefault = \\"upcloud\\" [security]\\nrequire_confirmation = true\\naudit_logging = true [sops]\\nkey_search_paths = [ \\"/secure/keys/team-lead.txt\\", \\"~/.config/sops/age/keys.txt\\"\\n]","breadcrumbs":"Configuration » Common User Customizations","id":"3859","title":"Common User Customizations"},"386":{"body":"Quick mode automatically creates all service configurations from defaults overlaid with mode-specific tuning. # Quick setup for solo development mode\\n./provisioning/scripts/setup-platform-config.sh --quick-mode --mode solo # Quick setup for enterprise production\\n./provisioning/scripts/setup-platform-config.sh --quick-mode --mode enterprise # Result: All 8 services configured immediately with appropriate resource limits When to use Quick Mode: Initial setup with standard defaults Switching deployment modes CI/CD automated setup Scripted/programmatic configuration","breadcrumbs":"Platform Service Configuration » Method B: Quick Mode Configuration (Fastest)","id":"386","title":"Method B: Quick Mode Configuration (Fastest)"},"3860":{"body":"","breadcrumbs":"Configuration » Project-Specific Configuration","id":"3860","title":"Project-Specific Configuration"},"3861":{"body":"[project]\\nname = \\"web-application\\"\\ndescription = \\"Main web application infrastructure\\"\\nversion = \\"2.1.0\\"\\nteam = \\"platform-team\\" [paths]\\n# Project-specific path overrides\\ninfra = \\"./infrastructure\\"\\ntemplates = \\"./custom-templates\\" [defaults]\\n# Project defaults\\nprovider = \\"aws\\"\\nregion = \\"us-west-2\\"\\nenvironment = \\"development\\" [cost_controls]\\nmax_monthly_budget = 5000.00\\nalert_threshold = 0.8 [compliance]\\nrequired_tags = [\\"team\\", \\"environment\\", \\"cost-center\\"]\\nencryption_required = true\\nbackup_required = true [notifications]\\nslack_webhook = \\"https://hooks.slack.com/services/...\\"\\nteam_email = \\"platform-team@company.com\\"","breadcrumbs":"Configuration » Project Configuration File (provisioning.toml)","id":"3861","title":"Project Configuration File (provisioning.toml)"},"3862":{"body":"[infrastructure]\\nname = \\"production-web-app\\"\\nenvironment = \\"production\\"\\nregion = \\"us-west-2\\" [overrides]\\n# Infrastructure-specific overrides\\ndebug.enabled = false\\ndebug.log_level = \\"error\\"\\ncache.enabled = true [scaling]\\nauto_scaling_enabled = true\\nmin_instances = 3\\nmax_instances = 20 [security]\\nvpc_id = \\"vpc-12345678\\"\\nsubnet_ids = [\\"subnet-12345678\\", \\"subnet-87654321\\"]\\nsecurity_group_id = \\"sg-12345678\\" [monitoring]\\nenabled = true\\nretention_days = 90\\nalerting_enabled = true","breadcrumbs":"Configuration » Infrastructure-Specific Configuration (.provisioning.toml)","id":"3862","title":"Infrastructure-Specific Configuration (.provisioning.toml)"},"3863":{"body":"","breadcrumbs":"Configuration » Configuration Validation","id":"3863","title":"Configuration Validation"},"3864":{"body":"# Validate current configuration\\nprovisioning validate config # Detailed validation with warnings\\nprovisioning validate config --detailed # Strict validation mode\\nprovisioning validate config strict # Validate specific environment\\nprovisioning validate config --environment prod","breadcrumbs":"Configuration » Built-in Validation","id":"3864","title":"Built-in Validation"},"3865":{"body":"Create custom validation in your configuration: [validation]\\n# Custom validation rules\\nrequired_sections = [\\"paths\\", \\"providers\\", \\"debug\\"]\\nrequired_env_vars = [\\"AWS_REGION\\", \\"PROJECT_NAME\\"]\\nforbidden_values = [\\"password123\\", \\"admin\\"] [validation.paths]\\n# Path validation rules\\nbase_must_exist = true\\nwritable_required = [\\"paths.base\\", \\"paths.cache\\"] [validation.security]\\n# Security validation\\nrequire_encryption = true\\nmin_key_length = 32","breadcrumbs":"Configuration » Custom Validation Rules","id":"3865","title":"Custom Validation Rules"},"3866":{"body":"","breadcrumbs":"Configuration » Troubleshooting Configuration","id":"3866","title":"Troubleshooting Configuration"},"3867":{"body":"Issue 1: Path Not Found Errors # Problem: Base path doesn\'t exist\\n# Check current configuration\\nprovisioning env | grep paths.base # Verify path exists\\nls -la /path/shown/above # Fix: Update user config\\nnano ~/.config/provisioning/config.toml\\n# Set correct paths.base = \\"/correct/path\\" Issue 2: Interpolation Failures # Problem: {{env.VARIABLE}} not resolving\\n# Check environment variables\\nenv | grep VARIABLE # Check interpolation\\nprovisioning validate interpolation test # Debug interpolation\\nprovisioning --debug validate interpolation validate Issue 3: SOPS Encryption Errors # Problem: Cannot decrypt SOPS files\\n# Check SOPS configuration\\nprovisioning sops config # Verify key files\\nls -la ~/.config/sops/age/keys.txt # Test decryption\\nsops -d encrypted-file.ncl Issue 4: Provider Authentication # Problem: Provider authentication failed\\n# Check provider configuration\\nprovisioning show providers # Test provider connection\\nprovisioning provider test aws # Verify credentials\\naws configure list # For AWS","breadcrumbs":"Configuration » Common Configuration Issues","id":"3867","title":"Common Configuration Issues"},"3868":{"body":"# Show current configuration hierarchy\\nprovisioning config show --hierarchy # Show configuration sources\\nprovisioning config sources # Show interpolated values\\nprovisioning config interpolated # Debug specific section\\nprovisioning config debug paths\\nprovisioning config debug providers","breadcrumbs":"Configuration » Configuration Debugging","id":"3868","title":"Configuration Debugging"},"3869":{"body":"# Reset to defaults\\nprovisioning config reset # Reset specific section\\nprovisioning config reset providers # Backup current config before reset\\nprovisioning config backup","breadcrumbs":"Configuration » Configuration Reset","id":"3869","title":"Configuration Reset"},"387":{"body":"For advanced users who prefer editing configuration files directly: # View schema definition\\ncat provisioning/schemas/platform/schemas/orchestrator.ncl # View default values\\ncat provisioning/schemas/platform/defaults/orchestrator-defaults.ncl # View mode overlay\\ncat provisioning/schemas/platform/defaults/deployment/solo-defaults.ncl # Edit configuration directly\\nvim provisioning/config/runtime/orchestrator.solo.ncl # Validate Nickel syntax\\nnickel typecheck provisioning/config/runtime/orchestrator.solo.ncl # Regenerate TOML from edited config (CRITICAL STEP)\\n./provisioning/scripts/setup-platform-config.sh --generate-toml When to use Manual Edit: Advanced customization beyond form options Programmatic configuration generation Integration with CI/CD systems Custom workspace-specific overrides","breadcrumbs":"Platform Service Configuration » Method C: Manual Nickel Configuration","id":"387","title":"Method C: Manual Nickel Configuration"},"3870":{"body":"","breadcrumbs":"Configuration » Advanced Configuration Patterns","id":"3870","title":"Advanced Configuration Patterns"},"3871":{"body":"[dynamic]\\n# Load configuration from external sources\\nconfig_urls = [ \\"https://config.company.com/provisioning/base.toml\\", \\"file:///etc/provisioning/shared.toml\\"\\n] # Conditional configuration loading\\nload_if_exists = [ \\"./local-overrides.toml\\", \\"../shared/team-config.toml\\"\\n]","breadcrumbs":"Configuration » Dynamic Configuration Loading","id":"3871","title":"Dynamic Configuration Loading"},"3872":{"body":"[templates]\\n# Template-based configuration\\nbase_template = \\"aws-web-app\\"\\ntemplate_vars = { region = \\"us-west-2\\" instance_type = \\"t3.medium\\" team_name = \\"platform\\"\\n} # Template inheritance\\nextends = [\\"base-web\\", \\"monitoring\\", \\"security\\"]","breadcrumbs":"Configuration » Configuration Templating","id":"3872","title":"Configuration Templating"},"3873":{"body":"[regions]\\nprimary = \\"us-west-2\\"\\nsecondary = \\"us-east-1\\" [regions.us-west-2]\\nproviders.aws.region = \\"us-west-2\\"\\navailability_zones = [\\"us-west-2a\\", \\"us-west-2b\\", \\"us-west-2c\\"] [regions.us-east-1]\\nproviders.aws.region = \\"us-east-1\\"\\navailability_zones = [\\"us-east-1a\\", \\"us-east-1b\\", \\"us-east-1c\\"]","breadcrumbs":"Configuration » Multi-Region Configuration","id":"3873","title":"Multi-Region Configuration"},"3874":{"body":"[profiles]\\nactive = \\"development\\" [profiles.development]\\ndebug.enabled = true\\nproviders.default = \\"local\\"\\ncost_controls.enabled = false [profiles.staging]\\ndebug.enabled = true\\nproviders.default = \\"aws\\"\\ncost_controls.max_budget = 1000.00 [profiles.production]\\ndebug.enabled = false\\nproviders.default = \\"aws\\"\\nsecurity.strict_mode = true","breadcrumbs":"Configuration » Configuration Profiles","id":"3874","title":"Configuration Profiles"},"3875":{"body":"","breadcrumbs":"Configuration » Configuration Management Best Practices","id":"3875","title":"Configuration Management Best Practices"},"3876":{"body":"# Track configuration changes\\ngit add provisioning.toml\\ngit commit -m \\"feat(config): add production settings\\" # Use branches for configuration experiments\\ngit checkout -b config/new-provider","breadcrumbs":"Configuration » 1. Version Control","id":"3876","title":"1. Version Control"},"3877":{"body":"# Document your configuration choices\\n[paths]\\n# Using custom base path for team shared installation\\nbase = \\"/opt/team-provisioning\\" [debug]\\n# Debug enabled for troubleshooting infrastructure issues\\nenabled = true\\nlog_level = \\"debug\\" # Temporary while debugging network problems","breadcrumbs":"Configuration » 2. Documentation","id":"3877","title":"2. Documentation"},"3878":{"body":"# Always validate before committing\\nprovisioning validate config\\ngit add . && git commit -m \\"update config\\"","breadcrumbs":"Configuration » 3. Validation","id":"3878","title":"3. Validation"},"3879":{"body":"# Regular configuration backups\\nprovisioning config export --format yaml > config-backup-$(date +%Y%m%d).yaml # Automated backup script\\necho \'0 2 * * * provisioning config export > ~/backups/config-$(date +\\\\%Y\\\\%m\\\\%d).yaml\' | crontab -","breadcrumbs":"Configuration » 4. Backup","id":"3879","title":"4. Backup"},"388":{"body":"The configuration system uses layered composition: 1. Schema (Type contract) ↓ Defines valid fields and constraints 2. Service Defaults (Base values) ↓ Default configuration for each service 3. Mode Overlay (Mode-specific tuning) ↓ solo, multiuser, cicd, or enterprise settings 4. User Customization (Overrides) ↓ User-specific or workspace-specific changes 5. Runtime Config (Final result) ↓ provisioning/config/runtime/orchestrator.solo.ncl 6. TOML Export (Service consumption) ↓ provisioning/config/runtime/generated/orchestrator.solo.toml All layers are automatically composed and validated.","breadcrumbs":"Platform Service Configuration » Step 3: Understand Configuration Layers","id":"388","title":"Step 3: Understand Configuration Layers"},"3880":{"body":"Never commit sensitive values in plain text Use SOPS for encrypting secrets Rotate encryption keys regularly Audit configuration access # Encrypt sensitive configuration\\nsops -e settings.ncl > settings.encrypted.ncl # Audit configuration changes\\ngit log -p -- provisioning.toml","breadcrumbs":"Configuration » 5. Security","id":"3880","title":"5. Security"},"3881":{"body":"","breadcrumbs":"Configuration » Configuration Migration","id":"3881","title":"Configuration Migration"},"3882":{"body":"# Old: Environment variables\\nexport PROVISIONING_DEBUG=true\\nexport PROVISIONING_PROVIDER=aws # New: Configuration file\\n[debug]\\nenabled = true [providers]\\ndefault = \\"aws\\"","breadcrumbs":"Configuration » Migrating from Environment Variables","id":"3882","title":"Migrating from Environment Variables"},"3883":{"body":"# Check for configuration updates needed\\nprovisioning config check-version # Migrate to new format\\nprovisioning config migrate --from 1.0 --to 2.0 # Validate migrated configuration\\nprovisioning validate config","breadcrumbs":"Configuration » Upgrading Configuration Format","id":"3883","title":"Upgrading Configuration Format"},"3884":{"body":"Now that you understand the configuration system: Create your user configuration : provisioning init config Set up environment-specific configs for your workflow Learn CLI commands : CLI Reference Practice with examples : Examples and Tutorials Troubleshoot issues : Troubleshooting Guide You now have complete control over how provisioning behaves in your environment!","breadcrumbs":"Configuration » Next Steps","id":"3884","title":"Next Steps"},"3885":{"body":"This guide shows you how to set up a new infrastructure workspace with Nickel-based configuration and auto-generated documentation.","breadcrumbs":"Workspace Setup » Workspace Setup Guide","id":"3885","title":"Workspace Setup Guide"},"3886":{"body":"","breadcrumbs":"Workspace Setup » Quick Start","id":"3886","title":"Quick Start"},"3887":{"body":"# Interactive workspace creation with prompts\\nprovisioning workspace init # Or non-interactive with explicit path\\nprovisioning workspace init my_workspace /path/to/my_workspace When you run provisioning workspace init, the system automatically: ✅ Creates Nickel-based configuration (config/config.ncl) ✅ Sets up infrastructure directories with Nickel files (infra/default/) ✅ Generates 4 workspace guides (deployment, configuration, troubleshooting, README) ✅ Configures local provider as default ✅ Creates .gitignore for workspace","breadcrumbs":"Workspace Setup » 1. Create a New Workspace (Automatic)","id":"3887","title":"1. Create a New Workspace (Automatic)"},"3888":{"body":"After running workspace init, your workspace has this structure: my_workspace/\\n├── config/\\n│ ├── config.ncl # Master Nickel configuration\\n│ ├── providers/\\n│ └── platform/\\n│\\n├── infra/\\n│ └── default/\\n│ ├── main.ncl # Infrastructure definition\\n│ └── servers.ncl # Server configurations\\n│\\n├── docs/ # ✨ AUTO-GENERATED GUIDES\\n│ ├── README.md # Workspace overview & quick start\\n│ ├── deployment-guide.md # Step-by-step deployment\\n│ ├── configuration-guide.md # Configuration reference\\n│ └── troubleshooting.md # Common issues & solutions\\n│\\n├── .providers/ # Provider state & cache\\n├── .kms/ # KMS data\\n├── .provisioning/ # Workspace metadata\\n└── workspace.nu # Utility scripts","breadcrumbs":"Workspace Setup » 2. Workspace Structure (Auto-Generated)","id":"3888","title":"2. Workspace Structure (Auto-Generated)"},"3889":{"body":"The config/config.ncl file is the master configuration for your workspace: { workspace = { name = \\"my_workspace\\", path = \\"/path/to/my_workspace\\", description = \\"Workspace: my_workspace\\", metadata = { owner = \\"your_username\\", created = \\"2025-01-07T19:30:00Z\\", environment = \\"development\\", }, }, providers = { local = { name = \\"local\\", enabled = true, workspace = \\"my_workspace\\", auth = { interface = \\"local\\" }, paths = { base = \\".providers/local\\", cache = \\".providers/local/cache\\", state = \\".providers/local/state\\", }, }, },\\n}","breadcrumbs":"Workspace Setup » 3. Understanding Nickel Configuration","id":"3889","title":"3. Understanding Nickel Configuration"},"389":{"body":"After running the setup script, verify the configuration was created: # List generated runtime configurations\\nls -la provisioning/config/runtime/ # Check generated TOML files\\nls -la provisioning/config/runtime/generated/ # Verify TOML is valid\\ncat provisioning/config/runtime/generated/orchestrator.solo.toml | head -20 You should see files for all 8 services in both the runtime directory (Nickel format) and the generated directory (TOML format).","breadcrumbs":"Platform Service Configuration » Step 4: Verify Generated Configuration","id":"389","title":"Step 4: Verify Generated Configuration"},"3890":{"body":"Every workspace gets 4 auto-generated guides tailored to your specific configuration: README.md - Overview with workspace structure and quick start deployment-guide.md - Step-by-step deployment instructions for your infrastructure configuration-guide.md - Configuration reference specific to your workspace troubleshooting.md - Common issues and solutions for your setup These guides are automatically generated based on your workspace\'s: Configured providers Infrastructure definitions Server configurations Taskservs and services","breadcrumbs":"Workspace Setup » 4. Auto-Generated Documentation","id":"3890","title":"4. Auto-Generated Documentation"},"3891":{"body":"After creation, edit the Nickel configuration files: # Edit master configuration\\nvim config/config.ncl # Edit infrastructure definition\\nvim infra/default/main.ncl # Edit server definitions\\nvim infra/default/servers.ncl # Validate Nickel syntax\\nnickel typecheck config/config.ncl","breadcrumbs":"Workspace Setup » 5. Customize Your Workspace","id":"3891","title":"5. Customize Your Workspace"},"3892":{"body":"","breadcrumbs":"Workspace Setup » Next Steps After Workspace Creation","id":"3892","title":"Next Steps After Workspace Creation"},"3893":{"body":"Each workspace gets 4 auto-generated guides in the docs/ directory: cd my_workspace # Overview and quick start\\ncat docs/README.md # Step-by-step deployment\\ncat docs/deployment-guide.md # Configuration reference\\ncat docs/configuration-guide.md # Common issues and solutions\\ncat docs/troubleshooting.md","breadcrumbs":"Workspace Setup » 1. Read Your Auto-Generated Documentation","id":"3893","title":"1. Read Your Auto-Generated Documentation"},"3894":{"body":"Edit the Nickel configuration files to suit your needs: # Master configuration (providers, settings)\\nvim config/config.ncl # Infrastructure definition\\nvim infra/default/main.ncl # Server configurations\\nvim infra/default/servers.ncl","breadcrumbs":"Workspace Setup » 2. Customize Your Configuration","id":"3894","title":"2. Customize Your Configuration"},"3895":{"body":"# Check Nickel syntax\\nnickel typecheck config/config.ncl\\nnickel typecheck infra/default/main.ncl # Validate with provisioning system\\nprovisioning validate config","breadcrumbs":"Workspace Setup » 3. Validate Your Configuration","id":"3895","title":"3. Validate Your Configuration"},"3896":{"body":"To add more infrastructure environments: # Create new infrastructure directory\\nmkdir infra/production\\nmkdir infra/staging # Create Nickel files for each infrastructure\\ncp infra/default/main.ncl infra/production/main.ncl\\ncp infra/default/servers.ncl infra/production/servers.ncl # Edit them for your specific needs\\nvim infra/production/servers.ncl","breadcrumbs":"Workspace Setup » 4. Add Multiple Infrastructures","id":"3896","title":"4. Add Multiple Infrastructures"},"3897":{"body":"To use cloud providers (UpCloud, AWS, etc.), update config/config.ncl: providers = { upcloud = { name = \\"upcloud\\", enabled = true, # Set to true to enable workspace = \\"my_workspace\\", auth = { interface = \\"API\\" }, paths = { base = \\".providers/upcloud\\", cache = \\".providers/upcloud/cache\\", state = \\".providers/upcloud/state\\", }, api = { url = \\"https://api.upcloud.com/1.3\\", timeout = 30, }, },\\n}","breadcrumbs":"Workspace Setup » 5. Configure Providers","id":"3897","title":"5. Configure Providers"},"3898":{"body":"","breadcrumbs":"Workspace Setup » Workspace Management Commands","id":"3898","title":"Workspace Management Commands"},"3899":{"body":"provisioning workspace list","breadcrumbs":"Workspace Setup » List Workspaces","id":"3899","title":"List Workspaces"},"39":{"body":"Check Troubleshooting Guide Review FAQ Enable debug mode: provisioning --debug Check logs: provisioning platform logs ","breadcrumbs":"Home » Reporting Issues","id":"39","title":"Reporting Issues"},"390":{"body":"After successful configuration, services can be started:","breadcrumbs":"Platform Service Configuration » Step 5: Run Platform Services","id":"390","title":"Step 5: Run Platform Services"},"3900":{"body":"provisioning workspace activate my_workspace","breadcrumbs":"Workspace Setup » Activate a Workspace","id":"3900","title":"Activate a Workspace"},"3901":{"body":"provisioning workspace active","breadcrumbs":"Workspace Setup » Show Active Workspace","id":"3901","title":"Show Active Workspace"},"3902":{"body":"# Dry-run first (check mode)\\nprovisioning -c server create # Actually create servers\\nprovisioning server create # List created servers\\nprovisioning server list","breadcrumbs":"Workspace Setup » Deploy Infrastructure","id":"3902","title":"Deploy Infrastructure"},"3903":{"body":"","breadcrumbs":"Workspace Setup » Troubleshooting","id":"3903","title":"Troubleshooting"},"3904":{"body":"# Check syntax\\nnickel typecheck config/config.ncl # Example error and solution\\nError: Type checking failed\\nSolution: Fix the syntax error shown and retry","breadcrumbs":"Workspace Setup » Invalid Nickel Syntax","id":"3904","title":"Invalid Nickel Syntax"},"3905":{"body":"Refer to the auto-generated docs/troubleshooting.md in your workspace for: Authentication & credentials issues Server deployment problems Configuration validation errors Network connectivity issues Performance issues","breadcrumbs":"Workspace Setup » Configuration Issues","id":"3905","title":"Configuration Issues"},"3906":{"body":"Consult workspace guides : Check the docs/ directory Check the docs : provisioning --help, provisioning workspace --help Enable debug mode : provisioning --debug server create Review logs : Check logs for detailed error information","breadcrumbs":"Workspace Setup » Getting Help","id":"3906","title":"Getting Help"},"3907":{"body":"Review auto-generated guides in docs/ Customize configuration in Nickel files Test with dry-run before deployment Deploy infrastructure Monitor and maintain your workspace For detailed deployment instructions, see docs/deployment-guide.md in your workspace.","breadcrumbs":"Workspace Setup » Next Steps","id":"3907","title":"Next Steps"},"3908":{"body":"Complete guide to workspace management in the provisioning platform.","breadcrumbs":"Workspace Guide » Workspace Guide","id":"3908","title":"Workspace Guide"},"3909":{"body":"The comprehensive workspace guide is available here: → Workspace Switching Guide - Complete workspace documentation This guide covers: Workspace creation and initialization Switching between multiple workspaces User preferences and configuration Workspace registry management Backup and restore operations","breadcrumbs":"Workspace Guide » 📖 Workspace Switching Guide","id":"3909","title":"📖 Workspace Switching Guide"},"391":{"body":"# Set deployment mode\\nexport ORCHESTRATOR_MODE=solo # Run the orchestrator service\\ncd provisioning/platform\\ncargo run -p orchestrator","breadcrumbs":"Platform Service Configuration » Running a Single Service","id":"391","title":"Running a Single Service"},"3910":{"body":"# List all workspaces\\nprovisioning workspace list # Switch to a workspace\\nprovisioning workspace switch # Create new workspace\\nprovisioning workspace init # Show active workspace\\nprovisioning workspace active","breadcrumbs":"Workspace Guide » Quick Start","id":"3910","title":"Quick Start"},"3911":{"body":"Workspace Switching Guide - Complete guide Workspace Configuration - Configuration commands Workspace Setup - Initial setup guide For complete workspace documentation, see Workspace Switching Guide .","breadcrumbs":"Workspace Guide » Additional Workspace Resources","id":"3911","title":"Additional Workspace Resources"},"3912":{"body":"Version : 1.0.0 Date : 2025-10-06 Status : ✅ Production Ready","breadcrumbs":"Workspace Switching Guide » Workspace Switching Guide","id":"3912","title":"Workspace Switching Guide"},"3913":{"body":"The provisioning system now includes a centralized workspace management system that allows you to easily switch between multiple workspaces without manually editing configuration files.","breadcrumbs":"Workspace Switching Guide » Overview","id":"3913","title":"Overview"},"3914":{"body":"","breadcrumbs":"Workspace Switching Guide » Quick Start","id":"3914","title":"Quick Start"},"3915":{"body":"provisioning workspace list\\n```bash Output: ```plaintext\\nRegistered Workspaces: ● librecloud Path: /Users/Akasha/project-provisioning/workspace_librecloud Last used: 2025-10-06T12:29:43Z production Path: /opt/workspaces/production Last used: 2025-10-05T10:15:30Z\\n```bash The green ● indicates the currently active workspace. ### Check Active Workspace ```bash\\nprovisioning workspace active\\n```bash Output: ```plaintext\\nActive Workspace: Name: librecloud Path: /Users/Akasha/project-provisioning/workspace_librecloud Last used: 2025-10-06T12:29:43Z\\n```bash ### Switch to Another Workspace ```bash\\n# Option 1: Using activate\\nprovisioning workspace activate production # Option 2: Using switch (alias)\\nprovisioning workspace switch production\\n```bash Output: ```plaintext\\n✓ Workspace \'production\' activated Current workspace: production\\nPath: /opt/workspaces/production ℹ All provisioning commands will now use this workspace\\n```bash ### Register a New Workspace ```bash\\n# Register without activating\\nprovisioning workspace register my-project ~/workspaces/my-project # Register and activate immediately\\nprovisioning workspace register my-project ~/workspaces/my-project --activate\\n```bash ### Remove Workspace from Registry ```bash\\n# With confirmation prompt\\nprovisioning workspace remove old-workspace # Skip confirmation\\nprovisioning workspace remove old-workspace --force\\n```bash **Note**: This only removes the workspace from the registry. The workspace files are NOT deleted. ## Architecture ### Central User Configuration All workspace information is stored in a central user configuration file: **Location**: `~/Library/Application Support/provisioning/user_config.yaml` **Structure**: ```yaml\\n# Active workspace (current workspace in use)\\nactive_workspace: \\"librecloud\\" # Known workspaces (automatically managed)\\nworkspaces: - name: \\"librecloud\\" path: \\"/Users/Akasha/project-provisioning/workspace_librecloud\\" last_used: \\"2025-10-06T12:29:43Z\\" - name: \\"production\\" path: \\"/opt/workspaces/production\\" last_used: \\"2025-10-05T10:15:30Z\\" # User preferences (global settings)\\npreferences: editor: \\"vim\\" output_format: \\"yaml\\" confirm_delete: true confirm_deploy: true default_log_level: \\"info\\" preferred_provider: \\"upcloud\\" # Metadata\\nmetadata: created: \\"2025-10-06T12:29:43Z\\" last_updated: \\"2025-10-06T13:46:16Z\\" version: \\"1.0.0\\"\\n```bash ### How It Works 1. **Workspace Registration**: When you register a workspace, it\'s added to the `workspaces` list in `user_config.yaml` 2. **Activation**: When you activate a workspace: - `active_workspace` is updated to the workspace name - The workspace\'s `last_used` timestamp is updated - All provisioning commands now use this workspace\'s configuration 3. **Configuration Loading**: The config loader reads `active_workspace` from `user_config.yaml` and loads: - `workspace_path/config/provisioning.yaml` - `workspace_path/config/providers/*.toml` - `workspace_path/config/platform/*.toml` - `workspace_path/config/kms.toml` ## Advanced Features ### User Preferences You can set global user preferences that apply across all workspaces: ```bash\\n# Get a preference value\\nprovisioning workspace get-preference editor # Set a preference value\\nprovisioning workspace set-preference editor \\"code\\" # View all preferences\\nprovisioning workspace preferences\\n```bash **Available Preferences**: - `editor`: Default editor for config files (vim, code, nano, etc.)\\n- `output_format`: Default output format (yaml, json, toml)\\n- `confirm_delete`: Require confirmation for deletions (true/false)\\n- `confirm_deploy`: Require confirmation for deployments (true/false)\\n- `default_log_level`: Default log level (debug, info, warn, error)\\n- `preferred_provider`: Preferred cloud provider (aws, upcloud, local) ### Output Formats List workspaces in different formats: ```bash\\n# Table format (default)\\nprovisioning workspace list # JSON format\\nprovisioning workspace list --format json # YAML format\\nprovisioning workspace list --format yaml\\n```bash ### Quiet Mode Activate workspace without output messages: ```bash\\nprovisioning workspace activate production --quiet\\n```bash ## Workspace Requirements For a workspace to be activated, it must have: 1. **Directory exists**: The workspace directory must exist on the filesystem 2. **Config directory**: Must have a `config/` directory ```bash workspace_name/ └── config/ ├── provisioning.yaml # Required ├── providers/ # Optional ├── platform/ # Optional └── kms.toml # Optional ```bash 3. **Main config file**: Must have `config/provisioning.yaml` If these requirements are not met, the activation will fail with helpful error messages: ```plaintext\\n✗ Workspace \'my-project\' not found in registry\\n💡 Available workspaces: [list of workspaces]\\n💡 Register it first with: provisioning workspace register my-project \\n```bash ```plaintext\\n✗ Workspace is not migrated to new config system\\n💡 Missing: /path/to/workspace/config\\n💡 Run migration: provisioning workspace migrate my-project\\n```bash ## Migration from Old System If you have workspaces using the old context system (`ws_{name}.yaml` files), they still work but you should register them in the new system: ```bash\\n# Register existing workspace\\nprovisioning workspace register old-workspace ~/workspaces/old-workspace # Activate it\\nprovisioning workspace activate old-workspace\\n```bash The old `ws_{name}.yaml` files are still supported for backward compatibility, but the new centralized system is recommended. ## Best Practices ### 1. **One Active Workspace at a Time** Only one workspace can be active at a time. All provisioning commands use the active workspace\'s configuration. ### 2. **Use Descriptive Names** Use clear, descriptive names for your workspaces: ```bash\\n# ✅ Good\\nprovisioning workspace register production-us-east ~/workspaces/prod-us-east\\nprovisioning workspace register dev-local ~/workspaces/dev # ❌ Avoid\\nprovisioning workspace register ws1 ~/workspaces/workspace1\\nprovisioning workspace register temp ~/workspaces/t\\n```bash ### 3. **Keep Workspaces Organized** Store all workspaces in a consistent location: ```bash\\n~/workspaces/\\n├── production/\\n├── staging/\\n├── development/\\n└── testing/\\n```bash ### 4. **Regular Cleanup** Remove workspaces you no longer use: ```bash\\n# List workspaces to see which ones are unused\\nprovisioning workspace list # Remove old workspace\\nprovisioning workspace remove old-workspace\\n```bash ### 5. **Backup User Config** Periodically backup your user configuration: ```bash\\ncp \\"~/Library/Application Support/provisioning/user_config.yaml\\" \\\\ \\"~/Library/Application Support/provisioning/user_config.yaml.backup\\"\\n```bash ## Troubleshooting ### Workspace Not Found **Problem**: `✗ Workspace \'name\' not found in registry` **Solution**: Register the workspace first: ```bash\\nprovisioning workspace register name /path/to/workspace\\n```bash ### Missing Configuration **Problem**: `✗ Missing workspace configuration` **Solution**: Ensure the workspace has a `config/provisioning.yaml` file. Run migration if needed: ```bash\\nprovisioning workspace migrate name\\n```bash ### Directory Not Found **Problem**: `✗ Workspace directory not found: /path/to/workspace` **Solution**: 1. Check if the workspace was moved or deleted\\n2. Update the path or remove from registry: ```bash\\nprovisioning workspace remove name\\nprovisioning workspace register name /new/path\\n```bash ### Corrupted User Config **Problem**: `Error: Failed to parse user config` **Solution**: The system automatically creates a backup and regenerates the config. Check: ```bash\\nls -la \\"~/Library/Application Support/provisioning/user_config.yaml\\"*\\n```bash Restore from backup if needed: ```bash\\ncp \\"~/Library/Application Support/provisioning/user_config.yaml.backup.TIMESTAMP\\" \\\\ \\"~/Library/Application Support/provisioning/user_config.yaml\\"\\n```bash ## CLI Commands Reference | Command | Alias | Description |\\n| --------- | ------- | ------------- |\\n| `provisioning workspace activate ` | - | Activate a workspace |\\n| `provisioning workspace switch ` | - | Alias for activate |\\n| `provisioning workspace list` | - | List all registered workspaces |\\n| `provisioning workspace active` | - | Show currently active workspace |\\n| `provisioning workspace register ` | - | Register a new workspace |\\n| `provisioning workspace remove ` | - | Remove workspace from registry |\\n| `provisioning workspace preferences` | - | Show user preferences |\\n| `provisioning workspace set-preference ` | - | Set a preference |\\n| `provisioning workspace get-preference ` | - | Get a preference value | ## Integration with Config System The workspace switching system is fully integrated with the new target-based configuration system: ### Configuration Hierarchy (Priority: Low → High) ```plaintext\\n1. Workspace config workspace/{name}/config/provisioning.yaml\\n2. Provider configs workspace/{name}/config/providers/*.toml\\n3. Platform configs workspace/{name}/config/platform/*.toml\\n4. User context ~/Library/Application Support/provisioning/ws_{name}.yaml (legacy)\\n5. User config ~/Library/Application Support/provisioning/user_config.yaml (new)\\n6. Environment variables PROVISIONING_*\\n```bash ### Example Workflow ```bash\\n# 1. Create and activate development workspace\\nprovisioning workspace register dev ~/workspaces/dev --activate # 2. Work on development\\nprovisioning server create web-dev-01\\nprovisioning taskserv create kubernetes # 3. Switch to production\\nprovisioning workspace switch production # 4. Deploy to production\\nprovisioning server create web-prod-01\\nprovisioning taskserv create kubernetes # 5. Switch back to development\\nprovisioning workspace switch dev # All commands now use dev workspace config\\n```bash ## Nickel Workspace Configuration Starting with v3.7.0, workspaces use **Nickel** for type-safe, schema-validated configurations. ### Nickel Configuration Features **Nickel Configuration** (Type-Safe): ```nickel\\n{ workspace = { name = \\"myworkspace\\", version = \\"1.0.0\\", }, paths = { base = \\"/path/to/workspace\\", infra = \\"/path/to/workspace/infra\\", config = \\"/path/to/workspace/config\\", },\\n}\\n```bash ### Benefits of Nickel Configuration - ✅ **Type Safety**: Catch configuration errors at load time, not runtime\\n- ✅ **Schema Validation**: Required fields, value constraints, format checking\\n- ✅ **Lazy Evaluation**: Only computes what\'s needed\\n- ✅ **Self-Documenting**: Records provide instant documentation\\n- ✅ **Merging**: Powerful record merging for composition ### Viewing Workspace Configuration ```bash\\n# View your Nickel workspace configuration\\nprovisioning workspace config show # View in different formats\\nprovisioning workspace config show --format=yaml # YAML output\\nprovisioning workspace config show --format=json # JSON output\\nprovisioning workspace config show --format=nickel # Raw Nickel file # Validate configuration\\nprovisioning workspace config validate\\n# Output: ✅ Validation complete - all configs are valid # Show configuration hierarchy\\nprovisioning workspace config hierarchy\\n```bash ## See Also - **Configuration Guide**: `docs/architecture/adr/ADR-010-configuration-format-strategy.md`\\n- **Migration Guide**: [Nickel Migration](../architecture/adr/adr-011-nickel-migration.md)\\n- **From-Scratch Guide**: [From-Scratch Guide](../guides/from-scratch.md)\\n- **Nickel Patterns**: Nickel Language Module System --- **Maintained By**: Infrastructure Team\\n**Version**: 2.0.0 (Updated for Nickel)\\n**Status**: ✅ Production Ready\\n**Last Updated**: 2025-12-03","breadcrumbs":"Workspace Switching Guide » List Available Workspaces","id":"3915","title":"List Available Workspaces"},"3916":{"body":"","breadcrumbs":"Workspace Switching System » Workspace Switching System (v2.0.5)","id":"3916","title":"Workspace Switching System (v2.0.5)"},"3917":{"body":"A centralized workspace management system has been implemented, allowing seamless switching between multiple workspaces without manually editing configuration files. This builds upon the target-based configuration system.","breadcrumbs":"Workspace Switching System » 🚀 Workspace Switching Completed (2025-10-02)","id":"3917","title":"🚀 Workspace Switching Completed (2025-10-02)"},"3918":{"body":"Centralized Configuration : Single user_config.yaml file stores all workspace information Simple CLI Commands : Switch workspaces with a single command Active Workspace Tracking : Automatic tracking of currently active workspace Workspace Registry : Maintain list of all known workspaces User Preferences : Global user settings that apply across all workspaces Automatic Updates : Last-used timestamps and metadata automatically managed Validation : Ensures workspaces have required configuration before activation","breadcrumbs":"Workspace Switching System » Key Features","id":"3918","title":"Key Features"},"3919":{"body":"# List all registered workspaces\\nprovisioning workspace list # Show currently active workspace\\nprovisioning workspace active # Switch to another workspace\\nprovisioning workspace activate \\nprovisioning workspace switch # alias # Register a new workspace\\nprovisioning workspace register [--activate] # Remove workspace from registry (does not delete files)\\nprovisioning workspace remove [--force] # View user preferences\\nprovisioning workspace preferences # Set user preference\\nprovisioning workspace set-preference # Get user preference\\nprovisioning workspace get-preference ","breadcrumbs":"Workspace Switching System » Workspace Management Commands","id":"3919","title":"Workspace Management Commands"},"392":{"body":"# Terminal 1: Vault Service (secrets management)\\nexport VAULT_MODE=solo\\ncargo run -p vault-service # Terminal 2: Orchestrator (main service)\\nexport ORCHESTRATOR_MODE=solo\\ncargo run -p orchestrator # Terminal 3: Control Center (web UI)\\nexport CONTROL_CENTER_MODE=solo\\ncargo run -p control-center # Access web UI at http://localhost:8080 (default)","breadcrumbs":"Platform Service Configuration » Running Multiple Services","id":"392","title":"Running Multiple Services"},"3920":{"body":"Location : ~/Library/Application Support/provisioning/user_config.yaml Structure : # Active workspace (current workspace in use)\\nactive_workspace: \\"librecloud\\" # Known workspaces (automatically managed)\\nworkspaces: - name: \\"librecloud\\" path: \\"/Users/Akasha/project-provisioning/workspace_librecloud\\" last_used: \\"2025-10-06T12:29:43Z\\" - name: \\"production\\" path: \\"/opt/workspaces/production\\" last_used: \\"2025-10-05T10:15:30Z\\" # User preferences (global settings)\\npreferences: editor: \\"vim\\" output_format: \\"yaml\\" confirm_delete: true confirm_deploy: true default_log_level: \\"info\\" preferred_provider: \\"upcloud\\" # Metadata\\nmetadata: created: \\"2025-10-06T12:29:43Z\\" last_updated: \\"2025-10-06T13:46:16Z\\" version: \\"1.0.0\\"","breadcrumbs":"Workspace Switching System » Central User Configuration","id":"3920","title":"Central User Configuration"},"3921":{"body":"# Start with workspace librecloud active\\n$ provisioning workspace active\\nActive Workspace: Name: librecloud Path: /Users/Akasha/project-provisioning/workspace_librecloud Last used: 2025-10-06T13:46:16Z # List all workspaces (● indicates active)\\n$ provisioning workspace list Registered Workspaces: ● librecloud Path: /Users/Akasha/project-provisioning/workspace_librecloud Last used: 2025-10-06T13:46:16Z production Path: /opt/workspaces/production Last used: 2025-10-05T10:15:30Z # Switch to production\\n$ provisioning workspace switch production\\n✓ Workspace \'production\' activated Current workspace: production\\nPath: /opt/workspaces/production ℹ All provisioning commands will now use this workspace # All subsequent commands use production workspace\\n$ provisioning server list\\n$ provisioning taskserv create kubernetes","breadcrumbs":"Workspace Switching System » Usage Example","id":"3921","title":"Usage Example"},"3922":{"body":"The workspace switching system integrates seamlessly with the configuration system: Active Workspace Detection : Config loader reads active_workspace from user_config.yaml Workspace Validation : Ensures workspace has required config/provisioning.yaml Configuration Loading : Loads workspace-specific configs automatically Automatic Timestamps : Updates last_used on workspace activation Configuration Hierarchy (Priority: Low → High): 1. Workspace config workspace/{name}/config/provisioning.yaml\\n2. Provider configs workspace/{name}/config/providers/*.toml\\n3. Platform configs workspace/{name}/config/platform/*.toml\\n4. User config ~/Library/Application Support/provisioning/user_config.yaml\\n5. Environment variables PROVISIONING_*","breadcrumbs":"Workspace Switching System » Integration with Config System","id":"3922","title":"Integration with Config System"},"3923":{"body":"✅ No Manual Config Editing : Switch workspaces with single command ✅ Multiple Workspaces : Manage dev, staging, production simultaneously ✅ User Preferences : Global settings across all workspaces ✅ Automatic Tracking : Last-used timestamps, active workspace markers ✅ Safe Operations : Validation before activation, confirmation prompts ✅ Backward Compatible : Old ws_{name}.yaml files still supported For more detailed information, see Workspace Switching Guide .","breadcrumbs":"Workspace Switching System » Benefits","id":"3923","title":"Benefits"},"3924":{"body":"Version : 2.0.0 Date : 2025-10-06 Status : Implemented","breadcrumbs":"Workspace Config Architecture » Workspace Configuration Architecture","id":"3924","title":"Workspace Configuration Architecture"},"3925":{"body":"The provisioning system now uses a workspace-based configuration architecture where each workspace has its own complete configuration structure. This replaces the old ENV-based and template-only system.","breadcrumbs":"Workspace Config Architecture » Overview","id":"3925","title":"Overview"},"3926":{"body":"config.defaults.toml is ONLY a template, NEVER loaded at runtime This file exists solely as a reference template for generating workspace configurations. The system does NOT load it during operation.","breadcrumbs":"Workspace Config Architecture » Critical Design Principle","id":"3926","title":"Critical Design Principle"},"3927":{"body":"Configuration is loaded in the following order (lowest to highest priority): Workspace Config (Base): {workspace}/config/provisioning.yaml Provider Configs : {workspace}/config/providers/*.toml Platform Configs : {workspace}/config/platform/*.toml User Context : ~/Library/Application Support/provisioning/ws_{name}.yaml Environment Variables : PROVISIONING_* (highest priority)","breadcrumbs":"Workspace Config Architecture » Configuration Hierarchy","id":"3927","title":"Configuration Hierarchy"},"3928":{"body":"When a workspace is initialized, the following structure is created: {workspace}/\\n├── config/\\n│ ├── provisioning.yaml # Main workspace config (generated from template)\\n│ ├── providers/ # Provider-specific configs\\n│ │ ├── aws.toml\\n│ │ ├── local.toml\\n│ │ └── upcloud.toml\\n│ ├── platform/ # Platform service configs\\n│ │ ├── orchestrator.toml\\n│ │ └── mcp.toml\\n│ └── kms.toml # KMS configuration\\n├── infra/ # Infrastructure definitions\\n├── .cache/ # Cache directory\\n├── .runtime/ # Runtime data\\n│ ├── taskservs/\\n│ └── clusters/\\n├── .providers/ # Provider state\\n├── .kms/ # Key management\\n│ └── keys/\\n├── generated/ # Generated files\\n└── .gitignore # Workspace gitignore","breadcrumbs":"Workspace Config Architecture » Workspace Structure","id":"3928","title":"Workspace Structure"},"3929":{"body":"Templates are located at: /Users/Akasha/project-provisioning/provisioning/config/templates/","breadcrumbs":"Workspace Config Architecture » Template System","id":"3929","title":"Template System"},"393":{"body":"# Start all services in Docker (requires docker-compose.yml)\\ncd provisioning/platform/infrastructure/docker\\ndocker-compose -f docker-compose.solo.yml up # Or for enterprise mode\\ndocker-compose -f docker-compose.enterprise.yml up","breadcrumbs":"Platform Service Configuration » Docker-Based Deployment","id":"393","title":"Docker-Based Deployment"},"3930":{"body":"workspace-provisioning.yaml.template - Main workspace configuration provider-aws.toml.template - AWS provider configuration provider-local.toml.template - Local provider configuration provider-upcloud.toml.template - UpCloud provider configuration kms.toml.template - KMS configuration user-context.yaml.template - User context configuration","breadcrumbs":"Workspace Config Architecture » Available Templates","id":"3930","title":"Available Templates"},"3931":{"body":"Templates support the following interpolation variables: {{workspace.name}} - Workspace name {{workspace.path}} - Absolute path to workspace {{now.iso}} - Current timestamp in ISO format {{env.HOME}} - User\'s home directory {{env.*}} - Environment variables (safe list only) {{paths.base}} - Base path (after config load)","breadcrumbs":"Workspace Config Architecture » Template Variables","id":"3931","title":"Template Variables"},"3932":{"body":"","breadcrumbs":"Workspace Config Architecture » Workspace Initialization","id":"3932","title":"Workspace Initialization"},"3933":{"body":"# Using the workspace init function\\nnu -c \\"use provisioning/core/nulib/lib_provisioning/workspace/init.nu *; \\\\ workspace-init \'my-workspace\' \'/path/to/workspace\' \\\\ --providers [\'aws\' \'local\'] --activate\\"","breadcrumbs":"Workspace Config Architecture » Command","id":"3933","title":"Command"},"3934":{"body":"Create Directory Structure : All necessary directories Generate Config from Template : Creates config/provisioning.yaml Generate Provider Configs : For each specified provider Generate KMS Config : Security configuration Create User Context (if --activate): User-specific overrides Create .gitignore : Ignore runtime/cache files","breadcrumbs":"Workspace Config Architecture » Process","id":"3934","title":"Process"},"3935":{"body":"User context files are stored per workspace: Location : ~/Library/Application Support/provisioning/ws_{workspace_name}.yaml","breadcrumbs":"Workspace Config Architecture » User Context","id":"3935","title":"User Context"},"3936":{"body":"Store user-specific overrides (debug settings, output preferences) Mark active workspace Override workspace paths if needed","breadcrumbs":"Workspace Config Architecture » Purpose","id":"3936","title":"Purpose"},"3937":{"body":"workspace: name: \\"my-workspace\\" path: \\"/path/to/my-workspace\\" active: true debug: enabled: true log_level: \\"debug\\" output: format: \\"json\\" providers: default: \\"aws\\"","breadcrumbs":"Workspace Config Architecture » Example","id":"3937","title":"Example"},"3938":{"body":"","breadcrumbs":"Workspace Config Architecture » Configuration Loading Process","id":"3938","title":"Configuration Loading Process"},"3939":{"body":"# Check user config directory for active workspace\\nlet user_config_dir = ~/Library/Application Support/provisioning/\\nlet active_workspace = (find workspace with active: true in ws_*.yaml files)","breadcrumbs":"Workspace Config Architecture » 1. Determine Active Workspace","id":"3939","title":"1. Determine Active Workspace"},"394":{"body":"# Check orchestrator status\\ncurl http://localhost:9000/health # Check control center web UI\\nopen http://localhost:8080 # View service logs\\nexport ORCHESTRATOR_MODE=solo\\ncargo run -p orchestrator -- --log-level debug","breadcrumbs":"Platform Service Configuration » Step 6: Verify Services Are Running","id":"394","title":"Step 6: Verify Services Are Running"},"3940":{"body":"# Load main workspace config\\nlet workspace_config = {workspace.path}/config/provisioning.yaml","breadcrumbs":"Workspace Config Architecture » 2. Load Workspace Config","id":"3940","title":"2. Load Workspace Config"},"3941":{"body":"# Merge all provider configs\\nfor provider in {workspace.path}/config/providers/*.toml { merge provider config\\n}","breadcrumbs":"Workspace Config Architecture » 3. Load Provider Configs","id":"3941","title":"3. Load Provider Configs"},"3942":{"body":"# Merge all platform configs\\nfor platform in {workspace.path}/config/platform/*.toml { merge platform config\\n}","breadcrumbs":"Workspace Config Architecture » 4. Load Platform Configs","id":"3942","title":"4. Load Platform Configs"},"3943":{"body":"# Apply user-specific overrides\\nlet user_context = ~/Library/Application Support/provisioning/ws_{name}.yaml\\nmerge user_context (highest config priority)","breadcrumbs":"Workspace Config Architecture » 5. Apply User Context","id":"3943","title":"5. Apply User Context"},"3944":{"body":"# Final overrides from environment\\nPROVISIONING_DEBUG=true\\nPROVISIONING_LOG_LEVEL=debug\\nPROVISIONING_PROVIDER=aws\\n# etc.","breadcrumbs":"Workspace Config Architecture » 6. Apply Environment Variables","id":"3944","title":"6. Apply Environment Variables"},"3945":{"body":"","breadcrumbs":"Workspace Config Architecture » Migration from Old System","id":"3945","title":"Migration from Old System"},"3946":{"body":"export PROVISIONING=/usr/local/provisioning\\nexport PROVISIONING_INFRA_PATH=/path/to/infra\\nexport PROVISIONING_DEBUG=true\\n# ... many ENV variables","breadcrumbs":"Workspace Config Architecture » Before (ENV-based)","id":"3946","title":"Before (ENV-based)"},"3947":{"body":"# Initialize workspace\\nworkspace-init \\"production\\" \\"/workspaces/prod\\" --providers [\\"aws\\"] --activate # All config is now in workspace\\n# No ENV variables needed (except for overrides)","breadcrumbs":"Workspace Config Architecture » After (Workspace-based)","id":"3947","title":"After (Workspace-based)"},"3948":{"body":"config.defaults.toml NOT loaded - Only used as template Workspace required - Must have active workspace or be in workspace directory New config locations - User config in ~/Library/Application Support/provisioning/ YAML main config - provisioning.yaml instead of TOML","breadcrumbs":"Workspace Config Architecture » Breaking Changes","id":"3948","title":"Breaking Changes"},"3949":{"body":"","breadcrumbs":"Workspace Config Architecture » Workspace Management Commands","id":"3949","title":"Workspace Management Commands"},"395":{"body":"","breadcrumbs":"Platform Service Configuration » Customizing Configuration","id":"395","title":"Customizing Configuration"},"3950":{"body":"use provisioning/core/nulib/lib_provisioning/workspace/init.nu *\\nworkspace-init \\"my-workspace\\" \\"/path/to/workspace\\" --providers [\\"aws\\" \\"local\\"] --activate","breadcrumbs":"Workspace Config Architecture » Initialize Workspace","id":"3950","title":"Initialize Workspace"},"3951":{"body":"workspace-list","breadcrumbs":"Workspace Config Architecture » List Workspaces","id":"3951","title":"List Workspaces"},"3952":{"body":"workspace-activate \\"my-workspace\\"","breadcrumbs":"Workspace Config Architecture » Activate Workspace","id":"3952","title":"Activate Workspace"},"3953":{"body":"workspace-get-active","breadcrumbs":"Workspace Config Architecture » Get Active Workspace","id":"3953","title":"Get Active Workspace"},"3954":{"body":"","breadcrumbs":"Workspace Config Architecture » Implementation Files","id":"3954","title":"Implementation Files"},"3955":{"body":"Template Directory : /Users/Akasha/project-provisioning/provisioning/config/templates/ Workspace Init : /Users/Akasha/project-provisioning/provisioning/core/nulib/lib_provisioning/workspace/init.nu Config Loader : /Users/Akasha/project-provisioning/provisioning/core/nulib/lib_provisioning/config/loader.nu","breadcrumbs":"Workspace Config Architecture » Core Files","id":"3955","title":"Core Files"},"3956":{"body":"Removed get-defaults-config-path() - No longer loads config.defaults.toml Old hierarchy with user/project/infra TOML files Added get-active-workspace() - Finds active workspace from user config Support for YAML config files Provider and platform config merging User context loading","breadcrumbs":"Workspace Config Architecture » Key Changes in Config Loader","id":"3956","title":"Key Changes in Config Loader"},"3957":{"body":"","breadcrumbs":"Workspace Config Architecture » Configuration Schema","id":"3957","title":"Configuration Schema"},"3958":{"body":"workspace: name: string version: string created: timestamp paths: base: string infra: string cache: string runtime: string # ... all paths core: version: string name: string debug: enabled: bool log_level: string # ... debug settings providers: active: [string] default: string # ... all other sections","breadcrumbs":"Workspace Config Architecture » Main Workspace Config (provisioning.yaml)","id":"3958","title":"Main Workspace Config (provisioning.yaml)"},"3959":{"body":"[provider]\\nname = \\"aws\\"\\nenabled = true\\nworkspace = \\"workspace-name\\" [provider.auth]\\nprofile = \\"default\\"\\nregion = \\"us-east-1\\" [provider.paths]\\nbase = \\"{workspace}/.providers/aws\\"\\ncache = \\"{workspace}/.providers/aws/cache\\"","breadcrumbs":"Workspace Config Architecture » Provider Config (providers/*.toml)","id":"3959","title":"Provider Config (providers/*.toml)"},"396":{"body":"If you need to switch from solo to multiuser mode: # Option 1: Re-run setup with new mode\\n./provisioning/scripts/setup-platform-config.sh --quick-mode --mode multiuser # Option 2: Interactive update via TypeDialog\\n./provisioning/scripts/setup-platform-config.sh --service orchestrator --mode multiuser --backend web # Result: All configurations updated for multiuser mode\\n# Services read from provisioning/config/runtime/generated/orchestrator.multiuser.toml","breadcrumbs":"Platform Service Configuration » Scenario: Change Deployment Mode","id":"396","title":"Scenario: Change Deployment Mode"},"3960":{"body":"workspace: name: string path: string active: bool debug: enabled: bool log_level: string output: format: string","breadcrumbs":"Workspace Config Architecture » User Context (ws_{name}.yaml)","id":"3960","title":"User Context (ws_{name}.yaml)"},"3961":{"body":"No Template Loading : config.defaults.toml is template-only Workspace Isolation : Each workspace is self-contained Explicit Configuration : No hidden defaults from ENV Clear Hierarchy : Predictable override behavior Multi-Workspace Support : Easy switching between workspaces User Overrides : Per-workspace user preferences Version Control : Workspace configs can be committed (except secrets)","breadcrumbs":"Workspace Config Architecture » Benefits","id":"3961","title":"Benefits"},"3962":{"body":"","breadcrumbs":"Workspace Config Architecture » Security Considerations","id":"3962","title":"Security Considerations"},"3963":{"body":"The workspace .gitignore excludes: .cache/ - Cache files .runtime/ - Runtime data .providers/ - Provider state .kms/keys/ - Secret keys generated/ - Generated files *.log - Log files","breadcrumbs":"Workspace Config Architecture » Generated .gitignore","id":"3963","title":"Generated .gitignore"},"3964":{"body":"KMS keys stored in .kms/keys/ (gitignored) SOPS config references keys, doesn\'t store them Provider credentials in user-specific locations (not workspace)","breadcrumbs":"Workspace Config Architecture » Secret Management","id":"3964","title":"Secret Management"},"3965":{"body":"","breadcrumbs":"Workspace Config Architecture » Troubleshooting","id":"3965","title":"Troubleshooting"},"3966":{"body":"Error: No active workspace found. Please initialize or activate a workspace. Solution : Initialize or activate a workspace: workspace-init \\"my-workspace\\" \\"/path/to/workspace\\" --activate","breadcrumbs":"Workspace Config Architecture » No Active Workspace Error","id":"3966","title":"No Active Workspace Error"},"3967":{"body":"Error: Required configuration file not found: {workspace}/config/provisioning.yaml Solution : The workspace config is corrupted or deleted. Re-initialize: workspace-init \\"workspace-name\\" \\"/existing/path\\" --providers [\\"aws\\"]","breadcrumbs":"Workspace Config Architecture » Config File Not Found","id":"3967","title":"Config File Not Found"},"3968":{"body":"Solution : Add provider config to workspace: # Generate provider config manually\\ngenerate-provider-config \\"/workspace/path\\" \\"workspace-name\\" \\"aws\\"","breadcrumbs":"Workspace Config Architecture » Provider Not Configured","id":"3968","title":"Provider Not Configured"},"3969":{"body":"Workspace Templates : Pre-configured workspace templates (dev, prod, test) Workspace Import/Export : Share workspace configurations Remote Workspace : Load workspace from remote Git repository Workspace Validation : Comprehensive workspace health checks Config Migration Tool : Automated migration from old ENV-based system","breadcrumbs":"Workspace Config Architecture » Future Enhancements","id":"3969","title":"Future Enhancements"},"397":{"body":"If you need fine-grained control: # 1. Edit the Nickel configuration directly\\nvim provisioning/config/runtime/orchestrator.solo.ncl # 2. Make your changes (for example, change port, add environment variables) # 3. Validate syntax\\nnickel typecheck provisioning/config/runtime/orchestrator.solo.ncl # 4. CRITICAL: Regenerate TOML (services won\'t see changes without this)\\n./provisioning/scripts/setup-platform-config.sh --generate-toml # 5. Verify TOML was updated\\nstat provisioning/config/runtime/generated/orchestrator.solo.toml # 6. Restart service with new configuration\\npkill orchestrator\\nexport ORCHESTRATOR_MODE=solo\\ncargo run -p orchestrator","breadcrumbs":"Platform Service Configuration » Scenario: Manual Configuration Edit","id":"397","title":"Scenario: Manual Configuration Edit"},"3970":{"body":"config.defaults.toml is ONLY a template - Never loaded at runtime Workspaces are self-contained - Complete config structure generated from templates New hierarchy : Workspace → Provider → Platform → User Context → ENV User context for overrides - Stored in ~/Library/Application Support/provisioning/ Clear, explicit configuration - No hidden defaults","breadcrumbs":"Workspace Config Architecture » Summary","id":"3970","title":"Summary"},"3971":{"body":"Template files: provisioning/config/templates/ Workspace init: provisioning/core/nulib/lib_provisioning/workspace/init.nu Config loader: provisioning/core/nulib/lib_provisioning/config/loader.nu User guide: docs/user/workspace-management.md","breadcrumbs":"Workspace Config Architecture » Related Documentation","id":"3971","title":"Related Documentation"},"3972":{"body":"","breadcrumbs":"Workspace Config Commands » Workspace Configuration Management Commands","id":"3972","title":"Workspace Configuration Management Commands"},"3973":{"body":"The workspace configuration management commands provide a comprehensive set of tools for viewing, editing, validating, and managing workspace configurations.","breadcrumbs":"Workspace Config Commands » Overview","id":"3973","title":"Overview"},"3974":{"body":"Command Description workspace config show Display workspace configuration workspace config validate Validate all configuration files workspace config generate provider Generate provider configuration from template workspace config edit Edit configuration files workspace config hierarchy Show configuration loading hierarchy workspace config list List all configuration files","breadcrumbs":"Workspace Config Commands » Command Summary","id":"3974","title":"Command Summary"},"3975":{"body":"","breadcrumbs":"Workspace Config Commands » Commands","id":"3975","title":"Commands"},"3976":{"body":"Display the complete workspace configuration in JSON, YAML, TOML, and other formats. # Show active workspace config (YAML format)\\nprovisioning workspace config show # Show specific workspace config\\nprovisioning workspace config show my-workspace # Show in JSON format\\nprovisioning workspace config show --out json # Show in TOML format\\nprovisioning workspace config show --out toml # Show specific workspace in JSON\\nprovisioning workspace config show my-workspace --out json Output: Complete workspace configuration in the specified format","breadcrumbs":"Workspace Config Commands » Show Workspace Configuration","id":"3976","title":"Show Workspace Configuration"},"3977":{"body":"Validate all configuration files for syntax and required sections. # Validate active workspace\\nprovisioning workspace config validate # Validate specific workspace\\nprovisioning workspace config validate my-workspace Checks performed: Main config (provisioning.yaml) - YAML syntax and required sections Provider configs (providers/*.toml) - TOML syntax Platform service configs (platform/*.toml) - TOML syntax KMS config (kms.toml) - TOML syntax Output: Validation report with success/error indicators","breadcrumbs":"Workspace Config Commands » Validate Workspace Configuration","id":"3977","title":"Validate Workspace Configuration"},"3978":{"body":"Generate a provider configuration file from a template. # Generate AWS provider config for active workspace\\nprovisioning workspace config generate provider aws # Generate UpCloud provider config for specific workspace\\nprovisioning workspace config generate provider upcloud --infra my-workspace # Generate local provider config\\nprovisioning workspace config generate provider local What it does: Locates provider template in extensions/providers/{name}/config.defaults.toml Interpolates workspace-specific values ({{workspace.name}}, {{workspace.path}}) Saves to {workspace}/config/providers/{name}.toml Output: Generated configuration file ready for customization","breadcrumbs":"Workspace Config Commands » Generate Provider Configuration","id":"3978","title":"Generate Provider Configuration"},"3979":{"body":"Open configuration files in your editor for modification. # Edit main workspace config\\nprovisioning workspace config edit main # Edit specific provider config\\nprovisioning workspace config edit provider aws # Edit platform service config\\nprovisioning workspace config edit platform orchestrator # Edit KMS config\\nprovisioning workspace config edit kms # Edit for specific workspace\\nprovisioning workspace config edit provider upcloud --infra my-workspace Editor used: Value of $EDITOR environment variable (defaults to vi) Config types: main - Main workspace configuration (provisioning.yaml) provider - Provider configuration (providers/{name}.toml) platform - Platform service configuration (platform/{name}.toml) kms - KMS configuration (kms.toml)","breadcrumbs":"Workspace Config Commands » Edit Configuration Files","id":"3979","title":"Edit Configuration Files"},"398":{"body":"For workspace-specific customization: # Create workspace override file\\nmkdir -p workspace_myworkspace/config\\ncat > workspace_myworkspace/config/platform-overrides.ncl <<\'EOF\'\\n# Workspace-specific settings\\n{ orchestrator = { server.port = 9999, # Custom port workspace.name = \\"myworkspace\\" }, control_center = { workspace.name = \\"myworkspace\\" }\\n}\\nEOF # Generate config with workspace overrides\\n./provisioning/scripts/setup-platform-config.sh --workspace workspace_myworkspace # Configuration system merges: defaults + mode overlay + workspace overrides","breadcrumbs":"Platform Service Configuration » Scenario: Workspace-Specific Overrides","id":"398","title":"Scenario: Workspace-Specific Overrides"},"3980":{"body":"Display the configuration loading hierarchy and precedence. # Show hierarchy for active workspace\\nprovisioning workspace config hierarchy # Show hierarchy for specific workspace\\nprovisioning workspace config hierarchy my-workspace Output: Visual hierarchy showing: Environment Variables (highest priority) User Context Platform Services Provider Configs Workspace Config (lowest priority)","breadcrumbs":"Workspace Config Commands » Show Configuration Hierarchy","id":"3980","title":"Show Configuration Hierarchy"},"3981":{"body":"List all configuration files for a workspace. # List all configs\\nprovisioning workspace config list # List only provider configs\\nprovisioning workspace config list --type provider # List only platform configs\\nprovisioning workspace config list --type platform # List only KMS config\\nprovisioning workspace config list --type kms # List for specific workspace\\nprovisioning workspace config list my-workspace --type all Output: Table of configuration files with type, name, and path","breadcrumbs":"Workspace Config Commands » List Configuration Files","id":"3981","title":"List Configuration Files"},"3982":{"body":"All config commands support two ways to specify the workspace: Active Workspace (default): provisioning workspace config show Specific Workspace (using --infra flag): provisioning workspace config show --infra my-workspace","breadcrumbs":"Workspace Config Commands » Workspace Selection","id":"3982","title":"Workspace Selection"},"3983":{"body":"Workspace configurations are organized in a standard structure: {workspace}/\\n├── config/\\n│ ├── provisioning.yaml # Main workspace config\\n│ ├── providers/ # Provider configurations\\n│ │ ├── aws.toml\\n│ │ ├── upcloud.toml\\n│ │ └── local.toml\\n│ ├── platform/ # Platform service configs\\n│ │ ├── orchestrator.toml\\n│ │ ├── control-center.toml\\n│ │ └── mcp.toml\\n│ └── kms.toml # KMS configuration","breadcrumbs":"Workspace Config Commands » Configuration File Locations","id":"3983","title":"Configuration File Locations"},"3984":{"body":"Configuration values are loaded in the following order (highest to lowest priority): Environment Variables - PROVISIONING_* variables User Context - ~/Library/Application Support/provisioning/ws_{name}.yaml Platform Services - {workspace}/config/platform/*.toml Provider Configs - {workspace}/config/providers/*.toml Workspace Config - {workspace}/config/provisioning.yaml Higher priority values override lower priority values.","breadcrumbs":"Workspace Config Commands » Configuration Hierarchy","id":"3984","title":"Configuration Hierarchy"},"3985":{"body":"","breadcrumbs":"Workspace Config Commands » Examples","id":"3985","title":"Examples"},"3986":{"body":"# 1. Create new workspace with activation\\nprovisioning workspace init my-project ~/workspaces/my-project --providers [aws,local] --activate # 2. Validate configuration\\nprovisioning workspace config validate # 3. View configuration hierarchy\\nprovisioning workspace config hierarchy # 4. Generate additional provider config\\nprovisioning workspace config generate provider upcloud # 5. Edit provider settings\\nprovisioning workspace config edit provider upcloud # 6. List all configs\\nprovisioning workspace config list # 7. Show complete config in JSON\\nprovisioning workspace config show --out json # 8. Validate everything\\nprovisioning workspace config validate","breadcrumbs":"Workspace Config Commands » Complete Workflow","id":"3986","title":"Complete Workflow"},"3987":{"body":"# Create multiple workspaces\\nprovisioning workspace init dev ~/workspaces/dev --activate\\nprovisioning workspace init staging ~/workspaces/staging\\nprovisioning workspace init prod ~/workspaces/prod # Validate specific workspace\\nprovisioning workspace config validate staging # Show config for production\\nprovisioning workspace config show prod --out yaml # Edit provider for specific workspace\\nprovisioning workspace config edit provider aws --infra prod","breadcrumbs":"Workspace Config Commands » Multi-Workspace Management","id":"3987","title":"Multi-Workspace Management"},"3988":{"body":"# 1. Validate all configs\\nprovisioning workspace config validate # 2. If errors, check hierarchy\\nprovisioning workspace config hierarchy # 3. List all config files\\nprovisioning workspace config list # 4. Edit problematic config\\nprovisioning workspace config edit provider aws # 5. Validate again\\nprovisioning workspace config validate","breadcrumbs":"Workspace Config Commands » Configuration Troubleshooting","id":"3988","title":"Configuration Troubleshooting"},"3989":{"body":"Config commands integrate seamlessly with other workspace operations: # Create workspace with providers\\nprovisioning workspace init my-app ~/apps/my-app --providers [aws,upcloud] --activate # Generate additional configs\\nprovisioning workspace config generate provider local # Validate before deployment\\nprovisioning workspace config validate # Deploy infrastructure\\nprovisioning server create --infra my-app","breadcrumbs":"Workspace Config Commands » Integration with Other Commands","id":"3989","title":"Integration with Other Commands"},"399":{"body":"# List all available modes\\n./provisioning/scripts/setup-platform-config.sh --list-modes\\n# Output: solo, multiuser, cicd, enterprise # List all configurable services\\n./provisioning/scripts/setup-platform-config.sh --list-services\\n# Output: orchestrator, control-center, mcp-server, vault-service, extension-registry, rag, ai-service, provisioning-daemon # List current configurations\\n./provisioning/scripts/setup-platform-config.sh --list-configs\\n# Output: Shows current runtime configurations and their status # Clean all runtime configurations (use with caution)\\n./provisioning/scripts/setup-platform-config.sh --clean\\n# Removes: provisioning/config/runtime/*.ncl\\n# provisioning/config/runtime/generated/*.toml","breadcrumbs":"Platform Service Configuration » Available Configuration Commands","id":"399","title":"Available Configuration Commands"},"3990":{"body":"Always validate after editing : Run workspace config validate after manual edits Use hierarchy to understand precedence : Run workspace config hierarchy to see which config files are being used Generate from templates : Use config generate provider rather than creating configs manually Check before activation : Validate a workspace before activating it as default Use --out json for scripting : JSON output is easier to parse in scripts","breadcrumbs":"Workspace Config Commands » Tips","id":"3990","title":"Tips"},"3991":{"body":"Workspace Initialization Provider Configuration Configuration Architecture","breadcrumbs":"Workspace Config Commands » See Also","id":"3991","title":"See Also"},"3992":{"body":"Version : 1.0.0 Last Updated : 2025-10-06 System Version : 2.0.5+","breadcrumbs":"Workspace Enforcement Guide » Workspace Enforcement and Version Tracking Guide","id":"3992","title":"Workspace Enforcement and Version Tracking Guide"},"3993":{"body":"Overview Workspace Requirement Version Tracking Migration Framework Command Reference Troubleshooting Best Practices","breadcrumbs":"Workspace Enforcement Guide » Table of Contents","id":"3993","title":"Table of Contents"},"3994":{"body":"The provisioning system now enforces mandatory workspace requirements for all infrastructure operations. This ensures: Consistent Environment : All operations run in a well-defined workspace Version Compatibility : Workspaces track provisioning and schema versions Safe Migrations : Automatic migration framework with backup/rollback support Configuration Isolation : Each workspace has isolated configurations and state","breadcrumbs":"Workspace Enforcement Guide » Overview","id":"3994","title":"Overview"},"3995":{"body":"✅ Mandatory Workspace : Most commands require an active workspace ✅ Version Tracking : Workspaces track system, schema, and format versions ✅ Compatibility Checks : Automatic validation before operations ✅ Migration Framework : Safe upgrades with backup/restore ✅ Clear Error Messages : Helpful guidance when workspace is missing or incompatible","breadcrumbs":"Workspace Enforcement Guide » Key Features","id":"3995","title":"Key Features"},"3996":{"body":"","breadcrumbs":"Workspace Enforcement Guide » Workspace Requirement","id":"3996","title":"Workspace Requirement"},"3997":{"body":"Almost all provisioning commands now require an active workspace: Infrastructure : server, taskserv, cluster, infra Orchestration : workflow, batch, orchestrator Development : module, layer, pack Generation : generate Configuration : Most config commands Test : test environment commands","breadcrumbs":"Workspace Enforcement Guide » Commands That Require Workspace","id":"3997","title":"Commands That Require Workspace"},"3998":{"body":"Only informational and workspace management commands work without a workspace: help - Help system version - Show version information workspace - Workspace management commands guide / sc - Documentation and quick reference nu - Start Nushell session nuinfo - Nushell information","breadcrumbs":"Workspace Enforcement Guide » Commands That Don\'t Require Workspace","id":"3998","title":"Commands That Don\'t Require Workspace"},"3999":{"body":"If you run a command without an active workspace, you\'ll see: ✗ Workspace Required No active workspace is configured. To get started: 1. Create a new workspace: provisioning workspace init 2. Or activate an existing workspace: provisioning workspace activate 3. List available workspaces: provisioning workspace list","breadcrumbs":"Workspace Enforcement Guide » What Happens Without a Workspace","id":"3999","title":"What Happens Without a Workspace"},"4":{"body":"Document Description System Overview High-level architecture Multi-Repo Architecture Repository structure and OCI distribution Design Principles Architectural philosophy Integration Patterns System integration patterns Orchestrator Model Hybrid orchestration architecture","breadcrumbs":"Home » 🏗️ Architecture","id":"4","title":"🏗️ Architecture"},"40":{"body":"This project welcomes contributions! See Development Guide for: Development setup Code style guidelines Testing requirements Pull request process","breadcrumbs":"Home » Contributing","id":"40","title":"Contributing"},"400":{"body":"","breadcrumbs":"Platform Service Configuration » Configuration File Locations","id":"400","title":"Configuration File Locations"},"4000":{"body":"","breadcrumbs":"Workspace Enforcement Guide » Version Tracking","id":"4000","title":"Version Tracking"},"4001":{"body":"Each workspace maintains metadata in .provisioning/metadata.yaml: workspace: name: \\"my-workspace\\" path: \\"/path/to/workspace\\" version: provisioning: \\"2.0.5\\" # System version when created/updated schema: \\"1.0.0\\" # KCL schema version workspace_format: \\"2.0.0\\" # Directory structure version created: \\"2025-10-06T12:00:00Z\\"\\nlast_updated: \\"2025-10-06T13:30:00Z\\" migration_history: [] compatibility: min_provisioning_version: \\"2.0.0\\" min_schema_version: \\"1.0.0\\"","breadcrumbs":"Workspace Enforcement Guide » Workspace Metadata","id":"4001","title":"Workspace Metadata"},"4002":{"body":"1. Provisioning Version What : Version of the provisioning system (CLI + libraries) Example : 2.0.5 Purpose : Ensures workspace is compatible with current system 2. Schema Version What : Version of KCL schemas used in workspace Example : 1.0.0 Purpose : Tracks configuration schema compatibility 3. Workspace Format Version What : Version of workspace directory structure Example : 2.0.0 Purpose : Ensures workspace has required directories and files","breadcrumbs":"Workspace Enforcement Guide » Version Components","id":"4002","title":"Version Components"},"4003":{"body":"View workspace version information: # Check active workspace version\\nprovisioning workspace version # Check specific workspace version\\nprovisioning workspace version my-workspace # JSON output\\nprovisioning workspace version --format json Example Output : Workspace Version Information System: Version: 2.0.5 Workspace: Name: my-workspace Path: /Users/user/workspaces/my-workspace Version: 2.0.5 Schema Version: 1.0.0 Format Version: 2.0.0 Created: 2025-10-06T12:00:00Z Last Updated: 2025-10-06T13:30:00Z Compatibility: Compatible: true Reason: version_match Message: Workspace and system versions match Migrations: Total: 0","breadcrumbs":"Workspace Enforcement Guide » Checking Workspace Version","id":"4003","title":"Checking Workspace Version"},"4004":{"body":"","breadcrumbs":"Workspace Enforcement Guide » Migration Framework","id":"4004","title":"Migration Framework"},"4005":{"body":"Migration is required when: No Metadata : Workspace created before version tracking (< 2.0.5) Version Mismatch : System version is newer than workspace version Breaking Changes : Major version update with structural changes","breadcrumbs":"Workspace Enforcement Guide » When Migration is Needed","id":"4005","title":"When Migration is Needed"},"4006":{"body":"Scenario 1: No Metadata (Unknown Version) Workspace version is incompatible: Workspace: my-workspace Path: /path/to/workspace Workspace metadata not found or corrupted This workspace needs migration: Run workspace migration: provisioning workspace migrate my-workspace Scenario 2: Migration Available ℹ Migration available: Workspace can be updated from 2.0.0 to 2.0.5 Run: provisioning workspace migrate my-workspace Scenario 3: Workspace Too New Workspace version (3.0.0) is newer than system (2.0.5) Workspace is newer than the system: Workspace version: 3.0.0 System version: 2.0.5 Upgrade the provisioning system to use this workspace.","breadcrumbs":"Workspace Enforcement Guide » Compatibility Scenarios","id":"4006","title":"Compatibility Scenarios"},"4007":{"body":"Basic Migration Migrate active workspace to current system version: provisioning workspace migrate Migrate Specific Workspace provisioning workspace migrate my-workspace Migration Options # Skip backup (not recommended)\\nprovisioning workspace migrate --skip-backup # Force without confirmation\\nprovisioning workspace migrate --force # Migrate to specific version\\nprovisioning workspace migrate --target-version 2.1.0","breadcrumbs":"Workspace Enforcement Guide » Running Migrations","id":"4007","title":"Running Migrations"},"4008":{"body":"When you run a migration: Validation : System validates workspace exists and needs migration Backup : Creates timestamped backup in .workspace_backups/ Confirmation : Prompts for confirmation (unless --force) Migration : Applies migration steps sequentially Verification : Validates migration success Metadata Update : Records migration in workspace metadata Example Migration Output : Workspace Migration Workspace: my-workspace\\nPath: /path/to/workspace Current version: unknown\\nTarget version: 2.0.5 This will migrate the workspace from unknown to 2.0.5\\nA backup will be created before migration. Continue with migration? (y/N): y Creating backup...\\n✓ Backup created: /path/.workspace_backups/my-workspace_backup_20251006_123000 Migration Strategy: Initialize metadata\\nDescription: Add metadata tracking to existing workspace\\nFrom: unknown → To: 2.0.5 Migrating workspace to version 2.0.5...\\n✓ Initialize metadata completed ✓ Migration completed successfully","breadcrumbs":"Workspace Enforcement Guide » Migration Process","id":"4008","title":"Migration Process"},"4009":{"body":"List Backups # List backups for active workspace\\nprovisioning workspace list-backups # List backups for specific workspace\\nprovisioning workspace list-backups my-workspace Example Output : Workspace Backups for my-workspace name created reason size\\nmy-workspace_backup_20251006_1200 2025-10-06T12:00:00Z pre_migration 2.3 MB\\nmy-workspace_backup_20251005_1500 2025-10-05T15:00:00Z pre_migration 2.1 MB Restore from Backup # Restore workspace from backup\\nprovisioning workspace restore-backup /path/to/backup # Force restore without confirmation\\nprovisioning workspace restore-backup /path/to/backup --force Restore Process : Restore Workspace from Backup Backup: /path/.workspace_backups/my-workspace_backup_20251006_1200\\nOriginal path: /path/to/workspace\\nCreated: 2025-10-06T12:00:00Z\\nReason: pre_migration ⚠ This will replace the current workspace at: /path/to/workspace Continue with restore? (y/N): y ✓ Workspace restored from backup","breadcrumbs":"Workspace Enforcement Guide » Workspace Backups","id":"4009","title":"Workspace Backups"},"401":{"body":"provisioning/schemas/platform/\\n├── schemas/ # Type contracts (Nickel)\\n├── defaults/ # Base configuration values\\n│ └── deployment/ # Mode-specific: solo, multiuser, cicd, enterprise\\n├── validators/ # Business logic validation\\n├── templates/ # Configuration generation templates\\n└── constraints/ # Validation limits","breadcrumbs":"Platform Service Configuration » Public Definitions (Part of repository)","id":"401","title":"Public Definitions (Part of repository)"},"4010":{"body":"","breadcrumbs":"Workspace Enforcement Guide » Command Reference","id":"4010","title":"Command Reference"},"4011":{"body":"# Show workspace version information\\nprovisioning workspace version [workspace-name] [--format table|json|yaml] # Check compatibility\\nprovisioning workspace check-compatibility [workspace-name] # Migrate workspace\\nprovisioning workspace migrate [workspace-name] [--skip-backup] [--force] [--target-version VERSION] # List backups\\nprovisioning workspace list-backups [workspace-name] # Restore from backup\\nprovisioning workspace restore-backup [--force]","breadcrumbs":"Workspace Enforcement Guide » Workspace Version Commands","id":"4011","title":"Workspace Version Commands"},"4012":{"body":"# List all workspaces\\nprovisioning workspace list # Show active workspace\\nprovisioning workspace active # Activate workspace\\nprovisioning workspace activate # Create new workspace (includes metadata initialization)\\nprovisioning workspace init [path] # Register existing workspace\\nprovisioning workspace register # Remove workspace from registry\\nprovisioning workspace remove [--force]","breadcrumbs":"Workspace Enforcement Guide » Workspace Management Commands","id":"4012","title":"Workspace Management Commands"},"4013":{"body":"","breadcrumbs":"Workspace Enforcement Guide » Troubleshooting","id":"4013","title":"Troubleshooting"},"4014":{"body":"Solution : Activate or create a workspace # List available workspaces\\nprovisioning workspace list # Activate existing workspace\\nprovisioning workspace activate my-workspace # Or create new workspace\\nprovisioning workspace init new-workspace","breadcrumbs":"Workspace Enforcement Guide » Problem: \\"No active workspace\\"","id":"4014","title":"Problem: \\"No active workspace\\""},"4015":{"body":"Symptoms : Missing directories or configuration files Solution : Run migration to fix structure provisioning workspace migrate my-workspace","breadcrumbs":"Workspace Enforcement Guide » Problem: \\"Workspace has invalid structure\\"","id":"4015","title":"Problem: \\"Workspace has invalid structure\\""},"4016":{"body":"Solution : Run migration to upgrade workspace provisioning workspace migrate","breadcrumbs":"Workspace Enforcement Guide » Problem: \\"Workspace version is incompatible\\"","id":"4016","title":"Problem: \\"Workspace version is incompatible\\""},"4017":{"body":"Solution : Restore from automatic backup # List backups\\nprovisioning workspace list-backups # Restore from most recent backup\\nprovisioning workspace restore-backup /path/to/backup","breadcrumbs":"Workspace Enforcement Guide » Problem: Migration Failed","id":"4017","title":"Problem: Migration Failed"},"4018":{"body":"Possible Causes : Migration failed partially Workspace path changed Metadata corrupted Solutions : # Check workspace compatibility\\nprovisioning workspace check-compatibility my-workspace # If corrupted, restore from backup\\nprovisioning workspace restore-backup /path/to/backup # If path changed, re-register\\nprovisioning workspace remove my-workspace\\nprovisioning workspace register my-workspace /new/path --activate","breadcrumbs":"Workspace Enforcement Guide » Problem: Can\'t Activate Workspace After Migration","id":"4018","title":"Problem: Can\'t Activate Workspace After Migration"},"4019":{"body":"","breadcrumbs":"Workspace Enforcement Guide » Best Practices","id":"4019","title":"Best Practices"},"402":{"body":"provisioning/config/runtime/ # User-specific deployments\\n├── orchestrator.solo.ncl # Editable config\\n├── orchestrator.multiuser.ncl\\n└── generated/ # Auto-generated, don\'t edit ├── orchestrator.solo.toml # For Rust services └── orchestrator.multiuser.toml","breadcrumbs":"Platform Service Configuration » Private Runtime Configs (Gitignored)","id":"402","title":"Private Runtime Configs (Gitignored)"},"4020":{"body":"Create workspaces for different environments: provisioning workspace init dev ~/workspaces/dev --activate\\nprovisioning workspace init staging ~/workspaces/staging\\nprovisioning workspace init production ~/workspaces/production","breadcrumbs":"Workspace Enforcement Guide » 1. Always Use Named Workspaces","id":"4020","title":"1. Always Use Named Workspaces"},"4021":{"body":"Never use --skip-backup for important workspaces. Backups are cheap, data loss is expensive. # Good: Default with backup\\nprovisioning workspace migrate # Risky: No backup\\nprovisioning workspace migrate --skip-backup # DON\'T DO THIS","breadcrumbs":"Workspace Enforcement Guide » 2. Let System Create Backups","id":"4021","title":"2. Let System Create Backups"},"4022":{"body":"Before major operations, verify workspace compatibility: provisioning workspace check-compatibility","breadcrumbs":"Workspace Enforcement Guide » 3. Check Compatibility Before Operations","id":"4022","title":"3. Check Compatibility Before Operations"},"4023":{"body":"After upgrading the provisioning system: # Check if migration available\\nprovisioning workspace version # Migrate if needed\\nprovisioning workspace migrate","breadcrumbs":"Workspace Enforcement Guide » 4. Migrate After System Upgrades","id":"4023","title":"4. Migrate After System Upgrades"},"4024":{"body":"Don\'t immediately delete old backups: # List backups\\nprovisioning workspace list-backups # Keep at least 2-3 recent backups","breadcrumbs":"Workspace Enforcement Guide » 5. Keep Backups for Safety","id":"4024","title":"5. Keep Backups for Safety"},"4025":{"body":"Initialize git in workspace directory: cd ~/workspaces/my-workspace\\ngit init\\ngit add config/ infra/\\ngit commit -m \\"Initial workspace configuration\\" Exclude runtime and cache directories in .gitignore: .cache/\\n.runtime/\\n.provisioning/\\n.workspace_backups/","breadcrumbs":"Workspace Enforcement Guide » 6. Use Version Control for Workspace Configs","id":"4025","title":"6. Use Version Control for Workspace Configs"},"4026":{"body":"If you need custom migration steps, document them: # Create migration notes\\necho \\"Custom steps for v2 to v3 migration\\" > MIGRATION_NOTES.md","breadcrumbs":"Workspace Enforcement Guide » 7. Document Custom Migrations","id":"4026","title":"7. Document Custom Migrations"},"4027":{"body":"Each migration is recorded in workspace metadata: migration_history: - from_version: \\"unknown\\" to_version: \\"2.0.5\\" migration_type: \\"metadata_initialization\\" timestamp: \\"2025-10-06T12:00:00Z\\" success: true notes: \\"Initial metadata creation\\" - from_version: \\"2.0.5\\" to_version: \\"2.1.0\\" migration_type: \\"version_update\\" timestamp: \\"2025-10-15T10:30:00Z\\" success: true notes: \\"Updated to workspace switching support\\" View migration history: provisioning workspace version --format yaml | grep -A 10 \\"migration_history\\"","breadcrumbs":"Workspace Enforcement Guide » Migration History","id":"4027","title":"Migration History"},"4028":{"body":"The workspace enforcement and version tracking system provides: Safety : Mandatory workspace prevents accidental operations outside defined environments Compatibility : Version tracking ensures workspace works with current system Upgradability : Migration framework handles version transitions safely Recoverability : Automatic backups protect against migration failures Key Commands : # Create workspace\\nprovisioning workspace init my-workspace --activate # Check version\\nprovisioning workspace version # Migrate if needed\\nprovisioning workspace migrate # List backups\\nprovisioning workspace list-backups For more information, see: Workspace Switching Guide : docs/user/WORKSPACE_SWITCHING_GUIDE.md Quick Reference : provisioning sc or provisioning guide quickstart Help System : provisioning help workspace Questions or Issues? Check the troubleshooting section or run: provisioning workspace check-compatibility This will provide specific guidance for your situation.","breadcrumbs":"Workspace Enforcement Guide » Summary","id":"4028","title":"Summary"},"4029":{"body":"Version : 1.0.0 Last Updated : 2025-12-04","breadcrumbs":"Workspace Infra Reference » Unified Workspace:Infrastructure Reference System","id":"4029","title":"Unified Workspace:Infrastructure Reference System"},"403":{"body":"provisioning/config/examples/\\n├── orchestrator.solo.example.ncl # Solo mode reference\\n└── orchestrator.enterprise.example.ncl # Enterprise mode reference","breadcrumbs":"Platform Service Configuration » Examples (Reference)","id":"403","title":"Examples (Reference)"},"4030":{"body":"The Workspace:Infrastructure Reference System provides a unified notation for managing workspaces and their associated infrastructure. This system eliminates the need to specify infrastructure separately and enables convenient defaults.","breadcrumbs":"Workspace Infra Reference » Overview","id":"4030","title":"Overview"},"4031":{"body":"","breadcrumbs":"Workspace Infra Reference » Quick Start","id":"4031","title":"Quick Start"},"4032":{"body":"Use the -ws flag with workspace:infra notation: # Use production workspace with sgoyol infrastructure for this command only\\nprovisioning server list -ws production:sgoyol # Use default infrastructure of active workspace\\nprovisioning taskserv create kubernetes","breadcrumbs":"Workspace Infra Reference » Temporal Override (Single Command)","id":"4032","title":"Temporal Override (Single Command)"},"4033":{"body":"Activate a workspace with a default infrastructure: # Activate librecloud workspace and set wuji as default infra\\nprovisioning workspace activate librecloud:wuji # Now all commands use librecloud:wuji by default\\nprovisioning server list","breadcrumbs":"Workspace Infra Reference » Persistent Activation","id":"4033","title":"Persistent Activation"},"4034":{"body":"","breadcrumbs":"Workspace Infra Reference » Notation Syntax","id":"4034","title":"Notation Syntax"},"4035":{"body":"workspace:infra Part Description Example workspace Workspace name librecloud : Separator - infra Infrastructure name wuji","breadcrumbs":"Workspace Infra Reference » Basic Format","id":"4035","title":"Basic Format"},"4036":{"body":"Notation Workspace Infrastructure librecloud:wuji librecloud wuji production:sgoyol production sgoyol dev:local dev local librecloud librecloud (from default or context)","breadcrumbs":"Workspace Infra Reference » Examples","id":"4036","title":"Examples"},"4037":{"body":"When no infrastructure is explicitly specified, the system uses this priority order: Explicit --infra flag (highest) provisioning server list --infra another-infra PWD Detection cd workspace_librecloud/infra/wuji\\nprovisioning server list # Auto-detects wuji Default Infrastructure # If workspace has default_infra set\\nprovisioning server list # Uses configured default Error (no infra found) # Error: No infrastructure specified","breadcrumbs":"Workspace Infra Reference » Resolution Priority","id":"4037","title":"Resolution Priority"},"4038":{"body":"","breadcrumbs":"Workspace Infra Reference » Usage Patterns","id":"4038","title":"Usage Patterns"},"4039":{"body":"Use -ws to override workspace:infra for a single command: # Currently in librecloud:wuji context\\nprovisioning server list # Shows librecloud:wuji # Temporary override for this command only\\nprovisioning server list -ws production:sgoyol # Shows production:sgoyol # Back to original context\\nprovisioning server list # Shows librecloud:wuji again","breadcrumbs":"Workspace Infra Reference » Pattern 1: Temporal Override for Commands","id":"4039","title":"Pattern 1: Temporal Override for Commands"},"404":{"body":"","breadcrumbs":"Platform Service Configuration » Troubleshooting Configuration","id":"404","title":"Troubleshooting Configuration"},"4040":{"body":"Set a workspace as active with a default infrastructure: # List available workspaces\\nprovisioning workspace list # Activate with infra notation\\nprovisioning workspace activate production:sgoyol # All subsequent commands use production:sgoyol\\nprovisioning server list\\nprovisioning taskserv create kubernetes","breadcrumbs":"Workspace Infra Reference » Pattern 2: Persistent Workspace Activation","id":"4040","title":"Pattern 2: Persistent Workspace Activation"},"4041":{"body":"The system auto-detects workspace and infrastructure from your current directory: # Your workspace structure\\nworkspace_librecloud/ infra/ wuji/ settings.ncl another/ settings.ncl # Navigation auto-detects context\\ncd workspace_librecloud/infra/wuji\\nprovisioning server list # Uses wuji automatically cd ../another\\nprovisioning server list # Switches to another","breadcrumbs":"Workspace Infra Reference » Pattern 3: PWD-Based Inference","id":"4041","title":"Pattern 3: PWD-Based Inference"},"4042":{"body":"Set a workspace-specific default infrastructure: # During activation\\nprovisioning workspace activate librecloud:wuji # Or explicitly after activation\\nprovisioning workspace set-default-infra librecloud another-infra # View current defaults\\nprovisioning workspace list","breadcrumbs":"Workspace Infra Reference » Pattern 4: Default Infrastructure Management","id":"4042","title":"Pattern 4: Default Infrastructure Management"},"4043":{"body":"","breadcrumbs":"Workspace Infra Reference » Command Reference","id":"4043","title":"Command Reference"},"4044":{"body":"# Activate workspace with infra\\nprovisioning workspace activate workspace:infra # Switch to different workspace\\nprovisioning workspace switch workspace_name # List all workspaces\\nprovisioning workspace list # Show active workspace\\nprovisioning workspace active # Set default infrastructure\\nprovisioning workspace set-default-infra workspace_name infra_name # Get default infrastructure\\nprovisioning workspace get-default-infra workspace_name","breadcrumbs":"Workspace Infra Reference » Workspace Commands","id":"4044","title":"Workspace Commands"},"4045":{"body":"# Server operations\\nprovisioning server create -ws workspace:infra\\nprovisioning server list -ws workspace:infra\\nprovisioning server delete name -ws workspace:infra # Task service operations\\nprovisioning taskserv create kubernetes -ws workspace:infra\\nprovisioning taskserv delete kubernetes -ws workspace:infra # Infrastructure operations\\nprovisioning infra validate -ws workspace:infra\\nprovisioning infra list -ws workspace:infra","breadcrumbs":"Workspace Infra Reference » Common Commands with -ws","id":"4045","title":"Common Commands with -ws"},"4046":{"body":"","breadcrumbs":"Workspace Infra Reference » Features","id":"4046","title":"Features"},"4047":{"body":"Single workspace:infra format for all references Works with all provisioning commands Backward compatible with existing workflows","breadcrumbs":"Workspace Infra Reference » ✅ Unified Notation","id":"4047","title":"✅ Unified Notation"},"4048":{"body":"Use -ws flag for single-command overrides No permanent state changes Automatically reverted after command","breadcrumbs":"Workspace Infra Reference » ✅ Temporal Override","id":"4048","title":"✅ Temporal Override"},"4049":{"body":"Set default infrastructure per workspace Eliminates repetitive --infra flags Survives across sessions","breadcrumbs":"Workspace Infra Reference » ✅ Persistent Defaults","id":"4049","title":"✅ Persistent Defaults"},"405":{"body":"# Install Nickel\\n# macOS\\nbrew install nickel # Linux\\ncargo install nickel --version 0.10 # Verify installation\\nnickel --version\\n# Expected: 0.10.0 or higher","breadcrumbs":"Platform Service Configuration » Issue: Script Fails with \\"Nickel not found\\"","id":"405","title":"Issue: Script Fails with \\"Nickel not found\\""},"4050":{"body":"Auto-detects workspace from directory Auto-detects infrastructure from PWD Fallback to configured defaults","breadcrumbs":"Workspace Infra Reference » ✅ Smart Detection","id":"4050","title":"✅ Smart Detection"},"4051":{"body":"Clear error messages when infra not found Validation of workspace and infra existence Helpful hints for missing configurations","breadcrumbs":"Workspace Infra Reference » ✅ Error Handling","id":"4051","title":"✅ Error Handling"},"4052":{"body":"","breadcrumbs":"Workspace Infra Reference » Environment Context","id":"4052","title":"Environment Context"},"4053":{"body":"The system uses $env.TEMP_WORKSPACE for temporal overrides: # Set temporarily (via -ws flag automatically)\\n$env.TEMP_WORKSPACE = \\"production\\" # Check current context\\necho $env.TEMP_WORKSPACE # Clear after use\\nhide-env TEMP_WORKSPACE","breadcrumbs":"Workspace Infra Reference » TEMP_WORKSPACE Variable","id":"4053","title":"TEMP_WORKSPACE Variable"},"4054":{"body":"","breadcrumbs":"Workspace Infra Reference » Validation","id":"4054","title":"Validation"},"4055":{"body":"# Valid notation formats\\nlibrecloud:wuji # Standard format\\nproduction:sgoyol.v2 # With dots and hyphens\\ndev-01:local-test # Multiple hyphens\\nprod123:infra456 # Numeric names # Special characters\\nlib-cloud_01:wu-ji.v2 # Mix of all allowed chars","breadcrumbs":"Workspace Infra Reference » Validating Notation","id":"4055","title":"Validating Notation"},"4056":{"body":"# Workspace not found\\nprovisioning workspace activate unknown:infra\\n# Error: Workspace \'unknown\' not found in registry # Infrastructure not found\\nprovisioning workspace activate librecloud:unknown\\n# Error: Infrastructure \'unknown\' not found in workspace \'librecloud\' # Empty specification\\nprovisioning workspace activate \\"\\"\\n# Error: Workspace \'\' not found in registry","breadcrumbs":"Workspace Infra Reference » Error Cases","id":"4056","title":"Error Cases"},"4057":{"body":"","breadcrumbs":"Workspace Infra Reference » Configuration","id":"4057","title":"Configuration"},"4058":{"body":"Default infrastructure is stored in ~/Library/Application Support/provisioning/user_config.yaml: active_workspace: \\"librecloud\\" workspaces: - name: \\"librecloud\\" path: \\"/Users/you/workspaces/librecloud\\" last_used: \\"2025-12-04T12:00:00Z\\" default_infra: \\"wuji\\" # Default infrastructure - name: \\"production\\" path: \\"/opt/workspaces/production\\" last_used: \\"2025-12-03T15:30:00Z\\" default_infra: \\"sgoyol\\"","breadcrumbs":"Workspace Infra Reference » User Configuration","id":"4058","title":"User Configuration"},"4059":{"body":"In provisioning/schemas/workspace_config.ncl: { InfraConfig = { current | String, # Infrastructure context settings default | String | optional, # Default infrastructure for workspace },\\n}","breadcrumbs":"Workspace Infra Reference » Workspace Schema","id":"4059","title":"Workspace Schema"},"406":{"body":"# Check Nickel syntax\\nnickel typecheck provisioning/config/runtime/orchestrator.solo.ncl # If errors found, view detailed message\\nnickel typecheck -i provisioning/config/runtime/orchestrator.solo.ncl # Try manual export\\nnickel export --format toml provisioning/config/runtime/orchestrator.solo.ncl","breadcrumbs":"Platform Service Configuration » Issue: Configuration Won\'t Generate TOML","id":"406","title":"Issue: Configuration Won\'t Generate TOML"},"4060":{"body":"","breadcrumbs":"Workspace Infra Reference » Best Practices","id":"4060","title":"Best Practices"},"4061":{"body":"# Good: Activate at start of session\\nprovisioning workspace activate production:sgoyol # Then use simple commands\\nprovisioning server list\\nprovisioning taskserv create kubernetes","breadcrumbs":"Workspace Infra Reference » 1. Use Persistent Activation for Long Sessions","id":"4061","title":"1. Use Persistent Activation for Long Sessions"},"4062":{"body":"# Good: Quick one-off operation\\nprovisioning server list -ws production:other-infra # Avoid: Repeated -ws flags\\nprovisioning server list -ws prod:infra1\\nprovisioning taskserv list -ws prod:infra1 # Better to activate once","breadcrumbs":"Workspace Infra Reference » 2. Use Temporal Override for Ad-Hoc Operations","id":"4062","title":"2. Use Temporal Override for Ad-Hoc Operations"},"4063":{"body":"# Good: Navigate to infrastructure directory\\ncd workspace_librecloud/infra/wuji\\nprovisioning server list # Auto-detects context # Works well with: cd - history, terminal multiplexer panes","breadcrumbs":"Workspace Infra Reference » 3. Navigate with PWD for Context Awareness","id":"4063","title":"3. Navigate with PWD for Context Awareness"},"4064":{"body":"# Good: Default to production infrastructure\\nprovisioning workspace activate production:main-infra # Avoid: Default to dev infrastructure in production workspace","breadcrumbs":"Workspace Infra Reference » 4. Set Meaningful Defaults","id":"4064","title":"4. Set Meaningful Defaults"},"4065":{"body":"","breadcrumbs":"Workspace Infra Reference » Troubleshooting","id":"4065","title":"Troubleshooting"},"4066":{"body":"Solution : Register the workspace first provisioning workspace register librecloud /path/to/workspace_librecloud","breadcrumbs":"Workspace Infra Reference » Issue: \\"Workspace not found in registry\\"","id":"4066","title":"Issue: \\"Workspace not found in registry\\""},"4067":{"body":"Solution : Verify infrastructure directory exists ls workspace_librecloud/infra/ # Check available infras\\nprovisioning workspace activate librecloud:wuji # Use correct name","breadcrumbs":"Workspace Infra Reference » Issue: \\"Infrastructure not found\\"","id":"4067","title":"Issue: \\"Infrastructure not found\\""},"4068":{"body":"Solution : Ensure you\'re using -ws flag correctly # Correct\\nprovisioning server list -ws production:sgoyol # Incorrect (missing space)\\nprovisioning server list-wsproduction:sgoyol # Incorrect (ws is not a command)\\nprovisioning -ws production:sgoyol server list","breadcrumbs":"Workspace Infra Reference » Issue: Temporal override not working","id":"4068","title":"Issue: Temporal override not working"},"4069":{"body":"Solution : Navigate to proper infrastructure directory # Must be in workspace structure\\ncd workspace_name/infra/infra_name # Then run command\\nprovisioning server list","breadcrumbs":"Workspace Infra Reference » Issue: PWD detection not working","id":"4069","title":"Issue: PWD detection not working"},"407":{"body":"# Verify TOML file exists\\nls -la provisioning/config/runtime/generated/orchestrator.solo.toml # Verify file is valid TOML\\nhead -20 provisioning/config/runtime/generated/orchestrator.solo.toml # Check service is looking in right location\\necho $ORCHESTRATOR_MODE # Should be set to \'solo\', \'multiuser\', etc. # Verify environment variable is correct\\nexport ORCHESTRATOR_MODE=solo\\ncargo run -p orchestrator --verbose","breadcrumbs":"Platform Service Configuration » Issue: Service Can\'t Read Configuration","id":"407","title":"Issue: Service Can\'t Read Configuration"},"4070":{"body":"","breadcrumbs":"Workspace Infra Reference » Migration from Old System","id":"4070","title":"Migration from Old System"},"4071":{"body":"provisioning workspace activate librecloud\\nprovisioning --infra wuji server list\\nprovisioning --infra wuji taskserv create kubernetes","breadcrumbs":"Workspace Infra Reference » Old Way","id":"4071","title":"Old Way"},"4072":{"body":"provisioning workspace activate librecloud:wuji\\nprovisioning server list\\nprovisioning taskserv create kubernetes","breadcrumbs":"Workspace Infra Reference » New Way","id":"4072","title":"New Way"},"4073":{"body":"Notation parsing : <1 ms per command Workspace detection : <5 ms from PWD Workspace switching : ~100 ms (includes platform activation) Temporal override : No additional overhead","breadcrumbs":"Workspace Infra Reference » Performance Notes","id":"4073","title":"Performance Notes"},"4074":{"body":"All existing commands and flags continue to work: # Old syntax still works\\nprovisioning --infra wuji server list # New syntax also works\\nprovisioning server list -ws librecloud:wuji # Mix and match\\nprovisioning --infra other-infra server list -ws librecloud:wuji\\n# Uses other-infra (explicit flag takes priority)","breadcrumbs":"Workspace Infra Reference » Backward Compatibility","id":"4074","title":"Backward Compatibility"},"4075":{"body":"provisioning help workspace - Workspace commands provisioning help infra - Infrastructure commands docs/architecture/ARCHITECTURE_OVERVIEW.md - Overall architecture docs/user/WORKSPACE_SWITCHING_GUIDE.md - Workspace switching details","breadcrumbs":"Workspace Infra Reference » See Also","id":"4075","title":"See Also"},"4076":{"body":"Version : 1.0.0 Date : 2025-10-09 Status : Production Ready","breadcrumbs":"Authentication Layer Guide » Authentication Layer Implementation Guide","id":"4076","title":"Authentication Layer Implementation Guide"},"4077":{"body":"A comprehensive authentication layer has been integrated into the provisioning system to secure sensitive operations. The system uses nu_plugin_auth for JWT authentication with MFA support, providing enterprise-grade security with graceful user experience.","breadcrumbs":"Authentication Layer Guide » Overview","id":"4077","title":"Overview"},"4078":{"body":"","breadcrumbs":"Authentication Layer Guide » Key Features","id":"4078","title":"Key Features"},"4079":{"body":"RS256 asymmetric signing Access tokens (15 min) + refresh tokens (7 d) OS keyring storage (macOS Keychain, Windows Credential Manager, Linux Secret Service)","breadcrumbs":"Authentication Layer Guide » ✅ JWT Authentication","id":"4079","title":"✅ JWT Authentication"},"408":{"body":"# If you edited .ncl file manually, TOML must be regenerated\\n./provisioning/scripts/setup-platform-config.sh --generate-toml # Verify new TOML was created\\nstat provisioning/config/runtime/generated/orchestrator.solo.toml # Check modification time (should be recent)\\nls -lah provisioning/config/runtime/generated/orchestrator.solo.toml","breadcrumbs":"Platform Service Configuration » Issue: Services Won\'t Start After Config Change","id":"408","title":"Issue: Services Won\'t Start After Config Change"},"4080":{"body":"TOTP (Google Authenticator, Authy) WebAuthn/FIDO2 (YubiKey, Touch ID) Required for production and destructive operations","breadcrumbs":"Authentication Layer Guide » ✅ MFA Support","id":"4080","title":"✅ MFA Support"},"4081":{"body":"Production environment : Requires authentication + MFA Destructive operations : Requires authentication + MFA (delete, destroy) Development/test : Requires authentication, allows skip with flag Check mode : Always bypasses authentication (dry-run operations)","breadcrumbs":"Authentication Layer Guide » ✅ Security Policies","id":"4081","title":"✅ Security Policies"},"4082":{"body":"All authenticated operations logged User, timestamp, operation details MFA verification status JSON format for easy parsing","breadcrumbs":"Authentication Layer Guide » ✅ Audit Logging","id":"4082","title":"✅ Audit Logging"},"4083":{"body":"Clear instructions for login/MFA Distinct error types (platform auth vs provider auth) Helpful guidance for setup","breadcrumbs":"Authentication Layer Guide » ✅ User-Friendly Error Messages","id":"4083","title":"✅ User-Friendly Error Messages"},"4084":{"body":"","breadcrumbs":"Authentication Layer Guide » Quick Start","id":"4084","title":"Quick Start"},"4085":{"body":"# Interactive login (password prompt)\\nprovisioning auth login # Save credentials to keyring\\nprovisioning auth login --save # Custom control center URL\\nprovisioning auth login admin --url http://control.example.com:9080","breadcrumbs":"Authentication Layer Guide » 1. Login to Platform","id":"4085","title":"1. Login to Platform"},"4086":{"body":"# Enroll TOTP (Google Authenticator)\\nprovisioning auth mfa enroll totp # Scan QR code with authenticator app\\n# Or enter secret manually","breadcrumbs":"Authentication Layer Guide » 2. Enroll MFA (First Time)","id":"4086","title":"2. Enroll MFA (First Time)"},"4087":{"body":"# Get 6-digit code from authenticator app\\nprovisioning auth mfa verify --code 123456","breadcrumbs":"Authentication Layer Guide » 3. Verify MFA (For Sensitive Operations)","id":"4087","title":"3. Verify MFA (For Sensitive Operations)"},"4088":{"body":"# View current authentication status\\nprovisioning auth status # Verify token is valid\\nprovisioning auth verify","breadcrumbs":"Authentication Layer Guide » 4. Check Authentication Status","id":"4088","title":"4. Check Authentication Status"},"4089":{"body":"","breadcrumbs":"Authentication Layer Guide » Protected Operations","id":"4089","title":"Protected Operations"},"409":{"body":"","breadcrumbs":"Platform Service Configuration » Important Notes","id":"409","title":"Important Notes"},"4090":{"body":"# ✅ CREATE - Requires auth (prod: +MFA)\\nprovisioning server create web-01 # Auth required\\nprovisioning server create web-01 --check # Auth skipped (check mode) # ❌ DELETE - Requires auth + MFA\\nprovisioning server delete web-01 # Auth + MFA required\\nprovisioning server delete web-01 --check # Auth skipped (check mode) # 📖 READ - No auth required\\nprovisioning server list # No auth required\\nprovisioning server ssh web-01 # No auth required","breadcrumbs":"Authentication Layer Guide » Server Operations","id":"4090","title":"Server Operations"},"4091":{"body":"# ✅ CREATE - Requires auth (prod: +MFA)\\nprovisioning taskserv create kubernetes # Auth required\\nprovisioning taskserv create kubernetes --check # Auth skipped # ❌ DELETE - Requires auth + MFA\\nprovisioning taskserv delete kubernetes # Auth + MFA required # 📖 READ - No auth required\\nprovisioning taskserv list # No auth required","breadcrumbs":"Authentication Layer Guide » Task Service Operations","id":"4091","title":"Task Service Operations"},"4092":{"body":"# ✅ CREATE - Requires auth (prod: +MFA)\\nprovisioning cluster create buildkit # Auth required\\nprovisioning cluster create buildkit --check # Auth skipped # ❌ DELETE - Requires auth + MFA\\nprovisioning cluster delete buildkit # Auth + MFA required","breadcrumbs":"Authentication Layer Guide » Cluster Operations","id":"4092","title":"Cluster Operations"},"4093":{"body":"# ✅ SUBMIT - Requires auth (prod: +MFA)\\nprovisioning batch submit workflow.ncl # Auth required\\nprovisioning batch submit workflow.ncl --skip-auth # Auth skipped (if allowed) # 📖 READ - No auth required\\nprovisioning batch list # No auth required\\nprovisioning batch status # No auth required","breadcrumbs":"Authentication Layer Guide » Batch Workflows","id":"4093","title":"Batch Workflows"},"4094":{"body":"","breadcrumbs":"Authentication Layer Guide » Configuration","id":"4094","title":"Configuration"},"4095":{"body":"[security]\\nrequire_auth = true # Enable authentication system\\nrequire_mfa_for_production = true # MFA for prod environment\\nrequire_mfa_for_destructive = true # MFA for delete operations\\nauth_timeout = 3600 # Token timeout (1 hour)\\naudit_log_path = \\"{{paths.base}}/logs/audit.log\\" [security.bypass]\\nallow_skip_auth = false # Allow PROVISIONING_SKIP_AUTH env var [plugins]\\nauth_enabled = true # Enable nu_plugin_auth [platform.control_center]\\nurl = \\"http://localhost:9080\\" # Control center URL","breadcrumbs":"Authentication Layer Guide » Security Settings (config.defaults.toml)","id":"4095","title":"Security Settings (config.defaults.toml)"},"4096":{"body":"# Development\\n[environments.dev]\\nsecurity.bypass.allow_skip_auth = true # Allow auth bypass in dev # Production\\n[environments.prod]\\nsecurity.bypass.allow_skip_auth = false # Never allow bypass\\nsecurity.require_mfa_for_production = true","breadcrumbs":"Authentication Layer Guide » Environment-Specific Configuration","id":"4096","title":"Environment-Specific Configuration"},"4097":{"body":"","breadcrumbs":"Authentication Layer Guide » Authentication Bypass (Dev/Test Only)","id":"4097","title":"Authentication Bypass (Dev/Test Only)"},"4098":{"body":"# Export environment variable (dev/test only)\\nexport PROVISIONING_SKIP_AUTH=true # Run operations without authentication\\nprovisioning server create web-01 # Unset when done\\nunset PROVISIONING_SKIP_AUTH","breadcrumbs":"Authentication Layer Guide » Environment Variable Method","id":"4098","title":"Environment Variable Method"},"4099":{"body":"# Some commands support --skip-auth flag\\nprovisioning batch submit workflow.ncl --skip-auth","breadcrumbs":"Authentication Layer Guide » Per-Command Flag","id":"4099","title":"Per-Command Flag"},"41":{"body":"[Add license information]","breadcrumbs":"Home » License","id":"41","title":"License"},"410":{"body":"Files in provisioning/config/runtime/ are gitignored because: May contain encrypted secrets or credentials Deployment-specific (different per environment) User-customized (each developer/machine has different needs)","breadcrumbs":"Platform Service Configuration » 🔒 Runtime Configurations Are Private","id":"410","title":"🔒 Runtime Configurations Are Private"},"4100":{"body":"# Check mode is always allowed without auth\\nprovisioning server create web-01 --check\\nprovisioning taskserv create kubernetes --check ⚠️ WARNING : Auth bypass is ONLY for development/testing. Production systems must have security.bypass.allow_skip_auth = false.","breadcrumbs":"Authentication Layer Guide » Check Mode (Always Bypasses Auth)","id":"4100","title":"Check Mode (Always Bypasses Auth)"},"4101":{"body":"","breadcrumbs":"Authentication Layer Guide » Error Messages","id":"4101","title":"Error Messages"},"4102":{"body":"❌ Authentication Required Operation: server create web-01\\nYou must be logged in to perform this operation. To login: provisioning auth login Note: Your credentials will be securely stored in the system keyring. Solution : Run provisioning auth login ","breadcrumbs":"Authentication Layer Guide » Not Authenticated","id":"4102","title":"Not Authenticated"},"4103":{"body":"❌ MFA Verification Required Operation: server delete web-01\\nReason: destructive operation (delete/destroy) To verify MFA: 1. Get code from your authenticator app 2. Run: provisioning auth mfa verify --code <6-digit-code> Don\'t have MFA set up? Run: provisioning auth mfa enroll totp Solution : Run provisioning auth mfa verify --code 123456","breadcrumbs":"Authentication Layer Guide » MFA Required","id":"4103","title":"MFA Required"},"4104":{"body":"❌ Authentication Required Operation: server create web-02\\nYou must be logged in to perform this operation. Error: Token verification failed Solution : Token expired, re-login with provisioning auth login ","breadcrumbs":"Authentication Layer Guide » Token Expired","id":"4104","title":"Token Expired"},"4105":{"body":"All authenticated operations are logged to the audit log file with the following information: { \\"timestamp\\": \\"2025-10-09 14:32:15\\", \\"user\\": \\"admin\\", \\"operation\\": \\"server_create\\", \\"details\\": { \\"hostname\\": \\"web-01\\", \\"infra\\": \\"production\\", \\"environment\\": \\"prod\\", \\"orchestrated\\": false }, \\"mfa_verified\\": true\\n}","breadcrumbs":"Authentication Layer Guide » Audit Logging","id":"4105","title":"Audit Logging"},"4106":{"body":"# View raw audit log\\ncat provisioning/logs/audit.log # Filter by user\\ncat provisioning/logs/audit.log | jq \'. | select(.user == \\"admin\\")\' # Filter by operation type\\ncat provisioning/logs/audit.log | jq \'. | select(.operation == \\"server_create\\")\' # Filter by date\\ncat provisioning/logs/audit.log | jq \'. | select(.timestamp | startswith(\\"2025-10-09\\"))\'","breadcrumbs":"Authentication Layer Guide » Viewing Audit Logs","id":"4106","title":"Viewing Audit Logs"},"4107":{"body":"The authentication system integrates with the provisioning platform\'s control center REST API: POST /api/auth/login - Login with credentials POST /api/auth/logout - Revoke tokens POST /api/auth/verify - Verify token validity GET /api/auth/sessions - List active sessions POST /api/mfa/enroll - Enroll MFA device POST /api/mfa/verify - Verify MFA code","breadcrumbs":"Authentication Layer Guide » Integration with Control Center","id":"4107","title":"Integration with Control Center"},"4108":{"body":"# Start control center (required for authentication)\\ncd provisioning/platform/control-center\\ncargo run --release Or use the orchestrator which includes control center: cd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background","breadcrumbs":"Authentication Layer Guide » Starting Control Center","id":"4108","title":"Starting Control Center"},"4109":{"body":"","breadcrumbs":"Authentication Layer Guide » Testing Authentication","id":"4109","title":"Testing Authentication"},"411":{"body":"Files in provisioning/schemas/platform/ are version-controlled because: Define product structure and constraints Part of official releases Source of truth for configuration format Shared across the team","breadcrumbs":"Platform Service Configuration » 📘 Schemas Are Public","id":"411","title":"📘 Schemas Are Public"},"4110":{"body":"# 1. Start control center\\ncd provisioning/platform/control-center\\ncargo run --release & # 2. Login\\nprovisioning auth login admin # 3. Try creating server (should succeed if authenticated)\\nprovisioning server create test-server --check # 4. Logout\\nprovisioning auth logout # 5. Try creating server (should fail - not authenticated)\\nprovisioning server create test-server --check","breadcrumbs":"Authentication Layer Guide » Manual Testing","id":"4110","title":"Manual Testing"},"4111":{"body":"# Run authentication tests\\nnu provisioning/core/nulib/lib_provisioning/plugins/auth_test.nu","breadcrumbs":"Authentication Layer Guide » Automated Testing","id":"4111","title":"Automated Testing"},"4112":{"body":"","breadcrumbs":"Authentication Layer Guide » Troubleshooting","id":"4112","title":"Troubleshooting"},"4113":{"body":"Error : Authentication plugin not available Solution : Check plugin is built: ls provisioning/core/plugins/nushell-plugins/nu_plugin_auth/target/release/ Register plugin: plugin add target/release/nu_plugin_auth Use plugin: plugin use auth Verify: which auth","breadcrumbs":"Authentication Layer Guide » Plugin Not Available","id":"4113","title":"Plugin Not Available"},"4114":{"body":"Error : Cannot connect to control center Solution : Start control center: cd provisioning/platform/control-center && cargo run --release Or use orchestrator: cd provisioning/platform/orchestrator && ./scripts/start-orchestrator.nu --background Check URL is correct in config: provisioning config get platform.control_center.url","breadcrumbs":"Authentication Layer Guide » Control Center Not Running","id":"4114","title":"Control Center Not Running"},"4115":{"body":"Error : Invalid MFA code Solutions : Ensure time is synchronized (TOTP codes are time-based) Code expires every 30 seconds, get fresh code Verify you\'re using the correct authenticator app entry Re-enroll if needed: provisioning auth mfa enroll totp","breadcrumbs":"Authentication Layer Guide » MFA Not Working","id":"4115","title":"MFA Not Working"},"4116":{"body":"Error : Keyring storage unavailable macOS : Grant Keychain access to Terminal/iTerm2 in System Preferences → Security & Privacy Linux : Ensure gnome-keyring or kwallet is running Windows : Check Windows Credential Manager is accessible","breadcrumbs":"Authentication Layer Guide » Keyring Access Issues","id":"4116","title":"Keyring Access Issues"},"4117":{"body":"","breadcrumbs":"Authentication Layer Guide » Architecture","id":"4117","title":"Architecture"},"4118":{"body":"┌─────────────┐\\n│ User Command│\\n└──────┬──────┘ │ ▼\\n┌─────────────────────────────────┐\\n│ Infrastructure Command Handler │\\n│ (infrastructure.nu) │\\n└──────┬──────────────────────────┘ │ ▼\\n┌─────────────────────────────────┐\\n│ Auth Check │\\n│ - Determine operation type │\\n│ - Check if auth required │\\n│ - Check environment (prod/dev) │\\n└──────┬──────────────────────────┘ │ ▼\\n┌─────────────────────────────────┐\\n│ Auth Plugin Wrapper │\\n│ (auth.nu) │\\n│ - Call plugin or HTTP fallback │\\n│ - Verify token validity │\\n│ - Check MFA if required │\\n└──────┬──────────────────────────┘ │ ▼\\n┌─────────────────────────────────┐\\n│ nu_plugin_auth │\\n│ - JWT verification (RS256) │\\n│ - Keyring token storage │\\n│ - MFA verification │\\n└──────┬──────────────────────────┘ │ ▼\\n┌─────────────────────────────────┐\\n│ Control Center API │\\n│ - /api/auth/verify │\\n│ - /api/mfa/verify │\\n└──────┬──────────────────────────┘ │ ▼\\n┌─────────────────────────────────┐\\n│ Operation Execution │\\n│ (servers/create.nu, etc.) │\\n└──────┬──────────────────────────┘ │ ▼\\n┌─────────────────────────────────┐\\n│ Audit Logging │\\n│ - Log to audit.log │\\n│ - Include user, timestamp, MFA │\\n└─────────────────────────────────┘","breadcrumbs":"Authentication Layer Guide » Authentication Flow","id":"4118","title":"Authentication Flow"},"4119":{"body":"provisioning/\\n├── config/\\n│ └── config.defaults.toml # Security configuration\\n├── core/nulib/\\n│ ├── lib_provisioning/plugins/\\n│ │ └── auth.nu # Auth wrapper (550 lines)\\n│ ├── servers/\\n│ │ └── create.nu # Server ops with auth\\n│ ├── workflows/\\n│ │ └── batch.nu # Batch workflows with auth\\n│ └── main_provisioning/commands/\\n│ └── infrastructure.nu # Infrastructure commands with auth\\n├── core/plugins/nushell-plugins/\\n│ └── nu_plugin_auth/ # Native Rust plugin\\n│ ├── src/\\n│ │ ├── main.rs # Plugin implementation\\n│ │ └── helpers.rs # Helper functions\\n│ └── README.md # Plugin documentation\\n├── platform/control-center/ # Control Center (Rust)\\n│ └── src/auth/ # JWT auth implementation\\n└── logs/ └── audit.log # Audit trail","breadcrumbs":"Authentication Layer Guide » File Structure","id":"4119","title":"File Structure"},"412":{"body":"The setup script is safe to run multiple times: # Safe: Updates only what\'s needed\\n./provisioning/scripts/setup-platform-config.sh --quick-mode --mode enterprise # Safe: Doesn\'t overwrite without --clean\\n./provisioning/scripts/setup-platform-config.sh --generate-toml # Only deletes on explicit request\\n./provisioning/scripts/setup-platform-config.sh --clean","breadcrumbs":"Platform Service Configuration » 🔄 Configuration Is Idempotent","id":"412","title":"🔄 Configuration Is Idempotent"},"4120":{"body":"Security System Overview : docs/architecture/adr-009-security-system-complete.md JWT Authentication : docs/architecture/JWT_AUTH_IMPLEMENTATION.md MFA Implementation : docs/architecture/MFA_IMPLEMENTATION_SUMMARY.md Plugin README : provisioning/core/plugins/nushell-plugins/nu_plugin_auth/README.md Control Center : provisioning/platform/control-center/README.md","breadcrumbs":"Authentication Layer Guide » Related Documentation","id":"4120","title":"Related Documentation"},"4121":{"body":"File Changes Lines Added lib_provisioning/plugins/auth.nu Added security policy enforcement functions +260 config/config.defaults.toml Added security configuration section +19 servers/create.nu Added auth check for server creation +25 workflows/batch.nu Added auth check for batch workflow submission +43 main_provisioning/commands/infrastructure.nu Added auth checks for all infrastructure commands +90 lib_provisioning/providers/interface.nu Added authentication guidelines for providers +65 Total 6 files modified ~500 lines","breadcrumbs":"Authentication Layer Guide » Summary of Changes","id":"4121","title":"Summary of Changes"},"4122":{"body":"","breadcrumbs":"Authentication Layer Guide » Best Practices","id":"4122","title":"Best Practices"},"4123":{"body":"Always login : Keep your session active to avoid interruptions Use keyring : Save credentials with --save flag for persistence Enable MFA : Use MFA for production operations Check mode first : Always test with --check before actual operations Monitor audit logs : Review audit logs regularly for security","breadcrumbs":"Authentication Layer Guide » For Users","id":"4123","title":"For Users"},"4124":{"body":"Check auth early : Verify authentication before expensive operations Log operations : Always log authenticated operations for audit Clear error messages : Provide helpful guidance for auth failures Respect check mode : Always skip auth in check/dry-run mode Test both paths : Test with and without authentication","breadcrumbs":"Authentication Layer Guide » For Developers","id":"4124","title":"For Developers"},"4125":{"body":"Production hardening : Set allow_skip_auth = false in production MFA enforcement : Require MFA for all production environments Monitor audit logs : Set up log monitoring and alerts Token rotation : Configure short token timeouts (15 min default) Backup authentication : Ensure multiple admins have MFA enrolled","breadcrumbs":"Authentication Layer Guide » For Operators","id":"4125","title":"For Operators"},"4126":{"body":"MIT License - See LICENSE file for details","breadcrumbs":"Authentication Layer Guide » License","id":"4126","title":"License"},"4127":{"body":"Version : 1.0.0 Last Updated : 2025-10-09","breadcrumbs":"Authentication Layer Guide » Quick Reference","id":"4127","title":"Quick Reference"},"4128":{"body":"Login provisioning auth login # Interactive password\\nprovisioning auth login --save # Save to keyring MFA provisioning auth mfa enroll totp # Enroll TOTP\\nprovisioning auth mfa verify --code 123456 # Verify code Status provisioning auth status # Show auth status\\nprovisioning auth verify # Verify token Logout provisioning auth logout # Logout current session\\nprovisioning auth logout --all # Logout all sessions","breadcrumbs":"Authentication Layer Guide » Quick Commands","id":"4128","title":"Quick Commands"},"4129":{"body":"Operation Auth MFA (Prod) MFA (Delete) Check Mode server create ✅ ✅ ❌ Skip server delete ✅ ✅ ✅ Skip server list ❌ ❌ ❌ - taskserv create ✅ ✅ ❌ Skip taskserv delete ✅ ✅ ✅ Skip cluster create ✅ ✅ ❌ Skip cluster delete ✅ ✅ ✅ Skip batch submit ✅ ✅ ❌ -","breadcrumbs":"Authentication Layer Guide » Protected Operations","id":"4129","title":"Protected Operations"},"413":{"body":"The full provisioning installer (provisioning/scripts/install.sh) is not yet implemented . Currently: ✅ Configuration setup script is standalone and ready to use ⏳ Full installer integration is planned for future release ✅ Manual workflow works perfectly without installer ✅ CI/CD integration available now","breadcrumbs":"Platform Service Configuration » ⚠️ Installer Status","id":"413","title":"⚠️ Installer Status"},"4130":{"body":"Environment Variable export PROVISIONING_SKIP_AUTH=true\\nprovisioning server create test\\nunset PROVISIONING_SKIP_AUTH Check Mode (Always Allowed) provisioning server create prod --check\\nprovisioning taskserv delete k8s --check Config Flag [security.bypass]\\nallow_skip_auth = true # Only in dev/test","breadcrumbs":"Authentication Layer Guide » Bypass Authentication (Dev/Test Only)","id":"4130","title":"Bypass Authentication (Dev/Test Only)"},"4131":{"body":"Security Settings [security]\\nrequire_auth = true\\nrequire_mfa_for_production = true\\nrequire_mfa_for_destructive = true\\nauth_timeout = 3600 [security.bypass]\\nallow_skip_auth = false # true in dev only [plugins]\\nauth_enabled = true [platform.control_center]\\nurl = \\"http://localhost:3000\\"","breadcrumbs":"Authentication Layer Guide » Configuration","id":"4131","title":"Configuration"},"4132":{"body":"Not Authenticated ❌ Authentication Required\\nOperation: server create web-01\\nTo login: provisioning auth login Fix : provisioning auth login MFA Required ❌ MFA Verification Required\\nOperation: server delete web-01\\nReason: destructive operation Fix : provisioning auth mfa verify --code Token Expired Error: Token verification failed Fix : Re-login: provisioning auth login ","breadcrumbs":"Authentication Layer Guide » Error Messages","id":"4132","title":"Error Messages"},"4133":{"body":"Error Solution Plugin not available plugin add target/release/nu_plugin_auth Control center offline Start: cd provisioning/platform/control-center && cargo run Invalid MFA code Get fresh code (expires in 30s) Token expired Re-login: provisioning auth login Keyring access denied Grant app access in system settings","breadcrumbs":"Authentication Layer Guide » Troubleshooting","id":"4133","title":"Troubleshooting"},"4134":{"body":"# View audit log\\ncat provisioning/logs/audit.log # Filter by user\\ncat provisioning/logs/audit.log | jq \'. | select(.user == \\"admin\\")\' # Filter by operation\\ncat provisioning/logs/audit.log | jq \'. | select(.operation == \\"server_create\\")\'","breadcrumbs":"Authentication Layer Guide » Audit Logs","id":"4134","title":"Audit Logs"},"4135":{"body":"Option 1: Skip Auth (Dev/Test Only) export PROVISIONING_SKIP_AUTH=true\\nprovisioning server create ci-server Option 2: Check Mode provisioning server create ci-server --check Option 3: Service Account (Future) export PROVISIONING_AUTH_TOKEN=\\"\\"\\nprovisioning server create ci-server","breadcrumbs":"Authentication Layer Guide » CI/CD Integration","id":"4135","title":"CI/CD Integration"},"4136":{"body":"Operation Auth Overhead Server create ~20 ms Taskserv create ~20 ms Batch submit ~20 ms Check mode 0 ms (skipped)","breadcrumbs":"Authentication Layer Guide » Performance","id":"4136","title":"Performance"},"4137":{"body":"Full Guide : docs/user/AUTHENTICATION_LAYER_GUIDE.md Implementation : AUTHENTICATION_LAYER_IMPLEMENTATION_SUMMARY.md Security ADR : docs/architecture/adr-009-security-system-complete.md Quick Help : provisioning help auth or provisioning auth --help Last Updated : 2025-10-09 Maintained By : Security Team","breadcrumbs":"Authentication Layer Guide » Related Docs","id":"4137","title":"Related Docs"},"4138":{"body":"","breadcrumbs":"Authentication Layer Guide » Setup Guide","id":"4138","title":"Setup Guide"},"4139":{"body":"Current Settings (from your config) [security]\\nrequire_auth = true # ✅ Auth is REQUIRED\\nallow_skip_auth = false # ❌ Cannot skip with env var\\nauth_timeout = 3600 # Token valid for 1 hour [platform.control_center]\\nurl = \\"http://localhost:3000\\" # Control Center endpoint","breadcrumbs":"Authentication Layer Guide » Complete Authentication Setup Guide","id":"4139","title":"Complete Authentication Setup Guide"},"414":{"body":"After completing platform configuration: Run Services : Start your platform services with configured settings Access Web UI : Open Control Center at http://localhost:8080 (default) Create First Infrastructure : Deploy your first servers and clusters Set Up Extensions : Configure providers and task services for your needs Backup Configuration : Back up runtime configs to private repository","breadcrumbs":"Platform Service Configuration » Next Steps","id":"414","title":"Next Steps"},"4140":{"body":"The Control Center is the authentication backend: # Check if it\'s already running\\ncurl http://localhost:3000/health # If not running, start it\\ncd /Users/Akasha/project-provisioning/provisioning/platform/control-center\\ncargo run --release & # Wait for it to start (may take 30-60 seconds)\\nsleep 30\\ncurl http://localhost:3000/health Expected Output: {\\"status\\": \\"healthy\\"}","breadcrumbs":"Authentication Layer Guide » STEP 1: Start Control Center","id":"4140","title":"STEP 1: Start Control Center"},"4141":{"body":"Check for default user setup: # Look for initialization scripts\\nls -la /Users/Akasha/project-provisioning/provisioning/platform/control-center/ # Check for README or setup instructions\\ncat /Users/Akasha/project-provisioning/provisioning/platform/control-center/README.md # Or check for default config\\ncat /Users/Akasha/project-provisioning/provisioning/platform/control-center/config.toml 2>/dev/null || echo \\"Config not found\\"","breadcrumbs":"Authentication Layer Guide » STEP 2: Find Default Credentials","id":"4141","title":"STEP 2: Find Default Credentials"},"4142":{"body":"Once you have credentials (usually admin / password from setup): # Interactive login - will prompt for password\\nprovisioning auth login # Or with username\\nprovisioning auth login admin # Verify you\'re logged in\\nprovisioning auth status Expected Success Output: ✓ Login successful! User: admin\\nRole: admin\\nExpires: 2025-10-22T14:30:00Z\\nMFA: false Session active and ready","breadcrumbs":"Authentication Layer Guide » STEP 3: Log In","id":"4142","title":"STEP 3: Log In"},"4143":{"body":"Once authenticated: # Try server creation again\\nprovisioning server create sgoyol --check # Or with full details\\nprovisioning server create sgoyol --infra workspace_librecloud --check","breadcrumbs":"Authentication Layer Guide » STEP 4: Now Create Your Server","id":"4143","title":"STEP 4: Now Create Your Server"},"4144":{"body":"If you want to bypass authentication temporarily for testing: Option A: Edit config to allow skip # You would need to parse and modify TOML - easier to do next option Option B: Use environment variable (if allowed by config) export PROVISIONING_SKIP_AUTH=true\\nprovisioning server create sgoyol\\nunset PROVISIONING_SKIP_AUTH Option C: Use check mode (always works, no auth needed) provisioning server create sgoyol --check Option D: Modify config.defaults.toml (permanent for dev) Edit: provisioning/config/config.defaults.toml Change line 193 to: allow_skip_auth = true","breadcrumbs":"Authentication Layer Guide » 🛠️ Alternative: Skip Auth for Development","id":"4144","title":"🛠️ Alternative: Skip Auth for Development"},"4145":{"body":"Problem Solution Control Center won\'t start Check port 3000 not in use: lsof -i :3000 \\"No token found\\" error Login with: provisioning auth login Login fails Verify Control Center is running: curl http://localhost:3000/health Token expired Re-login: provisioning auth login Plugin not available Using HTTP fallback - this is OK, works without plugin","breadcrumbs":"Authentication Layer Guide » 🔍 Troubleshooting","id":"4145","title":"🔍 Troubleshooting"},"4146":{"body":"Version : 1.0.0 Last Updated : 2025-10-08 Status : Production Ready","breadcrumbs":"Config Encryption Guide » Configuration Encryption Guide","id":"4146","title":"Configuration Encryption Guide"},"4147":{"body":"The Provisioning Platform includes a comprehensive configuration encryption system that provides: Transparent Encryption/Decryption : Configs are automatically decrypted on load Multiple KMS Backends : Age, AWS KMS, HashiCorp Vault, Cosmian KMS Memory-Only Decryption : Secrets never written to disk in plaintext SOPS Integration : Industry-standard encryption with SOPS Sensitive Data Detection : Automatic scanning for unencrypted sensitive data","breadcrumbs":"Config Encryption Guide » Overview","id":"4147","title":"Overview"},"4148":{"body":"Prerequisites Quick Start Configuration Encryption KMS Backends CLI Commands Integration with Config Loader Best Practices Troubleshooting","breadcrumbs":"Config Encryption Guide » Table of Contents","id":"4148","title":"Table of Contents"},"4149":{"body":"","breadcrumbs":"Config Encryption Guide » Prerequisites","id":"4149","title":"Prerequisites"},"415":{"body":"Setup Status & Current System Status - Quick reference for system readiness Configuration README - Detailed configuration management guide Setup Script Documentation - Complete script reference TypeDialog Platform Config Guide - Advanced configuration topics Deployment Guide - Production deployment procedures Version : 1.0.0 Last Updated : 2026-01-05 Difficulty : Beginner to Intermediate","breadcrumbs":"Platform Service Configuration » Additional Resources","id":"415","title":"Additional Resources"},"4150":{"body":"SOPS (v3.10.2+) # macOS\\nbrew install sops # Linux\\nwget https://github.com/mozilla/sops/releases/download/v3.10.2/sops-v3.10.2.linux.amd64\\nsudo mv sops-v3.10.2.linux.amd64 /usr/local/bin/sops\\nsudo chmod +x /usr/local/bin/sops Age (for Age backend - recommended) # macOS\\nbrew install age # Linux\\napt install age AWS CLI (for AWS KMS backend - optional) brew install awscli","breadcrumbs":"Config Encryption Guide » Required Tools","id":"4150","title":"Required Tools"},"4151":{"body":"# Check SOPS\\nsops --version # Check Age\\nage --version # Check AWS CLI (optional)\\naws --version","breadcrumbs":"Config Encryption Guide » Verify Installation","id":"4151","title":"Verify Installation"},"4152":{"body":"","breadcrumbs":"Config Encryption Guide » Quick Start","id":"4152","title":"Quick Start"},"4153":{"body":"Generate Age keys and create SOPS configuration: provisioning config init-encryption --kms age This will: Generate Age key pair in ~/.config/sops/age/keys.txt Display your public key (recipient) Create .sops.yaml in your project","breadcrumbs":"Config Encryption Guide » 1. Initialize Encryption","id":"4153","title":"1. Initialize Encryption"},"4154":{"body":"Add to your shell profile (~/.zshrc or ~/.bashrc): # Age encryption\\nexport SOPS_AGE_RECIPIENTS=\\"age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p\\"\\nexport PROVISIONING_KAGE=\\"$HOME/.config/sops/age/keys.txt\\" Replace the recipient with your actual public key.","breadcrumbs":"Config Encryption Guide » 2. Set Environment Variables","id":"4154","title":"2. Set Environment Variables"},"4155":{"body":"provisioning config validate-encryption Expected output: ✅ Encryption configuration is valid SOPS installed: true Age backend: true KMS enabled: false Errors: 0 Warnings: 0","breadcrumbs":"Config Encryption Guide » 3. Validate Setup","id":"4155","title":"3. Validate Setup"},"4156":{"body":"# Create a config with sensitive data\\ncat > workspace/config/secure.yaml < edit -> re-encrypt)\\nprovisioning config edit-secure workspace/config/secure.enc.yaml This will: Decrypt the file temporarily Open in your $EDITOR (vim/nano/etc) Re-encrypt when you save and close Remove temporary decrypted file","breadcrumbs":"Config Encryption Guide » Edit Encrypted Files","id":"4161","title":"Edit Encrypted Files"},"4162":{"body":"# Check if file is encrypted\\nprovisioning config is-encrypted workspace/config/secure.yaml # Get detailed encryption info\\nprovisioning config encryption-info workspace/config/secure.yaml","breadcrumbs":"Config Encryption Guide » Check Encryption Status","id":"4162","title":"Check Encryption Status"},"4163":{"body":"","breadcrumbs":"Config Encryption Guide » KMS Backends","id":"4163","title":"KMS Backends"},"4164":{"body":"Pros : Simple file-based keys No external dependencies Fast and secure Works offline Setup : # Initialize\\nprovisioning config init-encryption --kms age # Set environment variables\\nexport SOPS_AGE_RECIPIENTS=\\"age1...\\" # Your public key\\nexport PROVISIONING_KAGE=\\"$HOME/.config/sops/age/keys.txt\\" Encrypt/Decrypt : provisioning config encrypt secrets.yaml --kms age\\nprovisioning config decrypt secrets.enc.yaml","breadcrumbs":"Config Encryption Guide » Age (Recommended for Development)","id":"4164","title":"Age (Recommended for Development)"},"4165":{"body":"Pros : Centralized key management Audit logging IAM integration Key rotation Setup : Create KMS key in AWS Console Configure AWS credentials: aws configure Update .sops.yaml: creation_rules: - path_regex: .*\\\\.enc\\\\.yaml$ kms: \\"arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012\\" Encrypt/Decrypt : provisioning config encrypt secrets.yaml --kms aws-kms\\nprovisioning config decrypt secrets.enc.yaml","breadcrumbs":"Config Encryption Guide » AWS KMS (Production)","id":"4165","title":"AWS KMS (Production)"},"4166":{"body":"Pros : Dynamic secrets Centralized secret management Audit logging Policy-based access Setup : Configure Vault address and token: export VAULT_ADDR=\\"https://vault.example.com:8200\\"\\nexport VAULT_TOKEN=\\"s.xxxxxxxxxxxxxx\\" Update configuration: # workspace/config/provisioning.yaml\\nkms: enabled: true mode: \\"remote\\" vault: address: \\"https://vault.example.com:8200\\" transit_key: \\"provisioning\\" Encrypt/Decrypt : provisioning config encrypt secrets.yaml --kms vault\\nprovisioning config decrypt secrets.enc.yaml","breadcrumbs":"Config Encryption Guide » HashiCorp Vault (Enterprise)","id":"4166","title":"HashiCorp Vault (Enterprise)"},"4167":{"body":"Pros : Confidential computing support Zero-knowledge architecture Post-quantum ready Cloud-agnostic Setup : Deploy Cosmian KMS server Update configuration: kms: enabled: true mode: \\"remote\\" remote: endpoint: \\"https://kms.example.com:9998\\" auth_method: \\"certificate\\" client_cert: \\"/path/to/client.crt\\" client_key: \\"/path/to/client.key\\" Encrypt/Decrypt : provisioning config encrypt secrets.yaml --kms cosmian\\nprovisioning config decrypt secrets.enc.yaml","breadcrumbs":"Config Encryption Guide » Cosmian KMS (Confidential Computing)","id":"4167","title":"Cosmian KMS (Confidential Computing)"},"4168":{"body":"","breadcrumbs":"Config Encryption Guide » CLI Commands","id":"4168","title":"CLI Commands"},"4169":{"body":"Command Description config encrypt Encrypt configuration file config decrypt Decrypt configuration file config edit-secure Edit encrypted file securely config rotate-keys Rotate encryption keys config is-encrypted Check if file is encrypted config encryption-info Show encryption details config validate-encryption Validate encryption setup config scan-sensitive Find unencrypted sensitive configs config encrypt-all Encrypt all sensitive configs config init-encryption Initialize encryption (generate keys)","breadcrumbs":"Config Encryption Guide » Configuration Encryption Commands","id":"4169","title":"Configuration Encryption Commands"},"417":{"body":"The AI integration consists of multiple components working together to provide intelligent infrastructure provisioning: typdialog-ai : AI-assisted form filling and configuration typdialog-ag : Autonomous AI agents for complex workflows typdialog-prov-gen : Natural language to Nickel configuration generation ai-service : Core AI service backend with multi-provider support mcp-server : Model Context Protocol server for LLM integration rag : Retrieval-Augmented Generation for contextual knowledge","breadcrumbs":"Overview » Overview","id":"417","title":"Overview"},"4170":{"body":"# Encrypt workspace config\\nprovisioning config encrypt workspace/config/secure.yaml --in-place # Edit encrypted file\\nprovisioning config edit-secure workspace/config/secure.yaml # Scan for unencrypted sensitive configs\\nprovisioning config scan-sensitive workspace/config --recursive # Encrypt all sensitive configs in workspace\\nprovisioning config encrypt-all workspace/config --kms age --recursive # Check encryption status\\nprovisioning config is-encrypted workspace/config/secure.yaml # Get detailed info\\nprovisioning config encryption-info workspace/config/secure.yaml # Validate setup\\nprovisioning config validate-encryption","breadcrumbs":"Config Encryption Guide » Examples","id":"4170","title":"Examples"},"4171":{"body":"","breadcrumbs":"Config Encryption Guide » Integration with Config Loader","id":"4171","title":"Integration with Config Loader"},"4172":{"body":"The config loader automatically detects and decrypts encrypted files: # Load encrypted config (automatically decrypted in memory)\\nuse lib_provisioning/config/loader.nu let config = (load-provisioning-config --debug) Key Features : Transparent : No code changes needed Memory-Only : Decrypted content never written to disk Fallback : If decryption fails, attempts to load as plain file Debug Support : Shows decryption status with --debug flag","breadcrumbs":"Config Encryption Guide » Automatic Decryption","id":"4172","title":"Automatic Decryption"},"4173":{"body":"use lib_provisioning/config/encryption.nu # Load encrypted config\\nlet secure_config = (load-encrypted-config \\"workspace/config/secure.enc.yaml\\") # Memory-only decryption (no file created)\\nlet decrypted_content = (decrypt-config-memory \\"workspace/config/secure.enc.yaml\\")","breadcrumbs":"Config Encryption Guide » Manual Loading","id":"4173","title":"Manual Loading"},"4174":{"body":"The system supports encrypted files at any level: 1. workspace/{name}/config/provisioning.yaml ← Can be encrypted\\n2. workspace/{name}/config/providers/*.toml ← Can be encrypted\\n3. workspace/{name}/config/platform/*.toml ← Can be encrypted\\n4. ~/.../provisioning/ws_{name}.yaml ← Can be encrypted\\n5. Environment variables (PROVISIONING_*) ← Plain text","breadcrumbs":"Config Encryption Guide » Configuration Hierarchy with Encryption","id":"4174","title":"Configuration Hierarchy with Encryption"},"4175":{"body":"","breadcrumbs":"Config Encryption Guide » Best Practices","id":"4175","title":"Best Practices"},"4176":{"body":"Always encrypt configs containing : Passwords API keys Secret keys Private keys Tokens Credentials Scan for unencrypted sensitive data : provisioning config scan-sensitive workspace --recursive","breadcrumbs":"Config Encryption Guide » 1. Encrypt All Sensitive Data","id":"4176","title":"1. Encrypt All Sensitive Data"},"4177":{"body":"Environment Recommended Backend Development Age (file-based) Staging AWS KMS or Vault Production AWS KMS or Vault CI/CD AWS KMS with IAM roles","breadcrumbs":"Config Encryption Guide » 2. Use Appropriate KMS Backend","id":"4177","title":"2. Use Appropriate KMS Backend"},"4178":{"body":"Age Keys : Store private keys securely: ~/.config/sops/age/keys.txt Set file permissions: chmod 600 ~/.config/sops/age/keys.txt Backup keys securely (encrypted backup) Never commit private keys to git AWS KMS : Use separate keys per environment Enable key rotation Use IAM policies for access control Monitor usage with CloudTrail Vault : Use transit engine for encryption Enable audit logging Implement least-privilege policies Regular policy reviews","breadcrumbs":"Config Encryption Guide » 3. Key Management","id":"4178","title":"3. Key Management"},"4179":{"body":"workspace/\\n└── config/ ├── provisioning.yaml # Plain (no secrets) ├── secure.yaml # Encrypted (SOPS auto-detects) ├── providers/ │ ├── aws.toml # Plain (no secrets) │ └── aws-credentials.enc.toml # Encrypted └── platform/ └── database.enc.yaml # Encrypted","breadcrumbs":"Config Encryption Guide » 4. File Organization","id":"4179","title":"4. File Organization"},"418":{"body":"","breadcrumbs":"Overview » Key Features","id":"418","title":"Key Features"},"4180":{"body":"Add to .gitignore : # Unencrypted sensitive files\\n**/secrets.yaml\\n**/credentials.yaml\\n**/*.dec.yaml\\n**/*.dec.toml # Temporary decrypted files\\n*.tmp.yaml\\n*.tmp.toml Commit encrypted files : # Encrypted files are safe to commit\\ngit add workspace/config/secure.enc.yaml\\ngit commit -m \\"Add encrypted configuration\\"","breadcrumbs":"Config Encryption Guide » 5. Git Integration","id":"4180","title":"5. Git Integration"},"4181":{"body":"Regular Key Rotation : # Generate new Age key\\nage-keygen -o ~/.config/sops/age/keys-new.txt # Update .sops.yaml with new recipient # Rotate keys for file\\nprovisioning config rotate-keys workspace/config/secure.yaml Frequency : Development: Annually Production: Quarterly After team member departure: Immediately","breadcrumbs":"Config Encryption Guide » 6. Rotation Strategy","id":"4181","title":"6. Rotation Strategy"},"4182":{"body":"Track encryption status : # Regular scans\\nprovisioning config scan-sensitive workspace --recursive # Validate encryption setup\\nprovisioning config validate-encryption Monitor access (with Vault/AWS KMS): Enable audit logging Review access patterns Alert on anomalies","breadcrumbs":"Config Encryption Guide » 7. Audit and Monitoring","id":"4182","title":"7. Audit and Monitoring"},"4183":{"body":"","breadcrumbs":"Config Encryption Guide » Troubleshooting","id":"4183","title":"Troubleshooting"},"4184":{"body":"Error : SOPS binary not found Solution : # Install SOPS\\nbrew install sops # Verify\\nsops --version","breadcrumbs":"Config Encryption Guide » SOPS Not Found","id":"4184","title":"SOPS Not Found"},"4185":{"body":"Error : Age key file not found: ~/.config/sops/age/keys.txt Solution : # Generate new key\\nmkdir -p ~/.config/sops/age\\nage-keygen -o ~/.config/sops/age/keys.txt # Set environment variable\\nexport PROVISIONING_KAGE=\\"$HOME/.config/sops/age/keys.txt\\"","breadcrumbs":"Config Encryption Guide » Age Key Not Found","id":"4185","title":"Age Key Not Found"},"4186":{"body":"Error : no AGE_RECIPIENTS for file.yaml Solution : # Extract public key from private key\\ngrep \\"public key:\\" ~/.config/sops/age/keys.txt # Set environment variable\\nexport SOPS_AGE_RECIPIENTS=\\"age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p\\"","breadcrumbs":"Config Encryption Guide » SOPS_AGE_RECIPIENTS Not Set","id":"4186","title":"SOPS_AGE_RECIPIENTS Not Set"},"4187":{"body":"Error : Failed to decrypt configuration file Solutions : Wrong key : # Verify you have the correct private key\\nprovisioning config validate-encryption File corrupted : # Check file integrity\\nsops --decrypt workspace/config/secure.yaml Wrong backend : # Check SOPS metadata in file\\nhead -20 workspace/config/secure.yaml","breadcrumbs":"Config Encryption Guide » Decryption Failed","id":"4187","title":"Decryption Failed"},"4188":{"body":"Error : AccessDeniedException: User is not authorized to perform: kms:Decrypt Solution : # Check AWS credentials\\naws sts get-caller-identity # Verify KMS key policy allows your IAM user/role\\naws kms describe-key --key-id ","breadcrumbs":"Config Encryption Guide » AWS KMS Access Denied","id":"4188","title":"AWS KMS Access Denied"},"4189":{"body":"Error : Vault encryption failed: connection refused Solution : # Verify Vault address\\necho $VAULT_ADDR # Check connectivity\\ncurl -k $VAULT_ADDR/v1/sys/health # Verify token\\nvault token lookup","breadcrumbs":"Config Encryption Guide » Vault Connection Failed","id":"4189","title":"Vault Connection Failed"},"419":{"body":"Generate infrastructure configurations from plain English descriptions: provisioning ai generate \\"Create a production PostgreSQL cluster with encryption and daily backups\\"","breadcrumbs":"Overview » Natural Language Configuration","id":"419","title":"Natural Language Configuration"},"4190":{"body":"","breadcrumbs":"Config Encryption Guide » Security Considerations","id":"4190","title":"Security Considerations"},"4191":{"body":"Protected Against : ✅ Plaintext secrets in git ✅ Accidental secret exposure ✅ Unauthorized file access ✅ Key compromise (with rotation) Not Protected Against : ❌ Memory dumps during decryption ❌ Root/admin access to running process ❌ Compromised Age/KMS keys ❌ Social engineering","breadcrumbs":"Config Encryption Guide » Threat Model","id":"4191","title":"Threat Model"},"4192":{"body":"Principle of Least Privilege : Only grant decryption access to those who need it Key Separation : Use different keys for different environments Regular Audits : Review who has access to keys Secure Key Storage : Never store private keys in git Rotation : Regularly rotate encryption keys Monitoring : Monitor decryption operations (with AWS KMS/Vault)","breadcrumbs":"Config Encryption Guide » Security Best Practices","id":"4192","title":"Security Best Practices"},"4193":{"body":"SOPS Documentation : https://github.com/mozilla/sops Age Encryption : https://age-encryption.org/ AWS KMS : https://aws.amazon.com/kms/ HashiCorp Vault : https://www.vaultproject.io/ Cosmian KMS : https://www.cosmian.com/","breadcrumbs":"Config Encryption Guide » Additional Resources","id":"4193","title":"Additional Resources"},"4194":{"body":"For issues or questions: Check troubleshooting section above Run: provisioning config validate-encryption Review logs with --debug flag","breadcrumbs":"Config Encryption Guide » Support","id":"4194","title":"Support"},"4195":{"body":"","breadcrumbs":"Config Encryption Guide » Quick Reference","id":"4195","title":"Quick Reference"},"4196":{"body":"# 1. Initialize encryption\\nprovisioning config init-encryption --kms age # 2. Set environment variables (add to ~/.zshrc or ~/.bashrc)\\nexport SOPS_AGE_RECIPIENTS=\\"age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p\\"\\nexport PROVISIONING_KAGE=\\"$HOME/.config/sops/age/keys.txt\\" # 3. Validate setup\\nprovisioning config validate-encryption","breadcrumbs":"Config Encryption Guide » Setup (One-time)","id":"4196","title":"Setup (One-time)"},"4197":{"body":"Task Command Encrypt file provisioning config encrypt secrets.yaml --in-place Decrypt file provisioning config decrypt secrets.enc.yaml Edit encrypted provisioning config edit-secure secrets.enc.yaml Check if encrypted provisioning config is-encrypted secrets.yaml Scan for unencrypted provisioning config scan-sensitive workspace --recursive Encrypt all sensitive provisioning config encrypt-all workspace/config --kms age Validate setup provisioning config validate-encryption Show encryption info provisioning config encryption-info secrets.yaml","breadcrumbs":"Config Encryption Guide » Common Commands","id":"4197","title":"Common Commands"},"4198":{"body":"Automatically encrypted by SOPS: workspace/*/config/secure.yaml ← Auto-encrypted *.enc.yaml ← Auto-encrypted *.enc.yml ← Auto-encrypted *.enc.toml ← Auto-encrypted workspace/*/config/providers/*credentials*.toml ← Auto-encrypted","breadcrumbs":"Config Encryption Guide » File Naming Conventions","id":"4198","title":"File Naming Conventions"},"4199":{"body":"# Create config with secrets\\ncat > workspace/config/secure.yaml < edit -> re-encrypt)\\nprovisioning config edit-secure workspace/config/secure.yaml # Configs are auto-decrypted when loaded\\nprovisioning env # Automatically decrypts secure.yaml","breadcrumbs":"Config Encryption Guide » Quick Workflow","id":"4199","title":"Quick Workflow"},"42":{"body":"Version Date Major Changes 3.5.0 2025-10-06 Mode system, OCI registry, comprehensive documentation 3.4.0 2025-10-06 Test environment service 3.3.0 2025-09-30 Interactive guides system 3.2.0 2025-09-30 Modular CLI refactoring 3.1.0 2025-09-25 Batch workflow system 3.0.0 2025-09-25 Hybrid orchestrator architecture 2.0.5 2025-10-02 Workspace switching system 2.0.0 2025-09-23 Configuration system migration Maintained By : Provisioning Team Last Review : 2025-10-06 Next Review : 2026-01-06","breadcrumbs":"Home » Version History","id":"42","title":"Version History"},"420":{"body":"Real-time suggestions and explanations as you fill out configuration forms via typdialog web UI.","breadcrumbs":"Overview » AI-Assisted Forms","id":"420","title":"AI-Assisted Forms"},"4200":{"body":"Backend Use Case Setup Command Age Development, simple setup provisioning config init-encryption --kms age AWS KMS Production, AWS environments Configure in .sops.yaml Vault Enterprise, dynamic secrets Set VAULT_ADDR and VAULT_TOKEN Cosmian Confidential computing Configure in config.toml","breadcrumbs":"Config Encryption Guide » KMS Backends","id":"4200","title":"KMS Backends"},"4201":{"body":"✅ Encrypt all files with passwords, API keys, secrets ✅ Never commit unencrypted secrets to git ✅ Set file permissions: chmod 600 ~/.config/sops/age/keys.txt ✅ Add plaintext files to .gitignore: *.dec.yaml, secrets.yaml ✅ Regular key rotation (quarterly for production) ✅ Separate keys per environment (dev/staging/prod) ✅ Backup Age keys securely (encrypted backup)","breadcrumbs":"Config Encryption Guide » Security Checklist","id":"4201","title":"Security Checklist"},"4202":{"body":"Problem Solution SOPS binary not found brew install sops Age key file not found provisioning config init-encryption --kms age SOPS_AGE_RECIPIENTS not set export SOPS_AGE_RECIPIENTS=\\"age1...\\" Decryption failed Check key file: provisioning config validate-encryption AWS KMS Access Denied Verify IAM permissions: aws sts get-caller-identity","breadcrumbs":"Config Encryption Guide » Troubleshooting","id":"4202","title":"Troubleshooting"},"4203":{"body":"# Run all encryption tests\\nnu provisioning/core/nulib/lib_provisioning/config/encryption_tests.nu # Run specific test\\nnu provisioning/core/nulib/lib_provisioning/config/encryption_tests.nu --test roundtrip # Test full workflow\\nnu provisioning/core/nulib/lib_provisioning/config/encryption_tests.nu test-full-encryption-workflow # Test KMS backend\\nuse lib_provisioning/kms/client.nu\\nkms-test --backend age","breadcrumbs":"Config Encryption Guide » Testing","id":"4203","title":"Testing"},"4204":{"body":"Configs are automatically decrypted when loaded: # Nushell code - encryption is transparent\\nuse lib_provisioning/config/loader.nu # Auto-decrypts encrypted files in memory\\nlet config = (load-provisioning-config) # Access secrets normally\\nlet db_password = ($config | get database.password)","breadcrumbs":"Config Encryption Guide » Integration","id":"4204","title":"Integration"},"4205":{"body":"If you lose your Age key: Check backups : ~/.config/sops/age/keys.txt.backup Check other systems : Keys might be on other dev machines Contact team : Team members with access can re-encrypt for you Rotate secrets : If keys are lost, rotate all secrets","breadcrumbs":"Config Encryption Guide » Emergency Key Recovery","id":"4205","title":"Emergency Key Recovery"},"4206":{"body":"Multiple Recipients (Team Access) # .sops.yaml\\ncreation_rules: - path_regex: .*\\\\.enc\\\\.yaml$ age: >- age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p, age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8q Key Rotation # Generate new key\\nage-keygen -o ~/.config/sops/age/keys-new.txt # Update .sops.yaml with new recipient # Rotate keys for file\\nprovisioning config rotate-keys workspace/config/secure.yaml Scan and Encrypt All # Find all unencrypted sensitive configs\\nprovisioning config scan-sensitive workspace --recursive # Encrypt them all\\nprovisioning config encrypt-all workspace --kms age --recursive # Verify\\nprovisioning config scan-sensitive workspace --recursive","breadcrumbs":"Config Encryption Guide » Advanced","id":"4206","title":"Advanced"},"4207":{"body":"Full Guide : docs/user/CONFIG_ENCRYPTION_GUIDE.md SOPS Docs : https://github.com/mozilla/sops Age Docs : https://age-encryption.org/ Last Updated : 2025-10-08 Version : 1.0.0","breadcrumbs":"Config Encryption Guide » Documentation","id":"4207","title":"Documentation"},"4208":{"body":"","breadcrumbs":"Security System » Complete Security System (v4.0.0)","id":"4208","title":"Complete Security System (v4.0.0)"},"4209":{"body":"A comprehensive security system with 39,699 lines across 12 components providing enterprise-grade protection for infrastructure automation.","breadcrumbs":"Security System » 🔐 Enterprise-Grade Security Implementation","id":"4209","title":"🔐 Enterprise-Grade Security Implementation"},"421":{"body":"AI analyzes deployment failures and suggests fixes: provisioning ai troubleshoot deployment-12345","breadcrumbs":"Overview » Intelligent Troubleshooting","id":"421","title":"Intelligent Troubleshooting"},"4210":{"body":"","breadcrumbs":"Security System » Core Security Components","id":"4210","title":"Core Security Components"},"4211":{"body":"Type : RS256 token-based authentication Features : Argon2id hashing, token rotation, session management Roles : 5 distinct role levels with inheritance Commands : provisioning login\\nprovisioning mfa totp verify","breadcrumbs":"Security System » 1. Authentication (JWT)","id":"4211","title":"1. Authentication (JWT)"},"4212":{"body":"Type : Policy-as-code using Cedar authorization engine Features : Context-aware policies, hot reload, fine-grained control Updates : Dynamic policy reloading without service restart","breadcrumbs":"Security System » 2. Authorization (Cedar)","id":"4212","title":"2. Authorization (Cedar)"},"4213":{"body":"Methods : TOTP (Time-based OTP) + WebAuthn/FIDO2 Features : Backup codes, rate limiting, device binding Commands : provisioning mfa totp enroll\\nprovisioning mfa webauthn enroll","breadcrumbs":"Security System » 3. Multi-Factor Authentication (MFA)","id":"4213","title":"3. Multi-Factor Authentication (MFA)"},"4214":{"body":"Dynamic Secrets : AWS STS, SSH keys, UpCloud credentials KMS Integration : Vault + AWS KMS + Age + Cosmian Features : Auto-cleanup, TTL management, rotation policies Commands : provisioning secrets generate aws --ttl 1hr\\nprovisioning ssh connect server01","breadcrumbs":"Security System » 4. Secrets Management","id":"4214","title":"4. Secrets Management"},"4215":{"body":"Backends : RustyVault, Age, AWS KMS, HashiCorp Vault, Cosmian Features : Envelope encryption, key rotation, secure storage Commands : provisioning kms encrypt\\nprovisioning config encrypt secure.yaml","breadcrumbs":"Security System » 5. Key Management System (KMS)","id":"4215","title":"5. Key Management System (KMS)"},"4216":{"body":"Format : Structured JSON logs with full context Compliance : GDPR-compliant with PII filtering Retention : 7-year data retention policy Exports : 5 export formats (JSON, CSV, SYSLOG, Splunk, CloudWatch)","breadcrumbs":"Security System » 6. Audit Logging","id":"4216","title":"6. Audit Logging"},"4217":{"body":"Approval : Multi-party approval workflow Features : Temporary elevated privileges, auto-revocation, audit trail Commands : provisioning break-glass request \\"reason\\"\\nprovisioning break-glass approve ","breadcrumbs":"Security System » 7. Break-Glass Emergency Access","id":"4217","title":"7. Break-Glass Emergency Access"},"4218":{"body":"Standards : GDPR, SOC2, ISO 27001, incident response procedures Features : Compliance reporting, audit trails, policy enforcement Commands : provisioning compliance report\\nprovisioning compliance gdpr export ","breadcrumbs":"Security System » 8. Compliance Management","id":"4218","title":"8. Compliance Management"},"4219":{"body":"Filtering : By user, action, time range, resource Features : Structured query language, real-time search Commands : provisioning audit query --user alice --action deploy --from 24h","breadcrumbs":"Security System » 9. Audit Query System","id":"4219","title":"9. Audit Query System"},"422":{"body":"Configuration Optimization AI reviews configurations and suggests performance and security improvements: provisioning ai optimize workspaces/prod/config.ncl AI agents execute multi-step workflows with minimal human intervention: provisioning ai agent --goal \\"Set up complete dev environment for Python app\\"","breadcrumbs":"Overview » » Autonomous Agents","id":"422","title":"Autonomous Agents"},"4220":{"body":"Features : Rotation policies, expiration tracking, revocation Integration : Seamless with auth system","breadcrumbs":"Security System » 10. Token Management","id":"4220","title":"10. Token Management"},"4221":{"body":"Model : Role-based access control (RBAC) Features : Resource-level permissions, delegation, audit","breadcrumbs":"Security System » 11. Access Control","id":"4221","title":"11. Access Control"},"4222":{"body":"Standards : AES-256, TLS 1.3, envelope encryption Coverage : At-rest and in-transit encryption","breadcrumbs":"Security System » 12. Encryption","id":"4222","title":"12. Encryption"},"4223":{"body":"Overhead : <20 ms per secure operation Tests : 350+ comprehensive test cases Endpoints : 83+ REST API endpoints CLI Commands : 111+ security-related commands","breadcrumbs":"Security System » Performance Characteristics","id":"4223","title":"Performance Characteristics"},"4224":{"body":"Component Command Purpose Login provisioning login User authentication MFA TOTP provisioning mfa totp enroll Setup time-based MFA MFA WebAuthn provisioning mfa webauthn enroll Setup hardware security key Secrets provisioning secrets generate aws --ttl 1hr Generate temporary credentials SSH provisioning ssh connect server01 Secure SSH session KMS Encrypt provisioning kms encrypt Encrypt configuration Break-Glass provisioning break-glass request \\"reason\\" Request emergency access Compliance provisioning compliance report Generate compliance report GDPR Export provisioning compliance gdpr export Export user data Audit provisioning audit query --user alice --action deploy --from 24h Search audit logs","breadcrumbs":"Security System » Quick Reference","id":"4224","title":"Quick Reference"},"4225":{"body":"Security system is integrated throughout provisioning platform: Embedded : All authentication/authorization checks Non-blocking : <20 ms overhead on operations Graceful degradation : Fallback mechanisms for partial failures Hot reload : Policies update without service restart","breadcrumbs":"Security System » Architecture","id":"4225","title":"Architecture"},"4226":{"body":"Security policies and settings are defined in: provisioning/kcl/security.k - KCL security schema definitions provisioning/config/security/*.toml - Security policy configurations Environment-specific overrides in workspace/config/","breadcrumbs":"Security System » Configuration","id":"4226","title":"Configuration"},"4227":{"body":"Full implementation: ADR-009: Security System Complete User guides: Authentication Layer Guide Admin guides: MFA Admin Setup Guide Implementation details: Supplementary documentation in subdirectories","breadcrumbs":"Security System » Documentation","id":"4227","title":"Documentation"},"4228":{"body":"# Show security help\\nprovisioning help security # Show specific security command help\\nprovisioning login --help\\nprovisioning mfa --help\\nprovisioning secrets --help","breadcrumbs":"Security System » Help Commands","id":"4228","title":"Help Commands"},"4229":{"body":"Version : 1.0.0 Date : 2025-10-08 Status : Production-ready","breadcrumbs":"RustyVault KMS Guide » RustyVault KMS Backend Guide","id":"4229","title":"RustyVault KMS Backend Guide"},"423":{"body":"Architecture - AI system architecture and components Natural Language Config - NL to Nickel generation AI-Assisted Forms - typdialog-ai integration AI Agents - typdialog-ag autonomous agents Config Generation - typdialog-prov-gen details RAG System - Retrieval-Augmented Generation MCP Integration - Model Context Protocol Security Policies - Cedar policies for AI Troubleshooting with AI - AI debugging workflows API Reference - AI service API documentation Configuration - AI system configuration guide Cost Management - Managing LLM API costs","breadcrumbs":"Overview » » Documentation Structure","id":"423","title":"Documentation Structure"},"4230":{"body":"RustyVault is a self-hosted, Rust-based secrets management system that provides a Vault-compatible API . The provisioning platform now supports RustyVault as a KMS backend alongside Age, Cosmian, AWS KMS, and HashiCorp Vault.","breadcrumbs":"RustyVault KMS Guide » Overview","id":"4230","title":"Overview"},"4231":{"body":"Self-hosted : Full control over your key management infrastructure Pure Rust : Better performance and memory safety Vault-compatible : Drop-in replacement for HashiCorp Vault Transit engine OSI-approved License : Apache 2.0 (vs HashiCorp\'s BSL) Embeddable : Can run as standalone service or embedded library No Vendor Lock-in : Open-source alternative to proprietary KMS solutions","breadcrumbs":"RustyVault KMS Guide » Why RustyVault","id":"4231","title":"Why RustyVault"},"4232":{"body":"KMS Service Backends:\\n├── Age (local development, file-based)\\n├── Cosmian (privacy-preserving, production)\\n├── AWS KMS (cloud-native AWS)\\n├── HashiCorp Vault (enterprise, external)\\n└── RustyVault (self-hosted, embedded) ✨ NEW","breadcrumbs":"RustyVault KMS Guide » Architecture Position","id":"4232","title":"Architecture Position"},"4233":{"body":"","breadcrumbs":"RustyVault KMS Guide » Installation","id":"4233","title":"Installation"},"4234":{"body":"# Install RustyVault binary\\ncargo install rusty_vault # Start RustyVault server\\nrustyvault server -config=/path/to/config.hcl","breadcrumbs":"RustyVault KMS Guide » Option 1: Standalone RustyVault Server","id":"4234","title":"Option 1: Standalone RustyVault Server"},"4235":{"body":"# Pull RustyVault image (if available)\\ndocker pull tongsuo/rustyvault:latest # Run RustyVault container\\ndocker run -d \\\\ --name rustyvault \\\\ -p 8200:8200 \\\\ -v $(pwd)/config:/vault/config \\\\ -v $(pwd)/data:/vault/data \\\\ tongsuo/rustyvault:latest","breadcrumbs":"RustyVault KMS Guide » Option 2: Docker Deployment","id":"4235","title":"Option 2: Docker Deployment"},"4236":{"body":"# Clone repository\\ngit clone https://github.com/Tongsuo-Project/RustyVault.git\\ncd RustyVault # Build and run\\ncargo build --release\\n./target/release/rustyvault server -config=config.hcl","breadcrumbs":"RustyVault KMS Guide » Option 3: From Source","id":"4236","title":"Option 3: From Source"},"4237":{"body":"","breadcrumbs":"RustyVault KMS Guide » Configuration","id":"4237","title":"Configuration"},"4238":{"body":"Create rustyvault-config.hcl: # RustyVault Server Configuration storage \\"file\\" { path = \\"/vault/data\\"\\n} listener \\"tcp\\" { address = \\"0.0.0.0:8200\\" tls_disable = true # Enable TLS in production\\n} api_addr = \\"http://127.0.0.1:8200\\"\\ncluster_addr = \\"https://127.0.0.1:8201\\" # Enable Transit secrets engine\\ndefault_lease_ttl = \\"168h\\"\\nmax_lease_ttl = \\"720h\\"","breadcrumbs":"RustyVault KMS Guide » RustyVault Server Configuration","id":"4238","title":"RustyVault Server Configuration"},"4239":{"body":"# Initialize (first time only)\\nexport VAULT_ADDR=\'http://127.0.0.1:8200\'\\nrustyvault operator init # Unseal (after every restart)\\nrustyvault operator unseal \\nrustyvault operator unseal \\nrustyvault operator unseal # Save root token\\nexport RUSTYVAULT_TOKEN=\'\'","breadcrumbs":"RustyVault KMS Guide » Initialize RustyVault","id":"4239","title":"Initialize RustyVault"},"424":{"body":"","breadcrumbs":"Overview » » Quick Start","id":"424","title":"Quick Start"},"4240":{"body":"# Enable transit secrets engine\\nrustyvault secrets enable transit # Create encryption key\\nrustyvault write -f transit/keys/provisioning-main # Verify key creation\\nrustyvault read transit/keys/provisioning-main","breadcrumbs":"RustyVault KMS Guide » Enable Transit Engine","id":"4240","title":"Enable Transit Engine"},"4241":{"body":"","breadcrumbs":"RustyVault KMS Guide » KMS Service Configuration","id":"4241","title":"KMS Service Configuration"},"4242":{"body":"[kms]\\ntype = \\"rustyvault\\"\\nserver_url = \\"http://localhost:8200\\"\\ntoken = \\"${RUSTYVAULT_TOKEN}\\"\\nmount_point = \\"transit\\"\\nkey_name = \\"provisioning-main\\"\\ntls_verify = true [service]\\nbind_addr = \\"0.0.0.0:8081\\"\\nlog_level = \\"info\\"\\naudit_logging = true [tls]\\nenabled = false # Set true with HTTPS","breadcrumbs":"RustyVault KMS Guide » Update provisioning/config/kms.toml","id":"4242","title":"Update provisioning/config/kms.toml"},"4243":{"body":"# RustyVault connection\\nexport RUSTYVAULT_ADDR=\\"http://localhost:8200\\"\\nexport RUSTYVAULT_TOKEN=\\"s.xxxxxxxxxxxxxxxxxxxxxx\\"\\nexport RUSTYVAULT_MOUNT_POINT=\\"transit\\"\\nexport RUSTYVAULT_KEY_NAME=\\"provisioning-main\\"\\nexport RUSTYVAULT_TLS_VERIFY=\\"true\\" # KMS service\\nexport KMS_BACKEND=\\"rustyvault\\"\\nexport KMS_BIND_ADDR=\\"0.0.0.0:8081\\"","breadcrumbs":"RustyVault KMS Guide » Environment Variables","id":"4243","title":"Environment Variables"},"4244":{"body":"","breadcrumbs":"RustyVault KMS Guide » Usage","id":"4244","title":"Usage"},"4245":{"body":"# With RustyVault backend\\ncd provisioning/platform/kms-service\\ncargo run # With custom config\\ncargo run -- --config=/path/to/kms.toml","breadcrumbs":"RustyVault KMS Guide » Start KMS Service","id":"4245","title":"Start KMS Service"},"4246":{"body":"# Encrypt configuration file\\nprovisioning kms encrypt provisioning/config/secrets.yaml # Decrypt configuration\\nprovisioning kms decrypt provisioning/config/secrets.yaml.enc # Generate data key (envelope encryption)\\nprovisioning kms generate-key --spec AES256 # Health check\\nprovisioning kms health","breadcrumbs":"RustyVault KMS Guide » CLI Operations","id":"4246","title":"CLI Operations"},"4247":{"body":"# Health check\\ncurl http://localhost:8081/health # Encrypt data\\ncurl -X POST http://localhost:8081/encrypt \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"plaintext\\": \\"SGVsbG8sIFdvcmxkIQ==\\", \\"context\\": \\"environment=production\\" }\' # Decrypt data\\ncurl -X POST http://localhost:8081/decrypt \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"ciphertext\\": \\"vault:v1:...\\", \\"context\\": \\"environment=production\\" }\' # Generate data key\\ncurl -X POST http://localhost:8081/datakey/generate \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{\\"key_spec\\": \\"AES_256\\"}\'","breadcrumbs":"RustyVault KMS Guide » REST API Usage","id":"4247","title":"REST API Usage"},"4248":{"body":"","breadcrumbs":"RustyVault KMS Guide » Advanced Features","id":"4248","title":"Advanced Features"},"4249":{"body":"Additional authenticated data binds encrypted data to specific contexts: # Encrypt with context\\ncurl -X POST http://localhost:8081/encrypt \\\\ -d \'{ \\"plaintext\\": \\"c2VjcmV0\\", \\"context\\": \\"environment=prod,service=api\\" }\' # Decrypt requires same context\\ncurl -X POST http://localhost:8081/decrypt \\\\ -d \'{ \\"ciphertext\\": \\"vault:v1:...\\", \\"context\\": \\"environment=prod,service=api\\" }\'","breadcrumbs":"RustyVault KMS Guide » Context-based Encryption (AAD)","id":"4249","title":"Context-based Encryption (AAD)"},"425":{"body":"# Edit provisioning config\\nvim provisioning/config/ai.toml # Set provider and enable features\\n[ai]\\nenabled = true\\nprovider = \\"anthropic\\" # or \\"openai\\" or \\"local\\"\\nmodel = \\"claude-sonnet-4\\" [ai.features]\\nform_assistance = true\\nconfig_generation = true\\ntroubleshooting = true","breadcrumbs":"Overview » » Enable AI Features","id":"425","title":"Enable AI Features"},"4250":{"body":"For large files, use envelope encryption: # 1. Generate data key\\nDATA_KEY=$(curl -X POST http://localhost:8081/datakey/generate \\\\ -d \'{\\"key_spec\\": \\"AES_256\\"}\' | jq -r \'.plaintext\') # 2. Encrypt large file with data key (locally)\\nopenssl enc -aes-256-cbc -in large-file.bin -out encrypted.bin -K $DATA_KEY # 3. Store encrypted data key (from response)\\necho \\"vault:v1:...\\" > encrypted-data-key.txt","breadcrumbs":"RustyVault KMS Guide » Envelope Encryption","id":"4250","title":"Envelope Encryption"},"4251":{"body":"# Rotate encryption key in RustyVault\\nrustyvault write -f transit/keys/provisioning-main/rotate # Verify new version\\nrustyvault read transit/keys/provisioning-main # Rewrap existing ciphertext with new key version\\ncurl -X POST http://localhost:8081/rewrap \\\\ -d \'{\\"ciphertext\\": \\"vault:v1:...\\"}\'","breadcrumbs":"RustyVault KMS Guide » Key Rotation","id":"4251","title":"Key Rotation"},"4252":{"body":"","breadcrumbs":"RustyVault KMS Guide » Production Deployment","id":"4252","title":"Production Deployment"},"4253":{"body":"Deploy multiple RustyVault instances behind a load balancer: # docker-compose.yml\\nversion: \'3.8\' services: rustyvault-1: image: tongsuo/rustyvault:latest ports: - \\"8200:8200\\" volumes: - ./config:/vault/config - vault-data-1:/vault/data rustyvault-2: image: tongsuo/rustyvault:latest ports: - \\"8201:8200\\" volumes: - ./config:/vault/config - vault-data-2:/vault/data lb: image: nginx:alpine ports: - \\"80:80\\" volumes: - ./nginx.conf:/etc/nginx/nginx.conf depends_on: - rustyvault-1 - rustyvault-2 volumes: vault-data-1: vault-data-2:","breadcrumbs":"RustyVault KMS Guide » High Availability Setup","id":"4253","title":"High Availability Setup"},"4254":{"body":"# kms.toml\\n[kms]\\ntype = \\"rustyvault\\"\\nserver_url = \\"https://vault.example.com:8200\\"\\ntoken = \\"${RUSTYVAULT_TOKEN}\\"\\ntls_verify = true [tls]\\nenabled = true\\ncert_path = \\"/etc/kms/certs/server.crt\\"\\nkey_path = \\"/etc/kms/certs/server.key\\"\\nca_path = \\"/etc/kms/certs/ca.crt\\"","breadcrumbs":"RustyVault KMS Guide » TLS Configuration","id":"4254","title":"TLS Configuration"},"4255":{"body":"# rustyvault-config.hcl\\nseal \\"awskms\\" { region = \\"us-east-1\\" kms_key_id = \\"arn:aws:kms:us-east-1:123456789012:key/...\\"\\n}","breadcrumbs":"RustyVault KMS Guide » Auto-Unseal (AWS KMS)","id":"4255","title":"Auto-Unseal (AWS KMS)"},"4256":{"body":"","breadcrumbs":"RustyVault KMS Guide » Monitoring","id":"4256","title":"Monitoring"},"4257":{"body":"# RustyVault health\\ncurl http://localhost:8200/v1/sys/health # KMS service health\\ncurl http://localhost:8081/health # Metrics (if enabled)\\ncurl http://localhost:8081/metrics","breadcrumbs":"RustyVault KMS Guide » Health Checks","id":"4257","title":"Health Checks"},"4258":{"body":"Enable audit logging in RustyVault: # rustyvault-config.hcl\\naudit { path = \\"/vault/logs/audit.log\\" format = \\"json\\"\\n}","breadcrumbs":"RustyVault KMS Guide » Audit Logging","id":"4258","title":"Audit Logging"},"4259":{"body":"","breadcrumbs":"RustyVault KMS Guide » Troubleshooting","id":"4259","title":"Troubleshooting"},"426":{"body":"# Simple generation\\nprovisioning ai generate \\"PostgreSQL database with encryption\\" # With specific schema\\nprovisioning ai generate \\\\ --schema database \\\\ --output workspaces/dev/db.ncl \\\\ \\"Production PostgreSQL with 100GB storage and daily backups\\"","breadcrumbs":"Overview » » Generate Configuration from Natural Language","id":"426","title":"Generate Configuration from Natural Language"},"4260":{"body":"1. Connection Refused # Check RustyVault is running\\ncurl http://localhost:8200/v1/sys/health # Check token is valid\\nexport VAULT_ADDR=\'http://localhost:8200\'\\nrustyvault token lookup 2. Authentication Failed # Verify token in environment\\necho $RUSTYVAULT_TOKEN # Renew token if needed\\nrustyvault token renew 3. Key Not Found # List available keys\\nrustyvault list transit/keys # Create missing key\\nrustyvault write -f transit/keys/provisioning-main 4. TLS Verification Failed # Disable TLS verification (dev only)\\nexport RUSTYVAULT_TLS_VERIFY=false # Or add CA certificate\\nexport RUSTYVAULT_CACERT=/path/to/ca.crt","breadcrumbs":"RustyVault KMS Guide » Common Issues","id":"4260","title":"Common Issues"},"4261":{"body":"","breadcrumbs":"RustyVault KMS Guide » Migration from Other Backends","id":"4261","title":"Migration from Other Backends"},"4262":{"body":"RustyVault is API-compatible, minimal changes required: # Old config (Vault)\\n[kms]\\ntype = \\"vault\\"\\naddress = \\"https://vault.example.com:8200\\"\\ntoken = \\"${VAULT_TOKEN}\\" # New config (RustyVault)\\n[kms]\\ntype = \\"rustyvault\\"\\nserver_url = \\"http://rustyvault.example.com:8200\\"\\ntoken = \\"${RUSTYVAULT_TOKEN}\\"","breadcrumbs":"RustyVault KMS Guide » From HashiCorp Vault","id":"4262","title":"From HashiCorp Vault"},"4263":{"body":"Re-encrypt existing encrypted files: # 1. Decrypt with Age\\nprovisioning kms decrypt --backend age secrets.enc > secrets.plain # 2. Encrypt with RustyVault\\nprovisioning kms encrypt --backend rustyvault secrets.plain > secrets.rustyvault.enc","breadcrumbs":"RustyVault KMS Guide » From Age","id":"4263","title":"From Age"},"4264":{"body":"","breadcrumbs":"RustyVault KMS Guide » Security Considerations","id":"4264","title":"Security Considerations"},"4265":{"body":"Enable TLS : Always use HTTPS in production Rotate Tokens : Regularly rotate RustyVault tokens Least Privilege : Use policies to restrict token permissions Audit Logging : Enable and monitor audit logs Backup Keys : Secure backup of unseal keys and root token Network Isolation : Run RustyVault in isolated network segment","breadcrumbs":"RustyVault KMS Guide » Best Practices","id":"4265","title":"Best Practices"},"4266":{"body":"Create restricted policy for KMS service: # kms-policy.hcl\\npath \\"transit/encrypt/provisioning-main\\" { capabilities = [\\"update\\"]\\n} path \\"transit/decrypt/provisioning-main\\" { capabilities = [\\"update\\"]\\n} path \\"transit/datakey/plaintext/provisioning-main\\" { capabilities = [\\"update\\"]\\n} Apply policy: rustyvault policy write kms-service kms-policy.hcl\\nrustyvault token create -policy=kms-service","breadcrumbs":"RustyVault KMS Guide » Token Policies","id":"4266","title":"Token Policies"},"4267":{"body":"","breadcrumbs":"RustyVault KMS Guide » Performance","id":"4267","title":"Performance"},"4268":{"body":"Operation Latency Throughput Encrypt 5-15 ms 2,000-5,000 ops/sec Decrypt 5-15 ms 2,000-5,000 ops/sec Generate Key 10-20 ms 1,000-2,000 ops/sec Actual performance depends on hardware, network, and RustyVault configuration","breadcrumbs":"RustyVault KMS Guide » Benchmarks (Estimated)","id":"4268","title":"Benchmarks (Estimated)"},"4269":{"body":"Connection Pooling : Reuse HTTP connections Batching : Batch multiple operations when possible Caching : Cache data keys for envelope encryption Local Unseal : Use auto-unseal for faster restarts","breadcrumbs":"RustyVault KMS Guide » Optimization Tips","id":"4269","title":"Optimization Tips"},"427":{"body":"# Open typdialog web UI with AI assistance\\nprovisioning workspace init --interactive --ai-assist # AI provides real-time suggestions as you type\\n# AI explains validation errors in plain English\\n# AI fills multiple fields from natural language description","breadcrumbs":"Overview » » Use AI-Assisted Forms","id":"427","title":"Use AI-Assisted Forms"},"4270":{"body":"KMS Service : docs/user/CONFIG_ENCRYPTION_GUIDE.md Dynamic Secrets : docs/user/DYNAMIC_SECRETS_QUICK_REFERENCE.md Security System : docs/architecture/adr-009-security-system-complete.md RustyVault GitHub : https://github.com/Tongsuo-Project/RustyVault","breadcrumbs":"RustyVault KMS Guide » Related Documentation","id":"4270","title":"Related Documentation"},"4271":{"body":"GitHub Issues : https://github.com/Tongsuo-Project/RustyVault/issues Documentation : https://github.com/Tongsuo-Project/RustyVault/tree/main/docs Community : https://users.rust-lang.org/t/rustyvault-a-hashicorp-vault-replacement-in-rust/103943 Last Updated : 2025-10-08 Maintained By : Architecture Team","breadcrumbs":"RustyVault KMS Guide » Support","id":"4271","title":"Support"},"4272":{"body":"SecretumVault is an enterprise-grade, post-quantum ready secrets management system integrated as the fourth KMS backend in the provisioning platform, alongside Age (dev), Cosmian (prod), and RustyVault (self-hosted).","breadcrumbs":"SecretumVault KMS Guide » SecretumVault KMS Backend Guide","id":"4272","title":"SecretumVault KMS Backend Guide"},"4273":{"body":"","breadcrumbs":"SecretumVault KMS Guide » Overview","id":"4273","title":"Overview"},"4274":{"body":"SecretumVault provides: Post-Quantum Cryptography : Ready for quantum-resistant algorithms Enterprise Features : Policy-as-code (Cedar), audit logging, compliance tracking Multiple Storage Backends : Filesystem (dev), SurrealDB (staging), etcd (prod), PostgreSQL Transit Engine : Encryption-as-a-service for data protection KV Engine : Versioned secret storage with rotation policies High Availability : Seamless transition from embedded to distributed modes","breadcrumbs":"SecretumVault KMS Guide » What is SecretumVault","id":"4274","title":"What is SecretumVault"},"4275":{"body":"Scenario Backend Reason Local development Age Simple, no dependencies Testing/Staging SecretumVault Enterprise features, production-like Production Cosmian or SecretumVault Enterprise security, compliance Self-Hosted Enterprise SecretumVault + etcd Full control, HA support","breadcrumbs":"SecretumVault KMS Guide » When to Use SecretumVault","id":"4275","title":"When to Use SecretumVault"},"4276":{"body":"","breadcrumbs":"SecretumVault KMS Guide » Deployment Modes","id":"4276","title":"Deployment Modes"},"4277":{"body":"Storage : Filesystem (~/.config/provisioning/secretumvault/data) Performance : <3 ms encryption/decryption Setup : No separate service required Best For : Local development and testing export PROVISIONING_ENV=dev\\nexport KMS_DEV_BACKEND=secretumvault\\nprovisioning kms encrypt config.yaml","breadcrumbs":"SecretumVault KMS Guide » Development Mode (Embedded)","id":"4277","title":"Development Mode (Embedded)"},"4278":{"body":"Storage : SurrealDB (document database) Performance : <10 ms operations Setup : Start SecretumVault service separately Best For : Team testing, staging environments # Start SecretumVault service\\nsecretumvault server --storage-backend surrealdb # Configure provisioning\\nexport PROVISIONING_ENV=staging\\nexport SECRETUMVAULT_URL=http://localhost:8200\\nexport SECRETUMVAULT_TOKEN=your-auth-token provisioning kms encrypt config.yaml","breadcrumbs":"SecretumVault KMS Guide » Staging Mode (Service + SurrealDB)","id":"4278","title":"Staging Mode (Service + SurrealDB)"},"4279":{"body":"Storage : etcd cluster (3+ nodes) Performance : <10 ms operations (ninety-ninth percentile) Setup : etcd cluster + SecretumVault service Best For : Production deployments with HA requirements # Setup etcd cluster (3 nodes minimum)\\netcd --name etcd1 --data-dir etcd1-data \\\\ --advertise-client-urls http://localhost:2379 \\\\ --listen-client-urls http://localhost:2379 # Start SecretumVault with etcd\\nsecretumvault server \\\\ --storage-backend etcd \\\\ --etcd-endpoints http://etcd1:2379,http://etcd2:2379,http://etcd3:2379 # Configure provisioning\\nexport PROVISIONING_ENV=prod\\nexport SECRETUMVAULT_URL=https://your-secretumvault:8200\\nexport SECRETUMVAULT_TOKEN=your-auth-token\\nexport SECRETUMVAULT_STORAGE=etcd provisioning kms encrypt config.yaml","breadcrumbs":"SecretumVault KMS Guide » Production Mode (Service + etcd)","id":"4279","title":"Production Mode (Service + etcd)"},"428":{"body":"# Analyze failed deployment\\nprovisioning ai troubleshoot deployment-12345 # AI analyzes logs and suggests fixes\\n# AI generates corrected configuration\\n# AI explains root cause in plain language","breadcrumbs":"Overview » » Troubleshoot with AI","id":"428","title":"Troubleshoot with AI"},"4280":{"body":"","breadcrumbs":"SecretumVault KMS Guide » Configuration","id":"4280","title":"Configuration"},"4281":{"body":"Variable Purpose Default Example PROVISIONING_ENV Deployment environment dev staging, prod KMS_DEV_BACKEND Development KMS backend age secretumvault KMS_STAGING_BACKEND Staging KMS backend secretumvault cosmian KMS_PROD_BACKEND Production KMS backend cosmian secretumvault SECRETUMVAULT_URL Server URL http://localhost:8200 https://kms.example.com SECRETUMVAULT_TOKEN Authentication token (none) (Bearer token) SECRETUMVAULT_STORAGE Storage backend filesystem surrealdb, etcd SECRETUMVAULT_TLS_VERIFY Verify TLS certificates false true","breadcrumbs":"SecretumVault KMS Guide » Environment Variables","id":"4281","title":"Environment Variables"},"4282":{"body":"System Defaults : provisioning/config/secretumvault.toml KMS Config : provisioning/config/kms.toml Edit these files to customize: Engine mount points Key names Storage backend settings Performance tuning Audit logging Key rotation policies","breadcrumbs":"SecretumVault KMS Guide » Configuration Files","id":"4282","title":"Configuration Files"},"4283":{"body":"","breadcrumbs":"SecretumVault KMS Guide » Operations","id":"4283","title":"Operations"},"4284":{"body":"# Encrypt a file\\nprovisioning kms encrypt config.yaml\\n# Output: config.yaml.enc # Encrypt with specific key\\nprovisioning kms encrypt --key-id my-key config.yaml # Encrypt and sign\\nprovisioning kms encrypt --sign config.yaml","breadcrumbs":"SecretumVault KMS Guide » Encrypt Data","id":"4284","title":"Encrypt Data"},"4285":{"body":"# Decrypt a file\\nprovisioning kms decrypt config.yaml.enc\\n# Output: config.yaml # Decrypt with specific key\\nprovisioning kms decrypt --key-id my-key config.yaml.enc # Verify and decrypt\\nprovisioning kms decrypt --verify config.yaml.enc","breadcrumbs":"SecretumVault KMS Guide » Decrypt Data","id":"4285","title":"Decrypt Data"},"4286":{"body":"# Generate AES-256 data key\\nprovisioning kms generate-key --spec AES256 # Generate AES-128 data key\\nprovisioning kms generate-key --spec AES128 # Generate RSA-4096 key\\nprovisioning kms generate-key --spec RSA4096","breadcrumbs":"SecretumVault KMS Guide » Generate Data Keys","id":"4286","title":"Generate Data Keys"},"4287":{"body":"# Check KMS health\\nprovisioning kms health # Get KMS version\\nprovisioning kms version # Detailed KMS status\\nprovisioning kms status","breadcrumbs":"SecretumVault KMS Guide » Health and Status","id":"4287","title":"Health and Status"},"4288":{"body":"# Rotate encryption key\\nprovisioning kms rotate-key provisioning-master # Check rotation policy\\nprovisioning kms rotation-policy provisioning-master # Update rotation interval\\nprovisioning kms update-rotation 90 # Rotate every 90 days","breadcrumbs":"SecretumVault KMS Guide » Key Rotation","id":"4288","title":"Key Rotation"},"4289":{"body":"","breadcrumbs":"SecretumVault KMS Guide » Storage Backends","id":"4289","title":"Storage Backends"},"429":{"body":"The AI system implements strict security controls: ✅ Cedar Policies : AI access controlled by Cedar authorization ✅ Secret Isolation : AI cannot access secrets directly ✅ Human Approval : Critical operations require human approval ✅ Audit Trail : All AI operations logged ✅ Data Sanitization : Secrets/PII sanitized before sending to LLM ✅ Local Models : Support for air-gapped deployments See Security Policies for complete details.","breadcrumbs":"Overview » » Security and Privacy","id":"429","title":"Security and Privacy"},"4290":{"body":"Local file-based storage with no external dependencies. Pros : Zero external dependencies Fast (local disk access) Easy to inspect/backup Cons : Single-node only No HA Manual backup required Configuration : [secretumvault.storage.filesystem]\\ndata_dir = \\"~/.config/provisioning/secretumvault/data\\"\\npermissions = \\"0700\\"","breadcrumbs":"SecretumVault KMS Guide » Filesystem (Development)","id":"4290","title":"Filesystem (Development)"},"4291":{"body":"Embedded or standalone document database. Pros : Embedded or distributed Flexible schema Real-time syncing Cons : More complex than filesystem New technology (less tested than etcd) Configuration : [secretumvault.storage.surrealdb]\\nconnection_url = \\"ws://localhost:8000\\"\\nnamespace = \\"provisioning\\"\\ndatabase = \\"secrets\\"\\nusername = \\"${SECRETUMVAULT_SURREALDB_USER:-admin}\\"\\npassword = \\"${SECRETUMVAULT_SURREALDB_PASS:-password}\\"","breadcrumbs":"SecretumVault KMS Guide » SurrealDB (Staging)","id":"4291","title":"SurrealDB (Staging)"},"4292":{"body":"Distributed key-value store for high availability. Pros : Proven in production HA and disaster recovery Consistent consensus protocol Multi-site replication Cons : Operational complexity Requires 3+ nodes More infrastructure Configuration : [secretumvault.storage.etcd]\\nendpoints = [\\"http://etcd1:2379\\", \\"http://etcd2:2379\\", \\"http://etcd3:2379\\"]\\ntls_enabled = true\\ntls_cert_file = \\"/path/to/client.crt\\"\\ntls_key_file = \\"/path/to/client.key\\"","breadcrumbs":"SecretumVault KMS Guide » etcd (Production)","id":"4292","title":"etcd (Production)"},"4293":{"body":"Relational database backend. Pros : Mature and reliable Advanced querying Full ACID transactions Cons : Schema requirements External database dependency More operational overhead Configuration : [secretumvault.storage.postgresql]\\nconnection_url = \\"postgresql://user:pass@localhost:5432/secretumvault\\"\\nmax_connections = 10\\nssl_mode = \\"require\\"","breadcrumbs":"SecretumVault KMS Guide » PostgreSQL (Enterprise)","id":"4293","title":"PostgreSQL (Enterprise)"},"4294":{"body":"","breadcrumbs":"SecretumVault KMS Guide » Troubleshooting","id":"4294","title":"Troubleshooting"},"4295":{"body":"Error : \\"Failed to connect to SecretumVault service\\" Solutions : Verify SecretumVault is running: curl http://localhost:8200/v1/sys/health Check server URL configuration: provisioning config show secretumvault.server_url Verify network connectivity: nc -zv localhost 8200","breadcrumbs":"SecretumVault KMS Guide » Connection Errors","id":"4295","title":"Connection Errors"},"4296":{"body":"Error : \\"Authentication failed: X-Vault-Token missing or invalid\\" Solutions : Set authentication token: export SECRETUMVAULT_TOKEN=your-token Verify token is still valid: provisioning secrets verify-token Get new token from SecretumVault: secretumvault auth login","breadcrumbs":"SecretumVault KMS Guide » Authentication Failures","id":"4296","title":"Authentication Failures"},"4297":{"body":"Filesystem Backend Error : \\"Permission denied: ~/.config/provisioning/secretumvault/data\\" Solution : Check directory permissions: ls -la ~/.config/provisioning/secretumvault/\\n# Should be: drwx------ (0700)\\nchmod 700 ~/.config/provisioning/secretumvault/data SurrealDB Backend Error : \\"Failed to connect to SurrealDB at ws://localhost:8000\\" Solution : Start SurrealDB first: surreal start --bind 0.0.0.0:8000 file://secretum.db etcd Backend Error : \\"etcd cluster unhealthy\\" Solution : Check etcd cluster status: etcdctl member list\\netcdctl endpoint health # Verify all nodes are reachable\\ncurl http://etcd1:2379/health\\ncurl http://etcd2:2379/health\\ncurl http://etcd3:2379/health","breadcrumbs":"SecretumVault KMS Guide » Storage Backend Errors","id":"4297","title":"Storage Backend Errors"},"4298":{"body":"Slow encryption/decryption : Check network latency (for service mode): ping -c 3 secretumvault-server Monitor SecretumVault performance: provisioning kms metrics Check storage backend performance: Filesystem: Check disk I/O SurrealDB: Monitor database load etcd: Check cluster consensus state High memory usage : Check cache settings: provisioning config show secretumvault.performance.cache_ttl Reduce cache TTL: provisioning config set secretumvault.performance.cache_ttl 60 Monitor active connections: provisioning kms status","breadcrumbs":"SecretumVault KMS Guide » Performance Issues","id":"4298","title":"Performance Issues"},"4299":{"body":"Enable debug logging : export RUST_LOG=debug\\nprovisioning kms encrypt config.yaml Check configuration : provisioning config show secretumvault\\nprovisioning config validate Test connectivity : provisioning kms health --verbose View audit logs : tail -f ~/.config/provisioning/logs/secretumvault-audit.log","breadcrumbs":"SecretumVault KMS Guide » Debugging","id":"4299","title":"Debugging"},"43":{"body":"This guide will help you install Infrastructure Automation on your machine and get it ready for use.","breadcrumbs":"Installation Guide » Installation Guide","id":"43","title":"Installation Guide"},"430":{"body":"| | Provider | Models | Best For | | | | ---------- | -------- | ---------- | | | | Anthropic | Claude Sonnet 4, Claude Opus 4 | Complex configs, long context | | | | OpenAI | GPT-4 Turbo, GPT-4 | Fast suggestions, tool calling | | | | Local | Llama 3, Mistral | Air-gapped, privacy-critical | |","breadcrumbs":"Overview » » Supported LLM Providers","id":"430","title":"Supported LLM Providers"},"4300":{"body":"","breadcrumbs":"SecretumVault KMS Guide » Security Best Practices","id":"4300","title":"Security Best Practices"},"4301":{"body":"Never commit tokens to version control Use environment variables or .env files (gitignored) Rotate tokens regularly Use different tokens per environment","breadcrumbs":"SecretumVault KMS Guide » Token Management","id":"4301","title":"Token Management"},"4302":{"body":"Enable TLS verification in production: export SECRETUMVAULT_TLS_VERIFY=true Use proper certificates (not self-signed in production) Pin certificates to prevent MITM attacks","breadcrumbs":"SecretumVault KMS Guide » TLS/SSL","id":"4302","title":"TLS/SSL"},"4303":{"body":"Restrict who can access SecretumVault admin UI Use strong authentication (MFA preferred) Audit all secrets access Implement least-privilege principle","breadcrumbs":"SecretumVault KMS Guide » Access Control","id":"4303","title":"Access Control"},"4304":{"body":"Rotate keys regularly (every 90 days recommended) Keep old versions for decryption Test rotation procedures in staging first Monitor rotation status","breadcrumbs":"SecretumVault KMS Guide » Key Rotation","id":"4304","title":"Key Rotation"},"4305":{"body":"Backup SecretumVault data regularly Test restore procedures Store backups securely Keep backup keys separate from encrypted data","breadcrumbs":"SecretumVault KMS Guide » Backup and Recovery","id":"4305","title":"Backup and Recovery"},"4306":{"body":"","breadcrumbs":"SecretumVault KMS Guide » Migration Guide","id":"4306","title":"Migration Guide"},"4307":{"body":"# Export all secrets encrypted with Age\\nprovisioning secrets export --backend age --output secrets.json # Import into SecretumVault\\nprovisioning secrets import --backend secretumvault secrets.json # Re-encrypt all configurations\\nfind workspace/infra -name \\"*.enc\\" -exec provisioning kms reencrypt {} \\\\;","breadcrumbs":"SecretumVault KMS Guide » From Age to SecretumVault","id":"4307","title":"From Age to SecretumVault"},"4308":{"body":"# Both use Vault-compatible APIs, so migration is simpler:\\n# 1. Ensure SecretumVault keys are available\\n# 2. Update KMS_PROD_BACKEND=secretumvault\\n# 3. Test with staging first\\n# 4. Monitor during transition","breadcrumbs":"SecretumVault KMS Guide » From RustyVault to SecretumVault","id":"4308","title":"From RustyVault to SecretumVault"},"4309":{"body":"# For production migration:\\n# 1. Set up SecretumVault with etcd backend\\n# 2. Verify high availability is working\\n# 3. Run parallel encryption with both systems\\n# 4. Validate all decryptions work\\n# 5. Update KMS_PROD_BACKEND=secretumvault\\n# 6. Monitor closely for 24 hours\\n# 7. Keep Cosmian as fallback for 7 days","breadcrumbs":"SecretumVault KMS Guide » From Cosmian to SecretumVault","id":"4309","title":"From Cosmian to SecretumVault"},"431":{"body":"AI features incur LLM API costs. The system implements cost controls: Caching : Reduces API calls by 50-80% Rate Limiting : Prevents runaway costs Budget Limits : Daily/monthly cost caps Local Models : Zero marginal cost for air-gapped deployments See Cost Management for optimization strategies.","breadcrumbs":"Overview » » Cost Considerations","id":"431","title":"Cost Considerations"},"4310":{"body":"","breadcrumbs":"SecretumVault KMS Guide » Performance Tuning","id":"4310","title":"Performance Tuning"},"4311":{"body":"[secretumvault.performance]\\nmax_connections = 5\\nconnection_timeout = 5\\nrequest_timeout = 30\\ncache_ttl = 60","breadcrumbs":"SecretumVault KMS Guide » Development (Filesystem)","id":"4311","title":"Development (Filesystem)"},"4312":{"body":"[secretumvault.performance]\\nmax_connections = 20\\nconnection_timeout = 5\\nrequest_timeout = 30\\ncache_ttl = 300","breadcrumbs":"SecretumVault KMS Guide » Staging (SurrealDB)","id":"4312","title":"Staging (SurrealDB)"},"4313":{"body":"[secretumvault.performance]\\nmax_connections = 50\\nconnection_timeout = 10\\nrequest_timeout = 30\\ncache_ttl = 600","breadcrumbs":"SecretumVault KMS Guide » Production (etcd)","id":"4313","title":"Production (etcd)"},"4314":{"body":"","breadcrumbs":"SecretumVault KMS Guide » Compliance and Audit","id":"4314","title":"Compliance and Audit"},"4315":{"body":"All operations are logged: # View recent audit events\\nprovisioning kms audit --limit 100 # Export audit logs\\nprovisioning kms audit export --output audit.json # Audit specific operations\\nprovisioning kms audit --action encrypt --from 24h","breadcrumbs":"SecretumVault KMS Guide » Audit Logging","id":"4315","title":"Audit Logging"},"4316":{"body":"# Generate compliance report\\nprovisioning compliance report --backend secretumvault # GDPR data export\\nprovisioning compliance gdpr-export user@example.com # SOC2 audit trail\\nprovisioning compliance soc2-export --output soc2-audit.json","breadcrumbs":"SecretumVault KMS Guide » Compliance Reports","id":"4316","title":"Compliance Reports"},"4317":{"body":"","breadcrumbs":"SecretumVault KMS Guide » Advanced Topics","id":"4317","title":"Advanced Topics"},"4318":{"body":"Enable fine-grained access control: # Enable Cedar integration\\nprovisioning config set secretumvault.authorization.cedar_enabled true # Define access policies\\nprovisioning policy define-kms-access user@example.com admin\\nprovisioning policy define-kms-access deployer@example.com deploy-only","breadcrumbs":"SecretumVault KMS Guide » Cedar Authorization Policies","id":"4318","title":"Cedar Authorization Policies"},"4319":{"body":"Configure master key settings: # Set KEK rotation interval\\nprovisioning config set secretumvault.rotation.rotation_interval_days 90 # Enable automatic rotation\\nprovisioning config set secretumvault.rotation.auto_rotate true # Retain old versions for decryption\\nprovisioning config set secretumvault.rotation.retain_old_versions true","breadcrumbs":"SecretumVault KMS Guide » Key Encryption Keys (KEK)","id":"4319","title":"Key Encryption Keys (KEK)"},"432":{"body":"The AI integration is documented in: ADR-015: AI Integration Architecture","breadcrumbs":"Overview » » Architecture Decision Record","id":"432","title":"Architecture Decision Record"},"4320":{"body":"For production deployments across regions: # Region 1\\nexport SECRETUMVAULT_URL=https://kms-us-east.example.com\\nexport SECRETUMVAULT_STORAGE=etcd # Region 2 (for failover)\\nexport SECRETUMVAULT_URL_FALLBACK=https://kms-us-west.example.com","breadcrumbs":"SecretumVault KMS Guide » Multi-Region Setup","id":"4320","title":"Multi-Region Setup"},"4321":{"body":"Documentation : docs/user/SECRETUMVAULT_KMS_GUIDE.md (this file) Configuration Template : provisioning/config/secretumvault.toml KMS Configuration : provisioning/config/kms.toml Issues : Report issues with provisioning kms debug Logs : Check ~/.config/provisioning/logs/secretumvault-*.log","breadcrumbs":"SecretumVault KMS Guide » Support and Resources","id":"4321","title":"Support and Resources"},"4322":{"body":"Age KMS Guide - Simple local encryption Cosmian KMS Guide - Enterprise confidential computing RustyVault Guide - Self-hosted Vault KMS Overview - KMS backend comparison","breadcrumbs":"SecretumVault KMS Guide » See Also","id":"4322","title":"See Also"},"4323":{"body":"","breadcrumbs":"SSH Temporal Keys User Guide » SSH Temporal Keys - User Guide","id":"4323","title":"SSH Temporal Keys - User Guide"},"4324":{"body":"","breadcrumbs":"SSH Temporal Keys User Guide » Quick Start","id":"4324","title":"Quick Start"},"4325":{"body":"The fastest way to use temporal SSH keys: # Auto-generate, deploy, and connect (key auto-revoked after disconnect)\\nssh connect server.example.com # Connect with custom user and TTL\\nssh connect server.example.com --user deploy --ttl 30 min # Keep key active after disconnect\\nssh connect server.example.com --keep","breadcrumbs":"SSH Temporal Keys User Guide » Generate and Connect with Temporary Key","id":"4325","title":"Generate and Connect with Temporary Key"},"4326":{"body":"For more control over the key lifecycle: # 1. Generate key\\nssh generate-key server.example.com --user root --ttl 1hr # Output:\\n# ✓ SSH key generated successfully\\n# Key ID: abc-123-def-456\\n# Type: dynamickeypair\\n# User: root\\n# Server: server.example.com\\n# Expires: 2024-01-01T13:00:00Z\\n# Fingerprint: SHA256:...\\n#\\n# Private Key (save securely):\\n# -----BEGIN OPENSSH PRIVATE KEY-----\\n# ...\\n# -----END OPENSSH PRIVATE KEY----- # 2. Deploy key to server\\nssh deploy-key abc-123-def-456 # 3. Use the private key to connect\\nssh -i /path/to/private/key root@server.example.com # 4. Revoke when done\\nssh revoke-key abc-123-def-456","breadcrumbs":"SSH Temporal Keys User Guide » Manual Key Management","id":"4326","title":"Manual Key Management"},"4327":{"body":"","breadcrumbs":"SSH Temporal Keys User Guide » Key Features","id":"4327","title":"Key Features"},"4328":{"body":"All keys expire automatically after their TTL: Default TTL : 1 hour Configurable : From 5 minutes to 24 hours Background Cleanup : Automatic removal from servers every 5 minutes","breadcrumbs":"SSH Temporal Keys User Guide » Automatic Expiration","id":"4328","title":"Automatic Expiration"},"4329":{"body":"Choose the right key type for your use case: Type Description Use Case dynamic (default) Generated Ed25519 keys Quick SSH access ca Vault CA-signed certificate Enterprise with SSH CA otp Vault one-time password Single-use access","breadcrumbs":"SSH Temporal Keys User Guide » Multiple Key Types","id":"4329","title":"Multiple Key Types"},"433":{"body":"Read Architecture to understand AI system design Configure AI features in Configuration Try Natural Language Config for your first AI-generated config Explore AI Agents for automation workflows Review Security Policies to understand access controls Version : 1.0 Last Updated : 2025-01-08 Status : Active","breadcrumbs":"Overview » » Next Steps","id":"433","title":"Next Steps"},"4330":{"body":"✅ No static SSH keys to manage ✅ Short-lived credentials (1 hour default) ✅ Automatic cleanup on expiration ✅ Audit trail for all operations ✅ Private keys never stored on disk","breadcrumbs":"SSH Temporal Keys User Guide » Security Benefits","id":"4330","title":"Security Benefits"},"4331":{"body":"","breadcrumbs":"SSH Temporal Keys User Guide » Common Usage Patterns","id":"4331","title":"Common Usage Patterns"},"4332":{"body":"# Quick SSH for debugging\\nssh connect dev-server.local --ttl 30 min # Execute commands\\nssh root@dev-server.local \\"systemctl status nginx\\" # Connection closes, key auto-revokes","breadcrumbs":"SSH Temporal Keys User Guide » Development Workflow","id":"4332","title":"Development Workflow"},"4333":{"body":"# Generate key with longer TTL for deployment\\nssh generate-key prod-server.example.com --ttl 2hr # Deploy to server\\nssh deploy-key # Run deployment script\\nssh -i /tmp/deploy-key root@prod-server.example.com < deploy.sh # Manual revoke when done\\nssh revoke-key ","breadcrumbs":"SSH Temporal Keys User Guide » Production Deployment","id":"4333","title":"Production Deployment"},"4334":{"body":"# Generate one key\\nssh generate-key server01.example.com --ttl 1hr # Use the same private key for multiple servers (if you have provisioning access)\\n# Note: Currently each key is server-specific, multi-server support coming soon","breadcrumbs":"SSH Temporal Keys User Guide » Multi-Server Access","id":"4334","title":"Multi-Server Access"},"4335":{"body":"","breadcrumbs":"SSH Temporal Keys User Guide » Command Reference","id":"4335","title":"Command Reference"},"4336":{"body":"Generate a new temporal SSH key. Syntax : ssh generate-key [options] Options : --user : SSH user (default: root) --ttl : Key lifetime (default: 1hr) --type : Key type (default: dynamic) --ip
: Allowed IP (OTP mode only) --principal : Principal (CA mode only) Examples : # Basic usage\\nssh generate-key server.example.com # Custom user and TTL\\nssh generate-key server.example.com --user deploy --ttl 30 min # Vault CA mode\\nssh generate-key server.example.com --type ca --principal admin","breadcrumbs":"SSH Temporal Keys User Guide » ssh generate-key","id":"4336","title":"ssh generate-key"},"4337":{"body":"Deploy a generated key to the target server. Syntax : ssh deploy-key Example : ssh deploy-key abc-123-def-456","breadcrumbs":"SSH Temporal Keys User Guide » ssh deploy-key","id":"4337","title":"ssh deploy-key"},"4338":{"body":"List all active SSH keys. Syntax : ssh list-keys [--expired] Examples : # List active keys\\nssh list-keys # Show only deployed keys\\nssh list-keys | where deployed == true # Include expired keys\\nssh list-keys --expired","breadcrumbs":"SSH Temporal Keys User Guide » ssh list-keys","id":"4338","title":"ssh list-keys"},"4339":{"body":"Get detailed information about a specific key. Syntax : ssh get-key Example : ssh get-key abc-123-def-456","breadcrumbs":"SSH Temporal Keys User Guide » ssh get-key","id":"4339","title":"ssh get-key"},"434":{"body":"","breadcrumbs":"Architecture » AI Integration Architecture","id":"434","title":"AI Integration Architecture"},"4340":{"body":"Immediately revoke a key (removes from server and tracking). Syntax : ssh revoke-key Example : ssh revoke-key abc-123-def-456","breadcrumbs":"SSH Temporal Keys User Guide » ssh revoke-key","id":"4340","title":"ssh revoke-key"},"4341":{"body":"Auto-generate, deploy, connect, and revoke (all-in-one). Syntax : ssh connect [options] Options : --user : SSH user (default: root) --ttl : Key lifetime (default: 1hr) --type : Key type (default: dynamic) --keep: Don\'t revoke after disconnect Examples : # Quick connection\\nssh connect server.example.com # Custom user\\nssh connect server.example.com --user deploy # Keep key active after disconnect\\nssh connect server.example.com --keep","breadcrumbs":"SSH Temporal Keys User Guide » ssh connect","id":"4341","title":"ssh connect"},"4342":{"body":"Show SSH key statistics. Syntax : ssh stats Example Output : SSH Key Statistics: Total generated: 42 Active keys: 10 Expired keys: 32 Keys by type: dynamic: 35 otp: 5 certificate: 2 Last cleanup: 2024-01-01T12:00:00Z Cleaned keys: 5","breadcrumbs":"SSH Temporal Keys User Guide » ssh stats","id":"4342","title":"ssh stats"},"4343":{"body":"Manually trigger cleanup of expired keys. Syntax : ssh cleanup","breadcrumbs":"SSH Temporal Keys User Guide » ssh cleanup","id":"4343","title":"ssh cleanup"},"4344":{"body":"Run a quick test of the SSH key system. Syntax : ssh test [--user ] Example : ssh test server.example.com --user root","breadcrumbs":"SSH Temporal Keys User Guide » ssh test","id":"4344","title":"ssh test"},"4345":{"body":"Show help information. Syntax : ssh help","breadcrumbs":"SSH Temporal Keys User Guide » ssh help","id":"4345","title":"ssh help"},"4346":{"body":"The --ttl option accepts various duration formats: Format Example Meaning Minutes 30 min 30 minutes Hours 2hr 2 hours Mixed 1hr 30 min 1.5 hours Seconds 3600sec 1 hour","breadcrumbs":"SSH Temporal Keys User Guide » Duration Formats","id":"4346","title":"Duration Formats"},"4347":{"body":"","breadcrumbs":"SSH Temporal Keys User Guide » Working with Private Keys","id":"4347","title":"Working with Private Keys"},"4348":{"body":"When you generate a key, save the private key immediately: # Generate and save to file\\nssh generate-key server.example.com | get private_key | save -f ~/.ssh/temp_key\\nchmod 600 ~/.ssh/temp_key # Use the key\\nssh -i ~/.ssh/temp_key root@server.example.com # Cleanup\\nrm ~/.ssh/temp_key","breadcrumbs":"SSH Temporal Keys User Guide » Saving Private Keys","id":"4348","title":"Saving Private Keys"},"4349":{"body":"Add the temporary key to your SSH agent: # Generate key and extract private key\\nssh generate-key server.example.com | get private_key | save -f /tmp/temp_key\\nchmod 600 /tmp/temp_key # Add to agent\\nssh-add /tmp/temp_key # Connect (agent provides the key automatically)\\nssh root@server.example.com # Remove from agent\\nssh-add -d /tmp/temp_key\\nrm /tmp/temp_key","breadcrumbs":"SSH Temporal Keys User Guide » Using SSH Agent","id":"4349","title":"Using SSH Agent"},"435":{"body":"The provisioning platform\'s AI system provides intelligent capabilities for configuration generation, troubleshooting, and automation. The architecture consists of multiple layers designed for reliability, security, and performance.","breadcrumbs":"Architecture » Overview","id":"435","title":"Overview"},"4350":{"body":"","breadcrumbs":"SSH Temporal Keys User Guide » Troubleshooting","id":"4350","title":"Troubleshooting"},"4351":{"body":"Problem : ssh deploy-key returns error Solutions : Check SSH connectivity to server: ssh root@server.example.com Verify provisioning key is configured: echo $PROVISIONING_SSH_KEY Check server SSH daemon: ssh root@server.example.com \\"systemctl status sshd\\"","breadcrumbs":"SSH Temporal Keys User Guide » Key Deployment Fails","id":"4351","title":"Key Deployment Fails"},"4352":{"body":"Problem : SSH connection fails with \\"Permission denied (publickey)\\" Solutions : Verify key was deployed: ssh list-keys | where id == \\"\\" Check key hasn\'t expired: ssh get-key | get expires_at Verify private key permissions: chmod 600 /path/to/private/key","breadcrumbs":"SSH Temporal Keys User Guide » Private Key Not Working","id":"4352","title":"Private Key Not Working"},"4353":{"body":"Problem : Expired keys not being removed Solutions : Check orchestrator is running: curl http://localhost:9090/health Trigger manual cleanup: ssh cleanup Check orchestrator logs: tail -f ./data/orchestrator.log | grep SSH","breadcrumbs":"SSH Temporal Keys User Guide » Cleanup Not Running","id":"4353","title":"Cleanup Not Running"},"4354":{"body":"","breadcrumbs":"SSH Temporal Keys User Guide » Best Practices","id":"4354","title":"Best Practices"},"4355":{"body":"Short TTLs : Use the shortest TTL that works for your task ssh connect server.example.com --ttl 30 min Immediate Revocation : Revoke keys when you\'re done ssh revoke-key Private Key Handling : Never share or commit private keys # Save to temp location, delete after use\\nssh generate-key server.example.com | get private_key | save -f /tmp/key\\n# ... use key ...\\nrm /tmp/key","breadcrumbs":"SSH Temporal Keys User Guide » Security","id":"4355","title":"Security"},"4356":{"body":"Automated Deployments : Generate key in CI/CD #!/bin/bash\\nKEY_ID=$(ssh generate-key prod.example.com --ttl 1hr | get id)\\nssh deploy-key $KEY_ID\\n# Run deployment\\nansible-playbook deploy.yml\\nssh revoke-key $KEY_ID Interactive Use : Use ssh connect for quick access ssh connect dev.example.com Monitoring : Check statistics regularly ssh stats","breadcrumbs":"SSH Temporal Keys User Guide » Workflow Integration","id":"4356","title":"Workflow Integration"},"4357":{"body":"","breadcrumbs":"SSH Temporal Keys User Guide » Advanced Usage","id":"4357","title":"Advanced Usage"},"4358":{"body":"If your organization uses HashiCorp Vault: CA Mode (Recommended) # Generate CA-signed certificate\\nssh generate-key server.example.com --type ca --principal admin --ttl 1hr # Vault signs your public key\\n# Server must trust Vault CA certificate Setup (one-time): # On servers, add to /etc/ssh/sshd_config:\\nTrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem # Get Vault CA public key:\\nvault read -field=public_key ssh/config/ca | \\\\ sudo tee /etc/ssh/trusted-user-ca-keys.pem # Restart SSH:\\nsudo systemctl restart sshd OTP Mode # Generate one-time password\\nssh generate-key server.example.com --type otp --ip 192.168.1.100 # Use the OTP to connect (single use only)","breadcrumbs":"SSH Temporal Keys User Guide » Vault Integration","id":"4358","title":"Vault Integration"},"4359":{"body":"Use in scripts for automated operations: # deploy.nu\\ndef deploy [target: string] { let key = (ssh generate-key $target --ttl 1hr) ssh deploy-key $key.id # Run deployment try { ssh $\\"root@($target)\\" \\"bash /path/to/deploy.sh\\" } catch { print \\"Deployment failed\\" } # Always cleanup ssh revoke-key $key.id\\n}","breadcrumbs":"SSH Temporal Keys User Guide » Scripting","id":"4359","title":"Scripting"},"436":{"body":"","breadcrumbs":"Architecture » Core Components - Production-Ready","id":"436","title":"Core Components - Production-Ready"},"4360":{"body":"For programmatic access, use the REST API: # Generate key\\ncurl -X POST http://localhost:9090/api/v1/ssh/generate \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"key_type\\": \\"dynamickeypair\\", \\"user\\": \\"root\\", \\"target_server\\": \\"server.example.com\\", \\"ttl_seconds\\": 3600 }\' # Deploy key\\ncurl -X POST http://localhost:9090/api/v1/ssh/{key_id}/deploy # List keys\\ncurl http://localhost:9090/api/v1/ssh/keys # Get stats\\ncurl http://localhost:9090/api/v1/ssh/stats","breadcrumbs":"SSH Temporal Keys User Guide » API Integration","id":"4360","title":"API Integration"},"4361":{"body":"Q: Can I use the same key for multiple servers? A: Currently, each key is tied to a specific server. Multi-server support is planned. Q: What happens if the orchestrator crashes? A: Keys in memory are lost, but keys already deployed to servers remain until their expiration time. Q: Can I extend the TTL of an existing key? A: No, you must generate a new key. This is by design for security. Q: What\'s the maximum TTL? A: Configurable by admin, default maximum is 24 hours. Q: Are private keys stored anywhere? A: Private keys exist only in memory during generation and are shown once to the user. They are never written to disk by the system. Q: What happens if cleanup fails? A: The key remains in authorized_keys until the next cleanup run. You can trigger manual cleanup with ssh cleanup. Q: Can I use this with non-root users? A: Yes, use --user when generating the key. Q: How do I know when my key will expire? A: Use ssh get-key to see the exact expiration timestamp.","breadcrumbs":"SSH Temporal Keys User Guide » FAQ","id":"4361","title":"FAQ"},"4362":{"body":"For issues or questions: Check orchestrator logs: tail -f ./data/orchestrator.log Run diagnostics: ssh stats Test connectivity: ssh test server.example.com Review documentation: SSH_KEY_MANAGEMENT.md","breadcrumbs":"SSH Temporal Keys User Guide » Support","id":"4362","title":"Support"},"4363":{"body":"Architecture : SSH_KEY_MANAGEMENT.md Implementation : SSH_IMPLEMENTATION_SUMMARY.md Configuration : config/ssh-config.toml.example","breadcrumbs":"SSH Temporal Keys User Guide » See Also","id":"4363","title":"See Also"},"4364":{"body":"Version : 1.0.0 Last Updated : 2025-10-09 Target Audience : Developers, DevOps Engineers, System Administrators","breadcrumbs":"Plugin Integration Guide » Nushell Plugin Integration Guide","id":"4364","title":"Nushell Plugin Integration Guide"},"4365":{"body":"Overview Why Native Plugins? Prerequisites Installation Quick Start (5 Minutes) Authentication Plugin (nu_plugin_auth) KMS Plugin (nu_plugin_kms) Orchestrator Plugin (nu_plugin_orchestrator) Integration Examples Best Practices Troubleshooting Migration Guide Advanced Configuration Security Considerations FAQ","breadcrumbs":"Plugin Integration Guide » Table of Contents","id":"4365","title":"Table of Contents"},"4366":{"body":"The Provisioning Platform provides three native Nushell plugins that dramatically improve performance and user experience compared to traditional HTTP API calls: Plugin Purpose Performance Gain nu_plugin_auth JWT authentication, MFA, session management 20% faster nu_plugin_kms Encryption/decryption with multiple KMS backends 10x faster nu_plugin_orchestrator Orchestrator operations without HTTP overhead 50x faster","breadcrumbs":"Plugin Integration Guide » Overview","id":"4366","title":"Overview"},"4367":{"body":"Traditional HTTP Flow:\\nUser Command → HTTP Request → Network → Server Processing → Response → Parse JSON Total: ~50-100 ms per operation Plugin Flow:\\nUser Command → Direct Rust Function Call → Return Nushell Data Structure Total: ~1-10 ms per operation","breadcrumbs":"Plugin Integration Guide » Architecture Benefits","id":"4367","title":"Architecture Benefits"},"4368":{"body":"✅ Performance : 10-50x faster than HTTP API ✅ Type Safety : Full Nushell type system integration ✅ Pipeline Support : Native Nushell data structures ✅ Offline Capability : KMS and orchestrator work without network ✅ OS Integration : Native keyring for secure token storage ✅ Graceful Fallback : HTTP still available if plugins not installed","breadcrumbs":"Plugin Integration Guide » Key Features","id":"4368","title":"Key Features"},"4369":{"body":"","breadcrumbs":"Plugin Integration Guide » Why Native Plugins","id":"4369","title":"Why Native Plugins"},"437":{"body":"Status : ✅ Production-Ready (2,500+ lines Rust code) The core AI service provides: Multi-provider LLM support (Anthropic Claude, OpenAI GPT-4, local models) Streaming response support for real-time feedback Request caching with LRU and semantic similarity Rate limiting and cost control Comprehensive error handling HTTP REST API on port 8083 Supported Models : Claude Sonnet 4, Claude Opus 4 (Anthropic) GPT-4 Turbo, GPT-4 (OpenAI) Llama 3, Mistral (local/on-premise)","breadcrumbs":"Architecture » 1. AI Service (provisioning/platform/ai-service)","id":"437","title":"1. AI Service (provisioning/platform/ai-service)"},"4370":{"body":"Real-world benchmarks from production workload: Operation HTTP API Plugin Improvement Speedup KMS Encrypt (RustyVault) ~50 ms ~5 ms -45 ms 10x KMS Decrypt (RustyVault) ~50 ms ~5 ms -45 ms 10x KMS Encrypt (Age) ~30 ms ~3 ms -27 ms 10x KMS Decrypt (Age) ~30 ms ~3 ms -27 ms 10x Orchestrator Status ~30 ms ~1 ms -29 ms 30x Orchestrator Tasks List ~50 ms ~5 ms -45 ms 10x Orchestrator Validate ~100 ms ~10 ms -90 ms 10x Auth Login ~100 ms ~80 ms -20 ms 1.25x Auth Verify ~50 ms ~10 ms -40 ms 5x Auth MFA Verify ~80 ms ~60 ms -20 ms 1.3x","breadcrumbs":"Plugin Integration Guide » Performance Comparison","id":"4370","title":"Performance Comparison"},"4371":{"body":"Scenario : Encrypt 100 configuration files # HTTP API approach\\nls configs/*.yaml | each { |file| http post http://localhost:9998/encrypt { data: (open $file) }\\n} | save encrypted/\\n# Total time: ~5 seconds (50 ms × 100) # Plugin approach\\nls configs/*.yaml | each { |file| kms encrypt (open $file) --backend rustyvault\\n} | save encrypted/\\n# Total time: ~0.5 seconds (5 ms × 100)\\n# Result: 10x faster","breadcrumbs":"Plugin Integration Guide » Use Case: Batch Processing","id":"4371","title":"Use Case: Batch Processing"},"4372":{"body":"1. Native Nushell Integration # HTTP: Parse JSON, check status codes\\nlet result = http post http://localhost:9998/encrypt { data: \\"secret\\" }\\nif $result.status == \\"success\\" { $result.encrypted\\n} else { error make { msg: $result.error }\\n} # Plugin: Direct return values\\nkms encrypt \\"secret\\"\\n# Returns encrypted string directly, errors use Nushell\'s error system 2. Pipeline Friendly # HTTP: Requires wrapping, JSON parsing\\n[\\"secret1\\", \\"secret2\\"] | each { |s| (http post http://localhost:9998/encrypt { data: $s }).encrypted\\n} # Plugin: Natural pipeline flow\\n[\\"secret1\\", \\"secret2\\"] | each { |s| kms encrypt $s } 3. Tab Completion # All plugin commands have full tab completion\\nkms \\n# → encrypt, decrypt, generate-key, status, backends kms encrypt --\\n# → --backend, --key, --context","breadcrumbs":"Plugin Integration Guide » Developer Experience Benefits","id":"4372","title":"Developer Experience Benefits"},"4373":{"body":"","breadcrumbs":"Plugin Integration Guide » Prerequisites","id":"4373","title":"Prerequisites"},"4374":{"body":"Software Minimum Version Purpose Nushell 0.107.1 Shell and plugin runtime Rust 1.75+ Building plugins from source Cargo (included with Rust) Build tool","breadcrumbs":"Plugin Integration Guide » Required Software","id":"4374","title":"Required Software"},"4375":{"body":"Software Purpose Platform gnome-keyring Secure token storage Linux kwallet Secure token storage Linux (KDE) age Age encryption backend All RustyVault High-performance KMS All","breadcrumbs":"Plugin Integration Guide » Optional Dependencies","id":"4375","title":"Optional Dependencies"},"4376":{"body":"Platform Status Notes macOS ✅ Full Keychain integration Linux ✅ Full Requires keyring service Windows ✅ Full Credential Manager integration FreeBSD ⚠️ Partial No keyring integration","breadcrumbs":"Plugin Integration Guide » Platform Support","id":"4376","title":"Platform Support"},"4377":{"body":"","breadcrumbs":"Plugin Integration Guide » Installation","id":"4377","title":"Installation"},"4378":{"body":"cd /Users/Akasha/project-provisioning/provisioning/core/plugins/nushell-plugins","breadcrumbs":"Plugin Integration Guide » Step 1: Clone or Navigate to Plugin Directory","id":"4378","title":"Step 1: Clone or Navigate to Plugin Directory"},"4379":{"body":"# Build in release mode (optimized for performance)\\ncargo build --release --all # Or build individually\\ncargo build --release -p nu_plugin_auth\\ncargo build --release -p nu_plugin_kms\\ncargo build --release -p nu_plugin_orchestrator Expected output: Compiling nu_plugin_auth v0.1.0 Compiling nu_plugin_kms v0.1.0 Compiling nu_plugin_orchestrator v0.1.0 Finished release [optimized] target(s) in 2m 15s","breadcrumbs":"Plugin Integration Guide » Step 2: Build All Plugins","id":"4379","title":"Step 2: Build All Plugins"},"438":{"body":"Status : ✅ Production-Ready (22/22 tests passing) The RAG system enables AI to access and reason over platform documentation: Vector embeddings via SurrealDB vector store Hybrid search: vector similarity + BM25 keyword search Document chunking (code and markdown aware) Relevance ranking and context selection Semantic caching for repeated queries Capabilities : provisioning ai query \\"How do I set up Kubernetes?\\"\\nprovisioning ai template \\"Describe my infrastructure\\"","breadcrumbs":"Architecture » 2. RAG System (Retrieval-Augmented Generation)","id":"438","title":"2. RAG System (Retrieval-Augmented Generation)"},"4380":{"body":"# Register all three plugins\\nplugin add target/release/nu_plugin_auth\\nplugin add target/release/nu_plugin_kms\\nplugin add target/release/nu_plugin_orchestrator # On macOS, full paths:\\nplugin add $PWD/target/release/nu_plugin_auth\\nplugin add $PWD/target/release/nu_plugin_kms\\nplugin add $PWD/target/release/nu_plugin_orchestrator","breadcrumbs":"Plugin Integration Guide » Step 3: Register Plugins with Nushell","id":"4380","title":"Step 3: Register Plugins with Nushell"},"4381":{"body":"# List registered plugins\\nplugin list | where name =~ \\"auth|kms|orch\\" # Test each plugin\\nauth --help\\nkms --help\\norch --help Expected output: ╭───┬─────────────────────────┬─────────┬───────────────────────────────────╮\\n│ # │ name │ version │ filename │\\n├───┼─────────────────────────┼─────────┼───────────────────────────────────┤\\n│ 0 │ nu_plugin_auth │ 0.1.0 │ .../nu_plugin_auth │\\n│ 1 │ nu_plugin_kms │ 0.1.0 │ .../nu_plugin_kms │\\n│ 2 │ nu_plugin_orchestrator │ 0.1.0 │ .../nu_plugin_orchestrator │\\n╰───┴─────────────────────────┴─────────┴───────────────────────────────────╯","breadcrumbs":"Plugin Integration Guide » Step 4: Verify Installation","id":"4381","title":"Step 4: Verify Installation"},"4382":{"body":"# Add to ~/.config/nushell/env.nu\\n$env.RUSTYVAULT_ADDR = \\"http://localhost:8200\\"\\n$env.RUSTYVAULT_TOKEN = \\"your-vault-token\\"\\n$env.CONTROL_CENTER_URL = \\"http://localhost:3000\\"\\n$env.ORCHESTRATOR_DATA_DIR = \\"/opt/orchestrator/data\\"","breadcrumbs":"Plugin Integration Guide » Step 5: Configure Environment (Optional)","id":"4382","title":"Step 5: Configure Environment (Optional)"},"4383":{"body":"","breadcrumbs":"Plugin Integration Guide » Quick Start (5 Minutes)","id":"4383","title":"Quick Start (5 Minutes)"},"4384":{"body":"# Login (password prompted securely)\\nauth login admin\\n# ✓ Login successful\\n# User: admin\\n# Role: Admin\\n# Expires: 2025-10-09T14:30:00Z # Verify session\\nauth verify\\n# {\\n# \\"active\\": true,\\n# \\"user\\": \\"admin\\",\\n# \\"role\\": \\"Admin\\",\\n# \\"expires_at\\": \\"2025-10-09T14:30:00Z\\"\\n# } # Enroll in MFA (optional but recommended)\\nauth mfa enroll totp\\n# QR code displayed, save backup codes # Verify MFA\\nauth mfa verify --code 123456\\n# ✓ MFA verification successful # Logout\\nauth logout\\n# ✓ Logged out successfully","breadcrumbs":"Plugin Integration Guide » 1. Authentication Workflow","id":"4384","title":"1. Authentication Workflow"},"4385":{"body":"# Encrypt data\\nkms encrypt \\"my secret data\\"\\n# vault:v1:8GawgGuP... # Decrypt data\\nkms decrypt \\"vault:v1:8GawgGuP...\\"\\n# my secret data # Check available backends\\nkms status\\n# {\\n# \\"backend\\": \\"rustyvault\\",\\n# \\"status\\": \\"healthy\\",\\n# \\"url\\": \\"http://localhost:8200\\"\\n# } # Encrypt with specific backend\\nkms encrypt \\"data\\" --backend age --key age1xxxxxxx","breadcrumbs":"Plugin Integration Guide » 2. KMS Operations","id":"4385","title":"2. KMS Operations"},"4386":{"body":"# Check orchestrator status (no HTTP call)\\norch status\\n# {\\n# \\"active_tasks\\": 5,\\n# \\"completed_tasks\\": 120,\\n# \\"health\\": \\"healthy\\"\\n# } # Validate workflow\\norch validate workflows/deploy.ncl\\n# {\\n# \\"valid\\": true,\\n# \\"workflow\\": { \\"name\\": \\"deploy_k8s\\", \\"operations\\": 5 }\\n# } # List running tasks\\norch tasks --status running\\n# [ { \\"task_id\\": \\"task_123\\", \\"name\\": \\"deploy_k8s\\", \\"progress\\": 45 } ]","breadcrumbs":"Plugin Integration Guide » 3. Orchestrator Operations","id":"4386","title":"3. Orchestrator Operations"},"4387":{"body":"# Complete authenticated deployment pipeline\\nauth login admin | if $in.success { auth verify } | if $in.active { orch validate workflows/production.ncl | if $in.valid { kms encrypt (open secrets.yaml | to json) | save production-secrets.enc } }\\n# ✓ Pipeline completed successfully","breadcrumbs":"Plugin Integration Guide » 4. Combined Workflow","id":"4387","title":"4. Combined Workflow"},"4388":{"body":"The authentication plugin manages JWT-based authentication, MFA enrollment/verification, and session management with OS-native keyring integration.","breadcrumbs":"Plugin Integration Guide » Authentication Plugin (nu_plugin_auth)","id":"4388","title":"Authentication Plugin (nu_plugin_auth)"},"4389":{"body":"Command Purpose Example auth login Login and store JWT auth login admin auth logout Logout and clear tokens auth logout auth verify Verify current session auth verify auth sessions List active sessions auth sessions auth mfa enroll Enroll in MFA auth mfa enroll totp auth mfa verify Verify MFA code auth mfa verify --code 123456","breadcrumbs":"Plugin Integration Guide » Available Commands","id":"4389","title":"Available Commands"},"439":{"body":"Status : ✅ Production-Ready Provides Model Context Protocol integration: Standardized tool interface for LLMs Complex workflow composition Integration with external AI systems (Claude, other LLMs) Tool calling for provisioning operations","breadcrumbs":"Architecture » 3. MCP Server (Model Context Protocol)","id":"439","title":"3. MCP Server (Model Context Protocol)"},"4390":{"body":"auth login [password] Login to provisioning platform and store JWT tokens securely in OS keyring. Arguments: username (required): Username for authentication password (optional): Password (prompted if not provided) Flags: --url : Control center URL (default: http://localhost:3000) --password : Password (alternative to positional argument) Examples: # Interactive password prompt (recommended)\\nauth login admin\\n# Password: ••••••••\\n# ✓ Login successful\\n# User: admin\\n# Role: Admin\\n# Expires: 2025-10-09T14:30:00Z # Password in command (not recommended for production)\\nauth login admin mypassword # Custom control center URL\\nauth login admin --url https://control-center.example.com # Pipeline usage\\nlet creds = { username: \\"admin\\", password: (input --suppress-output \\"Password: \\") }\\nauth login $creds.username $creds.password Token Storage Locations: macOS : Keychain Access (login keychain) Linux : Secret Service API (gnome-keyring, kwallet) Windows : Windows Credential Manager Security Notes: Tokens encrypted at rest by OS Requires user authentication to access (macOS Touch ID, Linux password) Never stored in plain text files auth logout Logout from current session and remove stored tokens from keyring. Examples: # Simple logout\\nauth logout\\n# ✓ Logged out successfully # Conditional logout\\nif (auth verify | get active) { auth logout echo \\"Session terminated\\"\\n} # Logout all sessions (requires admin role)\\nauth sessions | each { |sess| auth logout --session-id $sess.session_id\\n} auth verify Verify current session status and check token validity. Returns: active (bool): Whether session is active user (string): Username role (string): User role expires_at (datetime): Token expiration mfa_verified (bool): MFA verification status Examples: # Check if logged in\\nauth verify\\n# {\\n# \\"active\\": true,\\n# \\"user\\": \\"admin\\",\\n# \\"role\\": \\"Admin\\",\\n# \\"expires_at\\": \\"2025-10-09T14:30:00Z\\",\\n# \\"mfa_verified\\": true\\n# } # Pipeline usage\\nif (auth verify | get active) { echo \\"✓ Authenticated\\"\\n} else { auth login admin\\n} # Check expiration\\nlet session = auth verify\\nif ($session.expires_at | into datetime) < (date now) { echo \\"Session expired, re-authenticating...\\" auth login $session.user\\n} auth sessions List all active sessions for current user. Examples: # List all sessions\\nauth sessions\\n# [\\n# {\\n# \\"session_id\\": \\"sess_abc123\\",\\n# \\"created_at\\": \\"2025-10-09T12:00:00Z\\",\\n# \\"expires_at\\": \\"2025-10-09T14:30:00Z\\",\\n# \\"ip_address\\": \\"192.168.1.100\\",\\n# \\"user_agent\\": \\"nushell/0.107.1\\"\\n# }\\n# ] # Filter recent sessions (last hour)\\nauth sessions | where created_at > ((date now) - 1hr) # Find sessions by IP\\nauth sessions | where ip_address =~ \\"192.168\\" # Count active sessions\\nauth sessions | length auth mfa enroll Enroll in Multi-Factor Authentication (TOTP or WebAuthn). Arguments: type (required): MFA type (totp or webauthn) TOTP Enrollment: auth mfa enroll totp\\n# ✓ TOTP enrollment initiated\\n#\\n# Scan this QR code with your authenticator app:\\n#\\n# ████ ▄▄▄▄▄ █▀█ █▄▀▀▀▄ ▄▄▄▄▄ ████\\n# ████ █ █ █▀▀▀█▄ ▀▀█ █ █ ████\\n# ████ █▄▄▄█ █ █▀▄ ▀▄▄█ █▄▄▄█ ████\\n# (QR code continues...)\\n#\\n# Or enter manually:\\n# Secret: JBSWY3DPEHPK3PXP\\n# URL: otpauth://totp/Provisioning:admin?secret=JBSWY3DPEHPK3PXP&issuer=Provisioning\\n#\\n# Backup codes (save securely):\\n# 1. ABCD-EFGH-IJKL\\n# 2. MNOP-QRST-UVWX\\n# 3. YZAB-CDEF-GHIJ\\n# (8 more codes...) WebAuthn Enrollment: auth mfa enroll webauthn\\n# ✓ WebAuthn enrollment initiated\\n#\\n# Insert your security key and touch the button...\\n# (waiting for device interaction)\\n#\\n# ✓ Security key registered successfully\\n# Device: YubiKey 5 NFC\\n# Created: 2025-10-09T13:00:00Z Supported Authenticator Apps: Google Authenticator Microsoft Authenticator Authy 1Password Bitwarden Supported Hardware Keys: YubiKey (all models) Titan Security Key Feitian ePass macOS Touch ID Windows Hello auth mfa verify --code Verify MFA code (TOTP or backup code). Flags: --code (required): 6-digit TOTP code or backup code Examples: # Verify TOTP code\\nauth mfa verify --code 123456\\n# ✓ MFA verification successful # Verify backup code\\nauth mfa verify --code ABCD-EFGH-IJKL\\n# ✓ MFA verification successful (backup code used)\\n# Warning: This backup code cannot be used again # Pipeline usage\\nlet code = input \\"MFA code: \\"\\nauth mfa verify --code $code Error Cases: # Invalid code\\nauth mfa verify --code 999999\\n# Error: Invalid MFA code\\n# → Verify time synchronization on your device # Rate limited\\nauth mfa verify --code 123456\\n# Error: Too many failed attempts\\n# → Wait 5 minutes before trying again # No MFA enrolled\\nauth mfa verify --code 123456\\n# Error: MFA not enrolled for this user\\n# → Run: auth mfa enroll totp","breadcrumbs":"Plugin Integration Guide » Command Reference","id":"4390","title":"Command Reference"},"4391":{"body":"Variable Description Default USER Default username Current OS user CONTROL_CENTER_URL Control center URL http://localhost:3000 AUTH_KEYRING_SERVICE Keyring service name provisioning-auth","breadcrumbs":"Plugin Integration Guide » Environment Variables","id":"4391","title":"Environment Variables"},"4392":{"body":"\\"No active session\\" # Solution: Login first\\nauth login \\"Keyring error\\" (macOS) # Check Keychain Access permissions\\n# System Preferences → Security & Privacy → Privacy → Full Disk Access\\n# Add: /Applications/Nushell.app (or /usr/local/bin/nu) # Or grant access manually\\nsecurity unlock-keychain ~/Library/Keychains/login.keychain-db \\"Keyring error\\" (Linux) # Install keyring service\\nsudo apt install gnome-keyring # Ubuntu/Debian\\nsudo dnf install gnome-keyring # Fedora\\nsudo pacman -S gnome-keyring # Arch # Or use KWallet (KDE)\\nsudo apt install kwalletmanager # Start keyring daemon\\neval $(gnome-keyring-daemon --start)\\nexport $(gnome-keyring-daemon --start --components=secrets) \\"MFA verification failed\\" # Check time synchronization (TOTP requires accurate time)\\n# macOS:\\nsudo sntp -sS time.apple.com # Linux:\\nsudo ntpdate pool.ntp.org\\n# Or\\nsudo systemctl restart systemd-timesyncd # Use backup code if TOTP not working\\nauth mfa verify --code ABCD-EFGH-IJKL","breadcrumbs":"Plugin Integration Guide » Troubleshooting Authentication","id":"4392","title":"Troubleshooting Authentication"},"4393":{"body":"The KMS plugin provides high-performance encryption and decryption using multiple backend providers.","breadcrumbs":"Plugin Integration Guide » KMS Plugin (nu_plugin_kms)","id":"4393","title":"KMS Plugin (nu_plugin_kms)"},"4394":{"body":"Backend Performance Use Case Setup Complexity rustyvault ⚡ Very Fast (~5 ms) Production KMS Medium age ⚡ Very Fast (~3 ms) Local development Low cosmian 🐢 Moderate (~30 ms) Cloud KMS Medium aws 🐢 Moderate (~50 ms) AWS environments Medium vault 🐢 Moderate (~40 ms) Enterprise KMS High","breadcrumbs":"Plugin Integration Guide » Supported Backends","id":"4394","title":"Supported Backends"},"4395":{"body":"Choose rustyvault when: ✅ Running in production with high throughput requirements ✅ Need ~5 ms encryption/decryption latency ✅ Have RustyVault server deployed ✅ Require key rotation and versioning Choose age when: ✅ Developing locally without external dependencies ✅ Need simple file encryption ✅ Want ~3 ms latency ❌ Don\'t need centralized key management Choose cosmian when: ✅ Using Cosmian KMS service ✅ Need cloud-based key management ⚠️ Can accept ~30 ms latency Choose aws when: ✅ Deployed on AWS infrastructure ✅ Using AWS IAM for access control ✅ Need AWS KMS integration ⚠️ Can accept ~50 ms latency Choose vault when: ✅ Using HashiCorp Vault enterprise ✅ Need advanced policy management ✅ Require audit trails ⚠️ Can accept ~40 ms latency","breadcrumbs":"Plugin Integration Guide » Backend Selection Guide","id":"4395","title":"Backend Selection Guide"},"4396":{"body":"Command Purpose Example kms encrypt Encrypt data kms encrypt \\"secret\\" kms decrypt Decrypt data kms decrypt \\"vault:v1:...\\" kms generate-key Generate DEK kms generate-key --spec AES256 kms status Backend status kms status","breadcrumbs":"Plugin Integration Guide » Available Commands","id":"4396","title":"Available Commands"},"4397":{"body":"kms encrypt [--backend ] Encrypt data using specified KMS backend. Arguments: data (required): Data to encrypt (string or binary) Flags: --backend : KMS backend (rustyvault, age, cosmian, aws, vault) --key : Key ID or recipient (backend-specific) --context : Additional authenticated data (AAD) Examples: # Auto-detect backend from environment\\nkms encrypt \\"secret configuration data\\"\\n# vault:v1:8GawgGuP+emDKX5q... # RustyVault backend\\nkms encrypt \\"data\\" --backend rustyvault --key provisioning-main\\n# vault:v1:abc123def456... # Age backend (local encryption)\\nkms encrypt \\"data\\" --backend age --key age1xxxxxxxxx\\n# -----BEGIN AGE ENCRYPTED FILE-----\\n# YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+...\\n# -----END AGE ENCRYPTED FILE----- # AWS KMS\\nkms encrypt \\"data\\" --backend aws --key alias/provisioning\\n# AQICAHhwbGF0Zm9ybS1wcm92aXNpb25p... # With context (AAD for additional security)\\nkms encrypt \\"data\\" --backend rustyvault --key provisioning-main --context \\"user=admin,env=production\\" # Encrypt file contents\\nkms encrypt (open config.yaml) --backend rustyvault | save config.yaml.enc # Encrypt multiple files\\nls configs/*.yaml | each { |file| kms encrypt (open $file.name) --backend age | save $\\"encrypted/($file.name).enc\\"\\n} Output Formats: RustyVault : vault:v1:base64_ciphertext Age : -----BEGIN AGE ENCRYPTED FILE-----...-----END AGE ENCRYPTED FILE----- AWS : base64_aws_kms_ciphertext Cosmian : cosmian:v1:base64_ciphertext kms decrypt [--backend ] Decrypt KMS-encrypted data. Arguments: encrypted (required): Encrypted data (detects format automatically) Flags: --backend : KMS backend (auto-detected from format if not specified) --context : Additional authenticated data (must match encryption context) Examples: # Auto-detect backend from format\\nkms decrypt \\"vault:v1:8GawgGuP...\\"\\n# secret configuration data # Explicit backend\\nkms decrypt \\"vault:v1:abc123...\\" --backend rustyvault # Age decryption\\nkms decrypt \\"-----BEGIN AGE ENCRYPTED FILE-----...\\"\\n# (uses AGE_IDENTITY from environment) # With context (must match encryption context)\\nkms decrypt \\"vault:v1:abc123...\\" --context \\"user=admin,env=production\\" # Decrypt file\\nkms decrypt (open config.yaml.enc) | save config.yaml # Decrypt multiple files\\nls encrypted/*.enc | each { |file| kms decrypt (open $file.name) | save $\\"configs/(($file.name | path basename) | str replace \'.enc\' \'\')\\"\\n} # Pipeline decryption\\nopen secrets.json | get database_password_enc | kms decrypt | str trim | psql --dbname mydb --password Error Cases: # Invalid ciphertext\\nkms decrypt \\"invalid_data\\"\\n# Error: Invalid ciphertext format\\n# → Verify data was encrypted with KMS # Context mismatch\\nkms decrypt \\"vault:v1:abc...\\" --context \\"wrong=context\\"\\n# Error: Authentication failed (AAD mismatch)\\n# → Verify encryption context matches # Backend unavailable\\nkms decrypt \\"vault:v1:abc...\\"\\n# Error: Failed to connect to RustyVault at http://localhost:8200\\n# → Check RustyVault is running: curl http://localhost:8200/v1/sys/health kms generate-key [--spec ] Generate data encryption key (DEK) using KMS envelope encryption. Flags: --spec : Key specification (AES128 or AES256, default: AES256) --backend : KMS backend Examples: # Generate AES-256 key\\nkms generate-key\\n# {\\n# \\"plaintext\\": \\"rKz3N8xPq...\\", # base64-encoded key\\n# \\"ciphertext\\": \\"vault:v1:...\\", # encrypted DEK\\n# \\"spec\\": \\"AES256\\"\\n# } # Generate AES-128 key\\nkms generate-key --spec AES128 # Use in envelope encryption pattern\\nlet dek = kms generate-key\\nlet encrypted_data = ($data | openssl enc -aes-256-cbc -K $dek.plaintext)\\n{ data: $encrypted_data, encrypted_key: $dek.ciphertext\\n} | save secure_data.json # Later, decrypt:\\nlet envelope = open secure_data.json\\nlet dek = kms decrypt $envelope.encrypted_key\\n$envelope.data | openssl enc -d -aes-256-cbc -K $dek Use Cases: Envelope encryption (encrypt large data locally, protect DEK with KMS) Database field encryption File encryption with key wrapping kms status Show KMS backend status, configuration, and health. Examples: # Show current backend status\\nkms status\\n# {\\n# \\"backend\\": \\"rustyvault\\",\\n# \\"status\\": \\"healthy\\",\\n# \\"url\\": \\"http://localhost:8200\\",\\n# \\"mount_point\\": \\"transit\\",\\n# \\"version\\": \\"0.1.0\\",\\n# \\"latency_ms\\": 5\\n# } # Check all configured backends\\nkms status --all\\n# [\\n# { \\"backend\\": \\"rustyvault\\", \\"status\\": \\"healthy\\", ... },\\n# { \\"backend\\": \\"age\\", \\"status\\": \\"available\\", ... },\\n# { \\"backend\\": \\"aws\\", \\"status\\": \\"unavailable\\", \\"error\\": \\"...\\" }\\n# ] # Filter to specific backend\\nkms status | where backend == \\"rustyvault\\" # Health check in automation\\nif (kms status | get status) == \\"healthy\\" { echo \\"✓ KMS operational\\"\\n} else { error make { msg: \\"KMS unhealthy\\" }\\n}","breadcrumbs":"Plugin Integration Guide » Command Reference","id":"4397","title":"Command Reference"},"4398":{"body":"RustyVault Backend # Environment variables\\nexport RUSTYVAULT_ADDR=\\"http://localhost:8200\\"\\nexport RUSTYVAULT_TOKEN=\\"hvs.xxxxxxxxxxxxx\\"\\nexport RUSTYVAULT_MOUNT=\\"transit\\" # Transit engine mount point\\nexport RUSTYVAULT_KEY=\\"provisioning-main\\" # Default key name # Usage\\nkms encrypt \\"data\\" --backend rustyvault --key provisioning-main Setup RustyVault: # Start RustyVault\\nrustyvault server -dev # Enable transit engine\\nrustyvault secrets enable transit # Create encryption key\\nrustyvault write -f transit/keys/provisioning-main Age Backend # Generate Age keypair\\nage-keygen -o ~/.age/key.txt # Environment variables\\nexport AGE_IDENTITY=\\"$HOME/.age/key.txt\\" # Private key\\nexport AGE_RECIPIENT=\\"age1xxxxxxxxx\\" # Public key (from key.txt) # Usage\\nkms encrypt \\"data\\" --backend age\\nkms decrypt (open file.enc) --backend age AWS KMS Backend # AWS credentials\\nexport AWS_REGION=\\"us-east-1\\"\\nexport AWS_ACCESS_KEY_ID=\\"AKIAXXXXX\\"\\nexport AWS_SECRET_ACCESS_KEY=\\"xxxxx\\" # KMS configuration\\nexport AWS_KMS_KEY_ID=\\"alias/provisioning\\" # Usage\\nkms encrypt \\"data\\" --backend aws --key alias/provisioning Setup AWS KMS: # Create KMS key\\naws kms create-key --description \\"Provisioning Platform\\" # Create alias\\naws kms create-alias --alias-name alias/provisioning --target-key-id # Grant permissions\\naws kms create-grant --key-id --grantee-principal \\\\ --operations Encrypt Decrypt GenerateDataKey Cosmian Backend # Cosmian KMS configuration\\nexport KMS_HTTP_URL=\\"http://localhost:9998\\"\\nexport KMS_HTTP_BACKEND=\\"cosmian\\"\\nexport COSMIAN_API_KEY=\\"your-api-key\\" # Usage\\nkms encrypt \\"data\\" --backend cosmian Vault Backend (HashiCorp) # Vault configuration\\nexport VAULT_ADDR=\\"https://vault.example.com:8200\\"\\nexport VAULT_TOKEN=\\"hvs.xxxxxxxxxxxxx\\"\\nexport VAULT_MOUNT=\\"transit\\"\\nexport VAULT_KEY=\\"provisioning\\" # Usage\\nkms encrypt \\"data\\" --backend vault --key provisioning","breadcrumbs":"Plugin Integration Guide » Backend Configuration","id":"4398","title":"Backend Configuration"},"4399":{"body":"Test Setup: Data size: 1 KB Iterations: 1000 Hardware: Apple M1, 16 GB RAM Network: localhost Results: Backend Encrypt (avg) Decrypt (avg) Throughput (ops/sec) RustyVault 4.8 ms 5.1 ms ~200 Age 2.9 ms 3.2 ms ~320 Cosmian HTTP 31 ms 29 ms ~33 AWS KMS 52 ms 48 ms ~20 Vault 38 ms 41 ms ~25 Scaling Test (1000 operations): # RustyVault: ~5 seconds\\n0..1000 | each { |_| kms encrypt \\"data\\" --backend rustyvault } | length\\n# Age: ~3 seconds\\n0..1000 | each { |_| kms encrypt \\"data\\" --backend age } | length","breadcrumbs":"Plugin Integration Guide » Performance Benchmarks","id":"4399","title":"Performance Benchmarks"},"44":{"body":"System requirements and prerequisites Different installation methods How to verify your installation Setting up your environment Troubleshooting common installation issues","breadcrumbs":"Installation Guide » What You\'ll Learn","id":"44","title":"What You\'ll Learn"},"440":{"body":"Status : ✅ Production-Ready Interactive commands: provisioning ai template --prompt \\"Describe infrastructure\\"\\nprovisioning ai query --prompt \\"Configuration question\\"\\nprovisioning ai chat # Interactive mode Configuration : [ai]\\nenabled = true\\nprovider = \\"anthropic\\" # or \\"openai\\" or \\"local\\"\\nmodel = \\"claude-sonnet-4\\" [ai.cache]\\nenabled = true\\nsemantic_similarity = true\\nttl_seconds = 3600 [ai.limits]\\nmax_tokens = 4096\\ntemperature = 0.7","breadcrumbs":"Architecture » 4. CLI Integration","id":"440","title":"4. CLI Integration"},"4400":{"body":"\\"RustyVault connection failed\\" # Check RustyVault is running\\ncurl http://localhost:8200/v1/sys/health\\n# Expected: { \\"initialized\\": true, \\"sealed\\": false } # Check environment\\necho $env.RUSTYVAULT_ADDR\\necho $env.RUSTYVAULT_TOKEN # Test authentication\\ncurl -H \\"X-Vault-Token: $RUSTYVAULT_TOKEN\\" $RUSTYVAULT_ADDR/v1/sys/health \\"Age encryption failed\\" # Check Age keys exist\\nls -la ~/.age/\\n# Expected: key.txt # Verify key format\\ncat ~/.age/key.txt | head -1\\n# Expected: # created: \\n# Line 2: # public key: age1xxxxx\\n# Line 3: AGE-SECRET-KEY-xxxxx # Extract public key\\nexport AGE_RECIPIENT=$(grep \\"public key:\\" ~/.age/key.txt | cut -d: -f2 | tr -d \' \')\\necho $AGE_RECIPIENT \\"AWS KMS access denied\\" # Verify AWS credentials\\naws sts get-caller-identity\\n# Expected: Account, UserId, Arn # Check KMS key permissions\\naws kms describe-key --key-id alias/provisioning # Test encryption\\naws kms encrypt --key-id alias/provisioning --plaintext \\"test\\"","breadcrumbs":"Plugin Integration Guide » Troubleshooting KMS","id":"4400","title":"Troubleshooting KMS"},"4401":{"body":"The orchestrator plugin provides direct file-based access to orchestrator state, eliminating HTTP overhead for status queries and validation.","breadcrumbs":"Plugin Integration Guide » Orchestrator Plugin (nu_plugin_orchestrator)","id":"4401","title":"Orchestrator Plugin (nu_plugin_orchestrator)"},"4402":{"body":"Command Purpose Example orch status Orchestrator status orch status orch validate Validate workflow orch validate workflow.ncl orch tasks List tasks orch tasks --status running","breadcrumbs":"Plugin Integration Guide » Available Commands","id":"4402","title":"Available Commands"},"4403":{"body":"orch status [--data-dir ] Get orchestrator status from local files (no HTTP, ~1 ms latency). Flags: --data-dir : Data directory (default from ORCHESTRATOR_DATA_DIR) Examples: # Default data directory\\norch status\\n# {\\n# \\"active_tasks\\": 5,\\n# \\"completed_tasks\\": 120,\\n# \\"failed_tasks\\": 2,\\n# \\"pending_tasks\\": 3,\\n# \\"uptime\\": \\"2d 4h 15m\\",\\n# \\"health\\": \\"healthy\\"\\n# } # Custom data directory\\norch status --data-dir /opt/orchestrator/data # Monitor in loop\\nwhile true { clear orch status | table sleep 5sec\\n} # Alert on failures\\nif (orch status | get failed_tasks) > 0 { echo \\"⚠️ Failed tasks detected!\\"\\n} orch validate [--strict] Validate workflow Nickel file syntax and structure. Arguments: workflow.ncl (required): Path to Nickel workflow file Flags: --strict: Enable strict validation (warnings as errors) Examples: # Basic validation\\norch validate workflows/deploy.ncl\\n# {\\n# \\"valid\\": true,\\n# \\"workflow\\": {\\n# \\"name\\": \\"deploy_k8s_cluster\\",\\n# \\"version\\": \\"1.0.0\\",\\n# \\"operations\\": 5\\n# },\\n# \\"warnings\\": [],\\n# \\"errors\\": []\\n# } # Strict mode (warnings cause failure)\\norch validate workflows/deploy.ncl --strict\\n# Error: Validation failed with warnings:\\n# - Operation \'create_servers\': Missing retry_policy\\n# - Operation \'install_k8s\': Resource limits not specified # Validate all workflows\\nls workflows/*.ncl | each { |file| let result = orch validate $file.name if $result.valid { echo $\\"✓ ($file.name)\\" } else { echo $\\"✗ ($file.name): ($result.errors | str join \', \')\\" }\\n} # CI/CD validation\\ntry { orch validate workflow.ncl --strict echo \\"✓ Validation passed\\"\\n} catch { echo \\"✗ Validation failed\\" exit 1\\n} Validation Checks: ✅ KCL syntax correctness ✅ Required fields present (name, version, operations) ✅ Dependency graph valid (no cycles) ✅ Resource limits within bounds ✅ Provider configurations valid ✅ Operation types supported ⚠️ Optional: Retry policies defined ⚠️ Optional: Resource limits specified orch tasks [--status ] [--limit ] List orchestrator tasks from local state. Flags: --status : Filter by status (pending, running, completed, failed) --limit : Limit results (default: 100) --data-dir : Data directory Examples: # All tasks (last 100)\\norch tasks\\n# [\\n# {\\n# \\"task_id\\": \\"task_abc123\\",\\n# \\"name\\": \\"deploy_kubernetes\\",\\n# \\"status\\": \\"running\\",\\n# \\"priority\\": 5,\\n# \\"created_at\\": \\"2025-10-09T12:00:00Z\\",\\n# \\"progress\\": 45\\n# }\\n# ] # Running tasks only\\norch tasks --status running # Failed tasks (last 10)\\norch tasks --status failed --limit 10 # Pending high-priority tasks\\norch tasks --status pending | where priority > 7 # Monitor active tasks\\nwatch { orch tasks --status running | select name progress updated_at | table\\n} # Count tasks by status\\norch tasks | group-by status | each { |group| { status: $group.0, count: ($group.1 | length) }\\n}","breadcrumbs":"Plugin Integration Guide » Command Reference","id":"4403","title":"Command Reference"},"4404":{"body":"Variable Description Default ORCHESTRATOR_DATA_DIR Data directory provisioning/platform/orchestrator/data","breadcrumbs":"Plugin Integration Guide » Environment Variables","id":"4404","title":"Environment Variables"},"4405":{"body":"Operation HTTP API Plugin Latency Reduction Status query ~30 ms ~1 ms 97% faster Validate workflow ~100 ms ~10 ms 90% faster List tasks ~50 ms ~5 ms 90% faster Use Case: CI/CD Pipeline # HTTP approach (slow)\\nhttp get http://localhost:9090/tasks --status running | each { |task| http get $\\"http://localhost:9090/tasks/($task.id)\\" }\\n# Total: ~500 ms for 10 tasks # Plugin approach (fast)\\norch tasks --status running\\n# Total: ~5 ms for 10 tasks\\n# Result: 100x faster","breadcrumbs":"Plugin Integration Guide » Performance Comparison","id":"4405","title":"Performance Comparison"},"4406":{"body":"\\"Failed to read status\\" # Check data directory exists\\nls -la provisioning/platform/orchestrator/data/ # Create if missing\\nmkdir -p provisioning/platform/orchestrator/data # Check permissions (must be readable)\\nchmod 755 provisioning/platform/orchestrator/data \\"Workflow validation failed\\" # Use strict mode for detailed errors\\norch validate workflows/deploy.ncl --strict # Check Nickel syntax manually\\nnickel typecheck workflows/deploy.ncl\\nnickel eval workflows/deploy.ncl \\"No tasks found\\" # Check orchestrator running\\nps aux | grep orchestrator # Start orchestrator if not running\\ncd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background # Check task files\\nls provisioning/platform/orchestrator/data/tasks/","breadcrumbs":"Plugin Integration Guide » Troubleshooting Orchestrator","id":"4406","title":"Troubleshooting Orchestrator"},"4407":{"body":"","breadcrumbs":"Plugin Integration Guide » Integration Examples","id":"4407","title":"Integration Examples"},"4408":{"body":"Full workflow with authentication, secrets, and deployment: # Step 1: Login with MFA\\nauth login admin\\nauth mfa verify --code (input \\"MFA code: \\") # Step 2: Verify orchestrator health\\nif (orch status | get health) != \\"healthy\\" { error make { msg: \\"Orchestrator unhealthy\\" }\\n} # Step 3: Validate deployment workflow\\nlet validation = orch validate workflows/production-deploy.ncl --strict\\nif not $validation.valid { error make { msg: $\\"Validation failed: ($validation.errors)\\" }\\n} # Step 4: Encrypt production secrets\\nlet secrets = open secrets/production.yaml\\nkms encrypt ($secrets | to json) --backend rustyvault --key prod-main | save secrets/production.enc # Step 5: Submit deployment\\nprovisioning cluster create production --check # Step 6: Monitor progress\\nwhile (orch tasks --status running | length) > 0 { orch tasks --status running | select name progress updated_at | table sleep 10sec\\n} echo \\"✓ Deployment complete\\"","breadcrumbs":"Plugin Integration Guide » Example 1: Complete Authenticated Deployment","id":"4408","title":"Example 1: Complete Authenticated Deployment"},"4409":{"body":"Rotate all secrets in multiple environments: # Rotate database passwords\\n[\\"dev\\", \\"staging\\", \\"production\\"] | each { |env| # Generate new password let new_password = (openssl rand -base64 32) # Encrypt with environment-specific key let encrypted = kms encrypt $new_password --backend rustyvault --key $\\"($env)-main\\" # Save encrypted password { environment: $env, password_enc: $encrypted, rotated_at: (date now | format date \\"%Y-%m-%d %H:%M:%S\\") } | save $\\"secrets/db-password-($env).json\\" echo $\\"✓ Rotated password for ($env)\\"\\n}","breadcrumbs":"Plugin Integration Guide » Example 2: Batch Secret Rotation","id":"4409","title":"Example 2: Batch Secret Rotation"},"441":{"body":"","breadcrumbs":"Architecture » Planned Components - Q2 2025","id":"441","title":"Planned Components - Q2 2025"},"4410":{"body":"Deploy to multiple environments with validation: # Define environments\\nlet environments = [ { name: \\"dev\\", validate: \\"basic\\" }, { name: \\"staging\\", validate: \\"strict\\" }, { name: \\"production\\", validate: \\"strict\\", mfa_required: true }\\n] # Deploy to each environment\\n$environments | each { |env| echo $\\"Deploying to ($env.name)...\\" # Authenticate if production if $env.mfa_required? { if not (auth verify | get mfa_verified) { auth mfa verify --code (input $\\"MFA code for ($env.name): \\") } } # Validate workflow let validation = if $env.validate == \\"strict\\" { orch validate $\\"workflows/($env.name)-deploy.ncl\\" --strict } else { orch validate $\\"workflows/($env.name)-deploy.ncl\\" } if not $validation.valid { echo $\\"✗ Validation failed for ($env.name)\\" continue } # Decrypt secrets let secrets = kms decrypt (open $\\"secrets/($env.name).enc\\") # Deploy provisioning cluster create $env.name echo $\\"✓ Deployed to ($env.name)\\"\\n}","breadcrumbs":"Plugin Integration Guide » Example 3: Multi-Environment Deployment","id":"4410","title":"Example 3: Multi-Environment Deployment"},"4411":{"body":"Backup configuration files with encryption: # Backup script\\nlet backup_dir = $\\"backups/(date now | format date \\"%Y%m%d-%H%M%S\\")\\"\\nmkdir $backup_dir # Backup and encrypt configs\\nls configs/**/*.yaml | each { |file| let encrypted = kms encrypt (open $file.name) --backend age let backup_path = $\\"($backup_dir)/($file.name | path basename).enc\\" $encrypted | save $backup_path echo $\\"✓ Backed up ($file.name)\\"\\n} # Create manifest\\n{ backup_date: (date now), files: (ls $\\"($backup_dir)/*.enc\\" | length), backend: \\"age\\"\\n} | save $\\"($backup_dir)/manifest.json\\" echo $\\"✓ Backup complete: ($backup_dir)\\"","breadcrumbs":"Plugin Integration Guide » Example 4: Automated Backup and Encryption","id":"4411","title":"Example 4: Automated Backup and Encryption"},"4412":{"body":"Real-time health monitoring: # Health dashboard\\nwhile true { clear # Header echo \\"=== Provisioning Platform Health Dashboard ===\\" echo $\\"Updated: (date now | format date \\"%Y-%m-%d %H:%M:%S\\")\\" echo \\"\\" # Authentication status let auth_status = try { auth verify } catch { { active: false } } echo $\\"Auth: (if $auth_status.active { \'✓ Active\' } else { \'✗ Inactive\' })\\" # KMS status let kms_health = kms status echo $\\"KMS: (if $kms_health.status == \'healthy\' { \'✓ Healthy\' } else { \'✗ Unhealthy\' })\\" # Orchestrator status let orch_health = orch status echo $\\"Orchestrator: (if $orch_health.health == \'healthy\' { \'✓ Healthy\' } else { \'✗ Unhealthy\' })\\" echo $\\"Active Tasks: ($orch_health.active_tasks)\\" echo $\\"Failed Tasks: ($orch_health.failed_tasks)\\" # Task summary echo \\"\\" echo \\"=== Running Tasks ===\\" orch tasks --status running | select name progress updated_at | table sleep 10sec\\n}","breadcrumbs":"Plugin Integration Guide » Example 5: Health Monitoring Dashboard","id":"4412","title":"Example 5: Health Monitoring Dashboard"},"4413":{"body":"","breadcrumbs":"Plugin Integration Guide » Best Practices","id":"4413","title":"Best Practices"},"4414":{"body":"✅ Use Plugins When: Performance is critical (high-frequency operations) Working in pipelines (Nushell data structures) Need offline capability (KMS, orchestrator local ops) Building automation scripts CI/CD pipelines Use HTTP When: Calling from external systems (not Nushell) Need consistent REST API interface Cross-language integration Web UI backend","breadcrumbs":"Plugin Integration Guide » When to Use Plugins vs HTTP","id":"4414","title":"When to Use Plugins vs HTTP"},"4415":{"body":"1. Batch Operations # ❌ Slow: Individual HTTP calls in loop\\nls configs/*.yaml | each { |file| http post http://localhost:9998/encrypt { data: (open $file.name) }\\n}\\n# Total: ~5 seconds (50 ms × 100) # ✅ Fast: Plugin in pipeline\\nls configs/*.yaml | each { |file| kms encrypt (open $file.name)\\n}\\n# Total: ~0.5 seconds (5 ms × 100) 2. Parallel Processing # Process multiple operations in parallel\\nls configs/*.yaml | par-each { |file| kms encrypt (open $file.name) | save $\\"encrypted/($file.name).enc\\" } 3. Caching Session State # Cache auth verification\\nlet $auth_cache = auth verify\\nif $auth_cache.active { # Use cached result instead of repeated calls echo $\\"Authenticated as ($auth_cache.user)\\"\\n}","breadcrumbs":"Plugin Integration Guide » Performance Optimization","id":"4415","title":"Performance Optimization"},"4416":{"body":"Graceful Degradation: # Try plugin, fallback to HTTP if unavailable\\ndef kms_encrypt [data: string] { try { kms encrypt $data } catch { http post http://localhost:9998/encrypt { data: $data } | get encrypted }\\n} Comprehensive Error Handling: # Handle all error cases\\ndef safe_deployment [] { # Check authentication let auth_status = try { auth verify } catch { echo \\"✗ Authentication failed, logging in...\\" auth login admin auth verify } # Check KMS health let kms_health = try { kms status } catch { error make { msg: \\"KMS unavailable, cannot proceed\\" } } # Validate workflow let validation = try { orch validate workflow.ncl --strict } catch { error make { msg: \\"Workflow validation failed\\" } } # Proceed if all checks pass if $auth_status.active and $kms_health.status == \\"healthy\\" and $validation.valid { echo \\"✓ All checks passed, deploying...\\" provisioning cluster create production }\\n}","breadcrumbs":"Plugin Integration Guide » Error Handling","id":"4416","title":"Error Handling"},"4417":{"body":"1. Never Log Decrypted Data # ❌ BAD: Logs plaintext password\\nlet password = kms decrypt $encrypted_password\\necho $\\"Password: ($password)\\" # Visible in logs! # ✅ GOOD: Use directly without logging\\nlet password = kms decrypt $encrypted_password\\npsql --dbname mydb --password $password # Not logged 2. Use Context (AAD) for Critical Data # Encrypt with context\\nlet context = $\\"user=(whoami),env=production,date=(date now | format date \\"%Y-%m-%d\\")\\"\\nkms encrypt $sensitive_data --context $context # Decrypt requires same context\\nkms decrypt $encrypted --context $context 3. Rotate Backup Codes # After using backup code, generate new set\\nauth mfa verify --code ABCD-EFGH-IJKL\\n# Warning: Backup code used\\nauth mfa regenerate-backups\\n# New backup codes generated 4. Limit Token Lifetime # Check token expiration before long operations\\nlet session = auth verify\\nlet expires_in = (($session.expires_at | into datetime) - (date now))\\nif $expires_in < 5 min { echo \\"⚠️ Token expiring soon, re-authenticating...\\" auth login $session.user\\n}","breadcrumbs":"Plugin Integration Guide » Security Best Practices","id":"4417","title":"Security Best Practices"},"4418":{"body":"","breadcrumbs":"Plugin Integration Guide » Troubleshooting","id":"4418","title":"Troubleshooting"},"4419":{"body":"\\"Plugin not found\\" # Check plugin registration\\nplugin list | where name =~ \\"auth|kms|orch\\" # Re-register if missing\\ncd provisioning/core/plugins/nushell-plugins\\nplugin add target/release/nu_plugin_auth\\nplugin add target/release/nu_plugin_kms\\nplugin add target/release/nu_plugin_orchestrator # Restart Nushell\\nexit\\nnu \\"Plugin command failed\\" # Enable debug mode\\n$env.RUST_LOG = \\"debug\\" # Run command again to see detailed errors\\nkms encrypt \\"test\\" # Check plugin version compatibility\\nplugin list | where name =~ \\"kms\\" | select name version \\"Permission denied\\" # Check plugin executable permissions\\nls -l provisioning/core/plugins/nushell-plugins/target/release/nu_plugin_*\\n# Should show: -rwxr-xr-x # Fix if needed\\nchmod +x provisioning/core/plugins/nushell-plugins/target/release/nu_plugin_*","breadcrumbs":"Plugin Integration Guide » Common Issues Across Plugins","id":"4419","title":"Common Issues Across Plugins"},"442":{"body":"Status : 🔴 Planned Self-directed agents for complex tasks: Multi-step workflow execution Decision making and adaptation Monitoring and self-healing recommendations","breadcrumbs":"Architecture » Autonomous Agents (typdialog-ag)","id":"442","title":"Autonomous Agents (typdialog-ag)"},"4420":{"body":"macOS Issues: # \\"cannot be opened because the developer cannot be verified\\"\\nxattr -d com.apple.quarantine target/release/nu_plugin_auth\\nxattr -d com.apple.quarantine target/release/nu_plugin_kms\\nxattr -d com.apple.quarantine target/release/nu_plugin_orchestrator # Keychain access denied\\n# System Preferences → Security & Privacy → Privacy → Full Disk Access\\n# Add: /usr/local/bin/nu Linux Issues: # Keyring service not running\\nsystemctl --user status gnome-keyring-daemon\\nsystemctl --user start gnome-keyring-daemon # Missing dependencies\\nsudo apt install libssl-dev pkg-config # Ubuntu/Debian\\nsudo dnf install openssl-devel # Fedora Windows Issues: # Credential Manager access denied\\n# Control Panel → User Accounts → Credential Manager\\n# Ensure Windows Credential Manager service is running # Missing Visual C++ runtime\\n# Download from: https://aka.ms/vs/17/release/vc_redist.x64.exe","breadcrumbs":"Plugin Integration Guide » Platform-Specific Issues","id":"4420","title":"Platform-Specific Issues"},"4421":{"body":"Enable Verbose Logging: # Set log level\\n$env.RUST_LOG = \\"debug,nu_plugin_auth=trace\\" # Run command\\nauth login admin # Check logs Test Plugin Directly: # Test plugin communication (advanced)\\necho \'{\\"Call\\": [0, {\\"name\\": \\"auth\\", \\"call\\": \\"login\\", \\"args\\": [\\"admin\\", \\"password\\"]}]}\' \\\\ | target/release/nu_plugin_auth Check Plugin Health: # Test each plugin\\nauth --help # Should show auth commands\\nkms --help # Should show kms commands\\norch --help # Should show orch commands # Test functionality\\nauth verify # Should return session status\\nkms status # Should return backend status\\norch status # Should return orchestrator status","breadcrumbs":"Plugin Integration Guide » Debugging Techniques","id":"4421","title":"Debugging Techniques"},"4422":{"body":"","breadcrumbs":"Plugin Integration Guide » Migration Guide","id":"4422","title":"Migration Guide"},"4423":{"body":"Phase 1: Install Plugins (No Breaking Changes) # Build and register plugins\\ncd provisioning/core/plugins/nushell-plugins\\ncargo build --release --all\\nplugin add target/release/nu_plugin_auth\\nplugin add target/release/nu_plugin_kms\\nplugin add target/release/nu_plugin_orchestrator # Verify HTTP still works\\nhttp get http://localhost:9090/health Phase 2: Update Scripts Incrementally # Before (HTTP)\\ndef encrypt_config [file: string] { let data = open $file let result = http post http://localhost:9998/encrypt { data: $data } $result.encrypted | save $\\"($file).enc\\"\\n} # After (Plugin with fallback)\\ndef encrypt_config [file: string] { let data = open $file let encrypted = try { kms encrypt $data --backend rustyvault } catch { # Fallback to HTTP if plugin unavailable (http post http://localhost:9998/encrypt { data: $data }).encrypted } $encrypted | save $\\"($file).enc\\"\\n} Phase 3: Test Migration # Run side-by-side comparison\\ndef test_migration [] { let test_data = \\"test secret data\\" # Plugin approach let start_plugin = date now let plugin_result = kms encrypt $test_data let plugin_time = ((date now) - $start_plugin) # HTTP approach let start_http = date now let http_result = (http post http://localhost:9998/encrypt { data: $test_data }).encrypted let http_time = ((date now) - $start_http) echo $\\"Plugin: ($plugin_time)ms\\" echo $\\"HTTP: ($http_time)ms\\" echo $\\"Speedup: (($http_time / $plugin_time))x\\"\\n} Phase 4: Gradual Rollout # Use feature flag for controlled rollout\\n$env.USE_PLUGINS = true def encrypt_with_flag [data: string] { if $env.USE_PLUGINS { kms encrypt $data } else { (http post http://localhost:9998/encrypt { data: $data }).encrypted }\\n} Phase 5: Full Migration # Replace all HTTP calls with plugin calls\\n# Remove fallback logic once stable\\ndef encrypt_config [file: string] { let data = open $file kms encrypt $data --backend rustyvault | save $\\"($file).enc\\"\\n}","breadcrumbs":"Plugin Integration Guide » Migrating from HTTP to Plugin-Based","id":"4423","title":"Migrating from HTTP to Plugin-Based"},"4424":{"body":"# If issues arise, quickly rollback\\ndef rollback_to_http [] { # Remove plugin registrations plugin rm nu_plugin_auth plugin rm nu_plugin_kms plugin rm nu_plugin_orchestrator # Restart Nushell exec nu\\n}","breadcrumbs":"Plugin Integration Guide » Rollback Strategy","id":"4424","title":"Rollback Strategy"},"4425":{"body":"","breadcrumbs":"Plugin Integration Guide » Advanced Configuration","id":"4425","title":"Advanced Configuration"},"4426":{"body":"# ~/.config/nushell/config.nu\\n$env.PLUGIN_PATH = \\"/opt/provisioning/plugins\\" # Register from custom location\\nplugin add $\\"($env.PLUGIN_PATH)/nu_plugin_auth\\"\\nplugin add $\\"($env.PLUGIN_PATH)/nu_plugin_kms\\"\\nplugin add $\\"($env.PLUGIN_PATH)/nu_plugin_orchestrator\\"","breadcrumbs":"Plugin Integration Guide » Custom Plugin Paths","id":"4426","title":"Custom Plugin Paths"},"4427":{"body":"# ~/.config/nushell/env.nu # Development environment\\nif ($env.ENV? == \\"dev\\") { $env.RUSTYVAULT_ADDR = \\"http://localhost:8200\\" $env.CONTROL_CENTER_URL = \\"http://localhost:3000\\"\\n} # Staging environment\\nif ($env.ENV? == \\"staging\\") { $env.RUSTYVAULT_ADDR = \\"https://vault-staging.example.com\\" $env.CONTROL_CENTER_URL = \\"https://control-staging.example.com\\"\\n} # Production environment\\nif ($env.ENV? == \\"prod\\") { $env.RUSTYVAULT_ADDR = \\"https://vault.example.com\\" $env.CONTROL_CENTER_URL = \\"https://control.example.com\\"\\n}","breadcrumbs":"Plugin Integration Guide » Environment-Specific Configuration","id":"4427","title":"Environment-Specific Configuration"},"4428":{"body":"# ~/.config/nushell/config.nu # Auth shortcuts\\nalias login = auth login\\nalias logout = auth logout\\nalias whoami = auth verify | get user # KMS shortcuts\\nalias encrypt = kms encrypt\\nalias decrypt = kms decrypt # Orchestrator shortcuts\\nalias status = orch status\\nalias tasks = orch tasks\\nalias validate = orch validate","breadcrumbs":"Plugin Integration Guide » Plugin Aliases","id":"4428","title":"Plugin Aliases"},"4429":{"body":"# ~/.config/nushell/custom_commands.nu # Encrypt all files in directory\\ndef encrypt-dir [dir: string] { ls $\\"($dir)/**/*\\" | where type == file | each { |file| kms encrypt (open $file.name) | save $\\"($file.name).enc\\" echo $\\"✓ Encrypted ($file.name)\\" }\\n} # Decrypt all files in directory\\ndef decrypt-dir [dir: string] { ls $\\"($dir)/**/*.enc\\" | each { |file| kms decrypt (open $file.name) | save (echo $file.name | str replace \'.enc\' \'\') echo $\\"✓ Decrypted ($file.name)\\" }\\n} # Monitor deployments\\ndef watch-deployments [] { while true { clear echo \\"=== Active Deployments ===\\" orch tasks --status running | table sleep 5sec }\\n}","breadcrumbs":"Plugin Integration Guide » Custom Commands","id":"4429","title":"Custom Commands"},"443":{"body":"Status : 🔴 Planned Real-time AI suggestions in configuration forms: Context-aware field recommendations Validation error explanations Auto-completion for infrastructure patterns","breadcrumbs":"Architecture » AI-Assisted Forms (typdialog-ai)","id":"443","title":"AI-Assisted Forms (typdialog-ai)"},"4430":{"body":"","breadcrumbs":"Plugin Integration Guide » Security Considerations","id":"4430","title":"Security Considerations"},"4431":{"body":"What Plugins Protect Against: ✅ Network eavesdropping (no HTTP for KMS/orch) ✅ Token theft from files (keyring storage) ✅ Credential exposure in logs (prompt-based input) ✅ Man-in-the-middle attacks (local file access) What Plugins Don\'t Protect Against: ❌ Memory dumping (decrypted data in RAM) ❌ Malicious plugins (trust registry only) ❌ Compromised OS keyring ❌ Physical access to machine","breadcrumbs":"Plugin Integration Guide » Threat Model","id":"4431","title":"Threat Model"},"4432":{"body":"1. Verify Plugin Integrity # Check plugin signatures (if available)\\nsha256sum target/release/nu_plugin_auth\\n# Compare with published checksums # Build from trusted source\\ngit clone https://github.com/provisioning-platform/plugins\\ncd plugins\\ncargo build --release --all 2. Restrict Plugin Access # Set plugin permissions (only owner can execute)\\nchmod 700 target/release/nu_plugin_* # Store in protected directory\\nsudo mkdir -p /opt/provisioning/plugins\\nsudo chown $(whoami):$(whoami) /opt/provisioning/plugins\\nsudo chmod 755 /opt/provisioning/plugins\\nmv target/release/nu_plugin_* /opt/provisioning/plugins/ 3. Audit Plugin Usage # Log plugin calls (for compliance)\\ndef logged_encrypt [data: string] { let timestamp = date now let result = kms encrypt $data { timestamp: $timestamp, action: \\"encrypt\\" } | save --append audit.log $result\\n} 4. Rotate Credentials Regularly # Weekly credential rotation script\\ndef rotate_credentials [] { # Re-authenticate auth logout auth login admin # Rotate KMS keys (if supported) kms rotate-key --key provisioning-main # Update encrypted secrets ls secrets/*.enc | each { |file| let plain = kms decrypt (open $file.name) kms encrypt $plain | save $file.name }\\n}","breadcrumbs":"Plugin Integration Guide » Secure Deployment","id":"4432","title":"Secure Deployment"},"4433":{"body":"Q: Can I use plugins without RustyVault/Age installed? A: Yes, authentication and orchestrator plugins work independently. KMS plugin requires at least one backend configured (Age is easiest for local dev). Q: Do plugins work in CI/CD pipelines? A: Yes, plugins work great in CI/CD. For headless environments (no keyring), use environment variables for auth or file-based tokens. # CI/CD example\\nexport CONTROL_CENTER_TOKEN=\\"jwt-token-here\\"\\nkms encrypt \\"data\\" --backend age Q: How do I update plugins? A: Rebuild and re-register: cd provisioning/core/plugins/nushell-plugins\\ngit pull\\ncargo build --release --all\\nplugin add --force target/release/nu_plugin_auth\\nplugin add --force target/release/nu_plugin_kms\\nplugin add --force target/release/nu_plugin_orchestrator Q: Can I use multiple KMS backends simultaneously? A: Yes, specify --backend for each operation: kms encrypt \\"data1\\" --backend rustyvault\\nkms encrypt \\"data2\\" --backend age\\nkms encrypt \\"data3\\" --backend aws Q: What happens if a plugin crashes? A: Nushell isolates plugin crashes. The command fails with an error, but Nushell continues running. Check logs with $env.RUST_LOG = \\"debug\\". Q: Are plugins compatible with older Nushell versions? A: Plugins require Nushell 0.107.1+. For older versions, use HTTP API. Q: How do I backup MFA enrollment? A: Save backup codes securely (password manager, encrypted file). QR code can be re-scanned from the same secret. # Save backup codes\\nauth mfa enroll totp | save mfa-backup-codes.txt\\nkms encrypt (open mfa-backup-codes.txt) | save mfa-backup-codes.enc\\nrm mfa-backup-codes.txt Q: Can plugins work offline? A: Partially: ✅ kms with Age backend (fully offline) ✅ orch status/tasks (reads local files) ❌ auth (requires control center) ❌ kms with RustyVault/AWS/Vault (requires network) Q: How do I troubleshoot plugin performance? A: Use Nushell\'s timing: timeit { kms encrypt \\"data\\" }\\n# 5 ms 123μs 456 ns timeit { http post http://localhost:9998/encrypt { data: \\"data\\" } }\\n# 52 ms 789μs 123 ns","breadcrumbs":"Plugin Integration Guide » FAQ","id":"4433","title":"FAQ"},"4434":{"body":"Security System : /Users/Akasha/project-provisioning/docs/architecture/adr-009-security-system-complete.md JWT Authentication : /Users/Akasha/project-provisioning/docs/architecture/JWT_AUTH_IMPLEMENTATION.md Config Encryption : /Users/Akasha/project-provisioning/docs/user/CONFIG_ENCRYPTION_GUIDE.md RustyVault Integration : /Users/Akasha/project-provisioning/RUSTYVAULT_INTEGRATION_SUMMARY.md MFA Implementation : /Users/Akasha/project-provisioning/docs/architecture/MFA_IMPLEMENTATION_SUMMARY.md Nushell Plugins Reference : /Users/Akasha/project-provisioning/docs/user/NUSHELL_PLUGINS_GUIDE.md Version : 1.0.0 Maintained By : Platform Team Last Updated : 2025-10-09 Feedback : Open an issue or contact platform-team@example.com","breadcrumbs":"Plugin Integration Guide » Related Documentation","id":"4434","title":"Related Documentation"},"4435":{"body":"Complete guide to authentication, KMS, and orchestrator plugins.","breadcrumbs":"NuShell Plugins Guide » Nushell Plugins for Provisioning Platform","id":"4435","title":"Nushell Plugins for Provisioning Platform"},"4436":{"body":"Three native Nushell plugins provide high-performance integration with the provisioning platform: nu_plugin_auth - JWT authentication and MFA operations nu_plugin_kms - Key management (RustyVault, Age, Cosmian, AWS, Vault) nu_plugin_orchestrator - Orchestrator operations (status, validate, tasks)","breadcrumbs":"NuShell Plugins Guide » Overview","id":"4436","title":"Overview"},"4437":{"body":"Performance Advantages : 10x faster than HTTP API calls (KMS operations) Direct access to Rust libraries (no HTTP overhead) Native integration with Nushell pipelines Type safety with Nushell\'s type system Developer Experience : Pipeline friendly - Use Nushell pipes naturally Tab completion - All commands and flags Consistent interface - Follows Nushell conventions Error handling - Nushell-native error messages","breadcrumbs":"NuShell Plugins Guide » Why Native Plugins","id":"4437","title":"Why Native Plugins"},"4438":{"body":"","breadcrumbs":"NuShell Plugins Guide » Installation","id":"4438","title":"Installation"},"4439":{"body":"Nushell 0.107.1+ Rust toolchain (for building from source) Access to provisioning platform services","breadcrumbs":"NuShell Plugins Guide » Prerequisites","id":"4439","title":"Prerequisites"},"444":{"body":"Fine-tuning capabilities for custom models Autonomous workflow execution with human approval Cedar authorization policies for AI actions Custom knowledge bases per workspace","breadcrumbs":"Architecture » Advanced Features","id":"444","title":"Advanced Features"},"4440":{"body":"cd /Users/Akasha/project-provisioning/provisioning/core/plugins/nushell-plugins # Build all plugins\\ncargo build --release -p nu_plugin_auth\\ncargo build --release -p nu_plugin_kms\\ncargo build --release -p nu_plugin_orchestrator # Or build individually\\ncargo build --release -p nu_plugin_auth\\ncargo build --release -p nu_plugin_kms\\ncargo build --release -p nu_plugin_orchestrator","breadcrumbs":"NuShell Plugins Guide » Build from Source","id":"4440","title":"Build from Source"},"4441":{"body":"# Register all plugins\\nplugin add target/release/nu_plugin_auth\\nplugin add target/release/nu_plugin_kms\\nplugin add target/release/nu_plugin_orchestrator # Verify registration\\nplugin list | where name =~ \\"provisioning\\"","breadcrumbs":"NuShell Plugins Guide » Register with Nushell","id":"4441","title":"Register with Nushell"},"4442":{"body":"# Test auth commands\\nauth --help # Test KMS commands\\nkms --help # Test orchestrator commands\\norch --help","breadcrumbs":"NuShell Plugins Guide » Verify Installation","id":"4442","title":"Verify Installation"},"4443":{"body":"Authentication plugin for JWT login, MFA enrollment, and session management.","breadcrumbs":"NuShell Plugins Guide » Plugin: nu_plugin_auth","id":"4443","title":"Plugin: nu_plugin_auth"},"4444":{"body":"auth login [password] Login to provisioning platform and store JWT tokens securely. Arguments : username (required): Username for authentication password (optional): Password (prompts interactively if not provided) Flags : --url : Control center URL (default: http://localhost:9080) --password : Password (alternative to positional argument) Examples : # Interactive password prompt (recommended)\\nauth login admin # Password in command (not recommended for production)\\nauth login admin mypassword # Custom URL\\nauth login admin --url http://control-center:9080 # Pipeline usage\\n\\"admin\\" | auth login Token Storage : Tokens are stored securely in OS-native keyring: macOS : Keychain Access Linux : Secret Service (gnome-keyring, kwallet) Windows : Credential Manager Success Output : ✓ Login successful\\nUser: admin\\nRole: Admin\\nExpires: 2025-10-09T14:30:00Z auth logout Logout from current session and remove stored tokens. Examples : # Simple logout\\nauth logout # Pipeline usage (conditional logout)\\nif (auth verify | get active) { auth logout } Success Output : ✓ Logged out successfully auth verify Verify current session and check token validity. Examples : # Check session status\\nauth verify # Pipeline usage\\nauth verify | if $in.active { echo \\"Session valid\\" } else { echo \\"Session expired\\" } Success Output : { \\"active\\": true, \\"user\\": \\"admin\\", \\"role\\": \\"Admin\\", \\"expires_at\\": \\"2025-10-09T14:30:00Z\\", \\"mfa_verified\\": true\\n} auth sessions List all active sessions for current user. Examples : # List sessions\\nauth sessions # Filter by date\\nauth sessions | where created_at > (date now | date to-timezone UTC | into string) Output Format : [ { \\"session_id\\": \\"sess_abc123\\", \\"created_at\\": \\"2025-10-09T12:00:00Z\\", \\"expires_at\\": \\"2025-10-09T14:30:00Z\\", \\"ip_address\\": \\"192.168.1.100\\", \\"user_agent\\": \\"nushell/0.107.1\\" }\\n] auth mfa enroll Enroll in MFA (TOTP or WebAuthn). Arguments : type (required): MFA type (totp or webauthn) Examples : # Enroll TOTP (Google Authenticator, Authy)\\nauth mfa enroll totp # Enroll WebAuthn (YubiKey, Touch ID, Windows Hello)\\nauth mfa enroll webauthn TOTP Enrollment Output : ✓ TOTP enrollment initiated Scan this QR code with your authenticator app: ████ ▄▄▄▄▄ █▀█ █▄▀▀▀▄ ▄▄▄▄▄ ████ ████ █ █ █▀▀▀█▄ ▀▀█ █ █ ████ ████ █▄▄▄█ █ █▀▄ ▀▄▄█ █▄▄▄█ ████ ... Or enter manually:\\nSecret: JBSWY3DPEHPK3PXP\\nURL: otpauth://totp/Provisioning:admin?secret=JBSWY3DPEHPK3PXP&issuer=Provisioning Backup codes (save securely):\\n1. ABCD-EFGH-IJKL\\n2. MNOP-QRST-UVWX\\n... auth mfa verify --code Verify MFA code (TOTP or backup code). Flags : --code (required): 6-digit TOTP code or backup code Examples : # Verify TOTP code\\nauth mfa verify --code 123456 # Verify backup code\\nauth mfa verify --code ABCD-EFGH-IJKL Success Output : ✓ MFA verification successful","breadcrumbs":"NuShell Plugins Guide » Commands","id":"4444","title":"Commands"},"4445":{"body":"Variable Description Default USER Default username Current OS user CONTROL_CENTER_URL Control center URL http://localhost:9080","breadcrumbs":"NuShell Plugins Guide » Environment Variables","id":"4445","title":"Environment Variables"},"4446":{"body":"Common Errors : # \\"No active session\\"\\nError: No active session found\\n→ Run: auth login # \\"Invalid credentials\\"\\nError: Authentication failed: Invalid username or password\\n→ Check username and password # \\"Token expired\\"\\nError: Token has expired\\n→ Run: auth login # \\"MFA required\\"\\nError: MFA verification required\\n→ Run: auth mfa verify --code # \\"Keyring error\\" (macOS)\\nError: Failed to access keyring\\n→ Check Keychain Access permissions # \\"Keyring error\\" (Linux)\\nError: Failed to access keyring\\n→ Install gnome-keyring or kwallet","breadcrumbs":"NuShell Plugins Guide » Error Handling","id":"4446","title":"Error Handling"},"4447":{"body":"Key Management Service plugin supporting multiple backends.","breadcrumbs":"NuShell Plugins Guide » Plugin: nu_plugin_kms","id":"4447","title":"Plugin: nu_plugin_kms"},"4448":{"body":"Backend Description Use Case rustyvault RustyVault Transit engine Production KMS age Age encryption (local) Development/testing cosmian Cosmian KMS (HTTP) Cloud KMS aws AWS KMS AWS environments vault HashiCorp Vault Enterprise KMS","breadcrumbs":"NuShell Plugins Guide » Supported Backends","id":"4448","title":"Supported Backends"},"4449":{"body":"kms encrypt [--backend ] Encrypt data using KMS. Arguments : data (required): Data to encrypt (string or binary) Flags : --backend : KMS backend (rustyvault, age, cosmian, aws, vault) --key : Key ID or recipient (backend-specific) --context : Additional authenticated data (AAD) Examples : # Auto-detect backend from environment\\nkms encrypt \\"secret data\\" # RustyVault\\nkms encrypt \\"data\\" --backend rustyvault --key provisioning-main # Age (local encryption)\\nkms encrypt \\"data\\" --backend age --key age1xxxxxxxxx # AWS KMS\\nkms encrypt \\"data\\" --backend aws --key alias/provisioning # With context (AAD)\\nkms encrypt \\"data\\" --backend rustyvault --key provisioning-main --context \\"user=admin\\" Output Format : vault:v1:abc123def456... kms decrypt [--backend ] Decrypt KMS-encrypted data. Arguments : encrypted (required): Encrypted data (base64 or KMS format) Flags : --backend : KMS backend (auto-detected if not specified) --context : Additional authenticated data (AAD, must match encryption) Examples : # Auto-detect backend\\nkms decrypt \\"vault:v1:abc123def456...\\" # RustyVault explicit\\nkms decrypt \\"vault:v1:abc123...\\" --backend rustyvault # Age\\nkms decrypt \\"-----BEGIN AGE ENCRYPTED FILE-----...\\" --backend age # With context\\nkms decrypt \\"vault:v1:abc123...\\" --backend rustyvault --context \\"user=admin\\" Output : secret data kms generate-key [--spec ] Generate data encryption key (DEK) using KMS. Flags : --spec : Key specification (AES128 or AES256, default: AES256) --backend : KMS backend Examples : # Generate AES-256 key\\nkms generate-key # Generate AES-128 key\\nkms generate-key --spec AES128 # Specific backend\\nkms generate-key --backend rustyvault Output Format : { \\"plaintext\\": \\"base64-encoded-key\\", \\"ciphertext\\": \\"vault:v1:encrypted-key\\", \\"spec\\": \\"AES256\\"\\n} kms status Show KMS backend status and configuration. Examples : # Show status\\nkms status # Filter to specific backend\\nkms status | where backend == \\"rustyvault\\" Output Format : { \\"backend\\": \\"rustyvault\\", \\"status\\": \\"healthy\\", \\"url\\": \\"http://localhost:8200\\", \\"mount_point\\": \\"transit\\", \\"version\\": \\"0.1.0\\"\\n}","breadcrumbs":"NuShell Plugins Guide » Commands","id":"4449","title":"Commands"},"445":{"body":"┌─────────────────────────────────────────────────┐\\n│ User Interface │\\n│ ├── CLI (provisioning ai ...) │\\n│ ├── Web UI (typdialog) │\\n│ └── MCP Client (Claude, etc.) │\\n└──────────────┬──────────────────────────────────┘ ↓\\n┌──────────────────────────────────────────────────┐\\n│ AI Service (Port 8083) │\\n│ ├── Request Router │\\n│ ├── Cache Layer (LRU + Semantic) │\\n│ ├── Prompt Engineering │\\n│ └── Response Streaming │\\n└──────┬─────────────────┬─────────────────────────┘ ↓ ↓\\n┌─────────────┐ ┌──────────────────┐\\n│ RAG System │ │ LLM Provider │\\n│ SurrealDB │ │ ├── Anthropic │\\n│ Vector DB │ │ ├── OpenAI │\\n│ + BM25 │ │ └── Local Model │\\n└─────────────┘ └──────────────────┘ ↓ ↓\\n┌──────────────────────────────────────┐\\n│ Cached Responses + Real Responses │\\n│ Streamed to User │\\n└──────────────────────────────────────┘","breadcrumbs":"Architecture » Architecture Diagram","id":"445","title":"Architecture Diagram"},"4450":{"body":"RustyVault Backend : export RUSTYVAULT_ADDR=\\"http://localhost:8200\\"\\nexport RUSTYVAULT_TOKEN=\\"your-token-here\\"\\nexport RUSTYVAULT_MOUNT=\\"transit\\" Age Backend : export AGE_RECIPIENT=\\"age1xxxxxxxxx\\"\\nexport AGE_IDENTITY=\\"/path/to/key.txt\\" HTTP Backend (Cosmian) : export KMS_HTTP_URL=\\"http://localhost:9998\\"\\nexport KMS_HTTP_BACKEND=\\"cosmian\\" AWS KMS : export AWS_REGION=\\"us-east-1\\"\\nexport AWS_ACCESS_KEY_ID=\\"...\\"\\nexport AWS_SECRET_ACCESS_KEY=\\"...\\"","breadcrumbs":"NuShell Plugins Guide » Environment Variables","id":"4450","title":"Environment Variables"},"4451":{"body":"Operation HTTP API Plugin Improvement Encrypt (RustyVault) ~50 ms ~5 ms 10x faster Decrypt (RustyVault) ~50 ms ~5 ms 10x faster Encrypt (Age) ~30 ms ~3 ms 10x faster Decrypt (Age) ~30 ms ~3 ms 10x faster Generate Key ~60 ms ~8 ms 7.5x faster","breadcrumbs":"NuShell Plugins Guide » Performance Comparison","id":"4451","title":"Performance Comparison"},"4452":{"body":"Orchestrator operations plugin for status, validation, and task management.","breadcrumbs":"NuShell Plugins Guide » Plugin: nu_plugin_orchestrator","id":"4452","title":"Plugin: nu_plugin_orchestrator"},"4453":{"body":"orch status [--data-dir ] Get orchestrator status from local files (no HTTP). Flags : --data-dir : Data directory (default: provisioning/platform/orchestrator/data) Examples : # Default data dir\\norch status # Custom dir\\norch status --data-dir ./custom/data # Pipeline usage\\norch status | if $in.active_tasks > 0 { echo \\"Tasks running\\" } Output Format : { \\"active_tasks\\": 5, \\"completed_tasks\\": 120, \\"failed_tasks\\": 2, \\"pending_tasks\\": 3, \\"uptime\\": \\"2d 4h 15m\\", \\"health\\": \\"healthy\\"\\n} orch validate [--strict] Validate workflow Nickel file. Arguments : workflow.ncl (required): Path to Nickel workflow file Flags : --strict: Enable strict validation (all checks, warnings as errors) Examples : # Basic validation\\norch validate workflows/deploy.ncl # Strict mode\\norch validate workflows/deploy.ncl --strict # Pipeline usage\\nls workflows/*.ncl | each { |file| orch validate $file.name } Output Format : { \\"valid\\": true, \\"workflow\\": { \\"name\\": \\"deploy_k8s_cluster\\", \\"version\\": \\"1.0.0\\", \\"operations\\": 5 }, \\"warnings\\": [], \\"errors\\": []\\n} Validation Checks : KCL syntax errors Required fields present Dependency graph valid (no cycles) Resource limits within bounds Provider configurations valid orch tasks [--status ] [--limit ] List orchestrator tasks. Flags : --status : Filter by status (pending, running, completed, failed) --limit : Limit number of results (default: 100) --data-dir : Data directory (default from ORCHESTRATOR_DATA_DIR) Examples : # All tasks\\norch tasks # Pending tasks only\\norch tasks --status pending # Running tasks (limit to 10)\\norch tasks --status running --limit 10 # Pipeline usage\\norch tasks --status failed | each { |task| echo $\\"Failed: ($task.name)\\" } Output Format : [ { \\"task_id\\": \\"task_abc123\\", \\"name\\": \\"deploy_kubernetes\\", \\"status\\": \\"running\\", \\"priority\\": 5, \\"created_at\\": \\"2025-10-09T12:00:00Z\\", \\"updated_at\\": \\"2025-10-09T12:05:00Z\\", \\"progress\\": 45 }\\n]","breadcrumbs":"NuShell Plugins Guide » Commands","id":"4453","title":"Commands"},"4454":{"body":"Variable Description Default ORCHESTRATOR_DATA_DIR Data directory provisioning/platform/orchestrator/data","breadcrumbs":"NuShell Plugins Guide » Environment Variables","id":"4454","title":"Environment Variables"},"4455":{"body":"Operation HTTP API Plugin Improvement Status ~30 ms ~3 ms 10x faster Validate ~100 ms ~10 ms 10x faster Tasks List ~50 ms ~5 ms 10x faster","breadcrumbs":"NuShell Plugins Guide » Performance Comparison","id":"4455","title":"Performance Comparison"},"4456":{"body":"","breadcrumbs":"NuShell Plugins Guide » Pipeline Examples","id":"4456","title":"Pipeline Examples"},"4457":{"body":"# Login and verify in one pipeline\\nauth login admin | if $in.success { auth verify } | if $in.mfa_required { auth mfa verify --code (input \\"MFA code: \\") }","breadcrumbs":"NuShell Plugins Guide » Authentication Flow","id":"4457","title":"Authentication Flow"},"4458":{"body":"# Encrypt multiple secrets\\n[\\"secret1\\", \\"secret2\\", \\"secret3\\"] | each { |data| kms encrypt $data --backend rustyvault } | save encrypted_secrets.json # Decrypt and process\\nopen encrypted_secrets.json | each { |enc| kms decrypt $enc } | each { |plain| echo $\\"Decrypted: ($plain)\\" }","breadcrumbs":"NuShell Plugins Guide » KMS Operations","id":"4458","title":"KMS Operations"},"4459":{"body":"# Monitor running tasks\\nwhile true { orch tasks --status running | each { |task| echo $\\"($task.name): ($task.progress)%\\" } sleep 5sec\\n}","breadcrumbs":"NuShell Plugins Guide » Orchestrator Monitoring","id":"4459","title":"Orchestrator Monitoring"},"446":{"body":"| | Metric | Value | | | | -------- | ------- | | | | Cold response (cache miss) | 2-5 seconds | | | | Cached response | <500ms | | | | Streaming start time | <1 second | | | | AI service memory usage | ~200MB at rest | | | | Cache size (configurable) | Up to 500MB | | | | Vector DB (SurrealDB) | Included, auto-managed | |","breadcrumbs":"Architecture » Performance Characteristics","id":"446","title":"Performance Characteristics"},"4460":{"body":"# Complete deployment workflow\\nauth login admin | auth mfa verify --code (input \\"MFA: \\") | orch validate workflows/deploy.ncl | if $in.valid { orch tasks --status pending | where priority > 5 | each { |task| echo $\\"High priority: ($task.name)\\" } }","breadcrumbs":"NuShell Plugins Guide » Combined Workflow","id":"4460","title":"Combined Workflow"},"4461":{"body":"","breadcrumbs":"NuShell Plugins Guide » Troubleshooting","id":"4461","title":"Troubleshooting"},"4462":{"body":"\\"No active session\\" : auth login \\"Keyring error\\" (macOS) : Check Keychain Access permissions Security & Privacy → Privacy → Full Disk Access → Add Nushell \\"Keyring error\\" (Linux) : # Install keyring service\\nsudo apt install gnome-keyring # Ubuntu/Debian\\nsudo dnf install gnome-keyring # Fedora # Or use KWallet\\nsudo apt install kwalletmanager \\"MFA verification failed\\" : Check time synchronization (TOTP requires accurate clocks) Use backup codes if TOTP not working Re-enroll MFA if device lost","breadcrumbs":"NuShell Plugins Guide » Auth Plugin","id":"4462","title":"Auth Plugin"},"4463":{"body":"\\"RustyVault connection failed\\" : # Check RustyVault running\\ncurl http://localhost:8200/v1/sys/health # Set environment\\nexport RUSTYVAULT_ADDR=\\"http://localhost:8200\\"\\nexport RUSTYVAULT_TOKEN=\\"your-token\\" \\"Age encryption failed\\" : # Check Age keys\\nls -la ~/.age/ # Generate new key if needed\\nage-keygen -o ~/.age/key.txt # Set environment\\nexport AGE_RECIPIENT=\\"age1xxxxxxxxx\\"\\nexport AGE_IDENTITY=\\"$HOME/.age/key.txt\\" \\"AWS KMS access denied\\" : # Check AWS credentials\\naws sts get-caller-identity # Check KMS key policy\\naws kms describe-key --key-id alias/provisioning","breadcrumbs":"NuShell Plugins Guide » KMS Plugin","id":"4463","title":"KMS Plugin"},"4464":{"body":"\\"Failed to read status\\" : # Check data directory exists\\nls provisioning/platform/orchestrator/data/ # Create if missing\\nmkdir -p provisioning/platform/orchestrator/data \\"Workflow validation failed\\" : # Use strict mode for detailed errors\\norch validate workflows/deploy.ncl --strict \\"No tasks found\\" : # Check orchestrator running\\nps aux | grep orchestrator # Start orchestrator\\ncd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background","breadcrumbs":"NuShell Plugins Guide » Orchestrator Plugin","id":"4464","title":"Orchestrator Plugin"},"4465":{"body":"","breadcrumbs":"NuShell Plugins Guide » Development","id":"4465","title":"Development"},"4466":{"body":"cd provisioning/core/plugins/nushell-plugins # Clean build\\ncargo clean # Build with debug info\\ncargo build -p nu_plugin_auth\\ncargo build -p nu_plugin_kms\\ncargo build -p nu_plugin_orchestrator # Run tests\\ncargo test -p nu_plugin_auth\\ncargo test -p nu_plugin_kms\\ncargo test -p nu_plugin_orchestrator # Run all tests\\ncargo test --all","breadcrumbs":"NuShell Plugins Guide » Building from Source","id":"4466","title":"Building from Source"},"4467":{"body":"name: Build Nushell Plugins on: [push, pull_request] jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rust uses: actions-rs/toolchain@v1 with: toolchain: stable - name: Build Plugins run: | cd provisioning/core/plugins/nushell-plugins cargo build --release --all - name: Test Plugins run: | cd provisioning/core/plugins/nushell-plugins cargo test --all - name: Upload Artifacts uses: actions/upload-artifact@v3 with: name: plugins path: provisioning/core/plugins/nushell-plugins/target/release/nu_plugin_*","breadcrumbs":"NuShell Plugins Guide » Adding to CI/CD","id":"4467","title":"Adding to CI/CD"},"4468":{"body":"","breadcrumbs":"NuShell Plugins Guide » Advanced Usage","id":"4468","title":"Advanced Usage"},"4469":{"body":"Create ~/.config/nushell/plugin_config.nu: # Auth plugin defaults\\n$env.CONTROL_CENTER_URL = \\"https://control-center.example.com\\" # KMS plugin defaults\\n$env.RUSTYVAULT_ADDR = \\"https://vault.example.com:8200\\"\\n$env.RUSTYVAULT_MOUNT = \\"transit\\" # Orchestrator plugin defaults\\n$env.ORCHESTRATOR_DATA_DIR = \\"/opt/orchestrator/data\\"","breadcrumbs":"NuShell Plugins Guide » Custom Plugin Configuration","id":"4469","title":"Custom Plugin Configuration"},"447":{"body":"","breadcrumbs":"Architecture » Security Model","id":"447","title":"Security Model"},"4470":{"body":"Add to ~/.config/nushell/config.nu: # Auth shortcuts\\nalias login = auth login\\nalias logout = auth logout # KMS shortcuts\\nalias encrypt = kms encrypt\\nalias decrypt = kms decrypt # Orchestrator shortcuts\\nalias status = orch status\\nalias validate = orch validate\\nalias tasks = orch tasks","breadcrumbs":"NuShell Plugins Guide » Plugin Aliases","id":"4470","title":"Plugin Aliases"},"4471":{"body":"","breadcrumbs":"NuShell Plugins Guide » Security Best Practices","id":"4471","title":"Security Best Practices"},"4472":{"body":"✅ DO : Use interactive password prompts ✅ DO : Enable MFA for production environments ✅ DO : Verify session before sensitive operations ❌ DON\'T : Pass passwords in command line (visible in history) ❌ DON\'T : Store tokens in plain text files","breadcrumbs":"NuShell Plugins Guide » Authentication","id":"4472","title":"Authentication"},"4473":{"body":"✅ DO : Use context (AAD) for encryption when available ✅ DO : Rotate KMS keys regularly ✅ DO : Use hardware-backed keys (WebAuthn, YubiKey) when possible ❌ DON\'T : Share Age private keys ❌ DON\'T : Log decrypted data","breadcrumbs":"NuShell Plugins Guide » KMS Operations","id":"4473","title":"KMS Operations"},"4474":{"body":"✅ DO : Validate workflows in strict mode before production ✅ DO : Monitor task status regularly ✅ DO : Use appropriate data directory permissions (700) ❌ DON\'T : Run orchestrator as root ❌ DON\'T : Expose data directory over network shares","breadcrumbs":"NuShell Plugins Guide » Orchestrator","id":"4474","title":"Orchestrator"},"4475":{"body":"Q: Why use plugins instead of HTTP API? A: Plugins are 10x faster, have better Nushell integration, and eliminate HTTP overhead. Q: Can I use plugins without orchestrator running? A: auth and kms work independently. orch requires access to orchestrator data directory. Q: How do I update plugins? A: Rebuild and re-register: cargo build --release --all && plugin add target/release/nu_plugin_* Q: Are plugins cross-platform? A: Yes, plugins work on macOS, Linux, and Windows (with appropriate keyring services). Q: Can I use multiple KMS backends simultaneously? A: Yes, specify --backend flag for each operation. Q: How do I backup MFA enrollment? A: Save backup codes securely (password manager, encrypted file). QR code can be re-scanned.","breadcrumbs":"NuShell Plugins Guide » FAQ","id":"4475","title":"FAQ"},"4476":{"body":"Security System : docs/architecture/adr-009-security-system-complete.md JWT Auth : docs/architecture/JWT_AUTH_IMPLEMENTATION.md Config Encryption : docs/user/CONFIG_ENCRYPTION_GUIDE.md RustyVault Integration : RUSTYVAULT_INTEGRATION_SUMMARY.md MFA Implementation : docs/architecture/MFA_IMPLEMENTATION_SUMMARY.md Version : 1.0.0 Last Updated : 2025-10-09 Maintained By : Platform Team","breadcrumbs":"NuShell Plugins Guide » Related Documentation","id":"4476","title":"Related Documentation"},"4477":{"body":"For complete documentation on Nushell plugins including installation, configuration, and advanced usage, see: Complete Guide : Plugin Integration Guide (1500+ lines) Quick Reference : Nushell Plugins Guide","breadcrumbs":"NuShell Plugins System » Nushell Plugins Integration (v1.0.0) - See detailed guide for complete reference","id":"4477","title":"Nushell Plugins Integration (v1.0.0) - See detailed guide for complete reference"},"4478":{"body":"Native Nushell plugins eliminate HTTP overhead and provide direct Rust-to-Nushell integration for critical platform operations.","breadcrumbs":"NuShell Plugins System » Overview","id":"4478","title":"Overview"},"4479":{"body":"Plugin Operation HTTP Latency Plugin Latency Speedup nu_plugin_kms Encrypt (RustyVault) ~50 ms ~5 ms 10x nu_plugin_kms Decrypt (RustyVault) ~50 ms ~5 ms 10x nu_plugin_orchestrator Status query ~30 ms ~1 ms 30x nu_plugin_auth Verify session ~50 ms ~10 ms 5x","breadcrumbs":"NuShell Plugins System » Performance Improvements","id":"4479","title":"Performance Improvements"},"448":{"body":"All AI operations controlled by Cedar policies: User role-based access control Operation-specific permissions Complete audit logging","breadcrumbs":"Architecture » Cedar Authorization","id":"448","title":"Cedar Authorization"},"4480":{"body":"Authentication Plugin (nu_plugin_auth) JWT login/logout with password prompts MFA enrollment (TOTP, WebAuthn) Session management OS-native keyring integration KMS Plugin (nu_plugin_kms) Multiple backend support (RustyVault, Age, Cosmian, AWS KMS, Vault) 10x faster encryption/decryption Context-based encryption (AAD support) Orchestrator Plugin (nu_plugin_orchestrator) Direct file-based operations (no HTTP) 30-50x faster status queries KCL workflow validation","breadcrumbs":"NuShell Plugins System » Three Native Plugins","id":"4480","title":"Three Native Plugins"},"4481":{"body":"# Authentication\\nauth login admin\\nauth verify\\nauth mfa enroll totp # KMS Operations\\nkms encrypt \\"data\\"\\nkms decrypt \\"vault:v1:abc123...\\" # Orchestrator\\norch status\\norch validate workflows/deploy.ncl\\norch tasks --status running","breadcrumbs":"NuShell Plugins System » Quick Commands","id":"4481","title":"Quick Commands"},"4482":{"body":"cd provisioning/core/plugins/nushell-plugins\\ncargo build --release --all # Register with Nushell\\nplugin add target/release/nu_plugin_auth\\nplugin add target/release/nu_plugin_kms\\nplugin add target/release/nu_plugin_orchestrator","breadcrumbs":"NuShell Plugins System » Installation","id":"4482","title":"Installation"},"4483":{"body":"✅ 10x faster KMS operations (5 ms vs 50 ms) ✅ 30-50x faster orchestrator queries (1 ms vs 30-50 ms) ✅ Native Nushell integration with data structures and pipelines ✅ Offline capability (KMS with Age, orchestrator local ops) ✅ OS-native keyring for secure token storage See Plugin Integration Guide for complete information.","breadcrumbs":"NuShell Plugins System » Benefits","id":"4483","title":"Benefits"},"4484":{"body":"","breadcrumbs":"Plugin Usage Guide » Provisioning Plugins Usage Guide","id":"4484","title":"Provisioning Plugins Usage Guide"},"4485":{"body":"Three high-performance Nushell plugins have been integrated into the provisioning system to provide 10-50x performance improvements over HTTP-based operations: nu_plugin_auth - JWT authentication with system keyring integration nu_plugin_kms - Multi-backend KMS encryption nu_plugin_orchestrator - Local orchestrator operations","breadcrumbs":"Plugin Usage Guide » Overview","id":"4485","title":"Overview"},"4486":{"body":"","breadcrumbs":"Plugin Usage Guide » Installation","id":"4486","title":"Installation"},"4487":{"body":"Nushell 0.107.1 or later All plugins are pre-compiled in provisioning/core/plugins/nushell-plugins/","breadcrumbs":"Plugin Usage Guide » Prerequisites","id":"4487","title":"Prerequisites"},"4488":{"body":"Run the installation script in a new Nushell session: nu provisioning/core/plugins/install-and-register.nu This will: Copy plugins to ~/.local/share/nushell/plugins/ Register plugins with Nushell Verify installation","breadcrumbs":"Plugin Usage Guide » Quick Install","id":"4488","title":"Quick Install"},"4489":{"body":"If the script doesn\'t work, run these commands: # Copy plugins\\ncp provisioning/core/plugins/nushell-plugins/nu_plugin_auth/target/release/nu_plugin_auth ~/.local/share/nushell/plugins/\\ncp provisioning/core/plugins/nushell-plugins/nu_plugin_kms/target/release/nu_plugin_kms ~/.local/share/nushell/plugins/\\ncp provisioning/core/plugins/nushell-plugins/nu_plugin_orchestrator/target/release/nu_plugin_orchestrator ~/.local/share/nushell/plugins/ chmod +x ~/.local/share/nushell/plugins/nu_plugin_* # Register with Nushell (run in a fresh session)\\nplugin add ~/.local/share/nushell/plugins/nu_plugin_auth\\nplugin add ~/.local/share/nushell/plugins/nu_plugin_kms\\nplugin add ~/.local/share/nushell/plugins/nu_plugin_orchestrator","breadcrumbs":"Plugin Usage Guide » Manual Installation","id":"4489","title":"Manual Installation"},"449":{"body":"Secrets never sent to external LLMs PII/sensitive data sanitized before API calls Encryption at rest in local cache HSM support for key storage","breadcrumbs":"Architecture » Secret Protection","id":"449","title":"Secret Protection"},"4490":{"body":"","breadcrumbs":"Plugin Usage Guide » Usage","id":"4490","title":"Usage"},"4491":{"body":"10x faster than HTTP fallback Login provisioning auth login [password] # Examples\\nprovisioning auth login admin\\nprovisioning auth login admin mypassword\\nprovisioning auth login --url http://localhost:8081 admin Verify Token provisioning auth verify [--local] # Examples\\nprovisioning auth verify\\nprovisioning auth verify --local Logout provisioning auth logout # Example\\nprovisioning auth logout List Sessions provisioning auth sessions [--active] # Examples\\nprovisioning auth sessions\\nprovisioning auth sessions --active","breadcrumbs":"Plugin Usage Guide » Authentication Plugin","id":"4491","title":"Authentication Plugin"},"4492":{"body":"10x faster than HTTP fallback Supports multiple backends: RustyVault, Age, AWS KMS, HashiCorp Vault, Cosmian Encrypt Data provisioning kms encrypt [--backend ] [--key ] # Examples\\nprovisioning kms encrypt \\"secret-data\\"\\nprovisioning kms encrypt \\"secret\\" --backend age\\nprovisioning kms encrypt \\"secret\\" --backend rustyvault --key my-key Decrypt Data provisioning kms decrypt [--backend ] [--key ] # Examples\\nprovisioning kms decrypt $encrypted_data\\nprovisioning kms decrypt $encrypted --backend age KMS Status provisioning kms status # Output shows current backend and availability List Backends provisioning kms list-backends # Shows all available KMS backends","breadcrumbs":"Plugin Usage Guide » KMS Plugin","id":"4492","title":"KMS Plugin"},"4493":{"body":"30x faster than HTTP fallback Local file-based orchestration without network overhead. Check Status provisioning orch status [--data-dir ] # Examples\\nprovisioning orch status\\nprovisioning orch status --data-dir /custom/data List Tasks provisioning orch tasks [--status ] [--limit ] [--data-dir ] # Examples\\nprovisioning orch tasks\\nprovisioning orch tasks --status pending\\nprovisioning orch tasks --status running --limit 10 Validate Workflow provisioning orch validate [--strict] # Examples\\nprovisioning orch validate workflows/deployment.ncl\\nprovisioning orch validate workflows/deployment.ncl --strict Submit Workflow provisioning orch submit [--priority <0-100>] [--check] # Examples\\nprovisioning orch submit workflows/deployment.ncl\\nprovisioning orch submit workflows/critical.ncl --priority 90\\nprovisioning orch submit workflows/test.ncl --check Monitor Task provisioning orch monitor [--once] [--interval ] [--timeout ] # Examples\\nprovisioning orch monitor task-123\\nprovisioning orch monitor task-123 --once\\nprovisioning orch monitor task-456 --interval 5000 --timeout 600","breadcrumbs":"Plugin Usage Guide » Orchestrator Plugin","id":"4493","title":"Orchestrator Plugin"},"4494":{"body":"Check which plugins are installed: provisioning plugin status # Output:\\n# Provisioning Plugins Status\\n# ============================\\n# [OK] nu_plugin_auth - JWT authentication with keyring\\n# [OK] nu_plugin_kms - Multi-backend encryption\\n# [OK] nu_plugin_orchestrator - Local orchestrator (30x faster)\\n#\\n# All plugins loaded - using native high-performance mode","breadcrumbs":"Plugin Usage Guide » Plugin Status","id":"4494","title":"Plugin Status"},"4495":{"body":"provisioning plugin test # Runs quick tests on all installed plugins\\n# Output shows which plugins are responding","breadcrumbs":"Plugin Usage Guide » Testing Plugins","id":"4495","title":"Testing Plugins"},"4496":{"body":"provisioning plugin list # Shows all provisioning plugins registered with Nushell","breadcrumbs":"Plugin Usage Guide » List Registered Plugins","id":"4496","title":"List Registered Plugins"},"4497":{"body":"Operation With Plugin HTTP Fallback Speedup Auth verify ~10 ms ~50 ms 5x Auth login ~15 ms ~100 ms 7x KMS encrypt ~5-8 ms ~50 ms 10x KMS decrypt ~5-8 ms ~50 ms 10x Orch status ~1-5 ms ~30 ms 30x Orch tasks list ~2-10 ms ~50 ms 25x","breadcrumbs":"Plugin Usage Guide » Performance Comparison","id":"4497","title":"Performance Comparison"},"4498":{"body":"If plugins are not installed or fail to load, all commands automatically fall back to HTTP-based operations: # With plugins installed (fast)\\n$ provisioning auth verify\\nToken is valid # Without plugins (slower, but functional)\\n$ provisioning auth verify\\n[HTTP fallback mode]\\nToken is valid (slower) This ensures the system remains functional even if plugins aren\'t available.","breadcrumbs":"Plugin Usage Guide » Graceful Fallback","id":"4498","title":"Graceful Fallback"},"4499":{"body":"","breadcrumbs":"Plugin Usage Guide » Troubleshooting","id":"4499","title":"Troubleshooting"},"45":{"body":"","breadcrumbs":"Installation Guide » System Requirements","id":"45","title":"System Requirements"},"450":{"body":"Air-gapped deployments: On-premise LLM models (Llama 3, Mistral) Zero external API calls Full data privacy compliance Ideal for classified environments","breadcrumbs":"Architecture » Local Model Support","id":"450","title":"Local Model Support"},"4500":{"body":"Make sure you: Have a fresh Nushell session Ran plugin add for all three plugins The plugin files are executable: chmod +x ~/.local/share/nushell/plugins/nu_plugin_*","breadcrumbs":"Plugin Usage Guide » Plugins not found after installation","id":"4500","title":"Plugins not found after installation"},"4501":{"body":"If you see \\"command not found\\" when running provisioning auth login, the auth plugin is not loaded. Run: plugin list | grep nu_plugin If you don\'t see the plugins, register them: plugin add ~/.local/share/nushell/plugins/nu_plugin_auth\\nplugin add ~/.local/share/nushell/plugins/nu_plugin_kms\\nplugin add ~/.local/share/nushell/plugins/nu_plugin_orchestrator","breadcrumbs":"Plugin Usage Guide » \\"Command not found\\" errors","id":"4501","title":"\\"Command not found\\" errors"},"4502":{"body":"Check the plugin logs: provisioning plugin test If a plugin fails, the system will automatically fall back to HTTP mode.","breadcrumbs":"Plugin Usage Guide » Plugins crash or are unresponsive","id":"4502","title":"Plugins crash or are unresponsive"},"4503":{"body":"All plugin commands are integrated into the main provisioning CLI: # Shortcuts available\\nprovisioning auth login admin # Full command\\nprovisioning login admin # Alias provisioning kms encrypt secret # Full command\\nprovisioning encrypt secret # Alias provisioning orch status # Full command\\nprovisioning orch-status # Alias","breadcrumbs":"Plugin Usage Guide » Integration with Provisioning CLI","id":"4503","title":"Integration with Provisioning CLI"},"4504":{"body":"","breadcrumbs":"Plugin Usage Guide » Advanced Configuration","id":"4504","title":"Advanced Configuration"},"4505":{"body":"For orchestrator operations, specify custom data directory: provisioning orch status --data-dir /custom/orchestrator/data\\nprovisioning orch tasks --data-dir /custom/orchestrator/data","breadcrumbs":"Plugin Usage Guide » Custom Data Directory","id":"4505","title":"Custom Data Directory"},"4506":{"body":"For auth operations with custom endpoint: provisioning auth login admin --url http://custom-auth-server:8081\\nprovisioning auth verify --url http://custom-auth-server:8081","breadcrumbs":"Plugin Usage Guide » Custom Auth URL","id":"4506","title":"Custom Auth URL"},"4507":{"body":"Specify which KMS backend to use: # Use Age encryption\\nprovisioning kms encrypt \\"data\\" --backend age # Use RustyVault\\nprovisioning kms encrypt \\"data\\" --backend rustyvault # Use AWS KMS\\nprovisioning kms encrypt \\"data\\" --backend aws # Decrypt with same backend\\nprovisioning kms decrypt $encrypted --backend age","breadcrumbs":"Plugin Usage Guide » KMS Backend Selection","id":"4507","title":"KMS Backend Selection"},"4508":{"body":"If you need to rebuild plugins: cd provisioning/core/plugins/nushell-plugins # Build auth plugin\\ncd nu_plugin_auth && cargo build --release && cd .. # Build KMS plugin\\ncd nu_plugin_kms && cargo build --release && cd .. # Build orchestrator plugin\\ncd nu_plugin_orchestrator && cargo build --release && cd .. # Run install script\\ncd ../..\\nnu install-and-register.nu","breadcrumbs":"Plugin Usage Guide » Building Plugins from Source","id":"4508","title":"Building Plugins from Source"},"4509":{"body":"The plugins follow Nushell\'s plugin protocol: Plugin Binary : Compiled Rust binary in target/release/ Registration : Via plugin add command IPC : Communication via Nushell\'s JSON protocol Fallback : HTTP API fallback if plugins unavailable","breadcrumbs":"Plugin Usage Guide » Architecture","id":"4509","title":"Architecture"},"451":{"body":"See Configuration Guide for: LLM provider setup Cache configuration Cost limits and budgets Security policies","breadcrumbs":"Architecture » Configuration","id":"451","title":"Configuration"},"4510":{"body":"Auth tokens are stored in system keyring (Keychain/Credential Manager/Secret Service) KMS keys are protected by the selected backend\'s security Orchestrator operations are local file-based (no network exposure) All operations are logged in provisioning audit logs","breadcrumbs":"Plugin Usage Guide » Security Notes","id":"4510","title":"Security Notes"},"4511":{"body":"For issues or questions: Check plugin status: provisioning plugin test Review logs: provisioning logs or /var/log/provisioning/ Test HTTP fallback by temporarily unregistering plugins Contact the provisioning team with plugin test output","breadcrumbs":"Plugin Usage Guide » Support","id":"4511","title":"Support"},"4512":{"body":"Status : Production Ready Date : 2025-11-19 Version : 1.0.0","breadcrumbs":"Secrets Management Guide » Secrets Management System - Configuration Guide","id":"4512","title":"Secrets Management System - Configuration Guide"},"4513":{"body":"The provisioning system supports secure SSH key retrieval from multiple secret sources, eliminating hardcoded filesystem dependencies and enabling enterprise-grade security. SSH keys are retrieved from configured secret sources (SOPS, KMS, RustyVault) with automatic fallback to local-dev mode for development environments.","breadcrumbs":"Secrets Management Guide » Overview","id":"4513","title":"Overview"},"4514":{"body":"","breadcrumbs":"Secrets Management Guide » Secret Sources","id":"4514","title":"Secret Sources"},"4515":{"body":"Age-based encrypted secrets file with YAML structure. Pros : ✅ Age encryption (modern, performant) ✅ Easy to version in Git (encrypted) ✅ No external services required ✅ Simple YAML structure Cons : ❌ Requires Age key management ❌ No key rotation automation Environment Variables : PROVISIONING_SECRET_SOURCE=sops\\nPROVISIONING_SOPS_ENABLED=true\\nPROVISIONING_SOPS_SECRETS_FILE=/path/to/secrets.enc.yaml\\nPROVISIONING_SOPS_AGE_KEY_FILE=$HOME/.age/provisioning Secrets File Structure (provisioning/secrets.enc.yaml): # Encrypted with sops\\nssh: web-01: ubuntu: /path/to/id_rsa root: /path/to/root_id_rsa db-01: postgres: /path/to/postgres_id_rsa Setup Instructions : # 1. Install sops and age\\nbrew install sops age # 2. Generate Age key (store securely!)\\nage-keygen -o $HOME/.age/provisioning # 3. Create encrypted secrets file\\ncat > secrets.yaml << \'EOF\'\\nssh: web-01: ubuntu: ~/.ssh/provisioning_web01 db-01: postgres: ~/.ssh/provisioning_db01\\nEOF # 4. Encrypt with sops\\nsops -e -i secrets.yaml # 5. Rename to enc version\\nmv secrets.yaml provisioning/secrets.enc.yaml # 6. Configure environment\\nexport PROVISIONING_SECRET_SOURCE=sops\\nexport PROVISIONING_SOPS_SECRETS_FILE=$(pwd)/provisioning/secrets.enc.yaml\\nexport PROVISIONING_SOPS_AGE_KEY_FILE=$HOME/.age/provisioning","breadcrumbs":"Secrets Management Guide » 1. SOPS (Secrets Operations)","id":"4515","title":"1. SOPS (Secrets Operations)"},"4516":{"body":"AWS KMS or compatible key management service. Pros : ✅ Cloud-native security ✅ Automatic key rotation ✅ Audit logging built-in ✅ High availability Cons : ❌ Requires AWS account/credentials ❌ API calls add latency (~50 ms) ❌ Cost per API call Environment Variables : PROVISIONING_SECRET_SOURCE=kms\\nPROVISIONING_KMS_ENABLED=true\\nPROVISIONING_KMS_REGION=us-east-1 Secret Storage Pattern : provisioning/ssh-keys/{hostname}/{username} Setup Instructions : # 1. Create KMS key (one-time)\\naws kms create-key \\\\ --description \\"Provisioning SSH Keys\\" \\\\ --region us-east-1 # 2. Store SSH keys in Secrets Manager\\naws secretsmanager create-secret \\\\ --name provisioning/ssh-keys/web-01/ubuntu \\\\ --secret-string \\"$(cat ~/.ssh/provisioning_web01)\\" \\\\ --region us-east-1 # 3. Configure environment\\nexport PROVISIONING_SECRET_SOURCE=kms\\nexport PROVISIONING_KMS_REGION=us-east-1 # 4. Ensure AWS credentials available\\nexport AWS_PROFILE=provisioning\\n# or\\nexport AWS_ACCESS_KEY_ID=...\\nexport AWS_SECRET_ACCESS_KEY=...","breadcrumbs":"Secrets Management Guide » 2. KMS (Key Management Service)","id":"4516","title":"2. KMS (Key Management Service)"},"4517":{"body":"Self-hosted or managed Vault instance for secrets. Pros : ✅ Self-hosted option ✅ Fine-grained access control ✅ Multiple authentication methods ✅ Easy key rotation Cons : ❌ Requires Vault instance ❌ More operational overhead ❌ Network latency Environment Variables : PROVISIONING_SECRET_SOURCE=vault\\nPROVISIONING_VAULT_ENABLED=true\\nPROVISIONING_VAULT_ADDRESS=http://localhost:8200\\nPROVISIONING_VAULT_TOKEN=hvs.CAESIAoICQ... Secret Storage Pattern : GET /v1/secret/ssh-keys/{hostname}/{username}\\n# Returns: {\\"key_content\\": \\"-----BEGIN OPENSSH PRIVATE KEY-----...\\"} Setup Instructions : # 1. Start Vault (if not already running)\\ndocker run -p 8200:8200 \\\\ -e VAULT_DEV_ROOT_TOKEN_ID=provisioning \\\\ vault server -dev # 2. Create KV v2 mount (if not exists)\\nvault secrets enable -version=2 -path=secret kv # 3. Store SSH key\\nvault kv put secret/ssh-keys/web-01/ubuntu \\\\ key_content=@~/.ssh/provisioning_web01 # 4. Configure environment\\nexport PROVISIONING_SECRET_SOURCE=vault\\nexport PROVISIONING_VAULT_ADDRESS=http://localhost:8200\\nexport PROVISIONING_VAULT_TOKEN=provisioning # 5. Create AppRole for production\\nvault auth enable approle\\nvault write auth/approle/role/provisioning \\\\ token_ttl=1h \\\\ token_max_ttl=4h\\nvault read auth/approle/role/provisioning/role-id\\nvault write -f auth/approle/role/provisioning/secret-id","breadcrumbs":"Secrets Management Guide » 3. RustyVault (Hashicorp Vault-Compatible)","id":"4517","title":"3. RustyVault (Hashicorp Vault-Compatible)"},"4518":{"body":"Local filesystem SSH keys (development only). Pros : ✅ No setup required ✅ Fast (local filesystem) ✅ Works offline Cons : ❌ NOT for production ❌ Hardcoded filesystem dependency ❌ No key rotation Environment Variables : PROVISIONING_ENVIRONMENT=local-dev Behavior : Standard paths checked (in order): $HOME/.ssh/id_rsa $HOME/.ssh/id_ed25519 $HOME/.ssh/provisioning $HOME/.ssh/provisioning_rsa","breadcrumbs":"Secrets Management Guide » 4. Local-Dev (Fallback)","id":"4518","title":"4. Local-Dev (Fallback)"},"4519":{"body":"When PROVISIONING_SECRET_SOURCE is not explicitly set, the system auto-detects in this order: 1. PROVISIONING_SOPS_ENABLED=true or PROVISIONING_SOPS_SECRETS_FILE set? → Use SOPS\\n2. PROVISIONING_KMS_ENABLED=true or PROVISIONING_KMS_REGION set? → Use KMS\\n3. PROVISIONING_VAULT_ENABLED=true or both VAULT_ADDRESS and VAULT_TOKEN set? → Use Vault\\n4. Otherwise → Use local-dev (with warnings in production environments)","breadcrumbs":"Secrets Management Guide » Auto-Detection Logic","id":"4519","title":"Auto-Detection Logic"},"452":{"body":"RAG System - Retrieval implementation details Security Policies - Authorization and safety controls Configuration Guide - Setup instructions ADR-015 - Design decisions Last Updated : 2025-01-13 Status : ✅ Production-Ready (core system) Test Coverage : 22/22 tests passing","breadcrumbs":"Architecture » Related Documentation","id":"452","title":"Related Documentation"},"4520":{"body":"Secret Source Env Variables Enabled in SOPS PROVISIONING_SOPS_* Development, Staging, Production KMS PROVISIONING_KMS_* Staging, Production (with AWS) Vault PROVISIONING_VAULT_* Development, Staging, Production Local-dev PROVISIONING_ENVIRONMENT=local-dev Development only","breadcrumbs":"Secrets Management Guide » Configuration Matrix","id":"4520","title":"Configuration Matrix"},"4521":{"body":"","breadcrumbs":"Secrets Management Guide » Production Recommended Setup","id":"4521","title":"Production Recommended Setup"},"4522":{"body":"# Using Vault (recommended for self-hosted)\\nexport PROVISIONING_SECRET_SOURCE=vault\\nexport PROVISIONING_VAULT_ADDRESS=https://vault.example.com:8200\\nexport PROVISIONING_VAULT_TOKEN=hvs.CAESIAoICQ...\\nexport PROVISIONING_ENVIRONMENT=production","breadcrumbs":"Secrets Management Guide » Minimal Setup (Single Source)","id":"4522","title":"Minimal Setup (Single Source)"},"4523":{"body":"# Primary: Vault\\nexport PROVISIONING_VAULT_ADDRESS=https://vault.primary.com:8200\\nexport PROVISIONING_VAULT_TOKEN=hvs.CAESIAoICQ... # Fallback: SOPS\\nexport PROVISIONING_SOPS_SECRETS_FILE=/etc/provisioning/secrets.enc.yaml\\nexport PROVISIONING_SOPS_AGE_KEY_FILE=/etc/provisioning/.age/key # Environment\\nexport PROVISIONING_ENVIRONMENT=production\\nexport PROVISIONING_SECRET_SOURCE=vault # Explicit: use Vault first","breadcrumbs":"Secrets Management Guide » Enhanced Setup (Fallback Chain)","id":"4523","title":"Enhanced Setup (Fallback Chain)"},"4524":{"body":"# Use KMS (managed service)\\nexport PROVISIONING_SECRET_SOURCE=kms\\nexport PROVISIONING_KMS_REGION=us-east-1\\nexport AWS_PROFILE=provisioning-admin # Or use Vault with HA\\nexport PROVISIONING_VAULT_ADDRESS=https://vault-ha.example.com:8200\\nexport PROVISIONING_VAULT_NAMESPACE=provisioning\\nexport PROVISIONING_ENVIRONMENT=production","breadcrumbs":"Secrets Management Guide » High-Availability Setup","id":"4524","title":"High-Availability Setup"},"4525":{"body":"","breadcrumbs":"Secrets Management Guide » Validation & Testing","id":"4525","title":"Validation & Testing"},"4526":{"body":"# Nushell\\nprovisioning secrets status # Show secret source and configuration\\nprovisioning secrets validate # Detailed diagnostics\\nprovisioning secrets diagnose","breadcrumbs":"Secrets Management Guide » Check Configuration","id":"4526","title":"Check Configuration"},"4527":{"body":"# Test specific host/user\\nprovisioning secrets get-key web-01 ubuntu # Test all configured hosts\\nprovisioning secrets validate-all # Dry-run SSH with retrieved key\\nprovisioning ssh --test-key web-01 ubuntu","breadcrumbs":"Secrets Management Guide » Test SSH Key Retrieval","id":"4527","title":"Test SSH Key Retrieval"},"4528":{"body":"","breadcrumbs":"Secrets Management Guide » Migration Path","id":"4528","title":"Migration Path"},"4529":{"body":"# 1. Create SOPS secrets file with existing keys\\ncat > secrets.yaml << \'EOF\'\\nssh: web-01: ubuntu: ~/.ssh/provisioning_web01 db-01: postgres: ~/.ssh/provisioning_db01\\nEOF # 2. Encrypt with Age\\nsops -e -i secrets.yaml # 3. Move to repo\\nmv secrets.yaml provisioning/secrets.enc.yaml # 4. Update environment\\nexport PROVISIONING_SECRET_SOURCE=sops\\nexport PROVISIONING_SOPS_SECRETS_FILE=$(pwd)/provisioning/secrets.enc.yaml\\nexport PROVISIONING_SOPS_AGE_KEY_FILE=$HOME/.age/provisioning","breadcrumbs":"Secrets Management Guide » From Local-Dev to SOPS","id":"4529","title":"From Local-Dev to SOPS"},"453":{"body":"Status : ✅ Production-Ready (SurrealDB 1.5.0+, 22/22 tests passing) The RAG system enables the AI service to access, retrieve, and reason over infrastructure documentation, schemas, and past configurations. This allows the AI to generate contextually accurate infrastructure configurations and provide intelligent troubleshooting advice grounded in actual platform knowledge.","breadcrumbs":"RAG System » Retrieval-Augmented Generation (RAG) System","id":"453","title":"Retrieval-Augmented Generation (RAG) System"},"4530":{"body":"# 1. Decrypt SOPS file\\nsops -d provisioning/secrets.enc.yaml > /tmp/secrets.yaml # 2. Import to Vault\\nvault kv put secret/ssh-keys/web-01/ubuntu key_content=@~/.ssh/provisioning_web01 # 3. Update environment\\nexport PROVISIONING_SECRET_SOURCE=vault\\nexport PROVISIONING_VAULT_ADDRESS=http://vault.example.com:8200\\nexport PROVISIONING_VAULT_TOKEN=hvs.CAESIAoICQ... # 4. Validate retrieval works\\nprovisioning secrets validate-all","breadcrumbs":"Secrets Management Guide » From SOPS to Vault","id":"4530","title":"From SOPS to Vault"},"4531":{"body":"","breadcrumbs":"Secrets Management Guide » Security Best Practices","id":"4531","title":"Security Best Practices"},"4532":{"body":"# Add to .gitignore\\necho \\"provisioning/secrets.enc.yaml\\" >> .gitignore\\necho \\".age/provisioning\\" >> .gitignore\\necho \\".vault-token\\" >> .gitignore","breadcrumbs":"Secrets Management Guide » 1. Never Commit Secrets","id":"4532","title":"1. Never Commit Secrets"},"4533":{"body":"# SOPS: Rotate Age key\\nage-keygen -o ~/.age/provisioning.new\\n# Update all secrets with new key # KMS: Enable automatic rotation\\naws kms enable-key-rotation --key-id alias/provisioning # Vault: Set TTL on secrets\\nvault write -f secret/metadata/ssh-keys/web-01/ubuntu \\\\ delete_version_after=2160h # 90 days","breadcrumbs":"Secrets Management Guide » 2. Rotate Keys Regularly","id":"4533","title":"2. Rotate Keys Regularly"},"4534":{"body":"# SOPS: Protect Age key\\nchmod 600 ~/.age/provisioning # KMS: Restrict IAM permissions\\naws iam put-user-policy --user-name provisioning \\\\ --policy-name ProvisioningSecretsAccess \\\\ --policy-document file://kms-policy.json # Vault: Use AppRole for applications\\nvault write auth/approle/role/provisioning \\\\ token_ttl=1h \\\\ secret_id_ttl=30m","breadcrumbs":"Secrets Management Guide » 3. Restrict Access","id":"4534","title":"3. Restrict Access"},"4535":{"body":"# KMS: Enable CloudTrail\\naws cloudtrail put-event-selectors \\\\ --trail-name provisioning-trail \\\\ --event-selectors ReadWriteType=All # Vault: Check audit logs\\nvault audit list # SOPS: Version control (encrypted)\\ngit log -p provisioning/secrets.enc.yaml","breadcrumbs":"Secrets Management Guide » 4. Audit Logging","id":"4535","title":"4. Audit Logging"},"4536":{"body":"","breadcrumbs":"Secrets Management Guide » Troubleshooting","id":"4536","title":"Troubleshooting"},"4537":{"body":"# Test Age decryption\\nsops -d provisioning/secrets.enc.yaml # Verify Age key\\nage-keygen -l ~/.age/provisioning # Regenerate if needed\\nrm ~/.age/provisioning\\nage-keygen -o ~/.age/provisioning","breadcrumbs":"Secrets Management Guide » SOPS Issues","id":"4537","title":"SOPS Issues"},"4538":{"body":"# Test AWS credentials\\naws sts get-caller-identity # Check KMS key permissions\\naws kms describe-key --key-id alias/provisioning # List secrets\\naws secretsmanager list-secrets --filters Name=name,Values=provisioning","breadcrumbs":"Secrets Management Guide » KMS Issues","id":"4538","title":"KMS Issues"},"4539":{"body":"# Check Vault status\\nvault status # Test authentication\\nvault token lookup # List secrets\\nvault kv list secret/ssh-keys/ # Check audit logs\\nvault audit list\\nvault read sys/audit","breadcrumbs":"Secrets Management Guide » Vault Issues","id":"4539","title":"Vault Issues"},"454":{"body":"The RAG system consists of: Document Store : SurrealDB vector store with semantic indexing Hybrid Search : Vector similarity + BM25 keyword search Chunk Management : Intelligent document chunking for code and markdown Context Ranking : Relevance scoring for retrieved documents Semantic Cache : Deduplication of repeated queries","breadcrumbs":"RAG System » Architecture Overview","id":"454","title":"Architecture Overview"},"4540":{"body":"Q: Can I use multiple secret sources simultaneously? A: Yes, configure multiple sources and set PROVISIONING_SECRET_SOURCE to specify primary. If primary fails, manual fallback to secondary is supported. Q: What happens if secret retrieval fails? A: System logs the error and fails fast. No automatic fallback to local filesystem (for security). Q: Can I cache SSH keys? A: Currently not, keys are retrieved fresh for each operation. Use local caching at OS level (ssh-agent) if needed. Q: How do I rotate keys? A: Update the secret in your configured source (SOPS/KMS/Vault) and retrieve fresh on next operation. Q: Is local-dev mode secure? A: No - it\'s development only. Production requires SOPS/KMS/Vault.","breadcrumbs":"Secrets Management Guide » FAQ","id":"4540","title":"FAQ"},"4541":{"body":"SSH Operation ↓\\nSecretsManager (Nushell/Rust) ↓\\n[Detect Source] ↓\\n┌─────────────────────────────────────┐\\n│ SOPS KMS Vault LocalDev\\n│ (Encrypted (AWS KMS (Self- (Filesystem\\n│ Secrets) Service) Hosted) Dev Only)\\n│\\n└─────────────────────────────────────┘ ↓\\nReturn SSH Key Path/Content ↓\\nSSH Operation Completes","breadcrumbs":"Secrets Management Guide » Architecture","id":"4541","title":"Architecture"},"4542":{"body":"SSH operations automatically use secrets manager: # Automatic secret retrieval\\nssh-cmd-smart $settings $server false \\"command\\" $ip\\n# Internally:\\n# 1. Determine secret source\\n# 2. Retrieve SSH key for server.installer_user@ip\\n# 3. Execute SSH with retrieved key\\n# 4. Cleanup sensitive data # Batch operations also integrate\\nssh-batch-execute $servers $settings \\"command\\"\\n# Per-host: Retrieves key → executes → cleans up For Support : See docs/user/TROUBLESHOOTING_GUIDE.md For Integration : See provisioning/core/nulib/lib_provisioning/platform/secrets.nu","breadcrumbs":"Secrets Management Guide » Integration with SSH Utilities","id":"4542","title":"Integration with SSH Utilities"},"4543":{"body":"A unified Key Management Service for the Provisioning platform with support for multiple backends. Source : provisioning/platform/kms-service/","breadcrumbs":"KMS Service » KMS Service - Key Management Service","id":"4543","title":"KMS Service - Key Management Service"},"4544":{"body":"Age : Fast, offline encryption (development) RustyVault : Self-hosted Vault-compatible API Cosmian KMS : Enterprise-grade with confidential computing AWS KMS : Cloud-native key management HashiCorp Vault : Enterprise secrets management","breadcrumbs":"KMS Service » Supported Backends","id":"4544","title":"Supported Backends"},"4545":{"body":"┌─────────────────────────────────────────────────────────┐\\n│ KMS Service │\\n├─────────────────────────────────────────────────────────┤\\n│ REST API (Axum) │\\n│ ├─ /api/v1/kms/encrypt POST │\\n│ ├─ /api/v1/kms/decrypt POST │\\n│ ├─ /api/v1/kms/generate-key POST │\\n│ ├─ /api/v1/kms/status GET │\\n│ └─ /api/v1/kms/health GET │\\n├─────────────────────────────────────────────────────────┤\\n│ Unified KMS Service Interface │\\n├─────────────────────────────────────────────────────────┤\\n│ Backend Implementations │\\n│ ├─ Age Client (local files) │\\n│ ├─ RustyVault Client (self-hosted) │\\n│ └─ Cosmian KMS Client (enterprise) │\\n└─────────────────────────────────────────────────────────┘","breadcrumbs":"KMS Service » Architecture","id":"4545","title":"Architecture"},"4546":{"body":"","breadcrumbs":"KMS Service » Quick Start","id":"4546","title":"Quick Start"},"4547":{"body":"# 1. Generate Age keys\\nmkdir -p ~/.config/provisioning/age\\nage-keygen -o ~/.config/provisioning/age/private_key.txt\\nage-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisioning/age/public_key.txt # 2. Set environment\\nexport PROVISIONING_ENV=dev # 3. Start KMS service\\ncd provisioning/platform/kms-service\\ncargo run --bin kms-service","breadcrumbs":"KMS Service » Development Setup (Age)","id":"4547","title":"Development Setup (Age)"},"4548":{"body":"# Set environment variables\\nexport PROVISIONING_ENV=prod\\nexport COSMIAN_KMS_URL=https://your-kms.example.com\\nexport COSMIAN_API_KEY=your-api-key-here # Start KMS service\\ncargo run --bin kms-service","breadcrumbs":"KMS Service » Production Setup (Cosmian)","id":"4548","title":"Production Setup (Cosmian)"},"4549":{"body":"","breadcrumbs":"KMS Service » REST API Examples","id":"4549","title":"REST API Examples"},"455":{"body":"","breadcrumbs":"RAG System » Core Components","id":"455","title":"Core Components"},"4550":{"body":"curl -X POST http://localhost:8082/api/v1/kms/encrypt \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"plaintext\\": \\"SGVsbG8sIFdvcmxkIQ==\\", \\"context\\": \\"env=prod,service=api\\" }\'","breadcrumbs":"KMS Service » Encrypt Data","id":"4550","title":"Encrypt Data"},"4551":{"body":"curl -X POST http://localhost:8082/api/v1/kms/decrypt \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"ciphertext\\": \\"...\\", \\"context\\": \\"env=prod,service=api\\" }\'","breadcrumbs":"KMS Service » Decrypt Data","id":"4551","title":"Decrypt Data"},"4552":{"body":"# Encrypt data\\n\\"secret-data\\" | kms encrypt\\n\\"api-key\\" | kms encrypt --context \\"env=prod,service=api\\" # Decrypt data\\n$ciphertext | kms decrypt # Generate data key (Cosmian only)\\nkms generate-key # Check service status\\nkms status\\nkms health # Encrypt/decrypt files\\nkms encrypt-file config.yaml\\nkms decrypt-file config.yaml.enc","breadcrumbs":"KMS Service » Nushell CLI Integration","id":"4552","title":"Nushell CLI Integration"},"4553":{"body":"Feature Age RustyVault Cosmian KMS AWS KMS Vault Setup Simple Self-hosted Server setup AWS account Enterprise Speed Very fast Fast Fast Fast Fast Network No Yes Yes Yes Yes Key Rotation Manual Automatic Automatic Automatic Automatic Data Keys No Yes Yes Yes Yes Audit Logging No Yes Full Full Full Confidential No No Yes (SGX/SEV) No No License MIT Apache 2.0 Proprietary Proprietary BSL/Enterprise Cost Free Free Paid Paid Paid Use Case Dev/Test Self-hosted Privacy AWS Cloud Enterprise","breadcrumbs":"KMS Service » Backend Comparison","id":"4553","title":"Backend Comparison"},"4554":{"body":"Config Encryption (SOPS Integration) Dynamic Secrets (Provider API Keys) SSH Key Management Orchestrator (Workflow Data) Control Center (Audit Logs)","breadcrumbs":"KMS Service » Integration Points","id":"4554","title":"Integration Points"},"4555":{"body":"","breadcrumbs":"KMS Service » Deployment","id":"4555","title":"Deployment"},"4556":{"body":"FROM rust:1.70 as builder\\nWORKDIR /app\\nCOPY . .\\nRUN cargo build --release FROM debian:bookworm-slim\\nRUN apt-get update && \\\\ apt-get install -y ca-certificates && \\\\ rm -rf /var/lib/apt/lists/*\\nCOPY --from=builder /app/target/release/kms-service /usr/local/bin/\\nENTRYPOINT [\\"kms-service\\"]","breadcrumbs":"KMS Service » Docker","id":"4556","title":"Docker"},"4557":{"body":"apiVersion: apps/v1\\nkind: Deployment\\nmetadata: name: kms-service\\nspec: replicas: 2 template: spec: containers: - name: kms-service image: provisioning/kms-service:latest env: - name: PROVISIONING_ENV value: \\"prod\\" - name: COSMIAN_KMS_URL value: \\"https://kms.example.com\\" ports: - containerPort: 8082","breadcrumbs":"KMS Service » Kubernetes","id":"4557","title":"Kubernetes"},"4558":{"body":"Development : Use Age for dev/test only, never for production secrets Production : Always use Cosmian KMS with TLS verification enabled API Keys : Never hardcode, use environment variables Key Rotation : Enable automatic rotation (90 days recommended) Context Encryption : Always use encryption context (AAD) Network Access : Restrict KMS service access with firewall rules Monitoring : Enable health checks and monitor operation metrics","breadcrumbs":"KMS Service » Security Best Practices","id":"4558","title":"Security Best Practices"},"4559":{"body":"User Guide : KMS Guide Migration : KMS Simplification","breadcrumbs":"KMS Service » Related Documentation","id":"4559","title":"Related Documentation"},"456":{"body":"The system uses embedding models to convert documents into vector representations: ┌─────────────────────┐\\n│ Document Source │\\n│ (Markdown, Code) │\\n└──────────┬──────────┘ │ ▼\\n┌──────────────────────────────────┐\\n│ Chunking & Tokenization │\\n│ - Code-aware splits │\\n│ - Markdown aware │\\n│ - Preserves context │\\n└──────────┬───────────────────────┘ │ ▼\\n┌──────────────────────────────────┐\\n│ Embedding Model │\\n│ (OpenAI Ada, Anthropic, Local) │\\n└──────────┬───────────────────────┘ │ ▼\\n┌──────────────────────────────────┐\\n│ Vector Storage (SurrealDB) │\\n│ - Vector index │\\n│ - Metadata indexed │\\n│ - BM25 index for keywords │\\n└──────────────────────────────────┘","breadcrumbs":"RAG System » 1. Vector Embeddings","id":"456","title":"1. Vector Embeddings"},"4560":{"body":"Complete guide to using Gitea integration for workspace management, extension distribution, and collaboration. Version: 1.0.0 Last Updated: 2025-10-06","breadcrumbs":"Gitea Integration Guide » Gitea Integration Guide","id":"4560","title":"Gitea Integration Guide"},"4561":{"body":"Overview Setup Workspace Git Integration Workspace Locking Extension Publishing Service Management API Reference Troubleshooting","breadcrumbs":"Gitea Integration Guide » Table of Contents","id":"4561","title":"Table of Contents"},"4562":{"body":"The Gitea integration provides: Workspace Git Integration : Version control for workspaces Distributed Locking : Prevent concurrent workspace modifications Extension Distribution : Publish and download extensions via releases Collaboration : Share workspaces and extensions across teams Service Management : Deploy and manage local Gitea instance","breadcrumbs":"Gitea Integration Guide » Overview","id":"4562","title":"Overview"},"4563":{"body":"┌─────────────────────────────────────────────────────────┐\\n│ Provisioning System │\\n├─────────────────────────────────────────────────────────┤\\n│ │\\n│ ┌────────────┐ ┌──────────────┐ ┌─────────────────┐ │\\n│ │ Workspace │ │ Extension │ │ Locking │ │\\n│ │ Git │ │ Publishing │ │ (Issues) │ │\\n│ └─────┬──────┘ └──────┬───────┘ └────────┬────────┘ │\\n│ │ │ │ │\\n│ └────────────────┼─────────────────────┘ │\\n│ │ │\\n│ ┌──────▼──────┐ │\\n│ │ Gitea API │ │\\n│ │ Client │ │\\n│ └──────┬──────┘ │\\n│ │ │\\n└─────────────────────────┼────────────────────────────────┘ │ ┌───────▼────────┐ │ Gitea Service │ │ (Local/Remote)│ └────────────────┘","breadcrumbs":"Gitea Integration Guide » Architecture","id":"4563","title":"Architecture"},"4564":{"body":"","breadcrumbs":"Gitea Integration Guide » Setup","id":"4564","title":"Setup"},"4565":{"body":"Nushell 0.107.1+ Git installed and configured Docker (for local Gitea deployment) or access to remote Gitea instance SOPS (for encrypted token storage)","breadcrumbs":"Gitea Integration Guide » Prerequisites","id":"4565","title":"Prerequisites"},"4566":{"body":"1. Add Gitea Configuration to Nickel Edit your provisioning/schemas/modes.ncl or workspace config: import provisioning.gitea as gitea # Local Docker deployment\\n_gitea_config = gitea.GiteaConfig { mode = \\"local\\" local = gitea.LocalGitea { enabled = True deployment = \\"docker\\" port = 3000 auto_start = True docker = gitea.DockerGitea { image = \\"gitea/gitea:1.21\\" container_name = \\"provisioning-gitea\\" } } auth = gitea.GiteaAuth { token_path = \\"~/.provisioning/secrets/gitea-token.enc\\" username = \\"provisioning\\" }\\n} # Or remote Gitea instance\\n_gitea_remote = gitea.GiteaConfig { mode = \\"remote\\" remote = gitea.RemoteGitea { enabled = True url = \\"https://gitea.example.com\\" api_url = \\"https://gitea.example.com/api/v1\\" } auth = gitea.GiteaAuth { token_path = \\"~/.provisioning/secrets/gitea-token.enc\\" username = \\"myuser\\" }\\n} 2. Create Gitea Access Token For local Gitea: Start Gitea: provisioning gitea start Open http://localhost:3000 Register admin account Go to Settings → Applications → Generate New Token Save token to encrypted file: # Create encrypted token file\\necho \\"your-gitea-token\\" | sops --encrypt /dev/stdin > ~/.provisioning/secrets/gitea-token.enc For remote Gitea: Login to your Gitea instance Generate personal access token Save encrypted as above 3. Verify Setup # Check Gitea status\\nprovisioning gitea status # Validate token\\nprovisioning gitea auth validate # Show current user\\nprovisioning gitea user","breadcrumbs":"Gitea Integration Guide » Configuration","id":"4566","title":"Configuration"},"4567":{"body":"","breadcrumbs":"Gitea Integration Guide » Workspace Git Integration","id":"4567","title":"Workspace Git Integration"},"4568":{"body":"When creating a new workspace, enable git integration: # Initialize new workspace with Gitea\\nprovisioning workspace init my-workspace --git --remote gitea # Or initialize existing workspace\\ncd workspace_my-workspace\\nprovisioning gitea workspace init . my-workspace --remote gitea This will: Initialize git repository in workspace Create repository on Gitea (workspaces/my-workspace) Add remote origin Push initial commit","breadcrumbs":"Gitea Integration Guide » Initialize Workspace with Git","id":"4568","title":"Initialize Workspace with Git"},"4569":{"body":"# Clone from Gitea\\nprovisioning workspace clone workspaces/my-workspace ./workspace_my-workspace # Or using full identifier\\nprovisioning workspace clone my-workspace ./workspace_my-workspace","breadcrumbs":"Gitea Integration Guide » Clone Existing Workspace","id":"4569","title":"Clone Existing Workspace"},"457":{"body":"SurrealDB serves as the vector database and knowledge store: # Configuration in provisioning/schemas/ai.ncl\\nlet { rag = { enabled = true, db_url = \\"surreal://localhost:8000\\", namespace = \\"provisioning\\", database = \\"ai_rag\\", # Collections for different document types collections = { documentation = { chunking_strategy = \\"markdown\\", chunk_size = 1024, overlap = 256, }, schemas = { chunking_strategy = \\"code\\", chunk_size = 512, overlap = 128, }, deployments = { chunking_strategy = \\"json\\", chunk_size = 2048, overlap = 512, }, }, # Embedding configuration embedding = { provider = \\"openai\\", # or \\"anthropic\\", \\"local\\" model = \\"text-embedding-3-small\\", cache_vectors = true, }, # Search configuration search = { hybrid_enabled = true, vector_weight = 0.7, keyword_weight = 0.3, top_k = 5, # Number of results to return semantic_cache = true, }, }\\n}","breadcrumbs":"RAG System » 2. SurrealDB Integration","id":"457","title":"2. SurrealDB Integration"},"4570":{"body":"# Push workspace changes\\ncd workspace_my-workspace\\nprovisioning workspace push --message \\"Updated infrastructure configs\\" # Pull latest changes\\nprovisioning workspace pull # Sync (pull + push)\\nprovisioning workspace sync","breadcrumbs":"Gitea Integration Guide » Push/Pull Changes","id":"4570","title":"Push/Pull Changes"},"4571":{"body":"# Create branch\\nprovisioning workspace branch create feature-new-cluster # Switch branch\\nprovisioning workspace branch switch feature-new-cluster # List branches\\nprovisioning workspace branch list # Delete branch\\nprovisioning workspace branch delete feature-new-cluster","breadcrumbs":"Gitea Integration Guide » Branch Management","id":"4571","title":"Branch Management"},"4572":{"body":"# Get workspace git status\\nprovisioning workspace git status # Show uncommitted changes\\nprovisioning workspace git diff # Show staged changes\\nprovisioning workspace git diff --staged","breadcrumbs":"Gitea Integration Guide » Git Status","id":"4572","title":"Git Status"},"4573":{"body":"Distributed locking prevents concurrent modifications to workspaces using Gitea issues.","breadcrumbs":"Gitea Integration Guide » Workspace Locking","id":"4573","title":"Workspace Locking"},"4574":{"body":"read : Multiple readers allowed, blocks writers write : Exclusive access, blocks all other locks deploy : Exclusive access for deployments","breadcrumbs":"Gitea Integration Guide » Lock Types","id":"4574","title":"Lock Types"},"4575":{"body":"# Acquire write lock\\nprovisioning gitea lock acquire my-workspace write \\\\ --operation \\"Deploying servers\\" \\\\ --expiry \\"2025-10-06T14:00:00Z\\" # Output:\\n# ✓ Lock acquired for workspace: my-workspace\\n# Lock ID: 42\\n# Type: write\\n# User: provisioning","breadcrumbs":"Gitea Integration Guide » Acquire Lock","id":"4575","title":"Acquire Lock"},"4576":{"body":"# List locks for workspace\\nprovisioning gitea lock list my-workspace # List all active locks\\nprovisioning gitea lock list # Get lock details\\nprovisioning gitea lock info my-workspace 42","breadcrumbs":"Gitea Integration Guide » Check Lock Status","id":"4576","title":"Check Lock Status"},"4577":{"body":"# Release lock\\nprovisioning gitea lock release my-workspace 42","breadcrumbs":"Gitea Integration Guide » Release Lock","id":"4577","title":"Release Lock"},"4578":{"body":"# Force release stuck lock\\nprovisioning gitea lock force-release my-workspace 42 \\\\ --reason \\"Deployment failed, releasing lock\\"","breadcrumbs":"Gitea Integration Guide » Force Release Lock (Admin)","id":"4578","title":"Force Release Lock (Admin)"},"4579":{"body":"Use with-workspace-lock for automatic lock management: use lib_provisioning/gitea/locking.nu * with-workspace-lock \\"my-workspace\\" \\"deploy\\" \\"Server deployment\\" { # Your deployment code here # Lock automatically released on completion or error\\n}","breadcrumbs":"Gitea Integration Guide » Automatic Locking","id":"4579","title":"Automatic Locking"},"458":{"body":"Intelligent chunking preserves context while managing token limits: Markdown Chunking Strategy Input Document: provisioning/docs/src/guides/from-scratch.md Chunks: [1] Header + first section (up to 1024 tokens) [2] Next logical section + overlap with [1] [3] Code examples preserve as atomic units [4] Continue with overlap... Each chunk includes: - Original section heading (for context) - Content - Source file and line numbers - Metadata (doctype, category, version) Code Chunking Strategy Input Document: provisioning/schemas/main.ncl Chunks: [1] Top-level let binding + comments [2] Function definition (atomic, preserves signature) [3] Type definition (atomic, preserves interface) [4] Implementation blocks with context overlap Each chunk preserves: - Type signatures - Function signatures - Import statements needed for context - Comments and docstrings","breadcrumbs":"RAG System » 3. Document Chunking","id":"458","title":"3. Document Chunking"},"4580":{"body":"# Cleanup expired locks\\nprovisioning gitea lock cleanup","breadcrumbs":"Gitea Integration Guide » Lock Cleanup","id":"4580","title":"Lock Cleanup"},"4581":{"body":"Publish taskservs, providers, and clusters as versioned releases on Gitea.","breadcrumbs":"Gitea Integration Guide » Extension Publishing","id":"4581","title":"Extension Publishing"},"4582":{"body":"# Publish taskserv\\nprovisioning gitea extension publish \\\\ ./extensions/taskservs/database/postgres \\\\ 1.2.0 \\\\ --release-notes \\"Added connection pooling support\\" # Publish provider\\nprovisioning gitea extension publish \\\\ ./extensions/providers/aws_prov \\\\ 2.0.0 \\\\ --prerelease # Publish cluster\\nprovisioning gitea extension publish \\\\ ./extensions/clusters/buildkit \\\\ 1.0.0 This will: Validate extension structure Create git tag (if workspace is git repo) Package extension as .tar.gz Create Gitea release Upload package as release asset","breadcrumbs":"Gitea Integration Guide » Publish Extension","id":"4582","title":"Publish Extension"},"4583":{"body":"# List all extensions\\nprovisioning gitea extension list # Filter by type\\nprovisioning gitea extension list --type taskserv\\nprovisioning gitea extension list --type provider\\nprovisioning gitea extension list --type cluster","breadcrumbs":"Gitea Integration Guide » List Published Extensions","id":"4583","title":"List Published Extensions"},"4584":{"body":"# Download specific version\\nprovisioning gitea extension download postgres 1.2.0 \\\\ --destination ./extensions/taskservs/database # Extension is downloaded and extracted automatically","breadcrumbs":"Gitea Integration Guide » Download Extension","id":"4584","title":"Download Extension"},"4585":{"body":"# Get extension information\\nprovisioning gitea extension info postgres 1.2.0","breadcrumbs":"Gitea Integration Guide » Extension Metadata","id":"4585","title":"Extension Metadata"},"4586":{"body":"# 1. Make changes to extension\\ncd extensions/taskservs/database/postgres # 2. Update version in kcl/kcl.mod\\n# 3. Update CHANGELOG.md # 4. Commit changes\\ngit add .\\ngit commit -m \\"Release v1.2.0\\" # 5. Publish to Gitea\\nprovisioning gitea extension publish . 1.2.0","breadcrumbs":"Gitea Integration Guide » Publishing Workflow","id":"4586","title":"Publishing Workflow"},"4587":{"body":"","breadcrumbs":"Gitea Integration Guide » Service Management","id":"4587","title":"Service Management"},"4588":{"body":"# Start Gitea (local mode)\\nprovisioning gitea start # Stop Gitea\\nprovisioning gitea stop # Restart Gitea\\nprovisioning gitea restart","breadcrumbs":"Gitea Integration Guide » Start/Stop Gitea","id":"4588","title":"Start/Stop Gitea"},"4589":{"body":"# Get service status\\nprovisioning gitea status # Output:\\n# Gitea Status:\\n# Mode: local\\n# Deployment: docker\\n# Running: true\\n# Port: 3000\\n# URL: http://localhost:3000\\n# Container: provisioning-gitea\\n# Health: ✓ OK","breadcrumbs":"Gitea Integration Guide » Check Status","id":"4589","title":"Check Status"},"459":{"body":"The system implements dual search strategy for optimal results:","breadcrumbs":"RAG System » Hybrid Search","id":"459","title":"Hybrid Search"},"4590":{"body":"# View recent logs\\nprovisioning gitea logs # Follow logs\\nprovisioning gitea logs --follow # Show specific number of lines\\nprovisioning gitea logs --lines 200","breadcrumbs":"Gitea Integration Guide » View Logs","id":"4590","title":"View Logs"},"4591":{"body":"# Install latest version\\nprovisioning gitea install # Install specific version\\nprovisioning gitea install 1.21.0 # Custom install directory\\nprovisioning gitea install --install-dir ~/bin","breadcrumbs":"Gitea Integration Guide » Install Gitea Binary","id":"4591","title":"Install Gitea Binary"},"4592":{"body":"","breadcrumbs":"Gitea Integration Guide » API Reference","id":"4592","title":"API Reference"},"4593":{"body":"use lib_provisioning/gitea/api_client.nu * # Create repository\\ncreate-repository \\"my-org\\" \\"my-repo\\" \\"Description\\" true # Get repository\\nget-repository \\"my-org\\" \\"my-repo\\" # Delete repository\\ndelete-repository \\"my-org\\" \\"my-repo\\" --force # List repositories\\nlist-repositories \\"my-org\\"","breadcrumbs":"Gitea Integration Guide » Repository Operations","id":"4593","title":"Repository Operations"},"4594":{"body":"# Create release\\ncreate-release \\"my-org\\" \\"my-repo\\" \\"v1.0.0\\" \\"Release Name\\" \\"Notes\\" # Upload asset\\nupload-release-asset \\"my-org\\" \\"my-repo\\" 123 \\"./file.tar.gz\\" # Get release\\nget-release-by-tag \\"my-org\\" \\"my-repo\\" \\"v1.0.0\\" # List releases\\nlist-releases \\"my-org\\" \\"my-repo\\"","breadcrumbs":"Gitea Integration Guide » Release Operations","id":"4594","title":"Release Operations"},"4595":{"body":"use lib_provisioning/gitea/workspace_git.nu * # Initialize workspace git\\ninit-workspace-git \\"./workspace_test\\" \\"test\\" --remote \\"gitea\\" # Clone workspace\\nclone-workspace \\"workspaces/my-workspace\\" \\"./workspace_my-workspace\\" # Push changes\\npush-workspace \\"./workspace_my-workspace\\" \\"Updated configs\\" # Pull changes\\npull-workspace \\"./workspace_my-workspace\\"","breadcrumbs":"Gitea Integration Guide » Workspace Operations","id":"4595","title":"Workspace Operations"},"4596":{"body":"use lib_provisioning/gitea/locking.nu * # Acquire lock\\nlet lock = acquire-workspace-lock \\"my-workspace\\" \\"write\\" \\"Deployment\\" # Release lock\\nrelease-workspace-lock \\"my-workspace\\" $lock.lock_id # Check if locked\\nis-workspace-locked \\"my-workspace\\" \\"write\\" # List locks\\nlist-workspace-locks \\"my-workspace\\"","breadcrumbs":"Gitea Integration Guide » Locking Operations","id":"4596","title":"Locking Operations"},"4597":{"body":"","breadcrumbs":"Gitea Integration Guide » Troubleshooting","id":"4597","title":"Troubleshooting"},"4598":{"body":"Problem : provisioning gitea start fails Solutions : # Check Docker status\\ndocker ps # Check if port is in use\\nlsof -i :3000 # Check Gitea logs\\nprovisioning gitea logs # Remove old container\\ndocker rm -f provisioning-gitea\\nprovisioning gitea start","breadcrumbs":"Gitea Integration Guide » Gitea Not Starting","id":"4598","title":"Gitea Not Starting"},"4599":{"body":"Problem : provisioning gitea auth validate returns false Solutions : # Verify token file exists\\nls ~/.provisioning/secrets/gitea-token.enc # Test decryption\\nsops --decrypt ~/.provisioning/secrets/gitea-token.enc # Regenerate token in Gitea UI\\n# Save new token\\necho \\"new-token\\" | sops --encrypt /dev/stdin > ~/.provisioning/secrets/gitea-token.enc","breadcrumbs":"Gitea Integration Guide » Token Authentication Failed","id":"4599","title":"Token Authentication Failed"},"46":{"body":"Linux : Any modern distribution (Ubuntu 20.04+, CentOS 8+, Debian 11+) macOS : 11.0+ (Big Sur and newer) Windows : Windows 10/11 with WSL2","breadcrumbs":"Installation Guide » Operating System Support","id":"46","title":"Operating System Support"},"460":{"body":"// Find semantically similar documents\\nasync fn vector_search(query: &str, top_k: usize) -> Vec { let embedding = embed(query).await?; // L2 distance in SurrealDB db.query(\\" SELECT *, vector::similarity::cosine(embedding, $embedding) AS score FROM documents WHERE embedding <~> $embedding ORDER BY score DESC LIMIT $top_k \\") .bind((\\"embedding\\", embedding)) .bind((\\"top_k\\", top_k)) .await\\n} Use case : Semantic understanding of intent Query: \\"How to configure PostgreSQL\\" Finds: Documents about database configuration, examples, schemas","breadcrumbs":"RAG System » Vector Similarity Search","id":"460","title":"Vector Similarity Search"},"4600":{"body":"Problem : Git push fails with authentication error Solutions : # Check remote URL\\ncd workspace_my-workspace\\ngit remote -v # Reconfigure remote with token\\ngit remote set-url origin http://username:token@localhost:3000/org/repo.git # Or use SSH\\ngit remote set-url origin git@localhost:workspaces/my-workspace.git","breadcrumbs":"Gitea Integration Guide » Cannot Push to Repository","id":"4600","title":"Cannot Push to Repository"},"4601":{"body":"Problem : Cannot acquire lock, workspace already locked Solutions : # Check active locks\\nprovisioning gitea lock list my-workspace # Get lock details\\nprovisioning gitea lock info my-workspace 42 # If lock is stale, force release\\nprovisioning gitea lock force-release my-workspace 42 --reason \\"Stale lock\\"","breadcrumbs":"Gitea Integration Guide » Lock Already Exists","id":"4601","title":"Lock Already Exists"},"4602":{"body":"Problem : Extension publishing fails validation Solutions : # Check extension structure\\nls -la extensions/taskservs/myservice/\\n# Required:\\n# - schemas/manifest.toml\\n# - schemas/*.ncl (main schema file) # Verify manifest.toml format\\ncat extensions/taskservs/myservice/schemas/manifest.toml # Should have:\\n# [package]\\n# name = \\"myservice\\"\\n# version = \\"1.0.0\\"","breadcrumbs":"Gitea Integration Guide » Extension Validation Failed","id":"4602","title":"Extension Validation Failed"},"4603":{"body":"Problem : Gitea Docker container has permission errors Solutions : # Fix data directory permissions\\nsudo chown -R 1000:1000 ~/.provisioning/gitea # Or recreate with correct permissions\\nprovisioning gitea stop --remove\\nrm -rf ~/.provisioning/gitea\\nprovisioning gitea start","breadcrumbs":"Gitea Integration Guide » Docker Volume Permissions","id":"4603","title":"Docker Volume Permissions"},"4604":{"body":"","breadcrumbs":"Gitea Integration Guide » Best Practices","id":"4604","title":"Best Practices"},"4605":{"body":"Always use locking for concurrent operations Commit frequently with descriptive messages Use branches for experimental changes Sync before operations to get latest changes","breadcrumbs":"Gitea Integration Guide » Workspace Management","id":"4605","title":"Workspace Management"},"4606":{"body":"Follow semantic versioning (MAJOR.MINOR.PATCH) Update CHANGELOG.md for each release Test extensions before publishing Use prerelease flag for beta versions","breadcrumbs":"Gitea Integration Guide » Extension Publishing","id":"4606","title":"Extension Publishing"},"4607":{"body":"Encrypt tokens with SOPS Use private repositories for sensitive workspaces Rotate tokens regularly Audit lock history via Gitea issues","breadcrumbs":"Gitea Integration Guide » Security","id":"4607","title":"Security"},"4608":{"body":"Cleanup expired locks periodically Use shallow clones for large workspaces Archive old releases to reduce storage Monitor Gitea resources for local deployments","breadcrumbs":"Gitea Integration Guide » Performance","id":"4608","title":"Performance"},"4609":{"body":"","breadcrumbs":"Gitea Integration Guide » Advanced Usage","id":"4609","title":"Advanced Usage"},"461":{"body":"// Find documents with matching keywords\\nasync fn keyword_search(query: &str, top_k: usize) -> Vec { // BM25 full-text search in SurrealDB db.query(\\" SELECT *, search::bm25(.) AS score FROM documents WHERE text @@ $query ORDER BY score DESC LIMIT $top_k \\") .bind((\\"query\\", query)) .bind((\\"top_k\\", top_k)) .await\\n} Use case : Exact term matching Query: \\"SurrealDB configuration\\" Finds: Documents mentioning SurrealDB specifically","breadcrumbs":"RAG System » BM25 Keyword Search","id":"461","title":"BM25 Keyword Search"},"4610":{"body":"Edit docker-compose.yml: services: gitea: image: gitea/gitea:1.21 environment: - GITEA__server__DOMAIN=gitea.example.com - GITEA__server__ROOT_URL=https://gitea.example.com # Add custom settings volumes: - /custom/path/gitea:/data","breadcrumbs":"Gitea Integration Guide » Custom Gitea Deployment","id":"4610","title":"Custom Gitea Deployment"},"4611":{"body":"Configure webhooks for automated workflows: import provisioning.gitea as gitea _webhook = gitea.GiteaWebhook { url = \\"https://provisioning.example.com/api/webhooks/gitea\\" events = [\\"push\\", \\"pull_request\\", \\"release\\"] secret = \\"webhook-secret\\"\\n}","breadcrumbs":"Gitea Integration Guide » Webhooks Integration","id":"4611","title":"Webhooks Integration"},"4612":{"body":"# Publish all taskservs with same version\\nprovisioning gitea extension publish-batch \\\\ ./extensions/taskservs \\\\ 1.0.0 \\\\ --extension-type taskserv","breadcrumbs":"Gitea Integration Guide » Batch Extension Publishing","id":"4612","title":"Batch Extension Publishing"},"4613":{"body":"Gitea API Documentation : https://docs.gitea.com/api/ Nickel Schema : /Users/Akasha/project-provisioning/provisioning/schemas/gitea.ncl API Client : /Users/Akasha/project-provisioning/provisioning/core/nulib/lib_provisioning/gitea/api_client.nu Workspace Git : /Users/Akasha/project-provisioning/provisioning/core/nulib/lib_provisioning/gitea/workspace_git.nu Locking : /Users/Akasha/project-provisioning/provisioning/core/nulib/lib_provisioning/gitea/locking.nu Version: 1.0.0 Maintained By: Provisioning Team Last Updated: 2025-10-06","breadcrumbs":"Gitea Integration Guide » References","id":"4613","title":"References"},"4614":{"body":"","breadcrumbs":"Service Mesh Ingress Guide » Service Mesh & Ingress Guide","id":"4614","title":"Service Mesh & Ingress Guide"},"4615":{"body":"This guide helps you choose between different service mesh and ingress controller options for your Kubernetes deployments.","breadcrumbs":"Service Mesh Ingress Guide » Comparison","id":"4615","title":"Comparison"},"4616":{"body":"Service Mesh Handles East-West traffic (service-to-service communication): Automatic mTLS encryption between services Traffic management and routing Observability and monitoring Service discovery Fault tolerance and resilience Ingress Controller Handles North-South traffic (external to internal): Route external traffic into the cluster TLS/HTTPS termination Virtual hosts and path routing Load balancing Can work with or without a service mesh","breadcrumbs":"Service Mesh Ingress Guide » Understanding the Difference","id":"4616","title":"Understanding the Difference"},"4617":{"body":"Istio Version : 1.24.0 Best for : Full-featured service mesh deployments with comprehensive observability Key Features : ✅ Comprehensive feature set ✅ Built-in Istio Gateway ingress controller ✅ Advanced traffic management ✅ Strong observability (Kiali, Grafana, Jaeger) ✅ Virtual services, destination rules, traffic policies ✅ Mutual TLS (mTLS) with automatic certificate rotation ✅ Canary deployments and traffic mirroring Resource Requirements : CPU: 500m (Pilot) + 100m per gateway Memory: 2048Mi (Pilot) + 128Mi per gateway High overhead Pros : Industry-standard solution with large community Rich feature set for complex requirements Built-in ingress gateway (don\'t need external ingress) Strong observability capabilities Enterprise support available Cons : Significant resource overhead Complex configuration learning curve Can be overkill for simple applications Sidecar injection required for all services Use when : You need comprehensive traffic management Complex microservice patterns (canary deployments, traffic mirroring) Enterprise requirements You already understand service meshes Your team has Istio expertise Installation : provisioning taskserv create istio Linkerd Version : 2.16.0 Best for : Lightweight, high-performance service mesh with minimal complexity Key Features : ✅ Ultra-lightweight (minimal resource footprint) ✅ Simple configuration ✅ Automatic mTLS with certificate rotation ✅ Fast sidecar startup (built in Rust) ✅ Live traffic visualization ✅ Service topology and dependency discovery ✅ Golden metrics out of the box (latency, success rate, throughput) Resource Requirements : CPU proxy: 100m request, 1000m limit Memory proxy: 20Mi request, 250Mi limit Very lightweight compared to Istio Pros : Minimal resource overhead Simple, intuitive configuration Fast startup and deployment Built in Rust for performance Excellent golden metrics Good for resource-constrained environments Can run alongside Istio Cons : Fewer advanced features than Istio Requires external ingress controller Smaller ecosystem and fewer integrations Less feature-rich traffic management Requires cert-manager for mTLS Use when : You want simplicity and minimal overhead Running on resource-constrained clusters You prefer straightforward configuration You don\'t need advanced traffic management You\'re using Kubernetes 1.21+ Installation : # Linkerd requires cert-manager\\nprovisioning taskserv create cert-manager\\nprovisioning taskserv create linkerd\\nprovisioning taskserv create nginx-ingress # Or traefik/contour Cilium Version : See existing Cilium taskserv Best for : CNI-based networking with integrated service mesh Key Features : ✅ CNI and service mesh in one solution ✅ eBPF-based for high performance ✅ Network policy enforcement ✅ Service mesh mode (optional) ✅ Hubble for observability ✅ Cluster mesh for multi-cluster Pros : Replaces CNI plugin entirely High-performance eBPF kernel networking Can serve as both CNI and service mesh No sidecar needed (uses eBPF) Network policy support Cons : Requires Linux kernel with eBPF support Service mesh mode is secondary feature More complex than Linkerd Not as mature in service mesh role Use when : You need both CNI and service mesh You\'re on modern Linux kernels with eBPF You want kernel-level networking","breadcrumbs":"Service Mesh Ingress Guide » Service Mesh Options","id":"4617","title":"Service Mesh Options"},"4618":{"body":"Nginx Ingress Version : 1.12.0 Best for : Most Kubernetes deployments - proven, reliable, widely supported Key Features : ✅ Battle-tested and production-proven ✅ Most popular ingress controller ✅ Extensive documentation and community ✅ Rich configuration options ✅ SSL/TLS termination ✅ URL rewriting and routing ✅ Rate limiting and DDoS protection Pros : Proven stability in production Widest community and ecosystem Extensive documentation Multiple commercial support options Works with any service mesh Moderate resource footprint Cons : Configuration can be verbose Limited middleware ecosystem (compared to Traefik) No automatic TLS with Let\'s Encrypt Configuration via annotations Use when : You want proven stability Wide community support is important You need traditional ingress controller You\'re building production systems You want abundant documentation Installation : provisioning taskserv create nginx-ingress With Linkerd : provisioning taskserv create linkerd\\nprovisioning taskserv create nginx-ingress Traefik Version : 3.3.0 Best for : Modern cloud-native applications with dynamic service discovery Key Features : ✅ Automatic service discovery ✅ Native Let\'s Encrypt support ✅ Middleware system for advanced routing ✅ Built-in dashboard and metrics ✅ API-driven configuration ✅ Dynamic configuration updates ✅ Support for multiple protocols (HTTP, TCP, gRPC) Pros : Modern, cloud-native design Automatic TLS with Let\'s Encrypt Middleware ecosystem for extensibility Built-in dashboard for monitoring Dynamic configuration without restart API-driven approach Growing community Cons : Different configuration paradigm (IngressRoute CRD) Smaller community than Nginx Learning curve for traditional ops Less mature than Nginx Use when : You want modern cloud-native features Automatic TLS is important You like middleware-based routing You want dynamic configuration You\'re building microservices platforms Installation : provisioning taskserv create traefik With Linkerd : provisioning taskserv create linkerd\\nprovisioning taskserv create traefik Contour Version : 1.31.0 Best for : Envoy-based ingress with simple CRD configuration Key Features : ✅ Envoy proxy backend (same as Istio) ✅ Simple CRD-based configuration ✅ HTTPProxy CRD for advanced routing ✅ Service delegation and composition ✅ External authorization ✅ Rate limiting support Pros : Uses same Envoy proxy as Istio Simple but powerful configuration Good for multi-tenant clusters CRD-based (declarative) Good documentation Cons : Smaller community than Nginx/Traefik Fewer integrations and plugins Less feature-rich than Traefik Fewer real-world examples Use when : You want Envoy proxy for consistency with Istio You prefer simple configuration You like CRD-based approach You need multi-tenant support Installation : provisioning taskserv create contour HAProxy Ingress Version : 0.15.0 Best for : High-performance environments requiring advanced load balancing Key Features : ✅ HAProxy backend for performance ✅ Advanced load balancing algorithms ✅ High throughput ✅ Flexible configuration ✅ Proven performance Pros : Excellent performance Advanced load balancing options Battle-tested HAProxy backend Good for high-traffic scenarios Cons : Less Kubernetes-native than others Smaller community Configuration complexity Fewer modern features Use when : Performance is critical High traffic is expected You need advanced load balancing","breadcrumbs":"Service Mesh Ingress Guide » Ingress Controller Options","id":"4618","title":"Ingress Controller Options"},"4619":{"body":"","breadcrumbs":"Service Mesh Ingress Guide » Recommended Combinations","id":"4619","title":"Recommended Combinations"},"462":{"body":"async fn hybrid_search( query: &str, vector_weight: f32, keyword_weight: f32, top_k: usize,\\n) -> Vec { let vector_results = vector_search(query, top_k * 2).await?; let keyword_results = keyword_search(query, top_k * 2).await?; let mut scored = HashMap::new(); // Score from vector search for (i, doc) in vector_results.iter().enumerate() { *scored.entry(doc.id).or_insert(0.0) += vector_weight * (1.0 - (i as f32 / top_k as f32)); } // Score from keyword search for (i, doc) in keyword_results.iter().enumerate() { *scored.entry(doc.id).or_insert(0.0) += keyword_weight * (1.0 - (i as f32 / top_k as f32)); } // Return top-k by combined score let mut results: Vec<_> = scored.into_iter().collect();\\n| results.sort_by( | a, b | b.1.partial_cmp(&a.1).unwrap()); |\\n| Ok(results.into_iter().take(top_k).map( | (id, _) | ...).collect()) |\\n}","breadcrumbs":"RAG System » Hybrid Results","id":"462","title":"Hybrid Results"},"4620":{"body":"Why : Lightweight mesh + proven ingress = great balance provisioning taskserv create cert-manager\\nprovisioning taskserv create linkerd\\nprovisioning taskserv create nginx-ingress Pros : Minimal overhead Simple to manage Proven stability Good observability Cons : Less advanced features than Istio","breadcrumbs":"Service Mesh Ingress Guide » 1. Linkerd + Nginx Ingress (Recommended for most users)","id":"4620","title":"1. Linkerd + Nginx Ingress (Recommended for most users)"},"4621":{"body":"Why : All-in-one service mesh with built-in gateway provisioning taskserv create istio Pros : Unified traffic management Powerful observability No external ingress needed Rich features Cons : Higher resource usage More complex","breadcrumbs":"Service Mesh Ingress Guide » 2. Istio (Standalone)","id":"4621","title":"2. Istio (Standalone)"},"4622":{"body":"Why : Lightweight mesh + modern ingress provisioning taskserv create cert-manager\\nprovisioning taskserv create linkerd\\nprovisioning taskserv create traefik Pros : Minimal overhead Modern features Automatic TLS","breadcrumbs":"Service Mesh Ingress Guide » 3. Linkerd + Traefik","id":"4622","title":"3. Linkerd + Traefik"},"4623":{"body":"Why : Just get traffic in without service mesh provisioning taskserv create nginx-ingress Pros : Simplest setup Minimal overhead Proven stability","breadcrumbs":"Service Mesh Ingress Guide » 4. No Mesh + Nginx Ingress (Simple deployments)","id":"4623","title":"4. No Mesh + Nginx Ingress (Simple deployments)"},"4624":{"body":"Requirement Istio Linkerd Cilium Nginx Traefik Contour HAProxy Lightweight ❌ ✅ ✅ ✅ ✅ ✅ ✅ Simple Config ❌ ✅ ⚠️ ⚠️ ✅ ✅ ❌ Full Features ✅ ⚠️ ✅ ⚠️ ✅ ⚠️ ✅ Auto TLS ❌ ❌ ❌ ❌ ✅ ❌ ❌ Service Mesh ✅ ✅ ✅ ❌ ❌ ❌ ❌ Performance ✅ ✅ ✅ ✅ ✅ ✅ ✅ Community ✅ ✅ ✅ ✅ ✅ ⚠️ ⚠️","breadcrumbs":"Service Mesh Ingress Guide » Decision Matrix","id":"4624","title":"Decision Matrix"},"4625":{"body":"","breadcrumbs":"Service Mesh Ingress Guide » Migration Paths","id":"4625","title":"Migration Paths"},"4626":{"body":"Install Linkerd alongside Istio Gradually migrate services (add Linkerd annotations) Verify Linkerd handles traffic correctly Install external ingress controller (Nginx/Traefik) Update Istio Virtual Services to use new ingress Remove Istio once migration complete","breadcrumbs":"Service Mesh Ingress Guide » From Istio to Linkerd","id":"4626","title":"From Istio to Linkerd"},"4627":{"body":"Install new ingress controller Create duplicate Ingress resources pointing to new controller Test with new ingress (use IngressClassName) Update DNS/load balancer to point to new ingress Drain connections from old ingress Remove old ingress controller","breadcrumbs":"Service Mesh Ingress Guide » Between Ingress Controllers","id":"4627","title":"Between Ingress Controllers"},"4628":{"body":"Complete examples of how to configure service meshes and ingress controllers in your workspace.","breadcrumbs":"Service Mesh Ingress Guide » Examples","id":"4628","title":"Examples"},"4629":{"body":"This is the recommended configuration for most deployments - lightweight and proven. Step 1: Create Taskserv Configurations File : workspace/infra/my-cluster/taskservs/cert-manager.ncl import provisioning.extensions.taskservs.infrastructure.cert_manager as cm # Cert-manager is required for Linkerd\'s mTLS certificates\\n_taskserv = cm.CertManager { version = \\"v1.15.0\\" namespace = \\"cert-manager\\"\\n} File : workspace/infra/my-cluster/taskservs/linkerd.ncl import provisioning.extensions.taskservs.networking.linkerd as linkerd # Lightweight service mesh with minimal overhead\\n_taskserv = linkerd.Linkerd { version = \\"2.16.0\\" namespace = \\"linkerd\\" # Enable observability ha_mode = False # Use True for production HA viz_enabled = True prometheus = True grafana = True # Use cert-manager for mTLS certificates cert_manager = True trust_domain = \\"cluster.local\\" # Resource configuration (very lightweight) resources = { proxy_cpu_request = \\"100m\\" proxy_cpu_limit = \\"1000m\\" proxy_memory_request = \\"20Mi\\" proxy_memory_limit = \\"250Mi\\" }\\n} File : workspace/infra/my-cluster/taskservs/nginx-ingress.ncl import provisioning.extensions.taskservs.networking.nginx_ingress as nginx # Battle-tested ingress controller\\n_taskserv = nginx.NginxIngress { version = \\"1.12.0\\" namespace = \\"ingress-nginx\\" # Deployment configuration deployment_type = \\"Deployment\\" # Or \\"DaemonSet\\" for node-local ingress replicas = 2 # Enable metrics for observability prometheus_metrics = True # Resource allocation resources = { cpu_request = \\"100m\\" cpu_limit = \\"1000m\\" memory_request = \\"90Mi\\" memory_limit = \\"500Mi\\" }\\n} Step 2: Deploy Service Mesh Components # Install cert-manager (prerequisite for Linkerd)\\nprovisioning taskserv create cert-manager # Install Linkerd service mesh\\nprovisioning taskserv create linkerd # Install Nginx ingress controller\\nprovisioning taskserv create nginx-ingress # Verify installation\\nlinkerd check\\nkubectl get deploy -n ingress-nginx Step 3: Configure Application Deployment File : workspace/infra/my-cluster/clusters/web-api.ncl import provisioning.kcl.k8s_deploy as k8s\\nimport provisioning.extensions.taskservs.networking.nginx_ingress as nginx # Define the web API service with Linkerd service mesh and Nginx ingress\\nservice = k8s.K8sDeploy { # Basic information name = \\"web-api\\" namespace = \\"production\\" create_ns = True # Service mesh configuration - use Linkerd service_mesh = \\"linkerd\\" service_mesh_ns = \\"linkerd\\" service_mesh_config = { mtls_enabled = True tracing_enabled = False } # Ingress configuration - use Nginx ingress_controller = \\"nginx\\" ingress_ns = \\"ingress-nginx\\" ingress_config = { tls_enabled = True default_backend = \\"web-api:8080\\" } # Deployment spec spec = { replicas = 3 containers = [ { name = \\"api\\" image = \\"myregistry.azurecr.io/web-api:v1.0.0\\" imagePull = \\"Always\\" ports = [ { name = \\"http\\" typ = \\"TCP\\" container = 8080 } ] } ] } # Kubernetes service service = { name = \\"web-api\\" typ = \\"ClusterIP\\" ports = [ { name = \\"http\\" typ = \\"TCP\\" target = 8080 } ] }\\n} Step 4: Create Ingress Resource File : workspace/infra/my-cluster/ingress/web-api-ingress.yaml apiVersion: networking.k8s.io/v1\\nkind: Ingress\\nmetadata: name: web-api namespace: production annotations: cert-manager.io/cluster-issuer: letsencrypt-prod nginx.ingress.kubernetes.io/rewrite-target: /\\nspec: ingressClassName: nginx tls: - hosts: - api.example.com secretName: web-api-tls rules: - host: api.example.com http: paths: - path: / pathType: Prefix backend: service: name: web-api port: number: 8080","breadcrumbs":"Service Mesh Ingress Guide » Example 1: Linkerd + Nginx Ingress Deployment","id":"4629","title":"Example 1: Linkerd + Nginx Ingress Deployment"},"463":{"body":"Reduces API calls by caching embeddings of repeated queries: struct SemanticCache { queries: Arc, CachedResult>>, similarity_threshold: f32,\\n} impl SemanticCache { async fn get(&self, query: &str) -> Option { let embedding = embed(query).await?; // Find cached query with similar embedding // (cosine distance < threshold) for entry in self.queries.iter() { let distance = cosine_distance(&embedding, entry.key()); if distance < self.similarity_threshold { return Some(entry.value().clone()); } } None } async fn insert(&self, query: &str, result: CachedResult) { let embedding = embed(query).await?; self.queries.insert(embedding, result); }\\n} Benefits : 50-80% reduction in embedding API calls Identical queries return in <10ms Similar queries reuse cached context","breadcrumbs":"RAG System » Semantic Caching","id":"463","title":"Semantic Caching"},"4630":{"body":"Complete service mesh with built-in ingress gateway. Step 1: Install Istio File : workspace/infra/my-cluster/taskservs/istio.ncl import provisioning.extensions.taskservs.networking.istio as istio # Full-featured service mesh\\n_taskserv = istio.Istio { version = \\"1.24.0\\" profile = \\"default\\" # Options: default, demo, minimal, remote namespace = \\"istio-system\\" # Core features mtls_enabled = True mtls_mode = \\"PERMISSIVE\\" # Start with PERMISSIVE, switch to STRICT when ready # Traffic management ingress_gateway = True egress_gateway = False # Observability tracing = { enabled = True provider = \\"jaeger\\" sampling_rate = 0.1 # Sample 10% for production } prometheus = True grafana = True kiali = True # Resource configuration resources = { pilot_cpu = \\"500m\\" pilot_memory = \\"2048Mi\\" gateway_cpu = \\"100m\\" gateway_memory = \\"128Mi\\" }\\n} Step 2: Deploy Istio # Install Istio\\nprovisioning taskserv create istio # Verify installation\\nistioctl verify-install Step 3: Configure Application with Istio File : workspace/infra/my-cluster/clusters/api-service.ncl import provisioning.kcl.k8s_deploy as k8s service = k8s.K8sDeploy { name = \\"api-service\\" namespace = \\"production\\" create_ns = True # Use Istio for both service mesh AND ingress service_mesh = \\"istio\\" service_mesh_ns = \\"istio-system\\" ingress_controller = \\"istio-gateway\\" # Istio\'s built-in gateway spec = { replicas = 3 containers = [ { name = \\"api\\" image = \\"myregistry.azurecr.io/api:v1.0.0\\" ports = [ { name = \\"http\\", typ = \\"TCP\\", container = 8080 } ] } ] } service = { name = \\"api-service\\" typ = \\"ClusterIP\\" ports = [ { name = \\"http\\", typ = \\"TCP\\", target = 8080 } ] } # Istio-specific proxy configuration prxyGatewayServers = [ { port = { number = 80, protocol = \\"HTTP\\", name = \\"http\\" } hosts = [\\"api.example.com\\"] }, { port = { number = 443, protocol = \\"HTTPS\\", name = \\"https\\" } hosts = [\\"api.example.com\\"] tls = { mode = \\"SIMPLE\\" credentialName = \\"api-tls-cert\\" } } ] # Virtual service routing configuration prxyVirtualService = { hosts = [\\"api.example.com\\"] gateways = [\\"api-gateway\\"] matches = [ { typ = \\"http\\" location = [ { port = 80 } ] route_destination = [ { port_number = 8080, host = \\"api-service\\" } ] } ] }\\n}","breadcrumbs":"Service Mesh Ingress Guide » Example 2: Istio (Standalone) Deployment","id":"4630","title":"Example 2: Istio (Standalone) Deployment"},"4631":{"body":"Lightweight mesh with modern ingress controller and automatic TLS. Step 1: Create Configurations File : workspace/infra/my-cluster/taskservs/linkerd.ncl import provisioning.extensions.taskservs.networking.linkerd as linkerd _taskserv = linkerd.Linkerd { version = \\"2.16.0\\" namespace = \\"linkerd\\" viz_enabled = True prometheus = True\\n} File : workspace/infra/my-cluster/taskservs/traefik.ncl import provisioning.extensions.taskservs.networking.traefik as traefik # Modern ingress with middleware and auto-TLS\\n_taskserv = traefik.Traefik { version = \\"3.3.0\\" namespace = \\"traefik\\" replicas = 2 dashboard = True metrics = True access_logs = True # Enable Let\'s Encrypt for automatic TLS lets_encrypt = True lets_encrypt_email = \\"admin@example.com\\" resources = { cpu_request = \\"100m\\" cpu_limit = \\"1000m\\" memory_request = \\"128Mi\\" memory_limit = \\"512Mi\\" }\\n} Step 2: Deploy provisioning taskserv create cert-manager\\nprovisioning taskserv create linkerd\\nprovisioning taskserv create traefik Step 3: Create Traefik IngressRoute File : workspace/infra/my-cluster/ingress/api-route.yaml apiVersion: traefik.io/v1alpha1\\nkind: IngressRoute\\nmetadata: name: api namespace: production\\nspec: entryPoints: - websecure routes: - match: Host(`api.example.com`) kind: Rule services: - name: api-service port: 8080 tls: certResolver: letsencrypt domains: - main: api.example.com","breadcrumbs":"Service Mesh Ingress Guide » Example 3: Linkerd + Traefik (Modern Cloud-Native)","id":"4631","title":"Example 3: Linkerd + Traefik (Modern Cloud-Native)"},"4632":{"body":"For simple deployments that don\'t need service mesh. Step 1: Install Nginx File : workspace/infra/my-cluster/taskservs/nginx-ingress.ncl import provisioning.extensions.taskservs.networking.nginx_ingress as nginx _taskserv = nginx.NginxIngress { version = \\"1.12.0\\" replicas = 2 prometheus_metrics = True\\n} Step 2: Deploy provisioning taskserv create nginx-ingress Step 3: Application Configuration File : workspace/infra/my-cluster/clusters/simple-app.ncl import provisioning.kcl.k8s_deploy as k8s service = k8s.K8sDeploy { name = \\"simple-app\\" namespace = \\"default\\" # No service mesh - just ingress ingress_controller = \\"nginx\\" ingress_ns = \\"ingress-nginx\\" spec = { replicas = 2 containers = [ { name = \\"app\\" image = \\"nginx:latest\\" ports = [{ name = \\"http\\", typ = \\"TCP\\", container = 80 }] } ] } service = { name = \\"simple-app\\" typ = \\"ClusterIP\\" ports = [{ name = \\"http\\", typ = \\"TCP\\", target = 80 }] }\\n} Step 4: Create Ingress File : workspace/infra/my-cluster/ingress/simple-app-ingress.yaml apiVersion: networking.k8s.io/v1\\nkind: Ingress\\nmetadata: name: simple-app namespace: default\\nspec: ingressClassName: nginx rules: - host: app.example.com http: paths: - path: / pathType: Prefix backend: service: name: simple-app port: number: 80","breadcrumbs":"Service Mesh Ingress Guide » Example 4: Minimal Setup (Just Nginx, No Service Mesh)","id":"4632","title":"Example 4: Minimal Setup (Just Nginx, No Service Mesh)"},"4633":{"body":"","breadcrumbs":"Service Mesh Ingress Guide » Enable Sidecar Injection for Services","id":"4633","title":"Enable Sidecar Injection for Services"},"4634":{"body":"# Label namespace for automatic sidecar injection\\nkubectl annotate namespace production linkerd.io/inject=enabled # Or add annotation to specific deployment\\nkubectl annotate pod my-pod linkerd.io/inject=enabled","breadcrumbs":"Service Mesh Ingress Guide » For Linkerd","id":"4634","title":"For Linkerd"},"4635":{"body":"# Label namespace for automatic sidecar injection\\nkubectl label namespace production istio-injection=enabled # Verify injection\\nkubectl describe pod -n production | grep istio-proxy","breadcrumbs":"Service Mesh Ingress Guide » For Istio","id":"4635","title":"For Istio"},"4636":{"body":"","breadcrumbs":"Service Mesh Ingress Guide » Monitoring and Observability","id":"4636","title":"Monitoring and Observability"},"4637":{"body":"# Open Linkerd Viz dashboard\\nlinkerd viz dashboard # View service topology\\nlinkerd viz stat ns\\nlinkerd viz tap -n production","breadcrumbs":"Service Mesh Ingress Guide » Linkerd Dashboard","id":"4637","title":"Linkerd Dashboard"},"4638":{"body":"# Kiali (service mesh visualization)\\nkubectl port-forward -n istio-system svc/kiali 20000:20000\\n# http://localhost:20000 # Grafana (metrics)\\nkubectl port-forward -n istio-system svc/grafana 3000:3000\\n# http://localhost:3000 (default: admin/admin) # Jaeger (distributed tracing)\\nkubectl port-forward -n istio-system svc/jaeger-query 16686:16686\\n# http://localhost:16686","breadcrumbs":"Service Mesh Ingress Guide » Istio Dashboards","id":"4638","title":"Istio Dashboards"},"4639":{"body":"# Forward Traefik dashboard\\nkubectl port-forward -n traefik svc/traefik 8080:8080\\n# http://localhost:8080/dashboard/","breadcrumbs":"Service Mesh Ingress Guide » Traefik Dashboard","id":"4639","title":"Traefik Dashboard"},"464":{"body":"","breadcrumbs":"RAG System » Ingestion Workflow","id":"464","title":"Ingestion Workflow"},"4640":{"body":"","breadcrumbs":"Service Mesh Ingress Guide » Quick Reference","id":"4640","title":"Quick Reference"},"4641":{"body":"Service Mesh - Istio # Install Istio (includes built-in ingress gateway)\\nprovisioning taskserv create istio # Verify installation\\nistioctl verify-install # Enable sidecar injection on namespace\\nkubectl label namespace default istio-injection=enabled # View Kiali dashboard\\nkubectl port-forward -n istio-system svc/kiali 20000:20000\\n# Open: http://localhost:20000 Service Mesh - Linkerd # Install cert-manager first (Linkerd requirement)\\nprovisioning taskserv create cert-manager # Install Linkerd\\nprovisioning taskserv create linkerd # Verify installation\\nlinkerd check # Enable automatic sidecar injection\\nkubectl annotate namespace default linkerd.io/inject=enabled # View live dashboard\\nlinkerd viz dashboard Ingress Controllers # Install Nginx Ingress (most popular)\\nprovisioning taskserv create nginx-ingress # Install Traefik (modern cloud-native)\\nprovisioning taskserv create traefik # Install Contour (Envoy-based)\\nprovisioning taskserv create contour # Install HAProxy Ingress (high-performance)\\nprovisioning taskserv create haproxy-ingress","breadcrumbs":"Service Mesh Ingress Guide » Installation Commands","id":"4641","title":"Installation Commands"},"4642":{"body":"Option 1: Linkerd + Nginx Ingress (Recommended) Lightweight mesh + proven ingress # Step 1: Install cert-manager\\nprovisioning taskserv create cert-manager # Step 2: Install Linkerd\\nprovisioning taskserv create linkerd # Step 3: Install Nginx Ingress\\nprovisioning taskserv create nginx-ingress # Step 4: Verify installation\\nlinkerd check\\nkubectl get deploy -n ingress-nginx # Step 5: Create sample application with Linkerd\\nkubectl annotate namespace default linkerd.io/inject=enabled\\nkubectl apply -f my-app.yaml Option 2: Istio (Standalone) Full-featured service mesh with built-in gateway # Install Istio\\nprovisioning taskserv create istio # Verify\\nistioctl verify-install # Enable sidecar injection\\nkubectl label namespace default istio-injection=enabled # Deploy applications\\nkubectl apply -f my-app.yaml Option 3: Linkerd + Traefik Lightweight mesh + modern ingress with auto TLS # Install prerequisites\\nprovisioning taskserv create cert-manager # Install service mesh\\nprovisioning taskserv create linkerd # Install modern ingress with Let\'s Encrypt\\nprovisioning taskserv create traefik # Enable sidecar injection\\nkubectl annotate namespace default linkerd.io/inject=enabled Option 4: Just Nginx Ingress (No Mesh) Simple deployments without service mesh # Install ingress controller\\nprovisioning taskserv create nginx-ingress # Deploy applications\\nkubectl apply -f ingress.yaml","breadcrumbs":"Service Mesh Ingress Guide » Common Installation Combinations","id":"4642","title":"Common Installation Combinations"},"4643":{"body":"Check Linkerd # Full system check\\nlinkerd check # Specific component checks\\nlinkerd check --pre # Pre-install checks\\nlinkerd check -n linkerd # Linkerd namespace\\nlinkerd check -n default # Custom namespace # View version\\nlinkerd version --client\\nlinkerd version --server Check Istio # Full system analysis\\nistioctl analyze # By namespace\\nistioctl analyze -n default # Verify configuration\\nistioctl verify-install # Check version\\nistioctl version Check Ingress Controllers # List ingress resources\\nkubectl get ingress -A # Get ingress details\\nkubectl describe ingress -n default # Nginx specific\\nkubectl get deploy -n ingress-nginx\\nkubectl logs -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx # Traefik specific\\nkubectl get deploy -n traefik\\nkubectl logs -n traefik deployment/traefik","breadcrumbs":"Service Mesh Ingress Guide » Verification Commands","id":"4643","title":"Verification Commands"},"4644":{"body":"Service Mesh Issues # Linkerd - Check proxy status\\nlinkerd check -n # Linkerd - View service topology\\nlinkerd tap -n deployment/ # Istio - Check sidecar injection\\nkubectl describe pod -n # Look for istio-proxy container # Istio - View traffic policies\\nistioctl analyze Ingress Controller Issues # Check ingress controller logs\\nkubectl logs -n ingress-nginx deployment/ingress-nginx-controller\\nkubectl logs -n traefik deployment/traefik # Describe ingress resource\\nkubectl describe ingress -n # Check ingress controller service\\nkubectl get svc -n ingress-nginx\\nkubectl get svc -n traefik","breadcrumbs":"Service Mesh Ingress Guide » Troubleshooting","id":"4644","title":"Troubleshooting"},"4645":{"body":"Remove Linkerd # Remove annotations from namespaces\\nkubectl annotate namespace linkerd.io/inject- --all # Uninstall Linkerd\\nlinkerd uninstall | kubectl delete -f - # Remove Linkerd namespace\\nkubectl delete namespace linkerd Remove Istio # Remove labels from namespaces\\nkubectl label namespace istio-injection- --all # Uninstall Istio\\nistioctl uninstall --purge # Remove Istio namespace\\nkubectl delete namespace istio-system Remove Ingress Controllers # Nginx\\nhelm uninstall ingress-nginx -n ingress-nginx\\nkubectl delete namespace ingress-nginx # Traefik\\nhelm uninstall traefik -n traefik\\nkubectl delete namespace traefik","breadcrumbs":"Service Mesh Ingress Guide » Uninstallation","id":"4645","title":"Uninstallation"},"4646":{"body":"Linkerd Resource Limits # Adjust proxy resource limits in linkerd.ncl\\n_taskserv = linkerd.Linkerd { resources: { proxy_cpu_limit = \\"2000m\\" # Increase if needed proxy_memory_limit = \\"512Mi\\" # Increase if needed }\\n} Istio Profile Selection # Different resource profiles available\\nprofile = \\"default\\" # Full features (default)\\nprofile = \\"demo\\" # Demo mode (more resources)\\nprofile = \\"minimal\\" # Minimal (lower resources)\\nprofile = \\"remote\\" # Control plane only (advanced)","breadcrumbs":"Service Mesh Ingress Guide » Performance Tuning","id":"4646","title":"Performance Tuning"},"4647":{"body":"After implementing these examples, your workspace should look like: workspace/infra/my-cluster/\\n├── taskservs/\\n│ ├── cert-manager.ncl # For Linkerd mTLS\\n│ ├── linkerd.ncl # Service mesh option\\n│ ├── istio.ncl # OR Istio option\\n│ ├── nginx-ingress.ncl # Ingress controller\\n│ └── traefik.ncl # Alternative ingress\\n├── clusters/\\n│ ├── web-api.ncl # Application with Linkerd + Nginx\\n│ ├── api-service.ncl # Application with Istio\\n│ └── simple-app.ncl # App without service mesh\\n├── ingress/\\n│ ├── web-api-ingress.yaml # Nginx Ingress resource\\n│ ├── api-route.yaml # Traefik IngressRoute\\n│ └── simple-app-ingress.yaml # Simple Ingress\\n└── config.toml # Infrastructure-specific config","breadcrumbs":"Service Mesh Ingress Guide » Complete Workspace Directory Structure","id":"4647","title":"Complete Workspace Directory Structure"},"4648":{"body":"Choose your deployment model (Linkerd+Nginx, Istio, or plain Nginx) Create taskserv KCL files in workspace/infra//taskservs/ Install components using provisioning taskserv create Create application deployments with appropriate mesh/ingress configuration Monitor and observe using the appropriate dashboard","breadcrumbs":"Service Mesh Ingress Guide » Next Steps","id":"4648","title":"Next Steps"},"4649":{"body":"Linkerd Documentation : https://linkerd.io/ Istio Documentation : https://istio.io/ Nginx Ingress : https://kubernetes.github.io/ingress-nginx/ Traefik Documentation : https://doc.traefik.io/ Contour Documentation : https://projectcontour.io/ Cilium Documentation : https://docs.cilium.io/","breadcrumbs":"Service Mesh Ingress Guide » Additional Resources","id":"4649","title":"Additional Resources"},"465":{"body":"# Index all documentation\\nprovisioning ai index-docs provisioning/docs/src # Index schemas\\nprovisioning ai index-schemas provisioning/schemas # Index past deployments\\nprovisioning ai index-deployments workspaces/*/deployments # Watch directory for changes (development mode)\\nprovisioning ai watch docs provisioning/docs/src","breadcrumbs":"RAG System » Document Indexing","id":"465","title":"Document Indexing"},"4650":{"body":"Version : 1.0.0 Date : 2025-10-06 Audience : Users and Developers","breadcrumbs":"OCI Registry Guide » OCI Registry User Guide","id":"4650","title":"OCI Registry User Guide"},"4651":{"body":"Overview Quick Start OCI Commands Reference Dependency Management Extension Development Registry Setup Troubleshooting","breadcrumbs":"OCI Registry Guide » Table of Contents","id":"4651","title":"Table of Contents"},"4652":{"body":"The OCI registry integration enables distribution and management of provisioning extensions as OCI artifacts. This provides: Standard Distribution : Use industry-standard OCI registries Version Management : Proper semantic versioning for all extensions Dependency Resolution : Automatic dependency management Caching : Efficient caching to reduce downloads Security : TLS, authentication, and vulnerability scanning support","breadcrumbs":"OCI Registry Guide » Overview","id":"4652","title":"Overview"},"4653":{"body":"OCI (Open Container Initiative) artifacts are packaged files distributed through container registries. Unlike Docker images which contain applications, OCI artifacts can contain any type of content - in our case, provisioning extensions (KCL schemas, Nushell scripts, templates, etc.).","breadcrumbs":"OCI Registry Guide » What are OCI Artifacts","id":"4653","title":"What are OCI Artifacts"},"4654":{"body":"","breadcrumbs":"OCI Registry Guide » Quick Start","id":"4654","title":"Quick Start"},"4655":{"body":"Install one of the following OCI tools: # ORAS (recommended)\\nbrew install oras # Crane (Google\'s tool)\\ngo install github.com/google/go-containerregistry/cmd/crane@latest # Skopeo (RedHat\'s tool)\\nbrew install skopeo","breadcrumbs":"OCI Registry Guide » Prerequisites","id":"4655","title":"Prerequisites"},"4656":{"body":"# Start lightweight OCI registry (Zot)\\nprovisioning oci-registry start # Verify registry is running\\ncurl http://localhost:5000/v2/_catalog","breadcrumbs":"OCI Registry Guide » 1. Start Local OCI Registry (Development)","id":"4656","title":"1. Start Local OCI Registry (Development)"},"4657":{"body":"# Pull Kubernetes extension from registry\\nprovisioning oci pull kubernetes:1.28.0 # Pull with specific registry\\nprovisioning oci pull kubernetes:1.28.0 \\\\ --registry harbor.company.com \\\\ --namespace provisioning-extensions","breadcrumbs":"OCI Registry Guide » 2. Pull an Extension","id":"4657","title":"2. Pull an Extension"},"4658":{"body":"# List all extensions\\nprovisioning oci list # Search for specific extension\\nprovisioning oci search kubernetes # Show available versions\\nprovisioning oci tags kubernetes","breadcrumbs":"OCI Registry Guide » 3. List Available Extensions","id":"4658","title":"3. List Available Extensions"},"4659":{"body":"Edit workspace/config/provisioning.yaml: dependencies: extensions: source_type: \\"oci\\" oci: registry: \\"localhost:5000\\" namespace: \\"provisioning-extensions\\" tls_enabled: false modules: taskservs: - \\"oci://localhost:5000/provisioning-extensions/kubernetes:1.28.0\\" - \\"oci://localhost:5000/provisioning-extensions/containerd:1.7.0\\"","breadcrumbs":"OCI Registry Guide » 4. Configure Workspace to Use OCI","id":"4659","title":"4. Configure Workspace to Use OCI"},"466":{"body":"// In ai-service on startup\\nasync fn initialize_rag() -> Result<()> { let rag = RAGSystem::new(&config.rag).await?; // Index documentation let docs = load_markdown_docs(\\"provisioning/docs/src\\")?; for doc in docs { rag.ingest_document(&doc).await?; } // Index schemas let schemas = load_nickel_schemas(\\"provisioning/schemas\\")?; for schema in schemas { rag.ingest_schema(&schema).await?; } Ok(())\\n}","breadcrumbs":"RAG System » Programmatic Indexing","id":"466","title":"Programmatic Indexing"},"4660":{"body":"# Resolve and install all dependencies\\nprovisioning dep resolve # Check what will be installed\\nprovisioning dep resolve --dry-run # Show dependency tree\\nprovisioning dep tree kubernetes","breadcrumbs":"OCI Registry Guide » 5. Resolve Dependencies","id":"4660","title":"5. Resolve Dependencies"},"4661":{"body":"","breadcrumbs":"OCI Registry Guide » OCI Commands Reference","id":"4661","title":"OCI Commands Reference"},"4662":{"body":"Download extension from OCI registry provisioning oci pull : [OPTIONS] # Examples:\\nprovisioning oci pull kubernetes:1.28.0\\nprovisioning oci pull redis:7.0.0 --registry harbor.company.com\\nprovisioning oci pull postgres:15.0 --insecure # Skip TLS verification Options : --registry : Override registry (default: from config) --namespace : Override namespace (default: provisioning-extensions) --destination : Local installation path --insecure: Skip TLS certificate verification","breadcrumbs":"OCI Registry Guide » Pull Extension","id":"4662","title":"Pull Extension"},"4663":{"body":"Publish extension to OCI registry provisioning oci push [OPTIONS] # Examples:\\nprovisioning oci push ./extensions/taskservs/redis redis 1.0.0\\nprovisioning oci push ./my-provider aws 2.1.0 --registry localhost:5000 Options : --registry : Target registry --namespace : Target namespace --insecure: Skip TLS verification Prerequisites : Extension must have valid manifest.yaml Must be logged in to registry (see oci login)","breadcrumbs":"OCI Registry Guide » Push Extension","id":"4663","title":"Push Extension"},"4664":{"body":"Show available extensions in registry provisioning oci list [OPTIONS] # Examples:\\nprovisioning oci list\\nprovisioning oci list --namespace provisioning-platform\\nprovisioning oci list --registry harbor.company.com Output : ┬───────────────┬──────────────────┬─────────────────────────┬─────────────────────────────────────────────┐\\n│ name │ registry │ namespace │ reference │\\n├───────────────┼──────────────────┼─────────────────────────┼─────────────────────────────────────────────┤\\n│ kubernetes │ localhost:5000 │ provisioning-extensions │ localhost:5000/provisioning-extensions/... │\\n│ containerd │ localhost:5000 │ provisioning-extensions │ localhost:5000/provisioning-extensions/... │\\n│ cilium │ localhost:5000 │ provisioning-extensions │ localhost:5000/provisioning-extensions/... │\\n└───────────────┴──────────────────┴─────────────────────────┴─────────────────────────────────────────────┘","breadcrumbs":"OCI Registry Guide » List Extensions","id":"4664","title":"List Extensions"},"4665":{"body":"Search for extensions matching query provisioning oci search [OPTIONS] # Examples:\\nprovisioning oci search kube\\nprovisioning oci search postgres\\nprovisioning oci search \\"container-*\\"","breadcrumbs":"OCI Registry Guide » Search Extensions","id":"4665","title":"Search Extensions"},"4666":{"body":"Display all available versions of an extension provisioning oci tags [OPTIONS] # Examples:\\nprovisioning oci tags kubernetes\\nprovisioning oci tags redis --registry harbor.company.com Output : ┬────────────┬─────────┬──────────────────────────────────────────────────────┐\\n│ artifact │ version │ reference │\\n├────────────┼─────────┼──────────────────────────────────────────────────────┤\\n│ kubernetes │ 1.29.0 │ localhost:5000/provisioning-extensions/kubernetes... │\\n│ kubernetes │ 1.28.0 │ localhost:5000/provisioning-extensions/kubernetes... │\\n│ kubernetes │ 1.27.0 │ localhost:5000/provisioning-extensions/kubernetes... │\\n└────────────┴─────────┴──────────────────────────────────────────────────────┘","breadcrumbs":"OCI Registry Guide » Show Tags (Versions)","id":"4666","title":"Show Tags (Versions)"},"4667":{"body":"Show detailed manifest and metadata provisioning oci inspect : [OPTIONS] # Examples:\\nprovisioning oci inspect kubernetes:1.28.0\\nprovisioning oci inspect redis:7.0.0 --format json Output : name: kubernetes\\ntype: taskserv\\nversion: 1.28.0\\ndescription: Kubernetes container orchestration platform\\nauthor: Provisioning Team\\nlicense: MIT\\ndependencies: containerd: \\">=1.7.0\\" etcd: \\">=3.5.0\\"\\nplatforms: - linux/amd64 - linux/arm64","breadcrumbs":"OCI Registry Guide » Inspect Extension","id":"4667","title":"Inspect Extension"},"4668":{"body":"Authenticate with OCI registry provisioning oci login [OPTIONS] # Examples:\\nprovisioning oci login localhost:5000\\nprovisioning oci login harbor.company.com --username admin\\nprovisioning oci login registry.io --password-stdin < token.txt\\nprovisioning oci login registry.io --token-file ~/.provisioning/tokens/registry Options : --username : Username (default: _token) --password-stdin: Read password from stdin --token-file : Read token from file Note : Credentials are stored in Docker config (~/.docker/config.json)","breadcrumbs":"OCI Registry Guide » Login to Registry","id":"4668","title":"Login to Registry"},"4669":{"body":"Remove stored credentials provisioning oci logout # Example:\\nprovisioning oci logout harbor.company.com","breadcrumbs":"OCI Registry Guide » Logout from Registry","id":"4669","title":"Logout from Registry"},"467":{"body":"","breadcrumbs":"RAG System » Usage Examples","id":"467","title":"Usage Examples"},"4670":{"body":"Remove extension from registry provisioning oci delete : [OPTIONS] # Examples:\\nprovisioning oci delete kubernetes:1.27.0\\nprovisioning oci delete redis:6.0.0 --force # Skip confirmation Options : --force: Skip confirmation prompt --registry : Target registry --namespace : Target namespace Warning : This operation is irreversible. Use with caution.","breadcrumbs":"OCI Registry Guide » Delete Extension","id":"4670","title":"Delete Extension"},"4671":{"body":"Copy extension between registries provisioning oci copy [OPTIONS] # Examples:\\n# Copy between namespaces in same registry\\nprovisioning oci copy \\\\ localhost:5000/test/kubernetes:1.28.0 \\\\ localhost:5000/production/kubernetes:1.28.0 # Copy between different registries\\nprovisioning oci copy \\\\ localhost:5000/provisioning-extensions/kubernetes:1.28.0 \\\\ harbor.company.com/provisioning/kubernetes:1.28.0","breadcrumbs":"OCI Registry Guide » Copy Extension","id":"4671","title":"Copy Extension"},"4672":{"body":"Display current OCI settings provisioning oci config # Output:\\n{ tool: \\"oras\\" registry: \\"localhost:5000\\" namespace: { extensions: \\"provisioning-extensions\\" platform: \\"provisioning-platform\\" } cache_dir: \\"~/.provisioning/oci-cache\\" tls_enabled: false\\n}","breadcrumbs":"OCI Registry Guide » Show OCI Configuration","id":"4672","title":"Show OCI Configuration"},"4673":{"body":"","breadcrumbs":"OCI Registry Guide » Dependency Management","id":"4673","title":"Dependency Management"},"4674":{"body":"Dependencies are configured in workspace/config/provisioning.yaml: dependencies: # Core provisioning system core: source: \\"oci://harbor.company.com/provisioning-core:v3.5.0\\" # Extensions (providers, taskservs, clusters) extensions: source_type: \\"oci\\" oci: registry: \\"localhost:5000\\" namespace: \\"provisioning-extensions\\" tls_enabled: false auth_token_path: \\"~/.provisioning/tokens/oci\\" modules: providers: - \\"oci://localhost:5000/provisioning-extensions/aws:2.0.0\\" - \\"oci://localhost:5000/provisioning-extensions/upcloud:1.5.0\\" taskservs: - \\"oci://localhost:5000/provisioning-extensions/kubernetes:1.28.0\\" - \\"oci://localhost:5000/provisioning-extensions/containerd:1.7.0\\" - \\"oci://localhost:5000/provisioning-extensions/etcd:3.5.0\\" clusters: - \\"oci://localhost:5000/provisioning-extensions/buildkit:0.12.0\\" # Platform services platform: source_type: \\"oci\\" oci: registry: \\"harbor.company.com\\" namespace: \\"provisioning-platform\\"","breadcrumbs":"OCI Registry Guide » Dependency Configuration","id":"4674","title":"Dependency Configuration"},"4675":{"body":"# Resolve and install all configured dependencies\\nprovisioning dep resolve # Dry-run (show what would be installed)\\nprovisioning dep resolve --dry-run # Resolve with specific version constraints\\nprovisioning dep resolve --update # Update to latest versions","breadcrumbs":"OCI Registry Guide » Resolve Dependencies","id":"4675","title":"Resolve Dependencies"},"4676":{"body":"# Check all dependencies for updates\\nprovisioning dep check-updates # Output:\\n┬─────────────┬─────────┬────────┬──────────────────┐\\n│ name │ current │ latest │ update_available │\\n├─────────────┼─────────┼────────┼──────────────────┤\\n│ kubernetes │ 1.28.0 │ 1.29.0 │ true │\\n│ containerd │ 1.7.0 │ 1.7.0 │ false │\\n│ etcd │ 3.5.0 │ 3.5.1 │ true │\\n└─────────────┴─────────┴────────┴──────────────────┘","breadcrumbs":"OCI Registry Guide » Check for Updates","id":"4676","title":"Check for Updates"},"4677":{"body":"# Update specific extension to latest version\\nprovisioning dep update kubernetes # Update to specific version\\nprovisioning dep update kubernetes --version 1.29.0","breadcrumbs":"OCI Registry Guide » Update Dependency","id":"4677","title":"Update Dependency"},"4678":{"body":"# Show dependency tree for extension\\nprovisioning dep tree kubernetes # Output:\\nkubernetes:1.28.0\\n├── containerd:1.7.0\\n│ └── runc:1.1.0\\n├── etcd:3.5.0\\n└── kubectl:1.28.0","breadcrumbs":"OCI Registry Guide » Dependency Tree","id":"4678","title":"Dependency Tree"},"4679":{"body":"# Validate dependency graph (check for cycles, conflicts)\\nprovisioning dep validate # Validate specific extension\\nprovisioning dep validate kubernetes","breadcrumbs":"OCI Registry Guide » Validate Dependencies","id":"4679","title":"Validate Dependencies"},"468":{"body":"# Search for context-aware information\\nprovisioning ai query \\"How do I configure PostgreSQL with encryption?\\" # Get configuration template\\nprovisioning ai template \\"Describe production Kubernetes on AWS\\" # Interactive mode\\nprovisioning ai chat\\n> What are the best practices for database backup?","breadcrumbs":"RAG System » Query the RAG System","id":"468","title":"Query the RAG System"},"4680":{"body":"","breadcrumbs":"OCI Registry Guide » Extension Development","id":"4680","title":"Extension Development"},"4681":{"body":"# Generate extension from template\\nprovisioning generate extension taskserv redis # Directory structure created:\\n# extensions/taskservs/redis/\\n# ├── schemas/\\n# │ ├── manifest.toml\\n# │ ├── main.ncl\\n# │ ├── version.ncl\\n# │ └── dependencies.ncl\\n# ├── scripts/\\n# │ ├── install.nu\\n# │ ├── check.nu\\n# │ └── uninstall.nu\\n# ├── templates/\\n# ├── docs/\\n# │ └── README.md\\n# ├── tests/\\n# └── manifest.yaml","breadcrumbs":"OCI Registry Guide » Create New Extension","id":"4681","title":"Create New Extension"},"4682":{"body":"Edit manifest.yaml: name: redis\\ntype: taskserv\\nversion: 1.0.0\\ndescription: Redis in-memory data structure store\\nauthor: Your Name\\nlicense: MIT\\nhomepage: https://redis.io\\nrepository: https://gitea.example.com/provisioning-extensions/redis dependencies: os: \\">=1.0.0\\" # Required OS taskserv tags: - database - cache - key-value platforms: - linux/amd64 - linux/arm64 min_provisioning_version: \\"3.0.0\\"","breadcrumbs":"OCI Registry Guide » Extension Manifest","id":"4682","title":"Extension Manifest"},"4683":{"body":"# Load extension from local path\\nprovisioning module load taskserv workspace_dev redis --source local # Test installation\\nprovisioning taskserv create redis --infra test-env --check # Run tests\\nprovisioning test extension redis","breadcrumbs":"OCI Registry Guide » Test Extension Locally","id":"4683","title":"Test Extension Locally"},"4684":{"body":"# Validate extension structure\\nprovisioning oci package validate ./extensions/taskservs/redis # Output:\\n✓ Extension structure valid\\nWarnings: - Missing docs/README.md (recommended)","breadcrumbs":"OCI Registry Guide » Validate Extension","id":"4684","title":"Validate Extension"},"4685":{"body":"# Package as OCI artifact\\nprovisioning oci package ./extensions/taskservs/redis # Output: redis-1.0.0.tar.gz # Inspect package\\nprovisioning oci inspect-artifact redis-1.0.0.tar.gz","breadcrumbs":"OCI Registry Guide » Package Extension","id":"4685","title":"Package Extension"},"4686":{"body":"# Login to registry (one-time)\\nprovisioning oci login localhost:5000 # Publish extension\\nprovisioning oci push ./extensions/taskservs/redis redis 1.0.0 # Verify publication\\nprovisioning oci tags redis # Share with team\\necho \\"Published: oci://localhost:5000/provisioning-extensions/redis:1.0.0\\"","breadcrumbs":"OCI Registry Guide » Publish Extension","id":"4686","title":"Publish Extension"},"4687":{"body":"","breadcrumbs":"OCI Registry Guide » Registry Setup","id":"4687","title":"Registry Setup"},"4688":{"body":"Using Zot (lightweight) : # Start Zot registry\\nprovisioning oci-registry start # Configuration:\\n# - Endpoint: localhost:5000\\n# - Storage: ~/.provisioning/oci-registry/\\n# - No authentication\\n# - TLS disabled # Stop registry\\nprovisioning oci-registry stop # Check status\\nprovisioning oci-registry status Manual Zot Setup : # Install Zot\\nbrew install project-zot/tap/zot # Create config\\ncat > zot-config.json < Result { // Retrieve relevant context let context = rag.search(user_request, top_k=5).await?; // Build prompt with context let prompt = build_prompt_with_context(user_request, &context); // Generate configuration let config = llm.generate(&prompt).await?; // Validate against schemas validate_nickel_config(&config)?; Ok(config)\\n}","breadcrumbs":"RAG System » AI Service Integration","id":"469","title":"AI Service Integration"},"4690":{"body":"","breadcrumbs":"OCI Registry Guide » Troubleshooting","id":"4690","title":"Troubleshooting"},"4691":{"body":"Error : \\"No OCI tool found. Install oras, crane, or skopeo\\" Solution : # Install ORAS (recommended)\\nbrew install oras # Or install Crane\\ngo install github.com/google/go-containerregistry/cmd/crane@latest # Or install Skopeo\\nbrew install skopeo","breadcrumbs":"OCI Registry Guide » No OCI Tool Found","id":"4691","title":"No OCI Tool Found"},"4692":{"body":"Error : \\"Connection refused to localhost:5000\\" Solution : # Check if registry is running\\ncurl http://localhost:5000/v2/_catalog # Start local registry if not running\\nprovisioning oci-registry start","breadcrumbs":"OCI Registry Guide » Connection Refused","id":"4692","title":"Connection Refused"},"4693":{"body":"Error : \\"x509: certificate signed by unknown authority\\" Solution : # For development, use --insecure flag\\nprovisioning oci pull kubernetes:1.28.0 --insecure # For production, configure TLS properly in workspace config:\\n# dependencies:\\n# extensions:\\n# oci:\\n# tls_enabled: true\\n# # Add CA certificate to system trust store","breadcrumbs":"OCI Registry Guide » TLS Certificate Error","id":"4693","title":"TLS Certificate Error"},"4694":{"body":"Error : \\"unauthorized: authentication required\\" Solution : # Login to registry\\nprovisioning oci login localhost:5000 # Or provide auth token in config:\\n# dependencies:\\n# extensions:\\n# oci:\\n# auth_token_path: \\"~/.provisioning/tokens/oci\\"","breadcrumbs":"OCI Registry Guide » Authentication Failed","id":"4694","title":"Authentication Failed"},"4695":{"body":"Error : \\"Dependency not found: kubernetes\\" Solutions : Check registry endpoint : provisioning oci config List available extensions : provisioning oci list Check namespace : provisioning oci list --namespace provisioning-extensions Verify extension exists : provisioning oci tags kubernetes","breadcrumbs":"OCI Registry Guide » Extension Not Found","id":"4695","title":"Extension Not Found"},"4696":{"body":"Error : \\"Circular dependency detected\\" Solution : # Validate dependency graph\\nprovisioning dep validate kubernetes # Check dependency tree\\nprovisioning dep tree kubernetes # Fix circular dependencies in extension manifests","breadcrumbs":"OCI Registry Guide » Dependency Resolution Failed","id":"4696","title":"Dependency Resolution Failed"},"4697":{"body":"","breadcrumbs":"OCI Registry Guide » Best Practices","id":"4697","title":"Best Practices"},"4698":{"body":"✅ DO : Pin to specific versions in production modules: taskservs: - \\"oci://registry/kubernetes:1.28.0\\" # Specific version ❌ DON\'T : Use latest tag in production modules: taskservs: - \\"oci://registry/kubernetes:latest\\" # Unpredictable","breadcrumbs":"OCI Registry Guide » Version Pinning","id":"4698","title":"Version Pinning"},"4699":{"body":"✅ DO : Follow semver (MAJOR.MINOR.PATCH) 1.0.0 → 1.0.1: Backward-compatible bug fix 1.0.0 → 1.1.0: Backward-compatible new feature 1.0.0 → 2.0.0: Breaking change ❌ DON\'T : Use arbitrary version numbers v1, version-2, latest-stable","breadcrumbs":"OCI Registry Guide » Semantic Versioning","id":"4699","title":"Semantic Versioning"},"47":{"body":"Component Minimum Recommended CPU 2 cores 4+ cores RAM 4 GB 8+ GB Storage 2 GB free 10+ GB free Network Internet connection Broadband connection","breadcrumbs":"Installation Guide » Hardware Requirements","id":"47","title":"Hardware Requirements"},"470":{"body":"// In typdialog-ai (JavaScript/TypeScript)\\nasync function suggestFieldValue(fieldName, currentInput) { // Query RAG for similar configurations const context = await rag.search( `Field: ${fieldName}, Input: ${currentInput}`, { topK: 3, semantic: true } ); // Generate suggestion using context const suggestion = await ai.suggest({ field: fieldName, input: currentInput, context: context, }); return suggestion;\\n}","breadcrumbs":"RAG System » Form Assistance Integration","id":"470","title":"Form Assistance Integration"},"4700":{"body":"✅ DO : Specify version constraints dependencies: containerd: \\">=1.7.0\\" etcd: \\"^3.5.0\\" # 3.5.x compatible ❌ DON\'T : Leave dependencies unversioned dependencies: containerd: \\"*\\" # Too permissive","breadcrumbs":"OCI Registry Guide » Dependency Management","id":"4700","title":"Dependency Management"},"4701":{"body":"✅ DO : Use TLS for remote registries Rotate authentication tokens regularly Scan images for vulnerabilities (Harbor) Sign artifacts (cosign) ❌ DON\'T : Use --insecure in production Store passwords in config files Skip certificate verification","breadcrumbs":"OCI Registry Guide » Security","id":"4701","title":"Security"},"4702":{"body":"Multi-Repository Architecture - Overall architecture Extension Development Guide - Create extensions Dependency Resolution - How dependencies work OCI Client Library - Low-level API Maintained By : Documentation Team Last Updated : 2025-10-06 Next Review : 2026-01-06","breadcrumbs":"OCI Registry Guide » Related Documentation","id":"4702","title":"Related Documentation"},"4703":{"body":"Date : 2025-11-23 Version : 1.0.0 For : provisioning v3.6.0+ Access powerful functionality from prov-ecosystem and provctl directly through provisioning CLI.","breadcrumbs":"Integrations Quick Start » Prov-Ecosystem & Provctl Integrations - Quick Start Guide","id":"4703","title":"Prov-Ecosystem & Provctl Integrations - Quick Start Guide"},"4704":{"body":"Four integrated feature sets: Feature Purpose Best For Runtime Abstraction Unified Docker/Podman/OrbStack/Colima/nerdctl Multi-platform deployments SSH Advanced Pooling, circuit breaker, retry strategies Large-scale distributed operations Backup System Multi-backend backups (Restic, Borg, Tar, Rsync) Data protection & disaster recovery GitOps Events Event-driven deployments from Git Continuous deployment automation Service Management Cross-platform services (systemd, launchd, runit) Infrastructure service orchestration","breadcrumbs":"Integrations Quick Start » Overview","id":"4704","title":"Overview"},"4705":{"body":"","breadcrumbs":"Integrations Quick Start » Quick Start Commands","id":"4705","title":"Quick Start Commands"},"4706":{"body":"# 1. Check what runtimes you have available\\nprovisioning runtime list # 2. Detect which runtime provisioning will use\\nprovisioning runtime detect # 3. Verify runtime works\\nprovisioning runtime info Expected Output : Available runtimes: • docker • podman","breadcrumbs":"Integrations Quick Start » 🏃 30-Second Test","id":"4706","title":"🏃 30-Second Test"},"4707":{"body":"","breadcrumbs":"Integrations Quick Start » 1️⃣ Runtime Abstraction","id":"4707","title":"1️⃣ Runtime Abstraction"},"4708":{"body":"Automatically detects and uses Docker, Podman, OrbStack, Colima, or nerdctl - whichever is available on your system. Eliminates hardcoding \\"docker\\" commands.","breadcrumbs":"Integrations Quick Start » What It Does","id":"4708","title":"What It Does"},"4709":{"body":"# Detect available runtime\\nprovisioning runtime detect\\n# Output: \\"Detected runtime: docker\\" # Execute command in runtime\\nprovisioning runtime exec \\"docker images\\"\\n# Runs: docker images # Get runtime info\\nprovisioning runtime info\\n# Shows: name, command, version # List all available runtimes\\nprovisioning runtime list\\n# Shows: docker, podman, orbstack... # Adapt docker-compose for detected runtime\\nprovisioning runtime compose ./docker-compose.yml\\n# Output: docker compose -f ./docker-compose.yml","breadcrumbs":"Integrations Quick Start » Commands","id":"4709","title":"Commands"},"471":{"body":"| | Operation | Time | Cache Hit | | | | ----------- | ------ | ----------- | | | | Vector embedding | 200-500ms | N/A | | | | Vector search (cold) | 300-800ms | N/A | | | | Keyword search | 50-200ms | N/A | | | | Hybrid search | 500-1200ms | <100ms cached | | | | Semantic cache hit | 10-50ms | Always | | Typical query flow : Embedding: 300ms Vector search: 400ms Keyword search: 100ms Ranking: 50ms Total : ~850ms (first call), <100ms (cached)","breadcrumbs":"RAG System » Performance Characteristics","id":"471","title":"Performance Characteristics"},"4710":{"body":"Use Case 1: Works on macOS with OrbStack, Linux with Docker # User on macOS with OrbStack\\n$ provisioning runtime exec \\"docker run -it ubuntu bash\\"\\n# Automatically uses orbctl (OrbStack) # User on Linux with Docker\\n$ provisioning runtime exec \\"docker run -it ubuntu bash\\"\\n# Automatically uses docker Use Case 2: Run docker-compose with detected runtime # Detect and run compose\\n$ compose_cmd=$(provisioning runtime compose ./docker-compose.yml)\\n$ eval $compose_cmd up -d\\n# Works with docker, podman, nerdctl automatically","breadcrumbs":"Integrations Quick Start » Examples","id":"4710","title":"Examples"},"4711":{"body":"No configuration needed! Runtime is auto-detected in order: Docker (macOS: OrbStack first; Linux: Docker first) Podman OrbStack (macOS) Colima (macOS) nerdctl","breadcrumbs":"Integrations Quick Start » Configuration","id":"4711","title":"Configuration"},"4712":{"body":"","breadcrumbs":"Integrations Quick Start » 2️⃣ SSH Advanced Operations","id":"4712","title":"2️⃣ SSH Advanced Operations"},"4713":{"body":"Advanced SSH with connection pooling (90% faster), circuit breaker for fault isolation, and deployment strategies (rolling, blue-green, canary).","breadcrumbs":"Integrations Quick Start » What It Does","id":"4713","title":"What It Does"},"4714":{"body":"# Create SSH pool connection to host\\nprovisioning ssh pool connect server.example.com root --port 22 --timeout 30 # Check pool status\\nprovisioning ssh pool status # List available deployment strategies\\nprovisioning ssh strategies\\n# Output: rolling, blue-green, canary # Configure retry strategy\\nprovisioning ssh retry-config exponential --max-retries 3 # Check circuit breaker status\\nprovisioning ssh circuit-breaker\\n# Output: state=closed, failures=0/5","breadcrumbs":"Integrations Quick Start » Commands","id":"4714","title":"Commands"},"4715":{"body":"Strategy Use Case Risk Rolling Gradual rollout across hosts Low (but slower) Blue-Green Zero-downtime, instant rollback Very low Canary Test on small % before full rollout Very low (5% at risk)","breadcrumbs":"Integrations Quick Start » Deployment Strategies","id":"4715","title":"Deployment Strategies"},"4716":{"body":"# Set up SSH pool\\nprovisioning ssh pool connect srv01.example.com root\\nprovisioning ssh pool connect srv02.example.com root\\nprovisioning ssh pool connect srv03.example.com root # Execute on pool (all 3 hosts in parallel)\\nprovisioning ssh pool exec [srv01, srv02, srv03] \\"systemctl restart myapp\\" --strategy rolling # Check status\\nprovisioning ssh pool status\\n# Output: connections=3, active=0, idle=3, circuit_breaker=green","breadcrumbs":"Integrations Quick Start » Example: Multi-Host Deployment","id":"4716","title":"Example: Multi-Host Deployment"},"4717":{"body":"# Exponential backoff: 100 ms, 200 ms, 400 ms, 800 ms...\\nprovisioning ssh retry-config exponential --max-retries 5 # Linear backoff: 100 ms, 200 ms, 300 ms, 400 ms...\\nprovisioning ssh retry-config linear --max-retries 3 # Fibonacci backoff: 100 ms, 100 ms, 200 ms, 300 ms, 500 ms...\\nprovisioning ssh retry-config fibonacci --max-retries 4","breadcrumbs":"Integrations Quick Start » Retry Strategies","id":"4717","title":"Retry Strategies"},"4718":{"body":"","breadcrumbs":"Integrations Quick Start » 3️⃣ Backup System","id":"4718","title":"3️⃣ Backup System"},"4719":{"body":"Multi-backend backup management with Restic, BorgBackup, Tar, or Rsync. Supports local, S3, SFTP, REST API, and Backblaze B2 repositories.","breadcrumbs":"Integrations Quick Start » What It Does","id":"4719","title":"What It Does"},"472":{"body":"See Configuration Guide for detailed RAG setup: LLM provider for embeddings SurrealDB connection Chunking strategies Search weights and limits Cache settings and TTLs","breadcrumbs":"RAG System » Configuration","id":"472","title":"Configuration"},"4720":{"body":"# Create backup job\\nprovisioning backup create daily-backup /data /var/lib \\\\ --backend restic \\\\ --repository s3://my-bucket/backups # Restore from snapshot\\nprovisioning backup restore snapshot-001 --restore_path /data # List available snapshots\\nprovisioning backup list # Schedule regular backups\\nprovisioning backup schedule daily-backup \\"0 2 * * *\\" \\\\ --paths [\\"/data\\" \\"/var/lib\\"] \\\\ --backend restic # Show retention policy\\nprovisioning backup retention\\n# Output: daily=7, weekly=4, monthly=12, yearly=5 # Check backup job status\\nprovisioning backup status backup-job-001","breadcrumbs":"Integrations Quick Start » Commands","id":"4720","title":"Commands"},"4721":{"body":"Backend Speed Compression Best For Restic ⚡⚡⚡ Excellent Cloud backups BorgBackup ⚡⚡ Excellent Large archives Tar ⚡⚡⚡ Good Simple backups Rsync ⚡⚡⚡ None Incremental syncs","breadcrumbs":"Integrations Quick Start » Backend Comparison","id":"4721","title":"Backend Comparison"},"4722":{"body":"# Create backup configuration\\nprovisioning backup create app-backup /opt/myapp /var/lib/myapp \\\\ --backend restic \\\\ --repository s3://prod-backups/myapp # Schedule daily at 2 AM\\nprovisioning backup schedule app-backup \\"0 2 * * *\\" # Set retention: keep 7 days, 4 weeks, 12 months, 5 years\\nprovisioning backup retention \\\\ --daily 7 \\\\ --weekly 4 \\\\ --monthly 12 \\\\ --yearly 5 # Verify backup was created\\nprovisioning backup list","breadcrumbs":"Integrations Quick Start » Example: Automated Daily Backups to S3","id":"4722","title":"Example: Automated Daily Backups to S3"},"4723":{"body":"# Test backup without actually creating it\\nprovisioning backup create test-backup /data --check # Test restore without actually restoring\\nprovisioning backup restore snapshot-001 --check","breadcrumbs":"Integrations Quick Start » Dry-Run (Test First)","id":"4723","title":"Dry-Run (Test First)"},"4724":{"body":"","breadcrumbs":"Integrations Quick Start » 4️⃣ GitOps Event-Driven Deployments","id":"4724","title":"4️⃣ GitOps Event-Driven Deployments"},"4725":{"body":"Automatically trigger deployments from Git events (push, PR, webhook, scheduled). Supports GitHub, GitLab, Gitea.","breadcrumbs":"Integrations Quick Start » What It Does","id":"4725","title":"What It Does"},"4726":{"body":"# Load GitOps rules from configuration file\\nprovisioning gitops rules ./gitops-rules.yaml # Watch for Git events (starts webhook listener)\\nprovisioning gitops watch --provider github --webhook-port 8080 # List supported events\\nprovisioning gitops events\\n# Output: push, pull-request, webhook, scheduled, health-check, manual # Manually trigger deployment\\nprovisioning gitops trigger deploy-prod --environment prod # List active deployments\\nprovisioning gitops deployments --status running # Show GitOps status\\nprovisioning gitops status\\n# Output: active_rules=5, total=42, successful=40, failed=2","breadcrumbs":"Integrations Quick Start » Commands","id":"4726","title":"Commands"},"4727":{"body":"File: gitops-rules.yaml rules: - name: deploy-prod provider: github repository: https://github.com/myorg/myrepo branch: main events: - push targets: - prod command: \\"provisioning deploy\\" require_approval: true - name: deploy-staging provider: github repository: https://github.com/myorg/myrepo branch: develop events: - push - pull-request targets: - staging command: \\"provisioning deploy\\" require_approval: false Then: # Load rules\\nprovisioning gitops rules ./gitops-rules.yaml # Watch for events\\nprovisioning gitops watch --provider github # When you push to main, deployment auto-triggers!\\n# git push origin main → provisioning deploy runs automatically","breadcrumbs":"Integrations Quick Start » Example: GitOps Configuration","id":"4727","title":"Example: GitOps Configuration"},"4728":{"body":"","breadcrumbs":"Integrations Quick Start » 5️⃣ Service Management","id":"4728","title":"5️⃣ Service Management"},"4729":{"body":"Install, start, stop, and manage services across systemd (Linux), launchd (macOS), runit, and OpenRC.","breadcrumbs":"Integrations Quick Start » What It Does","id":"4729","title":"What It Does"},"473":{"body":"","breadcrumbs":"RAG System » Limitations and Considerations","id":"473","title":"Limitations and Considerations"},"4730":{"body":"# Install service\\nprovisioning service install myapp /usr/local/bin/myapp \\\\ --user myapp \\\\ --working-dir /opt/myapp # Start service\\nprovisioning service start myapp # Stop service\\nprovisioning service stop myapp # Restart service\\nprovisioning service restart myapp # Check service status\\nprovisioning service status myapp\\n# Output: running=true, uptime=86400s, restarts=2 # List all services\\nprovisioning service list # Detect init system\\nprovisioning service detect-init\\n# Output: systemd (Linux), launchd (macOS), etc.","breadcrumbs":"Integrations Quick Start » Commands","id":"4730","title":"Commands"},"4731":{"body":"# On Linux (systemd)\\nprovisioning service install provisioning-worker \\\\ /usr/local/bin/provisioning-worker \\\\ --user provisioning \\\\ --working-dir /opt/provisioning # On macOS (launchd) - works the same!\\nprovisioning service install provisioning-worker \\\\ /usr/local/bin/provisioning-worker \\\\ --user provisioning \\\\ --working-dir /opt/provisioning # Service file is generated automatically for your platform\\nprovisioning service start provisioning-worker\\nprovisioning service status provisioning-worker","breadcrumbs":"Integrations Quick Start » Example: Install Custom Service","id":"4731","title":"Example: Install Custom Service"},"4732":{"body":"","breadcrumbs":"Integrations Quick Start » 🎯 Common Workflows","id":"4732","title":"🎯 Common Workflows"},"4733":{"body":"# Works on macOS with OrbStack, Linux with Docker, etc.\\nprovisioning runtime detect # Detects your platform\\nprovisioning runtime exec \\"docker ps\\" # Uses your runtime","breadcrumbs":"Integrations Quick Start » Workflow 1: Multi-Platform Deployment","id":"4733","title":"Workflow 1: Multi-Platform Deployment"},"4734":{"body":"# Connect to multiple servers\\nfor host in srv01 srv02 srv03; do provisioning ssh pool connect $host.example.com root\\ndone # Execute in parallel with 3x retry\\nprovisioning ssh pool exec [srv01, srv02, srv03] \\\\ \\"systemctl restart app\\" \\\\ --strategy rolling \\\\ --retry exponential","breadcrumbs":"Integrations Quick Start » Workflow 2: Large-Scale SSH Operations","id":"4734","title":"Workflow 2: Large-Scale SSH Operations"},"4735":{"body":"# Create backup job\\nprovisioning backup create daily /opt/app /data \\\\ --backend restic \\\\ --repository s3://backups # Schedule for 2 AM every day\\nprovisioning backup schedule daily \\"0 2 * * *\\" # Verify it works\\nprovisioning backup list","breadcrumbs":"Integrations Quick Start » Workflow 3: Automated Backups","id":"4735","title":"Workflow 3: Automated Backups"},"4736":{"body":"# Define rules in YAML\\ncat > gitops-rules.yaml << \'EOF\'\\nrules: - name: deploy-prod provider: github repository: https://github.com/myorg/repo branch: main events: [push] targets: [prod] command: \\"provisioning deploy\\"\\nEOF # Load and activate\\nprovisioning gitops rules ./gitops-rules.yaml\\nprovisioning gitops watch --provider github # Now pushing to main auto-deploys!","breadcrumbs":"Integrations Quick Start » Workflow 4: Continuous Deployment from Git","id":"4736","title":"Workflow 4: Continuous Deployment from Git"},"4737":{"body":"","breadcrumbs":"Integrations Quick Start » 🔧 Advanced Configuration","id":"4737","title":"🔧 Advanced Configuration"},"4738":{"body":"All integrations support Nickel schemas for advanced configuration: let { IntegrationConfig } = import \\"provisioning/integrations.ncl\\" in\\n{ integrations = { # Runtime configuration runtime = { preferred = \\"podman\\", check_order = [\\"podman\\", \\"docker\\", \\"nerdctl\\"], timeout_secs = 5, enable_cache = true, }, # Backup with retention policy backup = { default_backend = \\"restic\\", default_repository = { type = \\"s3\\", bucket = \\"prod-backups\\", prefix = \\"daily\\", }, jobs = [], verify_after_backup = true, }, # GitOps rules with approval gitops = { rules = [], default_strategy = \\"blue-green\\", dry_run_by_default = false, enable_audit_log = true, }, }\\n}","breadcrumbs":"Integrations Quick Start » Using with Nickel Configuration","id":"4738","title":"Using with Nickel Configuration"},"4739":{"body":"","breadcrumbs":"Integrations Quick Start » 💡 Tips & Tricks","id":"4739","title":"💡 Tips & Tricks"},"474":{"body":"RAG indexes static snapshots Changes to documentation require re-indexing Use watch mode during development","breadcrumbs":"RAG System » Document Freshness","id":"474","title":"Document Freshness"},"4740":{"body":"All major operations support --check for testing: provisioning runtime exec \\"systemctl restart app\\" --check\\n# Output: Would execute: [docker exec ...] provisioning backup create test /data --check\\n# Output: Backup would be created: [test] provisioning gitops trigger deploy-test --check\\n# Output: Deployment would trigger","breadcrumbs":"Integrations Quick Start » Tip 1: Dry-Run Mode","id":"4740","title":"Tip 1: Dry-Run Mode"},"4741":{"body":"Some commands support JSON output: provisioning runtime list --out json\\nprovisioning backup list --out json\\nprovisioning gitops deployments --out json","breadcrumbs":"Integrations Quick Start » Tip 2: Output Formats","id":"4741","title":"Tip 2: Output Formats"},"4742":{"body":"Chain commands in shell scripts: #!/bin/bash # Detect runtime and use it\\nRUNTIME=$(provisioning runtime detect | grep -oP \'docker|podman|nerdctl\') # Execute using detected runtime\\nprovisioning runtime exec \\"docker ps\\" # Create backup before deploy\\nprovisioning backup create pre-deploy-$(date +%s) /opt/app # Deploy\\nprovisioning deploy # Verify with GitOps\\nprovisioning gitops status","breadcrumbs":"Integrations Quick Start » Tip 3: Integration with Scripts","id":"4742","title":"Tip 3: Integration with Scripts"},"4743":{"body":"","breadcrumbs":"Integrations Quick Start » 🐛 Troubleshooting","id":"4743","title":"🐛 Troubleshooting"},"4744":{"body":"Solution : Install Docker, Podman, or OrbStack: # macOS\\nbrew install orbstack # Linux\\nsudo apt-get install docker.io # Then verify\\nprovisioning runtime detect","breadcrumbs":"Integrations Quick Start » Problem: \\"No container runtime detected\\"","id":"4744","title":"Problem: \\"No container runtime detected\\""},"4745":{"body":"Solution : Check port and timeout settings: # Use different port\\nprovisioning ssh pool connect server.example.com root --port 2222 # Increase timeout\\nprovisioning ssh pool connect server.example.com root --timeout 60","breadcrumbs":"Integrations Quick Start » Problem: SSH connection timeout","id":"4745","title":"Problem: SSH connection timeout"},"4746":{"body":"Solution : Check permissions on backup path: # Check if user can read target paths\\nls -l /data # Should be readable # Run with elevated privileges if needed\\nsudo provisioning backup create mybak /data --backend restic","breadcrumbs":"Integrations Quick Start » Problem: Backup fails with \\"Permission denied\\"","id":"4746","title":"Problem: Backup fails with \\"Permission denied\\""},"4747":{"body":"Topic Location Architecture docs/architecture/ECOSYSTEM_INTEGRATION.md CLI Help provisioning help integrations Rust Bridge provisioning/platform/integrations/provisioning-bridge/ Nushell Modules provisioning/core/nulib/lib_provisioning/integrations/ Nickel Schemas provisioning/schemas/integrations/","breadcrumbs":"Integrations Quick Start » 📚 Learn More","id":"4747","title":"📚 Learn More"},"4748":{"body":"# General help\\nprovisioning help integrations # Specific command help\\nprovisioning runtime --help\\nprovisioning backup --help\\nprovisioning gitops --help # System diagnostics\\nprovisioning status\\nprovisioning health Last Updated : 2025-11-23 Version : 1.0.0","breadcrumbs":"Integrations Quick Start » 🆘 Need Help","id":"4748","title":"🆘 Need Help"},"4749":{"body":"Status : ✅ COMPLETED - All phases (1-6) implemented and tested Date : December 2025 Tests : 25/25 passing (100%)","breadcrumbs":"Secrets Service Layer Complete » Secrets Service Layer (SST) - Complete User Guide","id":"4749","title":"Secrets Service Layer (SST) - Complete User Guide"},"475":{"body":"Large documents chunked to fit LLM context Some context may be lost in chunking Adjustable chunk size vs. context trade-off","breadcrumbs":"RAG System » Token Limits","id":"475","title":"Token Limits"},"4750":{"body":"The Secrets Service Layer (SST) is an enterprise-grade unified solution for managing all types of secrets (database credentials, SSH keys, API tokens, provider credentials) through a REST API controlled by Cedar policies with workspace isolation and real-time monitoring.","breadcrumbs":"Secrets Service Layer Complete » 📋 Executive Summary","id":"4750","title":"📋 Executive Summary"},"4751":{"body":"Feature Description Status Centralized Management Unified API for all secrets ✅ Complete Cedar Authorization Mandatory configurable policies ✅ Complete Workspace Isolation Secrets isolated by workspace and domain ✅ Complete Auto Rotation Automatic scheduling and rotation ✅ Complete Secret Sharing Cross-workspace sharing with access control ✅ Complete Real-time Monitoring Dashboard, expiration alerts ✅ Complete Complete Audit Full operation logging ✅ Complete KMS Encryption Envelope-based key encryption ✅ Complete Temporal + Permanent Support for SSH and provider credentials ✅ Complete","breadcrumbs":"Secrets Service Layer Complete » ✨ Key Features","id":"4751","title":"✨ Key Features"},"4752":{"body":"","breadcrumbs":"Secrets Service Layer Complete » 🚀 Quick Start (5 minutes)","id":"4752","title":"🚀 Quick Start (5 minutes)"},"4753":{"body":"# Register workspace\\nprovisioning workspace register librecloud /Users/Akasha/project-provisioning/workspace_librecloud # Verify\\nprovisioning workspace list\\nprovisioning workspace active","breadcrumbs":"Secrets Service Layer Complete » 1. Register the workspace librecloud","id":"4753","title":"1. Register the workspace librecloud"},"4754":{"body":"# Create PostgreSQL credential\\nprovisioning secrets create database postgres \\\\ --workspace librecloud \\\\ --infra wuji \\\\ --user admin \\\\ --password \\"secure_password\\" \\\\ --host db.local \\\\ --port 5432 \\\\ --database myapp","breadcrumbs":"Secrets Service Layer Complete » 2. Create your first database secret","id":"4754","title":"2. Create your first database secret"},"4755":{"body":"# Get credential (requires Cedar authorization)\\nprovisioning secrets get librecloud/wuji/postgres/admin_password","breadcrumbs":"Secrets Service Layer Complete » 3. Retrieve the secret","id":"4755","title":"3. Retrieve the secret"},"4756":{"body":"# List all PostgreSQL secrets\\nprovisioning secrets list --workspace librecloud --domain postgres # List all infrastructure secrets\\nprovisioning secrets list --workspace librecloud --infra wuji","breadcrumbs":"Secrets Service Layer Complete » 4. List secrets by domain","id":"4756","title":"4. List secrets by domain"},"4757":{"body":"","breadcrumbs":"Secrets Service Layer Complete » 📚 Complete Guide by Phases","id":"4757","title":"📚 Complete Guide by Phases"},"4758":{"body":"1.1 Create Database Credentials REST Endpoint : POST /api/v1/secrets/database\\nContent-Type: application/json { \\"workspace_id\\": \\"librecloud\\", \\"infra_id\\": \\"wuji\\", \\"db_type\\": \\"postgresql\\", \\"host\\": \\"db.librecloud.internal\\", \\"port\\": 5432, \\"database\\": \\"production_db\\", \\"username\\": \\"admin\\", \\"password\\": \\"encrypted_password\\"\\n} CLI Command : provisioning secrets create database postgres \\\\ --workspace librecloud \\\\ --infra wuji \\\\ --user admin \\\\ --password \\"password\\" \\\\ --host db.librecloud.internal \\\\ --port 5432 \\\\ --database production_db Result : Secret stored in SurrealDB with KMS encryption ✓ Secret created: librecloud/wuji/postgres/admin_password Workspace: librecloud Infrastructure: wuji Domain: postgres Type: Database Encrypted: Yes (KMS) 1.2 Create Application Secrets REST API : POST /api/v1/secrets/application\\n{ \\"workspace_id\\": \\"librecloud\\", \\"app_name\\": \\"myapp-web\\", \\"key_type\\": \\"api_token\\", \\"value\\": \\"sk_live_abc123xyz\\"\\n} CLI : provisioning secrets create app myapp-web \\\\ --workspace librecloud \\\\ --domain web \\\\ --type api_token \\\\ --value \\"sk_live_abc123xyz\\" 1.3 List Secrets REST API : GET /api/v1/secrets/list?workspace=librecloud&domain=postgres Response:\\n{ \\"secrets\\": [ { \\"path\\": \\"librecloud/wuji/postgres/admin_password\\", \\"workspace_id\\": \\"librecloud\\", \\"domain\\": \\"postgres\\", \\"secret_type\\": \\"Database\\", \\"created_at\\": \\"2025-12-06T10:00:00Z\\", \\"created_by\\": \\"admin\\" } ]\\n} CLI : # All workspace secrets\\nprovisioning secrets list --workspace librecloud # Filter by domain\\nprovisioning secrets list --workspace librecloud --domain postgres # Filter by infrastructure\\nprovisioning secrets list --workspace librecloud --infra wuji 1.4 Retrieve a Secret REST API : GET /api/v1/secrets/librecloud/wuji/postgres/admin_password Requires:\\n- Header: Authorization: Bearer \\n- Cedar verification: [user has read permission]\\n- If MFA required: mfa_verified=true in JWT CLI : # Get full secret\\nprovisioning secrets get librecloud/wuji/postgres/admin_password # Output:\\n# Host: db.librecloud.internal\\n# Port: 5432\\n# User: admin\\n# Database: production_db\\n# Password: [encrypted in transit]","breadcrumbs":"Secrets Service Layer Complete » Phase 1: Database and Application Secrets","id":"4758","title":"Phase 1: Database and Application Secrets"},"4759":{"body":"2.1 Temporal SSH Keys (Auto-expiring) Use Case : Temporary server access (max 24 hours) # Generate temporary SSH key (TTL 2 hours)\\nprovisioning secrets create ssh \\\\ --workspace librecloud \\\\ --infra wuji \\\\ --server web01 \\\\ --ttl 2h # Result:\\n# ✓ SSH key generated\\n# Server: web01\\n# TTL: 2 hours\\n# Expires at: 2025-12-06T12:00:00Z\\n# Private Key: [encrypted] Technical Details : Generated in real-time by Orchestrator Stored in memory (TTL-based) Automatic revocation on expiry Complete audit trail in vault_audit 2.2 Permanent SSH Keys (Stored) Use Case : Long-duration infrastructure keys # Create permanent SSH key (stored in DB)\\nprovisioning secrets create ssh \\\\ --workspace librecloud \\\\ --infra wuji \\\\ --server web01 \\\\ --permanent # Result:\\n# ✓ Permanent SSH key created\\n# Storage: SurrealDB (encrypted)\\n# Rotation: Manual (or automatic if configured)\\n# Access: Cedar controlled 2.3 Provider Credentials UpCloud API (Temporal) : provisioning secrets create provider upcloud \\\\ --workspace librecloud \\\\ --roles \\"server,network,storage\\" \\\\ --ttl 4h # Result:\\n# ✓ UpCloud credential generated\\n# Token: tmp_upcloud_abc123\\n# Roles: server, network, storage\\n# TTL: 4 hours UpCloud API (Permanent) : provisioning secrets create provider upcloud \\\\ --workspace librecloud \\\\ --roles \\"server,network\\" \\\\ --permanent # Result:\\n# ✓ Permanent UpCloud credential created\\n# Token: upcloud_live_xyz789\\n# Storage: SurrealDB\\n# Rotation: Manual","breadcrumbs":"Secrets Service Layer Complete » Phase 2: SSH Keys and Provider Credentials","id":"4759","title":"Phase 2: SSH Keys and Provider Credentials"},"476":{"body":"Quality depends on embedding model Domain-specific models perform better Fine-tuning possible for specialized vocabularies","breadcrumbs":"RAG System » Embedding Quality","id":"476","title":"Embedding Quality"},"4760":{"body":"3.1 Plan Automatic Rotation Predefined Rotation Policies : Type Prod Dev Database Every 30d Every 90d Application Every 60d Every 14d SSH Every 365d Every 90d Provider Every 180d Every 30d Force Immediate Rotation : # Force rotation now\\nprovisioning secrets rotate librecloud/wuji/postgres/admin_password # Result:\\n# ✓ Rotation initiated\\n# Status: In Progress\\n# New password: [generated]\\n# Old password: [archived]\\n# Next rotation: 2025-01-05 Check Rotation Status : GET /api/v1/secrets/{path}/rotation-status Response:\\n{ \\"path\\": \\"librecloud/wuji/postgres/admin_password\\", \\"status\\": \\"pending\\", \\"next_rotation\\": \\"2025-01-05T10:00:00Z\\", \\"last_rotation\\": \\"2025-12-05T10:00:00Z\\", \\"days_remaining\\": 30, \\"failure_count\\": 0\\n} 3.2 Rotation Job Scheduler (Background) System automatically runs rotations every hour: ┌─────────────────────────────────┐\\n│ Rotation Job Scheduler │\\n│ - Interval: 1 hour │\\n│ - Max concurrency: 5 rotations │\\n│ - Auto retry │\\n└─────────────────────────────────┘ ↓ Get due secrets ↓ Generate new credentials ↓ Validate functionality ↓ Update SurrealDB ↓ Log to audit trail Check Scheduler Status : provisioning secrets scheduler status # Result:\\n# Status: Running\\n# Last check: 2025-12-06T11:00:00Z\\n# Completed rotations: 24\\n# Failed rotations: 0","breadcrumbs":"Secrets Service Layer Complete » Phase 3: Auto Rotation","id":"4760","title":"Phase 3: Auto Rotation"},"4761":{"body":"Create a Grant (Access Authorization) Scenario : Share DB credential between librecloud and staging # REST API\\nPOST /api/v1/secrets/{path}/grant { \\"source_workspace\\": \\"librecloud\\", \\"target_workspace\\": \\"staging\\", \\"permission\\": \\"read\\", # read, write, rotate \\"require_approval\\": false\\n} # Response:\\n{ \\"grant_id\\": \\"grant-12345\\", \\"secret_path\\": \\"librecloud/wuji/postgres/admin_password\\", \\"source_workspace\\": \\"librecloud\\", \\"target_workspace\\": \\"staging\\", \\"permission\\": \\"read\\", \\"status\\": \\"active\\", \\"granted_at\\": \\"2025-12-06T10:00:00Z\\", \\"access_count\\": 0\\n} CLI : provisioning secrets grant \\\\ --secret librecloud/wuji/postgres/admin_password \\\\ --target-workspace staging \\\\ --permission read # ✓ Grant created: grant-12345\\n# Source workspace: librecloud\\n# Target workspace: staging\\n# Permission: Read\\n# Approval required: No Revoke a Grant # Revoke access immediately\\nPOST /api/v1/secrets/grant/{grant_id}/revoke\\n{ \\"reason\\": \\"User left the team\\"\\n} # CLI\\nprovisioning secrets revoke-grant grant-12345 \\\\ --reason \\"User left the team\\" # ✓ Grant revoked\\n# Status: Revoked\\n# Access records: 42 List Grants # All workspace grants\\nGET /api/v1/secrets/grants?workspace=librecloud # Response:\\n{ \\"grants\\": [ { \\"grant_id\\": \\"grant-12345\\", \\"secret_path\\": \\"librecloud/wuji/postgres/admin_password\\", \\"target_workspace\\": \\"staging\\", \\"permission\\": \\"read\\", \\"status\\": \\"active\\", \\"access_count\\": 42, \\"last_accessed\\": \\"2025-12-06T10:30:00Z\\" } ]\\n}","breadcrumbs":"Secrets Service Layer Complete » Phase 3.2: Share Secrets Across Workspaces","id":"4761","title":"Phase 3.2: Share Secrets Across Workspaces"},"4762":{"body":"Dashboard Metrics GET /api/v1/secrets/monitoring/dashboard Response:\\n{ \\"total_secrets\\": 45, \\"temporal_secrets\\": 12, \\"permanent_secrets\\": 33, \\"expiring_secrets\\": [ { \\"path\\": \\"librecloud/wuji/postgres/admin_password\\", \\"domain\\": \\"postgres\\", \\"days_remaining\\": 5, \\"severity\\": \\"critical\\" } ], \\"failed_access_attempts\\": [ { \\"user\\": \\"alice\\", \\"secret_path\\": \\"librecloud/wuji/postgres/admin_password\\", \\"reason\\": \\"insufficient_permissions\\", \\"timestamp\\": \\"2025-12-06T10:00:00Z\\" } ], \\"rotation_metrics\\": { \\"total\\": 45, \\"completed\\": 40, \\"pending\\": 3, \\"failed\\": 2 }\\n} CLI : provisioning secrets monitoring dashboard # ✓ Secrets Dashboard - Librecloud\\n#\\n# Total secrets: 45\\n# Temporal secrets: 12\\n# Permanent secrets: 33\\n#\\n# ⚠️ CRITICAL (next 3 days): 2\\n# - librecloud/wuji/postgres/admin_password (5 days)\\n# - librecloud/wuji/redis/password (1 day)\\n#\\n# ⚡ WARNING (next 7 days): 3\\n# - librecloud/app/api_token (7 days)\\n#\\n# 📊 Rotations completed: 40/45 (89%) Expiring Secrets Alerts GET /api/v1/secrets/monitoring/expiring?days=7 Response:\\n{ \\"expiring_secrets\\": [ { \\"path\\": \\"librecloud/wuji/postgres/admin_password\\", \\"domain\\": \\"postgres\\", \\"expires_in_days\\": 5, \\"type\\": \\"database\\", \\"last_rotation\\": \\"2025-11-05T10:00:00Z\\" } ]\\n}","breadcrumbs":"Secrets Service Layer Complete » Phase 3.4: Monitoring and Alerts","id":"4762","title":"Phase 3.4: Monitoring and Alerts"},"4763":{"body":"All operations are protected by Cedar policies :","breadcrumbs":"Secrets Service Layer Complete » 🔐 Cedar Authorization","id":"4763","title":"🔐 Cedar Authorization"},"4764":{"body":"// Requires MFA for production secrets\\n@id(\\"prod-secret-access-mfa\\")\\npermit ( principal, action == Provisioning::Action::\\"access\\", resource is Provisioning::Secret in Provisioning::Environment::\\"production\\"\\n) when { context.mfa_verified == true && resource.is_expired == false\\n}; // Only admins can create permanent secrets\\n@id(\\"permanent-secret-admin-only\\")\\npermit ( principal in Provisioning::Role::\\"security_admin\\", action == Provisioning::Action::\\"create\\", resource is Provisioning::Secret\\n) when { resource.lifecycle == \\"permanent\\"\\n};","breadcrumbs":"Secrets Service Layer Complete » Example Policy: Production Secret Access","id":"4764","title":"Example Policy: Production Secret Access"},"4765":{"body":"# Test Cedar decision\\nprovisioning policies check alice can access secret:librecloud/postgres/password # Result:\\n# User: alice\\n# Resource: secret:librecloud/postgres/password\\n# Decision: ✅ ALLOWED\\n# - Role: database_admin\\n# - MFA verified: Yes\\n# - Workspace: librecloud","breadcrumbs":"Secrets Service Layer Complete » Verify Authorization","id":"4765","title":"Verify Authorization"},"4766":{"body":"","breadcrumbs":"Secrets Service Layer Complete » 🏗️ Data Structure","id":"4766","title":"🏗️ Data Structure"},"4767":{"body":"-- Table vault_secrets (SurrealDB)\\n{ id: \\"secret:uuid123\\", path: \\"librecloud/wuji/postgres/admin_password\\", workspace_id: \\"librecloud\\", infra_id: \\"wuji\\", domain: \\"postgres\\", secret_type: \\"Database\\", encrypted_value: \\"U2FsdGVkX1...\\", -- AES-256-GCM encrypted version: 1, created_at: \\"2025-12-05T10:00:00Z\\", created_by: \\"admin\\", updated_at: \\"2025-12-05T10:00:00Z\\", updated_by: \\"admin\\", tags: [\\"production\\", \\"critical\\"], auto_rotate: true, rotation_interval_days: 30, ttl_seconds: null, -- null = no auto expiry deleted: false, metadata: { db_host: \\"db.librecloud.internal\\", db_port: 5432, db_name: \\"production_db\\", username: \\"admin\\" }\\n}","breadcrumbs":"Secrets Service Layer Complete » Secret in Database","id":"4767","title":"Secret in Database"},"4768":{"body":"librecloud (Workspace) ├── wuji (Infrastructure) │ ├── postgres (Domain) │ │ ├── admin_password │ │ ├── readonly_user │ │ └── replication_user │ ├── redis (Domain) │ │ └── master_password │ └── ssh (Domain) │ ├── web01_key │ └── db01_key └── web (Infrastructure) ├── api (Domain) │ ├── stripe_token │ ├── github_token │ └── sendgrid_key └── auth (Domain) ├── jwt_secret └── oauth_client_secret","breadcrumbs":"Secrets Service Layer Complete » Secret Hierarchy","id":"4768","title":"Secret Hierarchy"},"4769":{"body":"","breadcrumbs":"Secrets Service Layer Complete » 🔄 Complete Workflows","id":"4769","title":"🔄 Complete Workflows"},"477":{"body":"","breadcrumbs":"RAG System » Monitoring and Debugging","id":"477","title":"Monitoring and Debugging"},"4770":{"body":"1. Admin creates credential POST /api/v1/secrets/database 2. System encrypts with KMS ├─ Generates data key ├─ Encrypts secret with data key └─ Encrypts data key with KMS master key 3. Stores in SurrealDB ├─ vault_secrets (encrypted value) ├─ vault_versions (history) └─ vault_audit (audit record) 4. System schedules auto rotation ├─ Calculates next date (30 days) └─ Creates rotation_scheduler entry 5. Every hour, background job checks ├─ Any secrets due for rotation? ├─ Yes → Generate new password ├─ Validate functionality (connect to DB) ├─ Update SurrealDB └─ Log to audit 6. Monitoring alerts ├─ If 7 days remaining → WARNING alert ├─ If 3 days remaining → CRITICAL alert └─ If expired → EXPIRED alert","breadcrumbs":"Secrets Service Layer Complete » Workflow 1: Create and Rotate Database Credential","id":"4770","title":"Workflow 1: Create and Rotate Database Credential"},"4771":{"body":"1. Admin of librecloud creates grant POST /api/v1/secrets/{path}/grant 2. Cedar verifies authorization ├─ Is user admin of source workspace? └─ Is target workspace valid? 3. Grant created and recorded ├─ Unique ID: grant-xxxxx ├─ Status: active └─ Audit: who, when, why 4. Staging workspace user accesses secret GET /api/v1/secrets/{path} 5. System verifies access ├─ Cedar: Is grant active? ├─ Cedar: Sufficient permission? ├─ Cedar: MFA if required? └─ Yes → Return decrypted secret 6. Audit records access ├─ User who accessed ├─ Source IP ├─ Exact timestamp ├─ Success/failure └─ Increment access count in grant","breadcrumbs":"Secrets Service Layer Complete » Workflow 2: Share Secret Between Workspaces","id":"4771","title":"Workflow 2: Share Secret Between Workspaces"},"4772":{"body":"1. User requests temporary SSH key POST /api/v1/secrets/ssh {ttl: \\"2h\\"} 2. Cedar authorizes (requires MFA) ├─ User has role? ├─ MFA verified? └─ TTL within limit (max 24h)? 3. Orchestrator generates key ├─ Generates SSH key pair (RSA 4096) ├─ Stores in memory (TTL-based) ├─ Logs to audit └─ Returns private key 4. User downloads key └─ Valid for 2 hours 5. Automatic expiration ├─ 2-hour timer starts ├─ TTL expires → Auto revokes ├─ Later attempts → Access denied └─ Audit: automatic revocation","breadcrumbs":"Secrets Service Layer Complete » Workflow 3: Access Temporal SSH Secret","id":"4772","title":"Workflow 3: Access Temporal SSH Secret"},"4773":{"body":"","breadcrumbs":"Secrets Service Layer Complete » 📝 Practical Examples","id":"4773","title":"📝 Practical Examples"},"4774":{"body":"# 1. Create credential\\nprovisioning secrets create database postgres \\\\ --workspace librecloud \\\\ --infra wuji \\\\ --user admin \\\\ --password \\"P@ssw0rd123!\\" \\\\ --host db.librecloud.internal \\\\ --port 5432 \\\\ --database myapp_prod # 2. List PostgreSQL secrets\\nprovisioning secrets list --workspace librecloud --domain postgres # 3. Get for connection\\nprovisioning secrets get librecloud/wuji/postgres/admin_password # 4. Share with staging team\\nprovisioning secrets grant \\\\ --secret librecloud/wuji/postgres/admin_password \\\\ --target-workspace staging \\\\ --permission read # 5. Force rotation\\nprovisioning secrets rotate librecloud/wuji/postgres/admin_password # 6. Check status\\nprovisioning secrets monitoring dashboard | grep postgres","breadcrumbs":"Secrets Service Layer Complete » Example 1: Manage PostgreSQL Secrets","id":"4774","title":"Example 1: Manage PostgreSQL Secrets"},"4775":{"body":"# 1. Generate temporary SSH key (4 hours)\\nprovisioning secrets create ssh \\\\ --workspace librecloud \\\\ --infra wuji \\\\ --server web01 \\\\ --ttl 4h # 2. Download private key\\nprovisioning secrets get librecloud/wuji/ssh/web01_key > ~/.ssh/web01_temp # 3. Connect to server\\nchmod 600 ~/.ssh/web01_temp\\nssh -i ~/.ssh/web01_temp ubuntu@web01.librecloud.internal # 4. After 4 hours\\n# → Key revoked automatically\\n# → New SSH attempts fail\\n# → Access logged in audit","breadcrumbs":"Secrets Service Layer Complete » Example 2: Temporary SSH Access","id":"4775","title":"Example 2: Temporary SSH Access"},"4776":{"body":"# GitLab CI / GitHub Actions\\njobs: deploy: script: # 1. Get DB credential - export DB_PASSWORD=$(provisioning secrets get librecloud/prod/postgres/admin_password) # 2. Get API token - export API_TOKEN=$(provisioning secrets get librecloud/app/api_token) # 3. Deploy application - docker run -e DB_PASSWORD=$DB_PASSWORD -e API_TOKEN=$API_TOKEN myapp:latest # 4. System logs access in audit # → User: ci-deploy # → Workspace: librecloud # → Secrets accessed: 2 # → Status: success","breadcrumbs":"Secrets Service Layer Complete » Example 3: CI/CD Integration","id":"4776","title":"Example 3: CI/CD Integration"},"4777":{"body":"","breadcrumbs":"Secrets Service Layer Complete » 🛡️ Security","id":"4777","title":"🛡️ Security"},"4778":{"body":"At Rest : AES-256-GCM with KMS key rotation In Transit : TLS 1.3 In Memory : Automatic cleanup of sensitive variables","breadcrumbs":"Secrets Service Layer Complete » Encryption","id":"4778","title":"Encryption"},"4779":{"body":"Cedar : All operations evaluated against policies MFA : Required for production secrets Workspace Isolation : Data separation at DB level","breadcrumbs":"Secrets Service Layer Complete » Access Control","id":"4779","title":"Access Control"},"478":{"body":"# View RAG search metrics\\nprovisioning ai metrics show rag # Analysis of search quality\\nprovisioning ai eval-rag --sample-queries 100","breadcrumbs":"RAG System » Query Metrics","id":"478","title":"Query Metrics"},"4780":{"body":"{ \\"timestamp\\": \\"2025-12-06T10:30:45Z\\", \\"user_id\\": \\"alice\\", \\"workspace\\": \\"librecloud\\", \\"action\\": \\"secrets:get\\", \\"resource\\": \\"librecloud/wuji/postgres/admin_password\\", \\"result\\": \\"success\\", \\"ip_address\\": \\"192.168.1.100\\", \\"mfa_verified\\": true, \\"cedar_policy\\": \\"prod-secret-access-mfa\\"\\n}","breadcrumbs":"Secrets Service Layer Complete » Audit","id":"4780","title":"Audit"},"4781":{"body":"","breadcrumbs":"Secrets Service Layer Complete » 📊 Test Results","id":"4781","title":"📊 Test Results"},"4782":{"body":"✅ Phase 3.1: Rotation Scheduler (9 tests) - Schedule creation - Status transitions - Failure tracking ✅ Phase 3.2: Secret Sharing (8 tests) - Grant creation with permissions - Permission hierarchy - Access logging ✅ Phase 3.4: Monitoring (4 tests) - Dashboard metrics - Expiring alerts - Failed access recording ✅ Phase 5: Rotation Job Scheduler (4 tests) - Background job lifecycle - Configuration management ✅ Integration Tests (3 tests) - Multi-service workflows - End-to-end scenarios Execution : cargo test --test secrets_phases_integration_test test result: ok. 25 passed; 0 failed","breadcrumbs":"Secrets Service Layer Complete » All 25 Integration Tests Passing","id":"4782","title":"All 25 Integration Tests Passing"},"4783":{"body":"","breadcrumbs":"Secrets Service Layer Complete » 🆘 Troubleshooting","id":"4783","title":"🆘 Troubleshooting"},"4784":{"body":"Cause : User lacks permissions in policy Solution : # Check user and permission\\nprovisioning policies check $USER can access secret:librecloud/postgres/admin_password # Check roles\\nprovisioning auth whoami # Request access from admin\\nprovisioning secrets grant \\\\ --secret librecloud/wuji/postgres/admin_password \\\\ --target-workspace $WORKSPACE \\\\ --permission read","breadcrumbs":"Secrets Service Layer Complete » Problem: \\"Authorization denied by Cedar policy\\"","id":"4784","title":"Problem: \\"Authorization denied by Cedar policy\\""},"4785":{"body":"Cause : Typo in path or workspace doesn\'t exist Solution : # List available secrets\\nprovisioning secrets list --workspace librecloud # Check active workspace\\nprovisioning workspace active # Switch workspace if needed\\nprovisioning workspace switch librecloud","breadcrumbs":"Secrets Service Layer Complete » Problem: \\"Secret not found\\"","id":"4785","title":"Problem: \\"Secret not found\\""},"4786":{"body":"Cause : Operation requires MFA but not verified Solution : # Check MFA status\\nprovisioning auth status # Enroll if not configured\\nprovisioning mfa totp enroll # Use MFA token on next access\\nprovisioning secrets get librecloud/wuji/postgres/admin_password --mfa-code 123456","breadcrumbs":"Secrets Service Layer Complete » Problem: \\"MFA required\\"","id":"4786","title":"Problem: \\"MFA required\\""},"4787":{"body":"REST API : /docs/api/secrets-api.md CLI Reference : provisioning secrets --help Cedar Policies : provisioning/config/cedar-policies/secrets.cedar Architecture : /docs/architecture/SECRETS_SERVICE_LAYER.md Security : /docs/user/SECRETS_SECURITY_GUIDE.md","breadcrumbs":"Secrets Service Layer Complete » 📚 Complete Documentation","id":"4787","title":"📚 Complete Documentation"},"4788":{"body":"Phase 7 : Web UI Dashboard for visual management Phase 8 : HashiCorp Vault integration Phase 9 : Multi-datacenter secret replication Status : ✅ Secrets Service Layer - COMPLETED AND TESTED","breadcrumbs":"Secrets Service Layer Complete » 🎯 Next Steps (Future)","id":"4788","title":"🎯 Next Steps (Future)"},"4789":{"body":"Comprehensive OCI (Open Container Initiative) registry deployment and management for the provisioning system. Source : provisioning/platform/oci-registry/","breadcrumbs":"OCI Registry Platform » OCI Registry Service","id":"4789","title":"OCI Registry Service"},"479":{"body":"# In provisioning/config/ai.toml\\n[ai.rag.debug]\\nenabled = true\\nlog_embeddings = true # Log embedding vectors\\nlog_search_scores = true # Log relevance scores\\nlog_context_used = true # Log context retrieved","breadcrumbs":"RAG System » Debug Mode","id":"479","title":"Debug Mode"},"4790":{"body":"Zot (Recommended for Development): Lightweight, fast, OCI-native with UI Harbor (Recommended for Production): Full-featured enterprise registry Distribution (OCI Reference): Official OCI reference implementation","breadcrumbs":"OCI Registry Platform » Supported Registries","id":"4790","title":"Supported Registries"},"4791":{"body":"Multi-Registry Support : Zot, Harbor, Distribution Namespace Organization : Logical separation of artifacts Access Control : RBAC, policies, authentication Monitoring : Prometheus metrics, health checks Garbage Collection : Automatic cleanup of unused artifacts High Availability : Optional HA configurations TLS/SSL : Secure communication UI Interface : Web-based management (Zot, Harbor)","breadcrumbs":"OCI Registry Platform » Features","id":"4791","title":"Features"},"4792":{"body":"","breadcrumbs":"OCI Registry Platform » Quick Start","id":"4792","title":"Quick Start"},"4793":{"body":"cd provisioning/platform/oci-registry/zot\\ndocker-compose up -d # Initialize with namespaces and policies\\nnu ../scripts/init-registry.nu --registry-type zot # Access UI\\nopen http://localhost:5000","breadcrumbs":"OCI Registry Platform » Start Zot Registry (Default)","id":"4793","title":"Start Zot Registry (Default)"},"4794":{"body":"cd provisioning/platform/oci-registry/harbor\\ndocker-compose up -d\\nsleep 120 # Wait for services # Initialize\\nnu ../scripts/init-registry.nu --registry-type harbor --admin-password Harbor12345 # Access UI\\nopen http://localhost\\n# Login: admin / Harbor12345","breadcrumbs":"OCI Registry Platform » Start Harbor Registry","id":"4794","title":"Start Harbor Registry"},"4795":{"body":"Namespace Description Public Retention provisioning-extensions Extension packages No 10 tags, 90 days provisioning-kcl KCL schemas No 20 tags, 180 days provisioning-platform Platform images No 5 tags, 30 days provisioning-test Test artifacts Yes 3 tags, 7 days","breadcrumbs":"OCI Registry Platform » Default Namespaces","id":"4795","title":"Default Namespaces"},"4796":{"body":"","breadcrumbs":"OCI Registry Platform » Management","id":"4796","title":"Management"},"4797":{"body":"# Start registry\\nnu -c \\"use provisioning/core/nulib/lib_provisioning/oci_registry; oci-registry start --type zot\\" # Check status\\nnu -c \\"use provisioning/core/nulib/lib_provisioning/oci_registry; oci-registry status --type zot\\" # View logs\\nnu -c \\"use provisioning/core/nulib/lib_provisioning/oci_registry; oci-registry logs --type zot --follow\\" # Health check\\nnu -c \\"use provisioning/core/nulib/lib_provisioning/oci_registry; oci-registry health --type zot\\" # List namespaces\\nnu -c \\"use provisioning/core/nulib/lib_provisioning/oci_registry; oci-registry namespaces\\"","breadcrumbs":"OCI Registry Platform » Nushell Commands","id":"4797","title":"Nushell Commands"},"4798":{"body":"# Start\\ndocker-compose up -d # Stop\\ndocker-compose down # View logs\\ndocker-compose logs -f # Remove (including volumes)\\ndocker-compose down -v","breadcrumbs":"OCI Registry Platform » Docker Compose","id":"4798","title":"Docker Compose"},"4799":{"body":"Feature Zot Harbor Distribution Setup Simple Complex Simple UI Built-in Full-featured None Search Yes Yes No Scanning No Trivy No Replication No Yes No RBAC Basic Advanced Basic Best For Dev/CI Production Compliance","breadcrumbs":"OCI Registry Platform » Registry Comparison","id":"4799","title":"Registry Comparison"},"48":{"body":"x86_64 (Intel/AMD 64-bit) - Full support ARM64 (Apple Silicon, ARM servers) - Full support","breadcrumbs":"Installation Guide » Architecture Support","id":"48","title":"Architecture Support"},"480":{"body":"Architecture - AI system overview MCP Integration - RAG access via MCP Configuration - RAG setup guide API Reference - RAG API endpoints ADR-015 - Design decisions Last Updated : 2025-01-13 Status : ✅ Production-Ready Test Coverage : 22/22 tests passing Database : SurrealDB 1.5.0+","breadcrumbs":"RAG System » Related Documentation","id":"480","title":"Related Documentation"},"4800":{"body":"","breadcrumbs":"OCI Registry Platform » Security","id":"4800","title":"Security"},"4801":{"body":"Zot/Distribution (htpasswd) : htpasswd -Bc htpasswd provisioning\\ndocker login localhost:5000 Harbor (Database) : docker login localhost\\n# Username: admin / Password: Harbor12345","breadcrumbs":"OCI Registry Platform » Authentication","id":"4801","title":"Authentication"},"4802":{"body":"","breadcrumbs":"OCI Registry Platform » Monitoring","id":"4802","title":"Monitoring"},"4803":{"body":"# API check\\ncurl http://localhost:5000/v2/ # Catalog check\\ncurl http://localhost:5000/v2/_catalog","breadcrumbs":"OCI Registry Platform » Health Checks","id":"4803","title":"Health Checks"},"4804":{"body":"Zot : curl http://localhost:5000/metrics Harbor : curl http://localhost:9090/metrics","breadcrumbs":"OCI Registry Platform » Metrics","id":"4804","title":"Metrics"},"4805":{"body":"Architecture : OCI Integration User Guide : OCI Registry Guide","breadcrumbs":"OCI Registry Platform » Related Documentation","id":"4805","title":"Related Documentation"},"4806":{"body":"Version : 1.0.0 Date : 2025-10-06 Status : Production Ready","breadcrumbs":"Test Environment Guide » Test Environment Guide","id":"4806","title":"Test Environment Guide"},"4807":{"body":"The Test Environment Service provides automated containerized testing for taskservs, servers, and multi-node clusters. Built into the orchestrator, it eliminates manual Docker management and provides realistic test scenarios.","breadcrumbs":"Test Environment Guide » Overview","id":"4807","title":"Overview"},"4808":{"body":"┌─────────────────────────────────────────────────┐\\n│ Orchestrator (port 8080) │\\n│ ┌──────────────────────────────────────────┐ │\\n│ │ Test Orchestrator │ │\\n│ │ • Container Manager (Docker API) │ │\\n│ │ • Network Isolation │ │\\n│ │ • Multi-node Topologies │ │\\n│ │ • Test Execution │ │\\n│ └──────────────────────────────────────────┘ │\\n└─────────────────────────────────────────────────┘ ↓ ┌────────────────────────┐ │ Docker Containers │ │ • Isolated Networks │ │ • Resource Limits │ │ • Volume Mounts │ └────────────────────────┘","breadcrumbs":"Test Environment Guide » Architecture","id":"4808","title":"Architecture"},"4809":{"body":"","breadcrumbs":"Test Environment Guide » Test Environment Types","id":"4809","title":"Test Environment Types"},"481":{"body":"Status : ✅ Production-Ready (MCP 0.6.0+, integrated with Claude, compatible with all LLMs) The MCP server provides standardized Model Context Protocol integration, allowing external LLMs (Claude, GPT-4, local models) to access provisioning platform capabilities as tools. This enables complex multi-step workflows, tool composition, and integration with existing LLM applications.","breadcrumbs":"MCP Integration » Model Context Protocol (MCP) Integration","id":"481","title":"Model Context Protocol (MCP) Integration"},"4810":{"body":"Test individual taskserv in isolated container. # Basic test\\nprovisioning test env single kubernetes # With resource limits\\nprovisioning test env single redis --cpu 2000 --memory 4096 # Auto-start and cleanup\\nprovisioning test quick postgres","breadcrumbs":"Test Environment Guide » 1. Single Taskserv Test","id":"4810","title":"1. Single Taskserv Test"},"4811":{"body":"Simulate complete server with multiple taskservs. # Server with taskservs\\nprovisioning test env server web-01 [containerd kubernetes cilium] # With infrastructure context\\nprovisioning test env server db-01 [postgres redis] --infra prod-stack","breadcrumbs":"Test Environment Guide » 2. Server Simulation","id":"4811","title":"2. Server Simulation"},"4812":{"body":"Multi-node cluster simulation from templates. # 3-node Kubernetes cluster\\nprovisioning test topology load kubernetes_3node | test env cluster kubernetes --auto-start # etcd cluster\\nprovisioning test topology load etcd_cluster | test env cluster etcd","breadcrumbs":"Test Environment Guide » 3. Cluster Topology","id":"4812","title":"3. Cluster Topology"},"4813":{"body":"","breadcrumbs":"Test Environment Guide » Quick Start","id":"4813","title":"Quick Start"},"4814":{"body":"Docker running: docker ps # Should work without errors Orchestrator running: cd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background","breadcrumbs":"Test Environment Guide » Prerequisites","id":"4814","title":"Prerequisites"},"4815":{"body":"# 1. Quick test (fastest)\\nprovisioning test quick kubernetes # 2. Or step-by-step\\n# Create environment\\nprovisioning test env single kubernetes --auto-start # List environments\\nprovisioning test env list # Check status\\nprovisioning test env status # View logs\\nprovisioning test env logs # Cleanup\\nprovisioning test env cleanup ","breadcrumbs":"Test Environment Guide » Basic Workflow","id":"4815","title":"Basic Workflow"},"4816":{"body":"","breadcrumbs":"Test Environment Guide » Topology Templates","id":"4816","title":"Topology Templates"},"4817":{"body":"# List templates\\nprovisioning test topology list Template Description Nodes kubernetes_3node K8s HA cluster 1 CP + 2 workers kubernetes_single All-in-one K8s 1 node etcd_cluster etcd cluster 3 members containerd_test Standalone containerd 1 node postgres_redis Database stack 2 nodes","breadcrumbs":"Test Environment Guide » Available Templates","id":"4817","title":"Available Templates"},"4818":{"body":"# Load and use template\\nprovisioning test topology load kubernetes_3node | test env cluster kubernetes # View template\\nprovisioning test topology load etcd_cluster","breadcrumbs":"Test Environment Guide » Using Templates","id":"4818","title":"Using Templates"},"4819":{"body":"Create my-topology.toml: [my_cluster]\\nname = \\"My Custom Cluster\\"\\ncluster_type = \\"custom\\" [[my_cluster.nodes]]\\nname = \\"node-01\\"\\nrole = \\"primary\\"\\ntaskservs = [\\"postgres\\", \\"redis\\"]\\n[my_cluster.nodes.resources]\\ncpu_millicores = 2000\\nmemory_mb = 4096 [[my_cluster.nodes]]\\nname = \\"node-02\\"\\nrole = \\"replica\\"\\ntaskservs = [\\"postgres\\"]\\n[my_cluster.nodes.resources]\\ncpu_millicores = 1000\\nmemory_mb = 2048 [my_cluster.network]\\nsubnet = \\"172.30.0.0/16\\"","breadcrumbs":"Test Environment Guide » Custom Topology","id":"4819","title":"Custom Topology"},"482":{"body":"The MCP integration follows the Model Context Protocol specification: ┌──────────────────────────────────────────────────────────────┐\\n│ External LLM (Claude, GPT-4, etc.) │\\n└────────────────────┬─────────────────────────────────────────┘ │ │ Tool Calls (JSON-RPC) ▼\\n┌──────────────────────────────────────────────────────────────┐\\n│ MCP Server (provisioning/platform/crates/mcp-server) │\\n│ │\\n│ ┌───────────────────────────────────────────────────────┐ │\\n│ │ Tool Registry │ │\\n│ │ - generate_config(description, schema) │ │\\n│ │ - validate_config(config) │ │\\n│ │ - search_docs(query) │ │\\n│ │ - troubleshoot_deployment(logs) │ │\\n│ │ - get_schema(name) │ │\\n│ │ - check_compliance(config, policy) │ │\\n│ └───────────────────────────────────────────────────────┘ │\\n│ │ │\\n│ ▼ │\\n│ ┌───────────────────────────────────────────────────────┐ │\\n│ │ Implementation Layer │ │\\n│ │ - AI Service client (ai-service port 8083) │ │\\n│ │ - Validator client │ │\\n│ │ - RAG client (SurrealDB) │ │\\n│ │ - Schema loader │ │\\n│ └───────────────────────────────────────────────────────┘ │\\n└──────────────────────────────────────────────────────────────┘","breadcrumbs":"MCP Integration » Architecture Overview","id":"482","title":"Architecture Overview"},"4820":{"body":"","breadcrumbs":"Test Environment Guide » Commands Reference","id":"4820","title":"Commands Reference"},"4821":{"body":"# Create from config\\nprovisioning test env create # Single taskserv\\nprovisioning test env single [--cpu N] [--memory MB] # Server simulation\\nprovisioning test env server [--infra NAME] # Cluster topology\\nprovisioning test env cluster # List environments\\nprovisioning test env list # Get details\\nprovisioning test env get # Show status\\nprovisioning test env status ","breadcrumbs":"Test Environment Guide » Environment Management","id":"4821","title":"Environment Management"},"4822":{"body":"# Run tests\\nprovisioning test env run [--tests [test1, test2]] # View logs\\nprovisioning test env logs # Cleanup\\nprovisioning test env cleanup ","breadcrumbs":"Test Environment Guide » Test Execution","id":"4822","title":"Test Execution"},"4823":{"body":"# One-command test (create, run, cleanup)\\nprovisioning test quick [--infra NAME]","breadcrumbs":"Test Environment Guide » Quick Test","id":"4823","title":"Quick Test"},"4824":{"body":"","breadcrumbs":"Test Environment Guide » REST API","id":"4824","title":"REST API"},"4825":{"body":"curl -X POST http://localhost:9090/test/environments/create \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"config\\": { \\"type\\": \\"single_taskserv\\", \\"taskserv\\": \\"kubernetes\\", \\"base_image\\": \\"ubuntu:22.04\\", \\"environment\\": {}, \\"resources\\": { \\"cpu_millicores\\": 2000, \\"memory_mb\\": 4096 } }, \\"infra\\": \\"my-project\\", \\"auto_start\\": true, \\"auto_cleanup\\": false }\'","breadcrumbs":"Test Environment Guide » Create Environment","id":"4825","title":"Create Environment"},"4826":{"body":"curl http://localhost:9090/test/environments","breadcrumbs":"Test Environment Guide » List Environments","id":"4826","title":"List Environments"},"4827":{"body":"curl -X POST http://localhost:9090/test/environments/{id}/run \\\\ -H \\"Content-Type: application/json\\" \\\\ -d \'{ \\"tests\\": [], \\"timeout_seconds\\": 300 }\'","breadcrumbs":"Test Environment Guide » Run Tests","id":"4827","title":"Run Tests"},"4828":{"body":"curl -X DELETE http://localhost:9090/test/environments/{id}","breadcrumbs":"Test Environment Guide » Cleanup","id":"4828","title":"Cleanup"},"4829":{"body":"","breadcrumbs":"Test Environment Guide » Use Cases","id":"4829","title":"Use Cases"},"483":{"body":"The MCP server is started as a stdio-based service: # Start MCP server (stdio transport)\\nprovisioning-mcp-server --config /etc/provisioning/ai.toml # With debug logging\\nRUST_LOG=debug provisioning-mcp-server --config /etc/provisioning/ai.toml # In Claude Desktop configuration\\n~/.claude/claude_desktop_config.json:\\n{ \\"mcpServers\\": { \\"provisioning\\": { \\"command\\": \\"provisioning-mcp-server\\", \\"args\\": [\\"--config\\", \\"/etc/provisioning/ai.toml\\"], \\"env\\": { \\"PROVISIONING_TOKEN\\": \\"your-auth-token\\" } } }\\n}","breadcrumbs":"MCP Integration » MCP Server Launch","id":"483","title":"MCP Server Launch"},"4830":{"body":"Test taskserv before deployment: # Test new taskserv version\\nprovisioning test env single my-taskserv --auto-start # Check logs\\nprovisioning test env logs ","breadcrumbs":"Test Environment Guide » 1. Taskserv Development","id":"4830","title":"1. Taskserv Development"},"4831":{"body":"Test taskserv combinations: # Test kubernetes + cilium + containerd\\nprovisioning test env server k8s-test [kubernetes cilium containerd] --auto-start","breadcrumbs":"Test Environment Guide » 2. Multi-Taskserv Integration","id":"4831","title":"2. Multi-Taskserv Integration"},"4832":{"body":"Test cluster configurations: # Test 3-node etcd cluster\\nprovisioning test topology load etcd_cluster | test env cluster etcd --auto-start","breadcrumbs":"Test Environment Guide » 3. Cluster Validation","id":"4832","title":"3. Cluster Validation"},"4833":{"body":"# .gitlab-ci.yml\\ntest-taskserv: stage: test script: - provisioning test quick kubernetes - provisioning test quick redis - provisioning test quick postgres","breadcrumbs":"Test Environment Guide » 4. CI/CD Integration","id":"4833","title":"4. CI/CD Integration"},"4834":{"body":"","breadcrumbs":"Test Environment Guide » Advanced Features","id":"4834","title":"Advanced Features"},"4835":{"body":"# Custom CPU and memory\\nprovisioning test env single postgres \\\\ --cpu 4000 \\\\ --memory 8192","breadcrumbs":"Test Environment Guide » Resource Limits","id":"4835","title":"Resource Limits"},"4836":{"body":"Each environment gets isolated network: Subnet: 172.20.0.0/16 (default) DNS enabled Container-to-container communication","breadcrumbs":"Test Environment Guide » Network Isolation","id":"4836","title":"Network Isolation"},"4837":{"body":"# Auto-cleanup after tests\\nprovisioning test env single redis --auto-start --auto-cleanup","breadcrumbs":"Test Environment Guide » Auto-Cleanup","id":"4837","title":"Auto-Cleanup"},"4838":{"body":"Run tests in parallel: # Create multiple environments\\nprovisioning test env single kubernetes --auto-start &\\nprovisioning test env single postgres --auto-start &\\nprovisioning test env single redis --auto-start & wait # List all\\nprovisioning test env list","breadcrumbs":"Test Environment Guide » Multiple Environments","id":"4838","title":"Multiple Environments"},"4839":{"body":"","breadcrumbs":"Test Environment Guide » Troubleshooting","id":"4839","title":"Troubleshooting"},"484":{"body":"","breadcrumbs":"MCP Integration » Available Tools","id":"484","title":"Available Tools"},"4840":{"body":"Error: Failed to connect to Docker Solution: # Check Docker\\ndocker ps # Start Docker daemon\\nsudo systemctl start docker # Linux\\nopen -a Docker # macOS","breadcrumbs":"Test Environment Guide » Docker not running","id":"4840","title":"Docker not running"},"4841":{"body":"Error: Connection refused (port 8080) Solution: cd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background","breadcrumbs":"Test Environment Guide » Orchestrator not running","id":"4841","title":"Orchestrator not running"},"4842":{"body":"Check logs: provisioning test env logs Check Docker: docker ps -a\\ndocker logs ","breadcrumbs":"Test Environment Guide » Environment creation fails","id":"4842","title":"Environment creation fails"},"4843":{"body":"Error: Cannot allocate memory Solution: # Cleanup old environments\\nprovisioning test env list | each {|env| provisioning test env cleanup $env.id } # Or cleanup Docker\\ndocker system prune -af","breadcrumbs":"Test Environment Guide » Out of resources","id":"4843","title":"Out of resources"},"4844":{"body":"","breadcrumbs":"Test Environment Guide » Best Practices","id":"4844","title":"Best Practices"},"4845":{"body":"Reuse topology templates instead of recreating: provisioning test topology load kubernetes_3node | test env cluster kubernetes","breadcrumbs":"Test Environment Guide » 1. Use Templates","id":"4845","title":"1. Use Templates"},"4846":{"body":"Always use auto-cleanup in CI/CD: provisioning test quick # Includes auto-cleanup","breadcrumbs":"Test Environment Guide » 2. Auto-Cleanup","id":"4846","title":"2. Auto-Cleanup"},"4847":{"body":"Adjust resources based on needs: Development: 1-2 cores, 2 GB RAM Integration: 2-4 cores, 4-8 GB RAM Production-like: 4+ cores, 8+ GB RAM","breadcrumbs":"Test Environment Guide » 3. Resource Planning","id":"4847","title":"3. Resource Planning"},"4848":{"body":"Run independent tests in parallel: for taskserv in [kubernetes postgres redis] { provisioning test quick $taskserv &\\n}\\nwait","breadcrumbs":"Test Environment Guide » 4. Parallel Testing","id":"4848","title":"4. Parallel Testing"},"4849":{"body":"","breadcrumbs":"Test Environment Guide » Configuration","id":"4849","title":"Configuration"},"485":{"body":"Tool : generate_config Generate infrastructure configuration from natural language description. { \\"name\\": \\"generate_config\\", \\"description\\": \\"Generate a Nickel infrastructure configuration from a natural language description\\", \\"inputSchema\\": { \\"type\\": \\"object\\", \\"properties\\": { \\"description\\": { \\"type\\": \\"string\\", \\"description\\": \\"Natural language description of desired infrastructure\\" }, \\"schema\\": { \\"type\\": \\"string\\", \\"description\\": \\"Target schema name (e.g., \'database\', \'kubernetes\', \'network\'). Optional.\\" }, \\"format\\": { \\"type\\": \\"string\\", \\"enum\\": [\\"nickel\\", \\"toml\\"], \\"description\\": \\"Output format (default: nickel)\\" } }, \\"required\\": [\\"description\\"] }\\n} Example Usage : # Via MCP client\\nmcp-client provisioning generate_config \\\\ --description \\"Production PostgreSQL cluster with encryption and daily backups\\" \\\\ --schema database # Claude desktop prompt:\\n# @provisioning: Generate a production PostgreSQL setup with automated backups Response : { database = { engine = \\"postgresql\\", version = \\"15.0\\", instance = { instance_class = \\"db.r6g.xlarge\\", allocated_storage_gb = 100, iops = 3000, }, security = { encryption_enabled = true, encryption_key_id = \\"kms://prod-db-key\\", tls_enabled = true, tls_version = \\"1.3\\", }, backup = { enabled = true, retention_days = 30, preferred_window = \\"03:00-04:00\\", copy_to_region = \\"us-west-2\\", }, monitoring = { enhanced_monitoring_enabled = true, monitoring_interval_seconds = 60, log_exports = [\\"postgresql\\"], }, }\\n}","breadcrumbs":"MCP Integration » 1. Config Generation","id":"485","title":"1. Config Generation"},"4850":{"body":"Base image: ubuntu:22.04 CPU: 1000 millicores (1 core) Memory: 2048 MB (2 GB) Network: 172.20.0.0/16","breadcrumbs":"Test Environment Guide » Default Settings","id":"4850","title":"Default Settings"},"4851":{"body":"# Override defaults\\nprovisioning test env single postgres \\\\ --base-image debian:12 \\\\ --cpu 2000 \\\\ --memory 4096","breadcrumbs":"Test Environment Guide » Custom Config","id":"4851","title":"Custom Config"},"4852":{"body":"Test Environment API Topology Templates Orchestrator Guide Taskserv Development","breadcrumbs":"Test Environment Guide » Related Documentation","id":"4852","title":"Related Documentation"},"4853":{"body":"Version Date Changes 1.0.0 2025-10-06 Initial test environment service Maintained By : Infrastructure Team","breadcrumbs":"Test Environment Guide » Version History","id":"4853","title":"Version History"},"4854":{"body":"","breadcrumbs":"Test Environment System » Test Environment Service (v3.4.0)","id":"4854","title":"Test Environment Service (v3.4.0)"},"4855":{"body":"A comprehensive containerized test environment service has been integrated into the orchestrator, enabling automated testing of taskservs, complete servers, and multi-node clusters without manual Docker management.","breadcrumbs":"Test Environment System » 🚀 Test Environment Service Completed (2025-10-06)","id":"4855","title":"🚀 Test Environment Service Completed (2025-10-06)"},"4856":{"body":"Automated Container Management : No manual Docker operations required Three Test Environment Types : Single taskserv, server simulation, multi-node clusters Multi-Node Support : Test complex topologies (Kubernetes HA, etcd clusters) Network Isolation : Each test environment gets dedicated Docker networks Resource Management : Configurable CPU, memory, and disk limits Topology Templates : Predefined cluster configurations for common scenarios Auto-Cleanup : Optional automatic cleanup after tests complete CI/CD Integration : Easy integration into automated pipelines","breadcrumbs":"Test Environment System » Key Features","id":"4856","title":"Key Features"},"4857":{"body":"","breadcrumbs":"Test Environment System » Test Environment Types","id":"4857","title":"Test Environment Types"},"4858":{"body":"Test individual taskserv in isolated container: # Quick test (create, run, cleanup)\\nprovisioning test quick kubernetes # With custom resources\\nprovisioning test env single postgres --cpu 2000 --memory 4096 --auto-start --auto-cleanup # With infrastructure context\\nprovisioning test env single redis --infra my-project","breadcrumbs":"Test Environment System » 1. Single Taskserv Testing","id":"4858","title":"1. Single Taskserv Testing"},"4859":{"body":"Test complete server configurations with multiple taskservs: # Simulate web server\\nprovisioning test env server web-01 [containerd kubernetes cilium] --auto-start # Simulate database server\\nprovisioning test env server db-01 [postgres redis] --infra prod-stack --auto-start","breadcrumbs":"Test Environment System » 2. Server Simulation","id":"4859","title":"2. Server Simulation"},"486":{"body":"Tool : validate_config Validate a Nickel configuration against schemas and policies. { \\"name\\": \\"validate_config\\", \\"description\\": \\"Validate a Nickel configuration file\\", \\"inputSchema\\": { \\"type\\": \\"object\\", \\"properties\\": { \\"config\\": { \\"type\\": \\"string\\", \\"description\\": \\"Nickel configuration content or file path\\" }, \\"schema\\": { \\"type\\": \\"string\\", \\"description\\": \\"Schema name to validate against (optional)\\" }, \\"strict\\": { \\"type\\": \\"boolean\\", \\"description\\": \\"Enable strict validation (default: true)\\" } }, \\"required\\": [\\"config\\"] }\\n} Example Usage : # Validate configuration\\nmcp-client provisioning validate_config \\\\ --config \\"$(cat workspaces/prod/database.ncl)\\" # With specific schema\\nmcp-client provisioning validate_config \\\\ --config \\"workspaces/prod/kubernetes.ncl\\" \\\\ --schema kubernetes Response : { \\"valid\\": true, \\"errors\\": [], \\"warnings\\": [ \\"Consider enabling automated backups for production use\\" ], \\"metadata\\": { \\"schema\\": \\"kubernetes\\", \\"version\\": \\"1.28\\", \\"validated_at\\": \\"2025-01-13T10:45:30Z\\" }\\n}","breadcrumbs":"MCP Integration » 2. Config Validation","id":"486","title":"2. Config Validation"},"4860":{"body":"Test complex cluster configurations before deployment: # 3-node Kubernetes HA cluster\\nprovisioning test topology load kubernetes_3node | test env cluster kubernetes --auto-start # etcd cluster\\nprovisioning test topology load etcd_cluster | test env cluster etcd --auto-start # Single-node Kubernetes\\nprovisioning test topology load kubernetes_single | test env cluster kubernetes","breadcrumbs":"Test Environment System » 3. Multi-Node Cluster Topology","id":"4860","title":"3. Multi-Node Cluster Topology"},"4861":{"body":"# List all test environments\\nprovisioning test env list # Check environment status\\nprovisioning test env status # View environment logs\\nprovisioning test env logs # Run tests in environment\\nprovisioning test env run # Cleanup environment\\nprovisioning test env cleanup ","breadcrumbs":"Test Environment System » Test Environment Management","id":"4861","title":"Test Environment Management"},"4862":{"body":"Predefined multi-node cluster templates in provisioning/config/test-topologies.toml: Template Description Nodes Use Case kubernetes_3node K8s HA cluster 1 CP + 2 workers Production-like testing kubernetes_single All-in-one K8s 1 node Development testing etcd_cluster etcd cluster 3 members Distributed consensus containerd_test Standalone containerd 1 node Container runtime postgres_redis Database stack 2 nodes Database integration","breadcrumbs":"Test Environment System » Available Topology Templates","id":"4862","title":"Available Topology Templates"},"4863":{"body":"The orchestrator exposes test environment endpoints: Create Environment : POST http://localhost:9090/v1/test/environments/create List Environments : GET http://localhost:9090/v1/test/environments Get Environment : GET http://localhost:9090/v1/test/environments/{id} Run Tests : POST http://localhost:9090/v1/test/environments/{id}/run Cleanup : DELETE http://localhost:9090/v1/test/environments/{id} Get Logs : GET http://localhost:9090/v1/test/environments/{id}/logs","breadcrumbs":"Test Environment System » REST API Endpoints","id":"4863","title":"REST API Endpoints"},"4864":{"body":"Docker Running : Test environments require Docker daemon docker ps # Should work without errors Orchestrator Running : Start the orchestrator to manage test containers cd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background","breadcrumbs":"Test Environment System » Prerequisites","id":"4864","title":"Prerequisites"},"4865":{"body":"User Command (CLI/API) ↓\\nTest Orchestrator (Rust) ↓\\nContainer Manager (bollard) ↓\\nDocker API ↓\\nIsolated Test Containers • Dedicated networks • Resource limits • Volume mounts • Multi-node support","breadcrumbs":"Test Environment System » Architecture","id":"4865","title":"Architecture"},"4866":{"body":"Topology Templates : provisioning/config/test-topologies.toml Default Resources : 1000 millicores CPU, 2048 MB memory Network : 172.20.0.0/16 (default subnet) Base Image : ubuntu:22.04 (configurable)","breadcrumbs":"Test Environment System » Configuration","id":"4866","title":"Configuration"},"4867":{"body":"Taskserv Development : Test new taskservs before deployment Integration Testing : Validate taskserv combinations Cluster Validation : Test multi-node configurations CI/CD Integration : Automated infrastructure testing Production Simulation : Test production-like deployments safely","breadcrumbs":"Test Environment System » Use Cases","id":"4867","title":"Use Cases"},"4868":{"body":"# GitLab CI\\ntest-infrastructure: stage: test script: - ./scripts/start-orchestrator.nu --background - provisioning test quick kubernetes - provisioning test quick postgres - provisioning test quick redis - provisioning test topology load kubernetes_3node | test env cluster kubernetes --auto-start artifacts: when: on_failure paths: - test-logs/","breadcrumbs":"Test Environment System » CI/CD Integration Example","id":"4868","title":"CI/CD Integration Example"},"4869":{"body":"Complete documentation available: User Guide : Test Environment Guide Detailed Usage : Test Environment Usage Orchestrator README : Orchestrator","breadcrumbs":"Test Environment System » Documentation","id":"4869","title":"Documentation"},"487":{"body":"Tool : search_docs Search infrastructure documentation using RAG system. { \\"name\\": \\"search_docs\\", \\"description\\": \\"Search provisioning documentation for information\\", \\"inputSchema\\": { \\"type\\": \\"object\\", \\"properties\\": { \\"query\\": { \\"type\\": \\"string\\", \\"description\\": \\"Search query (natural language)\\" }, \\"top_k\\": { \\"type\\": \\"integer\\", \\"description\\": \\"Number of results (default: 5)\\" }, \\"doc_type\\": { \\"type\\": \\"string\\", \\"enum\\": [\\"guide\\", \\"schema\\", \\"example\\", \\"troubleshooting\\"], \\"description\\": \\"Filter by document type (optional)\\" } }, \\"required\\": [\\"query\\"] }\\n} Example Usage : # Search documentation\\nmcp-client provisioning search_docs \\\\ --query \\"How do I configure PostgreSQL with replication?\\" # Get examples\\nmcp-client provisioning search_docs \\\\ --query \\"Kubernetes networking\\" \\\\ --doc_type example \\\\ --top_k 3 Response : { \\"results\\": [ { \\"source\\": \\"provisioning/docs/src/guides/database-replication.md\\", \\"excerpt\\": \\"PostgreSQL logical replication enables streaming of changes...\\", \\"relevance\\": 0.94, \\"section\\": \\"Setup Logical Replication\\" }, { \\"source\\": \\"provisioning/schemas/database.ncl\\", \\"excerpt\\": \\"replication = { enabled = true, mode = \\\\\\"logical\\\\\\", ... }\\", \\"relevance\\": 0.87, \\"section\\": \\"Replication Configuration\\" } ]\\n}","breadcrumbs":"MCP Integration » 3. Documentation Search","id":"487","title":"3. Documentation Search"},"4870":{"body":"Test commands are integrated into the CLI with shortcuts: test or tst - Test command prefix test quick - One-command test test env single/server/cluster - Create test environments test topology load/list - Manage topology templates","breadcrumbs":"Test Environment System » Command Shortcuts","id":"4870","title":"Command Shortcuts"},"4871":{"body":"Version : 1.0.0 Date : 2025-10-06 Status : Production Ready","breadcrumbs":"TaskServ Validation Guide » Taskserv Validation and Testing Guide","id":"4871","title":"Taskserv Validation and Testing Guide"},"4872":{"body":"The taskserv validation and testing system provides comprehensive evaluation of infrastructure services before deployment, reducing errors and increasing confidence in deployments.","breadcrumbs":"TaskServ Validation Guide » Overview","id":"4872","title":"Overview"},"4873":{"body":"","breadcrumbs":"TaskServ Validation Guide » Validation Levels","id":"4873","title":"Validation Levels"},"4874":{"body":"Validates configuration files, templates, and scripts without requiring infrastructure access. What it checks: KCL schema syntax and semantics Jinja2 template syntax Shell script syntax (with shellcheck if available) File structure and naming conventions Command: provisioning taskserv validate kubernetes --level static","breadcrumbs":"TaskServ Validation Guide » 1. Static Validation","id":"4874","title":"1. Static Validation"},"4875":{"body":"Checks taskserv dependencies, conflicts, and requirements. What it checks: Required dependencies are available Optional dependencies status Conflicting taskservs Resource requirements (memory, CPU, disk) Health check configuration Command: provisioning taskserv validate kubernetes --level dependencies Check against infrastructure: provisioning taskserv check-deps kubernetes --infra my-project","breadcrumbs":"TaskServ Validation Guide » 2. Dependency Validation","id":"4875","title":"2. Dependency Validation"},"4876":{"body":"Enhanced check mode that performs validation and previews deployment without making changes. What it does: Runs static validation Validates dependencies Previews configuration generation Lists files to be deployed Checks prerequisites (without SSH in check mode) Command: provisioning taskserv create kubernetes --check","breadcrumbs":"TaskServ Validation Guide » 3. Check Mode (Dry-Run)","id":"4876","title":"3. Check Mode (Dry-Run)"},"4877":{"body":"Tests taskserv in isolated container environment before actual deployment. What it tests: Package prerequisites Configuration validity Script execution Health check simulation Command: # Test with Docker\\nprovisioning taskserv test kubernetes --runtime docker # Test with Podman\\nprovisioning taskserv test kubernetes --runtime podman # Keep container for inspection\\nprovisioning taskserv test kubernetes --runtime docker --keep","breadcrumbs":"TaskServ Validation Guide » 4. Sandbox Testing","id":"4877","title":"4. Sandbox Testing"},"4878":{"body":"","breadcrumbs":"TaskServ Validation Guide » Complete Validation Workflow","id":"4878","title":"Complete Validation Workflow"},"4879":{"body":"# 1. Static validation (fastest, no infrastructure needed)\\nprovisioning taskserv validate kubernetes --level static -v # 2. Dependency validation\\nprovisioning taskserv check-deps kubernetes --infra my-project # 3. Check mode (dry-run with full validation)\\nprovisioning taskserv create kubernetes --check -v # 4. Sandbox testing (optional, requires Docker/Podman)\\nprovisioning taskserv test kubernetes --runtime docker # 5. Actual deployment (after all validations pass)\\nprovisioning taskserv create kubernetes","breadcrumbs":"TaskServ Validation Guide » Recommended Validation Sequence","id":"4879","title":"Recommended Validation Sequence"},"488":{"body":"Tool : troubleshoot_deployment Analyze deployment failures and suggest fixes. { \\"name\\": \\"troubleshoot_deployment\\", \\"description\\": \\"Analyze deployment logs and suggest fixes\\", \\"inputSchema\\": { \\"type\\": \\"object\\", \\"properties\\": { \\"deployment_id\\": { \\"type\\": \\"string\\", \\"description\\": \\"Deployment ID (e.g., \'deploy-2025-01-13-001\')\\" }, \\"logs\\": { \\"type\\": \\"string\\", \\"description\\": \\"Deployment logs (optional, if deployment_id not provided)\\" }, \\"error_analysis_depth\\": { \\"type\\": \\"string\\", \\"enum\\": [\\"shallow\\", \\"deep\\"], \\"description\\": \\"Analysis depth (default: deep)\\" } } }\\n} Example Usage : # Troubleshoot recent deployment\\nmcp-client provisioning troubleshoot_deployment \\\\ --deployment_id \\"deploy-2025-01-13-001\\" # With custom logs\\nmcp-client provisioning troubleshoot_deployment \\\\\\n| --logs \\"$(journalctl -u provisioning --no-pager | tail -100)\\" | Response : { \\"status\\": \\"failure\\", \\"root_cause\\": \\"Database connection timeout during migration phase\\", \\"analysis\\": { \\"phase\\": \\"database_migration\\", \\"error_type\\": \\"connectivity\\", \\"confidence\\": 0.95 }, \\"suggestions\\": [ \\"Verify database security group allows inbound on port 5432\\", \\"Check database instance status (may be rebooting)\\", \\"Increase connection timeout in configuration\\" ], \\"corrected_config\\": \\"...generated Nickel config with fixes...\\", \\"similar_issues\\": [ \\"[https://docs/troubleshooting/database-connectivity.md\\"](https://docs/troubleshooting/database-connectivity.md\\") ]\\n}","breadcrumbs":"MCP Integration » 4. Deployment Troubleshooting","id":"488","title":"4. Deployment Troubleshooting"},"4880":{"body":"# Run all validation levels\\nprovisioning taskserv validate kubernetes --level all -v","breadcrumbs":"TaskServ Validation Guide » Quick Validation (All Levels)","id":"4880","title":"Quick Validation (All Levels)"},"4881":{"body":"","breadcrumbs":"TaskServ Validation Guide » Validation Commands Reference","id":"4881","title":"Validation Commands Reference"},"4882":{"body":"Multi-level validation framework. Options: --level - Validation level: static, dependencies, health, all (default: all) --infra - Infrastructure context --settings - Settings file path --verbose - Verbose output --out - Output format: json, yaml, text Examples: # Complete validation\\nprovisioning taskserv validate kubernetes # Only static validation\\nprovisioning taskserv validate kubernetes --level static # With verbose output\\nprovisioning taskserv validate kubernetes -v # JSON output\\nprovisioning taskserv validate kubernetes --out json","breadcrumbs":"TaskServ Validation Guide » provisioning taskserv validate ","id":"4882","title":"provisioning taskserv validate "},"4883":{"body":"Check dependencies against infrastructure. Options: --infra - Infrastructure context --settings - Settings file path --verbose - Verbose output Examples: # Check dependencies\\nprovisioning taskserv check-deps kubernetes --infra my-project # Verbose output\\nprovisioning taskserv check-deps kubernetes --infra my-project -v","breadcrumbs":"TaskServ Validation Guide » provisioning taskserv check-deps ","id":"4883","title":"provisioning taskserv check-deps "},"4884":{"body":"Enhanced check mode with full validation and preview. Options: --check - Enable check mode (no actual deployment) --verbose - Verbose output All standard create options Examples: # Check mode with verbose output\\nprovisioning taskserv create kubernetes --check -v # Check specific server\\nprovisioning taskserv create kubernetes server-01 --check","breadcrumbs":"TaskServ Validation Guide » provisioning taskserv create --check","id":"4884","title":"provisioning taskserv create --check"},"4885":{"body":"Sandbox testing in isolated environment. Options: --runtime - Runtime: docker, podman, native (default: docker) --infra - Infrastructure context --settings - Settings file path --keep - Keep container after test --verbose - Verbose output Examples: # Test with Docker\\nprovisioning taskserv test kubernetes --runtime docker # Test with Podman\\nprovisioning taskserv test kubernetes --runtime podman # Keep container for debugging\\nprovisioning taskserv test kubernetes --keep -v # Connect to kept container\\ndocker exec -it taskserv-test-kubernetes bash","breadcrumbs":"TaskServ Validation Guide » provisioning taskserv test ","id":"4885","title":"provisioning taskserv test "},"4886":{"body":"","breadcrumbs":"TaskServ Validation Guide » Validation Output","id":"4886","title":"Validation Output"},"4887":{"body":"Taskserv Validation\\nTaskserv: kubernetes\\nLevel: static Validating Nickel schemas for kubernetes... Checking main.ncl... ✓ Valid Checking version.ncl... ✓ Valid Checking dependencies.ncl... ✓ Valid Validating templates for kubernetes... Checking env-kubernetes.j2... ✓ Basic syntax OK Checking install-kubernetes.sh... ✓ Basic syntax OK Validation Summary\\n✓ nickel: 0 errors, 0 warnings\\n✓ templates: 0 errors, 0 warnings\\n✓ scripts: 0 errors, 0 warnings Overall Status\\n✓ VALID - 0 warnings","breadcrumbs":"TaskServ Validation Guide » Static Validation","id":"4887","title":"Static Validation"},"4888":{"body":"Dependency Validation Report\\nTaskserv: kubernetes Status: VALID Required Dependencies: • containerd • etcd • os Optional Dependencies: • cilium • helm Conflicts: • docker • podman","breadcrumbs":"TaskServ Validation Guide » Dependency Validation","id":"4888","title":"Dependency Validation"},"4889":{"body":"Check Mode: kubernetes on server-01 → Running static validation... ✓ Static validation passed → Checking dependencies... ✓ Dependencies OK Required: containerd, etcd, os → Previewing configuration generation... ✓ Configuration preview generated Files to process: 15 → Checking prerequisites... ℹ Prerequisite checks (preview mode): ⊘ Server accessibility: Check mode - SSH not tested ℹ Directory /tmp: Would verify directory exists ℹ Command bash: Would verify command is available Check Mode Summary\\n✓ All validations passed 💡 Taskserv can be deployed with: provisioning taskserv create kubernetes","breadcrumbs":"TaskServ Validation Guide » Check Mode Output","id":"4889","title":"Check Mode Output"},"489":{"body":"Tool : get_schema Retrieve schema definition with examples. { \\"name\\": \\"get_schema\\", \\"description\\": \\"Get a provisioning schema definition\\", \\"inputSchema\\": { \\"type\\": \\"object\\", \\"properties\\": { \\"schema_name\\": { \\"type\\": \\"string\\", \\"description\\": \\"Schema name (e.g., \'database\', \'kubernetes\')\\" }, \\"format\\": { \\"type\\": \\"string\\", \\"enum\\": [\\"schema\\", \\"example\\", \\"documentation\\"], \\"description\\": \\"Response format (default: schema)\\" } }, \\"required\\": [\\"schema_name\\"] }\\n} Example Usage : # Get schema definition\\nmcp-client provisioning get_schema --schema_name database # Get example configuration\\nmcp-client provisioning get_schema \\\\ --schema_name kubernetes \\\\ --format example","breadcrumbs":"MCP Integration » 5. Get Schema","id":"489","title":"5. Get Schema"},"4890":{"body":"Taskserv Sandbox Testing\\nTaskserv: kubernetes\\nRuntime: docker → Running pre-test validation...\\n✓ Validation passed → Preparing sandbox environment... Using base image: ubuntu:22.04\\n✓ Sandbox prepared: a1b2c3d4e5f6 → Running tests in sandbox... Test 1: Package prerequisites... Test 2: Configuration validity... Test 3: Script execution... Test 4: Health check simulation... Test Summary\\nTotal tests: 4\\nPassed: 4\\nFailed: 0\\nSkipped: 0 Detailed Results: ✓ Package prerequisites: Package manager accessible ✓ Configuration validity: 3 configuration files validated ✓ Script execution: 2 scripts validated ✓ Health check: Health check configuration valid: http://localhost:6443/healthz ✓ All tests passed","breadcrumbs":"TaskServ Validation Guide » Test Output","id":"4890","title":"Test Output"},"4891":{"body":"","breadcrumbs":"TaskServ Validation Guide » Integration with CI/CD","id":"4891","title":"Integration with CI/CD"},"4892":{"body":"validate-taskservs: stage: validate script: - provisioning taskserv validate kubernetes --level all --out json - provisioning taskserv check-deps kubernetes --infra production test-taskservs: stage: test script: - provisioning taskserv test kubernetes --runtime docker dependencies: - validate-taskservs deploy-taskservs: stage: deploy script: - provisioning taskserv create kubernetes dependencies: - test-taskservs only: - main","breadcrumbs":"TaskServ Validation Guide » GitLab CI Example","id":"4892","title":"GitLab CI Example"},"4893":{"body":"name: Taskserv Validation on: [push, pull_request] jobs: validate: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Validate Taskservs run: | provisioning taskserv validate kubernetes --level all -v - name: Check Dependencies run: | provisioning taskserv check-deps kubernetes --infra production - name: Test in Sandbox run: | provisioning taskserv test kubernetes --runtime docker","breadcrumbs":"TaskServ Validation Guide » GitHub Actions Example","id":"4893","title":"GitHub Actions Example"},"4894":{"body":"","breadcrumbs":"TaskServ Validation Guide » Troubleshooting","id":"4894","title":"Troubleshooting"},"4895":{"body":"If shellcheck is not available, script validation will be skipped with a warning. Install shellcheck: # macOS\\nbrew install shellcheck # Ubuntu/Debian\\napt install shellcheck # Fedora\\ndnf install shellcheck","breadcrumbs":"TaskServ Validation Guide » shellcheck not found","id":"4895","title":"shellcheck not found"},"4896":{"body":"Sandbox testing requires Docker or Podman. Check runtime: # Docker\\ndocker ps # Podman\\npodman ps # Use native mode (limited testing)\\nprovisioning taskserv test kubernetes --runtime native","breadcrumbs":"TaskServ Validation Guide » Docker/Podman not available","id":"4896","title":"Docker/Podman not available"},"4897":{"body":"Nickel type checking errors indicate syntax or type problems. Common fixes: Check schema syntax in .ncl files Validate imports and dependencies Run nickel format to format files Check manifest.toml dependencies","breadcrumbs":"TaskServ Validation Guide » Nickel type checking errors","id":"4897","title":"Nickel type checking errors"},"4898":{"body":"If conflicting taskservs are detected: Remove conflicting taskserv first Check infrastructure configuration Review dependency declarations in dependencies.ncl","breadcrumbs":"TaskServ Validation Guide » Dependency conflicts","id":"4898","title":"Dependency conflicts"},"4899":{"body":"","breadcrumbs":"TaskServ Validation Guide » Advanced Usage","id":"4899","title":"Advanced Usage"},"49":{"body":"Before installation, ensure you have: Administrative privileges - Required for system-wide installation Internet connection - For downloading dependencies Terminal/Command line access - Basic command line knowledge helpful","breadcrumbs":"Installation Guide » Prerequisites","id":"49","title":"Prerequisites"},"490":{"body":"Tool : check_compliance Verify configuration against compliance policies (Cedar). { \\"name\\": \\"check_compliance\\", \\"description\\": \\"Check configuration against compliance policies\\", \\"inputSchema\\": { \\"type\\": \\"object\\", \\"properties\\": { \\"config\\": { \\"type\\": \\"string\\", \\"description\\": \\"Configuration to check\\" }, \\"policy_set\\": { \\"type\\": \\"string\\", \\"description\\": \\"Policy set to check against (e.g., \'pci-dss\', \'hipaa\', \'sox\')\\" } }, \\"required\\": [\\"config\\", \\"policy_set\\"] }\\n} Example Usage : # Check against PCI-DSS\\nmcp-client provisioning check_compliance \\\\ --config \\"$(cat workspaces/prod/database.ncl)\\" \\\\ --policy_set pci-dss","breadcrumbs":"MCP Integration » 6. Compliance Check","id":"490","title":"6. Compliance Check"},"4900":{"body":"You can create custom validation scripts by extending the validation framework: # custom_validation.nu\\nuse provisioning/core/nulib/taskservs/validate.nu * def custom-validate [taskserv: string] { # Custom validation logic let result = (validate-nickel-schemas $taskserv --verbose=true) # Additional custom checks # ... return $result\\n}","breadcrumbs":"TaskServ Validation Guide » Custom Validation Scripts","id":"4900","title":"Custom Validation Scripts"},"4901":{"body":"Validate multiple taskservs: # Validate all taskservs in infrastructure\\nfor taskserv in (provisioning taskserv list | get name) { provisioning taskserv validate $taskserv\\n}","breadcrumbs":"TaskServ Validation Guide » Batch Validation","id":"4901","title":"Batch Validation"},"4902":{"body":"Create test suite for all taskservs: #!/usr/bin/env nu let taskservs = [\\"kubernetes\\", \\"containerd\\", \\"cilium\\", \\"etcd\\"] for ts in $taskservs { print $\\"Testing ($ts)...\\" provisioning taskserv test $ts --runtime docker\\n}","breadcrumbs":"TaskServ Validation Guide » Automated Testing","id":"4902","title":"Automated Testing"},"4903":{"body":"","breadcrumbs":"TaskServ Validation Guide » Best Practices","id":"4903","title":"Best Practices"},"4904":{"body":"Always validate before deploying to production Run check mode to preview changes Test in sandbox for critical services Check dependencies in infrastructure context","breadcrumbs":"TaskServ Validation Guide » Before Deployment","id":"4904","title":"Before Deployment"},"4905":{"body":"Validate frequently during taskserv development Use verbose mode to understand validation details Fix warnings even if validation passes Keep containers for debugging test failures","breadcrumbs":"TaskServ Validation Guide » During Development","id":"4905","title":"During Development"},"4906":{"body":"Fail fast on validation errors Require all tests pass before merge Generate reports in JSON format for analysis Archive test results for audit trail","breadcrumbs":"TaskServ Validation Guide » In CI/CD","id":"4906","title":"In CI/CD"},"4907":{"body":"Taskserv Development Guide KCL Schema Reference Dependency Management CI/CD Integration","breadcrumbs":"TaskServ Validation Guide » Related Documentation","id":"4907","title":"Related Documentation"},"4908":{"body":"Version Date Changes 1.0.0 2025-10-06 Initial validation and testing guide Maintained By : Infrastructure Team Review Cycle : Quarterly","breadcrumbs":"TaskServ Validation Guide » Version History","id":"4908","title":"Version History"},"4909":{"body":"This comprehensive troubleshooting guide helps you diagnose and resolve common issues with Infrastructure Automation.","breadcrumbs":"Troubleshooting Guide » Troubleshooting Guide","id":"4909","title":"Troubleshooting Guide"},"491":{"body":"","breadcrumbs":"MCP Integration » Integration Examples","id":"491","title":"Integration Examples"},"4910":{"body":"Common issues and their solutions Diagnostic commands and techniques Error message interpretation Performance optimization Recovery procedures Prevention strategies","breadcrumbs":"Troubleshooting Guide » What You\'ll Learn","id":"4910","title":"What You\'ll Learn"},"4911":{"body":"","breadcrumbs":"Troubleshooting Guide » General Troubleshooting Approach","id":"4911","title":"General Troubleshooting Approach"},"4912":{"body":"# Check overall system status\\nprovisioning env\\nprovisioning validate config # Check specific component status\\nprovisioning show servers --infra my-infra\\nprovisioning taskserv list --infra my-infra --installed","breadcrumbs":"Troubleshooting Guide » 1. Identify the Problem","id":"4912","title":"1. Identify the Problem"},"4913":{"body":"# Enable debug mode for detailed output\\nprovisioning --debug # Check logs and errors\\nprovisioning show logs --infra my-infra","breadcrumbs":"Troubleshooting Guide » 2. Gather Information","id":"4913","title":"2. Gather Information"},"4914":{"body":"# Validate configuration\\nprovisioning validate config --detailed # Test connectivity\\nprovisioning provider test aws\\nprovisioning network test --infra my-infra","breadcrumbs":"Troubleshooting Guide » 3. Use Diagnostic Commands","id":"4914","title":"3. Use Diagnostic Commands"},"4915":{"body":"","breadcrumbs":"Troubleshooting Guide » Installation and Setup Issues","id":"4915","title":"Installation and Setup Issues"},"4916":{"body":"Symptoms: Installation script errors Missing dependencies Permission denied errors Diagnosis: # Check system requirements\\nuname -a\\ndf -h\\nwhoami # Check permissions\\nls -la /usr/local/\\nsudo -l Solutions: Permission Issues # Run installer with sudo\\nsudo ./install-provisioning # Or install to user directory\\n./install-provisioning --prefix=$HOME/provisioning\\nexport PATH=\\"$HOME/provisioning/bin:$PATH\\" Missing Dependencies # Ubuntu/Debian\\nsudo apt update\\nsudo apt install -y curl wget tar build-essential # RHEL/CentOS\\nsudo dnf install -y curl wget tar gcc make Architecture Issues # Check architecture\\nuname -m # Download correct architecture package\\n# x86_64: Intel/AMD 64-bit\\n# arm64: ARM 64-bit (Apple Silicon)\\nwget https://releases.example.com/provisioning-linux-x86_64.tar.gz","breadcrumbs":"Troubleshooting Guide » Issue: Installation Fails","id":"4916","title":"Issue: Installation Fails"},"4917":{"body":"Symptoms: bash: provisioning: command not found Diagnosis: # Check if provisioning is installed\\nwhich provisioning\\nls -la /usr/local/bin/provisioning # Check PATH\\necho $PATH Solutions: # Add to PATH\\nexport PATH=\\"/usr/local/bin:$PATH\\" # Make permanent (add to shell profile)\\necho \'export PATH=\\"/usr/local/bin:$PATH\\"\' >> ~/.bashrc\\nsource ~/.bashrc # Create symlink if missing\\nsudo ln -sf /usr/local/provisioning/core/nulib/provisioning /usr/local/bin/provisioning","breadcrumbs":"Troubleshooting Guide » Issue: Command Not Found","id":"4917","title":"Issue: Command Not Found"},"4918":{"body":"Symptoms: Plugin not found: nu_plugin_kcl\\nPlugin registration failed Diagnosis: # Check Nushell version\\nnu --version # Check KCL installation (required for nu_plugin_kcl)\\nkcl version # Check plugin registration\\nnu -c \\"version | get installed_plugins\\" Solutions: # Install KCL CLI (required for nu_plugin_kcl)\\n# Download from: https://github.com/kcl-lang/cli/releases # Re-register plugins\\nnu -c \\"plugin add /usr/local/provisioning/plugins/nu_plugin_kcl\\"\\nnu -c \\"plugin add /usr/local/provisioning/plugins/nu_plugin_tera\\" # Restart Nushell after plugin registration","breadcrumbs":"Troubleshooting Guide » Issue: Nushell Plugin Errors","id":"4918","title":"Issue: Nushell Plugin Errors"},"4919":{"body":"","breadcrumbs":"Troubleshooting Guide » Configuration Issues","id":"4919","title":"Configuration Issues"},"492":{"body":"~/.claude/claude_desktop_config.json:\\n{ \\"mcpServers\\": { \\"provisioning\\": { \\"command\\": \\"provisioning-mcp-server\\", \\"args\\": [\\"--config\\", \\"/etc/provisioning/ai.toml\\"], \\"env\\": { \\"PROVISIONING_API_KEY\\": \\"sk-...\\", \\"PROVISIONING_BASE_URL\\": \\"[http://localhost:8083\\"](http://localhost:8083\\") } } }\\n} Usage in Claude : User: I need a production Kubernetes cluster in AWS with automatic scaling Claude can now use provisioning tools:\\nI\'ll help you create a production Kubernetes cluster. Let me:\\n1. Search the documentation for best practices\\n2. Generate a configuration template\\n3. Validate it against your policies\\n4. Provide the final configuration","breadcrumbs":"MCP Integration » Claude Desktop (Most Common)","id":"492","title":"Claude Desktop (Most Common)"},"4920":{"body":"Symptoms: Configuration file not found\\nFailed to load configuration Diagnosis: # Check configuration file locations\\nprovisioning env | grep config # Check if files exist\\nls -la ~/.config/provisioning/\\nls -la /usr/local/provisioning/config.defaults.toml Solutions: # Initialize user configuration\\nprovisioning init config # Create missing directories\\nmkdir -p ~/.config/provisioning # Copy template\\ncp /usr/local/provisioning/config-examples/config.user.toml ~/.config/provisioning/config.toml # Verify configuration\\nprovisioning validate config","breadcrumbs":"Troubleshooting Guide » Issue: Configuration Not Found","id":"4920","title":"Issue: Configuration Not Found"},"4921":{"body":"Symptoms: Configuration validation failed\\nInvalid configuration value\\nMissing required field Diagnosis: # Detailed validation\\nprovisioning validate config --detailed # Check specific sections\\nprovisioning config show --section paths\\nprovisioning config show --section providers Solutions: Path Configuration Issues # Check base path exists\\nls -la /path/to/provisioning # Update configuration\\nnano ~/.config/provisioning/config.toml # Fix paths section\\n[paths]\\nbase = \\"/correct/path/to/provisioning\\" Provider Configuration Issues # Test provider connectivity\\nprovisioning provider test aws # Check credentials\\naws configure list # For AWS\\nupcloud-cli config # For UpCloud # Update provider configuration\\n[providers.aws]\\ninterface = \\"CLI\\" # or \\"API\\"","breadcrumbs":"Troubleshooting Guide » Issue: Configuration Validation Errors","id":"4921","title":"Issue: Configuration Validation Errors"},"4922":{"body":"Symptoms: Interpolation pattern not resolved: {{env.VARIABLE}}\\nTemplate rendering failed Diagnosis: # Test interpolation\\nprovisioning validate interpolation test # Check environment variables\\nenv | grep VARIABLE # Debug interpolation\\nprovisioning --debug validate interpolation validate Solutions: # Set missing environment variables\\nexport MISSING_VARIABLE=\\"value\\" # Use fallback values in configuration\\nconfig_value = \\"{{env.VARIABLE || \'default_value\'}}\\" # Check interpolation syntax\\n# Correct: {{env.HOME}}\\n# Incorrect: ${HOME} or $HOME","breadcrumbs":"Troubleshooting Guide » Issue: Interpolation Failures","id":"4922","title":"Issue: Interpolation Failures"},"4923":{"body":"","breadcrumbs":"Troubleshooting Guide » Server Management Issues","id":"4923","title":"Server Management Issues"},"4924":{"body":"Symptoms: Failed to create server\\nProvider API error\\nInsufficient quota Diagnosis: # Check provider status\\nprovisioning provider status aws # Test connectivity\\nping api.provider.com\\ncurl -I https://api.provider.com # Check quota\\nprovisioning provider quota --infra my-infra # Debug server creation\\nprovisioning --debug server create web-01 --infra my-infra --check Solutions: API Authentication Issues # AWS\\naws configure list\\naws sts get-caller-identity # UpCloud\\nupcloud-cli account show # Update credentials\\naws configure # For AWS\\nexport UPCLOUD_USERNAME=\\"your-username\\"\\nexport UPCLOUD_PASSWORD=\\"your-password\\" Quota/Limit Issues # Check current usage\\nprovisioning show costs --infra my-infra # Request quota increase from provider\\n# Or reduce resource requirements # Use smaller instance types\\n# Reduce number of servers Network/Connectivity Issues # Test network connectivity\\ncurl -v https://api.aws.amazon.com\\ncurl -v https://api.upcloud.com # Check DNS resolution\\nnslookup api.aws.amazon.com # Check firewall rules\\n# Ensure outbound HTTPS (port 443) is allowed","breadcrumbs":"Troubleshooting Guide » Issue: Server Creation Fails","id":"4924","title":"Issue: Server Creation Fails"},"4925":{"body":"Symptoms: Connection refused\\nPermission denied\\nHost key verification failed Diagnosis: # Check server status\\nprovisioning server list --infra my-infra # Test SSH manually\\nssh -v user@server-ip # Check SSH configuration\\nprovisioning show servers web-01 --infra my-infra Solutions: Connection Issues # Wait for server to be fully ready\\nprovisioning server list --infra my-infra --status # Check security groups/firewall\\n# Ensure SSH (port 22) is allowed # Use correct IP address\\nprovisioning show servers web-01 --infra my-infra | grep ip Authentication Issues # Check SSH key\\nls -la ~/.ssh/\\nssh-add -l # Generate new key if needed\\nssh-keygen -t ed25519 -f ~/.ssh/provisioning_key # Use specific key\\nprovisioning server ssh web-01 --key ~/.ssh/provisioning_key --infra my-infra Host Key Issues # Remove old host key\\nssh-keygen -R server-ip # Accept new host key\\nssh -o StrictHostKeyChecking=accept-new user@server-ip","breadcrumbs":"Troubleshooting Guide » Issue: SSH Access Fails","id":"4925","title":"Issue: SSH Access Fails"},"4926":{"body":"","breadcrumbs":"Troubleshooting Guide » Task Service Issues","id":"4926","title":"Task Service Issues"},"4927":{"body":"Symptoms: Service installation failed\\nPackage not found\\nDependency conflicts Diagnosis: # Check service prerequisites\\nprovisioning taskserv check kubernetes --infra my-infra # Debug installation\\nprovisioning --debug taskserv create kubernetes --infra my-infra --check # Check server resources\\nprovisioning server ssh web-01 --command \\"free -h && df -h\\" --infra my-infra Solutions: Resource Issues # Check available resources\\nprovisioning server ssh web-01 --command \\" echo \'Memory:\' && free -h echo \'Disk:\' && df -h echo \'CPU:\' && nproc\\n\\" --infra my-infra # Upgrade server if needed\\nprovisioning server resize web-01 --plan larger-plan --infra my-infra Package Repository Issues # Update package lists\\nprovisioning server ssh web-01 --command \\" sudo apt update && sudo apt upgrade -y\\n\\" --infra my-infra # Check repository connectivity\\nprovisioning server ssh web-01 --command \\" curl -I https://download.docker.com/linux/ubuntu/\\n\\" --infra my-infra Dependency Issues # Install missing dependencies\\nprovisioning taskserv create containerd --infra my-infra # Then install dependent service\\nprovisioning taskserv create kubernetes --infra my-infra","breadcrumbs":"Troubleshooting Guide » Issue: Service Installation Fails","id":"4927","title":"Issue: Service Installation Fails"},"4928":{"body":"Symptoms: Service status: failed\\nService not responding\\nHealth check failures Diagnosis: # Check service status\\nprovisioning taskserv status kubernetes --infra my-infra # Check service logs\\nprovisioning taskserv logs kubernetes --infra my-infra # SSH and check manually\\nprovisioning server ssh web-01 --command \\" sudo systemctl status kubernetes sudo journalctl -u kubernetes --no-pager -n 50\\n\\" --infra my-infra Solutions: Configuration Issues # Reconfigure service\\nprovisioning taskserv configure kubernetes --infra my-infra # Reset to defaults\\nprovisioning taskserv reset kubernetes --infra my-infra Port Conflicts # Check port usage\\nprovisioning server ssh web-01 --command \\" sudo netstat -tulpn | grep :6443 sudo ss -tulpn | grep :6443\\n\\" --infra my-infra # Change port configuration or stop conflicting service Permission Issues # Fix permissions\\nprovisioning server ssh web-01 --command \\" sudo chown -R kubernetes:kubernetes /var/lib/kubernetes sudo chmod 600 /etc/kubernetes/admin.conf\\n\\" --infra my-infra","breadcrumbs":"Troubleshooting Guide » Issue: Service Not Running","id":"4928","title":"Issue: Service Not Running"},"4929":{"body":"","breadcrumbs":"Troubleshooting Guide » Cluster Management Issues","id":"4929","title":"Cluster Management Issues"},"493":{"body":"import openai tools = [ { \\"type\\": \\"function\\", \\"function\\": { \\"name\\": \\"generate_config\\", \\"description\\": \\"Generate infrastructure configuration\\", \\"parameters\\": { \\"type\\": \\"object\\", \\"properties\\": { \\"description\\": { \\"type\\": \\"string\\", \\"description\\": \\"Infrastructure description\\" } }, \\"required\\": [\\"description\\"] } } }\\n] response = openai.ChatCompletion.create( model=\\"gpt-4\\", messages=[{\\"role\\": \\"user\\", \\"content\\": \\"Create a PostgreSQL database\\"}], tools=tools\\n)","breadcrumbs":"MCP Integration » OpenAI Function Calling","id":"493","title":"OpenAI Function Calling"},"4930":{"body":"Symptoms: Cluster deployment failed\\nPod creation errors\\nService unavailable Diagnosis: # Check cluster status\\nprovisioning cluster status web-cluster --infra my-infra # Check Kubernetes cluster\\nprovisioning server ssh master-01 --command \\" kubectl get nodes kubectl get pods --all-namespaces\\n\\" --infra my-infra # Check cluster logs\\nprovisioning cluster logs web-cluster --infra my-infra Solutions: Node Issues # Check node status\\nprovisioning server ssh master-01 --command \\" kubectl describe nodes\\n\\" --infra my-infra # Drain and rejoin problematic nodes\\nprovisioning server ssh master-01 --command \\" kubectl drain worker-01 --ignore-daemonsets kubectl delete node worker-01\\n\\" --infra my-infra # Rejoin node\\nprovisioning taskserv configure kubernetes --infra my-infra --servers worker-01 Resource Constraints # Check resource usage\\nprovisioning server ssh master-01 --command \\" kubectl top nodes kubectl top pods --all-namespaces\\n\\" --infra my-infra # Scale down or add more nodes\\nprovisioning cluster scale web-cluster --replicas 3 --infra my-infra\\nprovisioning server create worker-04 --infra my-infra Network Issues # Check network plugin\\nprovisioning server ssh master-01 --command \\" kubectl get pods -n kube-system | grep cilium\\n\\" --infra my-infra # Restart network plugin\\nprovisioning taskserv restart cilium --infra my-infra","breadcrumbs":"Troubleshooting Guide » Issue: Cluster Deployment Fails","id":"4930","title":"Issue: Cluster Deployment Fails"},"4931":{"body":"","breadcrumbs":"Troubleshooting Guide » Performance Issues","id":"4931","title":"Performance Issues"},"4932":{"body":"Symptoms: Commands take very long to complete Timeouts during operations High CPU/memory usage Diagnosis: # Check system resources\\ntop\\nhtop\\nfree -h\\ndf -h # Check network latency\\nping api.aws.amazon.com\\ntraceroute api.aws.amazon.com # Profile command execution\\ntime provisioning server list --infra my-infra Solutions: Local System Issues # Close unnecessary applications\\n# Upgrade system resources\\n# Use SSD storage if available # Increase timeout values\\nexport PROVISIONING_TIMEOUT=600 # 10 minutes Network Issues # Use region closer to your location\\n[providers.aws]\\nregion = \\"us-west-1\\" # Closer region # Enable connection pooling/caching\\n[cache]\\nenabled = true Large Infrastructure Issues # Use parallel operations\\nprovisioning server create --infra my-infra --parallel 4 # Filter results\\nprovisioning server list --infra my-infra --filter \\"status == \'running\'\\"","breadcrumbs":"Troubleshooting Guide » Issue: Slow Operations","id":"4932","title":"Issue: Slow Operations"},"4933":{"body":"Symptoms: System becomes unresponsive Out of memory errors Swap usage high Diagnosis: # Check memory usage\\nfree -h\\nps aux --sort=-%mem | head # Check for memory leaks\\nvalgrind provisioning server list --infra my-infra Solutions: # Increase system memory\\n# Close other applications\\n# Use streaming operations for large datasets # Enable garbage collection\\nexport PROVISIONING_GC_ENABLED=true # Reduce concurrent operations\\nexport PROVISIONING_MAX_PARALLEL=2","breadcrumbs":"Troubleshooting Guide » Issue: High Memory Usage","id":"4933","title":"Issue: High Memory Usage"},"4934":{"body":"","breadcrumbs":"Troubleshooting Guide » Network and Connectivity Issues","id":"4934","title":"Network and Connectivity Issues"},"4935":{"body":"Symptoms: Connection timeout\\nDNS resolution failed\\nSSL certificate errors Diagnosis: # Test basic connectivity\\nping 8.8.8.8\\ncurl -I https://api.aws.amazon.com\\nnslookup api.upcloud.com # Check SSL certificates\\nopenssl s_client -connect api.aws.amazon.com:443 -servername api.aws.amazon.com Solutions: DNS Issues # Use alternative DNS\\necho \'nameserver 8.8.8.8\' | sudo tee /etc/resolv.conf # Clear DNS cache\\nsudo systemctl restart systemd-resolved # Ubuntu\\nsudo dscacheutil -flushcache # macOS Proxy/Firewall Issues # Configure proxy if needed\\nexport HTTP_PROXY=http://proxy.company.com:9090\\nexport HTTPS_PROXY=http://proxy.company.com:9090 # Check firewall rules\\nsudo ufw status # Ubuntu\\nsudo firewall-cmd --list-all # RHEL/CentOS Certificate Issues # Update CA certificates\\nsudo apt update && sudo apt install ca-certificates # Ubuntu\\nbrew install ca-certificates # macOS # Skip SSL verification (temporary)\\nexport PROVISIONING_SKIP_SSL_VERIFY=true","breadcrumbs":"Troubleshooting Guide » Issue: API Connectivity Problems","id":"4935","title":"Issue: API Connectivity Problems"},"4936":{"body":"","breadcrumbs":"Troubleshooting Guide » Security and Encryption Issues","id":"4936","title":"Security and Encryption Issues"},"4937":{"body":"Symptoms: SOPS decryption failed\\nAge key not found\\nInvalid key format Diagnosis: # Check SOPS configuration\\nprovisioning sops config # Test SOPS manually\\nsops -d encrypted-file.ncl # Check Age keys\\nls -la ~/.config/sops/age/keys.txt\\nage-keygen -y ~/.config/sops/age/keys.txt Solutions: Missing Keys # Generate new Age key\\nage-keygen -o ~/.config/sops/age/keys.txt # Update SOPS configuration\\nprovisioning sops config --key-file ~/.config/sops/age/keys.txt Key Permissions # Fix key file permissions\\nchmod 600 ~/.config/sops/age/keys.txt\\nchown $(whoami) ~/.config/sops/age/keys.txt Configuration Issues # Update SOPS configuration in ~/.config/provisioning/config.toml\\n[sops]\\nuse_sops = true\\nkey_search_paths = [ \\"~/.config/sops/age/keys.txt\\", \\"/path/to/your/key.txt\\"\\n]","breadcrumbs":"Troubleshooting Guide » Issue: SOPS Decryption Fails","id":"4937","title":"Issue: SOPS Decryption Fails"},"4938":{"body":"Symptoms: Permission denied\\nAccess denied\\nInsufficient privileges Diagnosis: # Check user permissions\\nid\\ngroups # Check file permissions\\nls -la ~/.config/provisioning/\\nls -la /usr/local/provisioning/ # Test with sudo\\nsudo provisioning env Solutions: # Fix file ownership\\nsudo chown -R $(whoami):$(whoami) ~/.config/provisioning/ # Fix permissions\\nchmod -R 755 ~/.config/provisioning/\\nchmod 600 ~/.config/provisioning/config.toml # Add user to required groups\\nsudo usermod -a -G docker $(whoami) # For Docker access","breadcrumbs":"Troubleshooting Guide » Issue: Access Denied Errors","id":"4938","title":"Issue: Access Denied Errors"},"4939":{"body":"","breadcrumbs":"Troubleshooting Guide » Data and Storage Issues","id":"4939","title":"Data and Storage Issues"},"494":{"body":"# Start Ollama with provisioning MCP\\nOLLAMA_MCP_SERVERS=provisioning://localhost:3000 \\\\ ollama serve # Use with llama2 or mistral\\ncurl [http://localhost:11434/api/generate](http://localhost:11434/api/generate) \\\\ -d \'{ \\"model\\": \\"mistral\\", \\"prompt\\": \\"Create a Kubernetes cluster\\", \\"tools\\": [{\\"type\\": \\"mcp\\", \\"server\\": \\"provisioning\\"}] }\'","breadcrumbs":"MCP Integration » Local LLM Integration (Ollama)","id":"494","title":"Local LLM Integration (Ollama)"},"4940":{"body":"Symptoms: No space left on device\\nWrite failed\\nDisk full Diagnosis: # Check disk usage\\ndf -h\\ndu -sh ~/.config/provisioning/\\ndu -sh /usr/local/provisioning/ # Find large files\\nfind /usr/local/provisioning -type f -size +100M Solutions: # Clean up cache files\\nrm -rf ~/.config/provisioning/cache/*\\nrm -rf /usr/local/provisioning/.cache/* # Clean up logs\\nfind /usr/local/provisioning -name \\"*.log\\" -mtime +30 -delete # Clean up temporary files\\nrm -rf /tmp/provisioning-* # Compress old backups\\ngzip ~/.config/provisioning/backups/*.yaml","breadcrumbs":"Troubleshooting Guide » Issue: Disk Space Problems","id":"4940","title":"Issue: Disk Space Problems"},"4941":{"body":"","breadcrumbs":"Troubleshooting Guide » Recovery Procedures","id":"4941","title":"Recovery Procedures"},"4942":{"body":"# Restore from backup\\nprovisioning config restore --backup latest # Reset to defaults\\nprovisioning config reset # Recreate configuration\\nprovisioning init config --force","breadcrumbs":"Troubleshooting Guide » Configuration Recovery","id":"4942","title":"Configuration Recovery"},"4943":{"body":"# Check infrastructure status\\nprovisioning show servers --infra my-infra # Recover failed servers\\nprovisioning server create failed-server --infra my-infra # Restore from backup\\nprovisioning restore --backup latest --infra my-infra","breadcrumbs":"Troubleshooting Guide » Infrastructure Recovery","id":"4943","title":"Infrastructure Recovery"},"4944":{"body":"# Restart failed services\\nprovisioning taskserv restart kubernetes --infra my-infra # Reinstall corrupted services\\nprovisioning taskserv delete kubernetes --infra my-infra\\nprovisioning taskserv create kubernetes --infra my-infra","breadcrumbs":"Troubleshooting Guide » Service Recovery","id":"4944","title":"Service Recovery"},"4945":{"body":"","breadcrumbs":"Troubleshooting Guide » Prevention Strategies","id":"4945","title":"Prevention Strategies"},"4946":{"body":"# Weekly maintenance script\\n#!/bin/bash # Update system\\nprovisioning update --check # Validate configuration\\nprovisioning validate config # Check for service updates\\nprovisioning taskserv check-updates # Clean up old files\\nprovisioning cleanup --older-than 30d # Create backup\\nprovisioning backup create --name \\"weekly-$(date +%Y%m%d)\\"","breadcrumbs":"Troubleshooting Guide » Regular Maintenance","id":"4946","title":"Regular Maintenance"},"4947":{"body":"# Set up health monitoring\\n#!/bin/bash # Check system health every hour\\n0 * * * * /usr/local/bin/provisioning health check || echo \\"Health check failed\\" | mail -s \\"Provisioning Alert\\" admin@company.com # Weekly cost reports\\n0 9 * * 1 /usr/local/bin/provisioning show costs --all | mail -s \\"Weekly Cost Report\\" finance@company.com","breadcrumbs":"Troubleshooting Guide » Monitoring Setup","id":"4947","title":"Monitoring Setup"},"4948":{"body":"Configuration Management Version control all configuration files Use check mode before applying changes Regular validation and testing Security Regular key rotation Principle of least privilege Audit logs review Backup Strategy Automated daily backups Test restore procedures Off-site backup storage Documentation Document custom configurations Keep troubleshooting logs Share knowledge with team","breadcrumbs":"Troubleshooting Guide » Best Practices","id":"4948","title":"Best Practices"},"4949":{"body":"","breadcrumbs":"Troubleshooting Guide » Getting Additional Help","id":"4949","title":"Getting Additional Help"},"495":{"body":"Tools return consistent error responses: { \\"error\\": { \\"code\\": \\"VALIDATION_ERROR\\", \\"message\\": \\"Configuration has 3 validation errors\\", \\"details\\": [ { \\"field\\": \\"database.version\\", \\"message\\": \\"PostgreSQL version 9.6 is deprecated\\", \\"severity\\": \\"error\\" }, { \\"field\\": \\"backup.retention_days\\", \\"message\\": \\"Recommended minimum is 30 days for production\\", \\"severity\\": \\"warning\\" } ] }\\n}","breadcrumbs":"MCP Integration » Error Handling","id":"495","title":"Error Handling"},"4950":{"body":"#!/bin/bash\\n# Collect debug information echo \\"Collecting provisioning debug information...\\" mkdir -p /tmp/provisioning-debug\\ncd /tmp/provisioning-debug # System information\\nuname -a > system-info.txt\\nfree -h >> system-info.txt\\ndf -h >> system-info.txt # Provisioning information\\nprovisioning --version > provisioning-info.txt\\nprovisioning env >> provisioning-info.txt\\nprovisioning validate config --detailed > config-validation.txt 2>&1 # Configuration files\\ncp ~/.config/provisioning/config.toml user-config.toml 2>/dev/null || echo \\"No user config\\" > user-config.toml # Logs\\nprovisioning show logs > system-logs.txt 2>&1 # Create archive\\ncd /tmp\\ntar czf provisioning-debug-$(date +%Y%m%d_%H%M%S).tar.gz provisioning-debug/ echo \\"Debug information collected in: provisioning-debug-*.tar.gz\\"","breadcrumbs":"Troubleshooting Guide » Debug Information Collection","id":"4950","title":"Debug Information Collection"},"4951":{"body":"Built-in Help provisioning help\\nprovisioning help Documentation User guides in docs/user/ CLI reference: docs/user/cli-reference.md Configuration guide: docs/user/configuration.md Community Resources Project repository issues Community forums Documentation wiki Enterprise Support Professional services Priority support Custom development Remember: When reporting issues, always include the debug information collected above and specific error messages.","breadcrumbs":"Troubleshooting Guide » Support Channels","id":"4951","title":"Support Channels"},"4952":{"body":"Version : 3.5.0 Last Updated : 2025-10-09 Estimated Time : 30-60 minutes Difficulty : Beginner to Intermediate","breadcrumbs":"From Scratch » Complete Deployment Guide: From Scratch to Production","id":"4952","title":"Complete Deployment Guide: From Scratch to Production"},"4953":{"body":"Prerequisites Step 1: Install Nushell Step 2: Install Nushell Plugins (Recommended) Step 3: Install Required Tools Step 4: Clone and Setup Project Step 5: Initialize Workspace Step 6: Configure Environment Step 7: Discover and Load Modules Step 8: Validate Configuration Step 9: Deploy Servers Step 10: Install Task Services Step 11: Create Clusters Step 12: Verify Deployment Step 13: Post-Deployment Troubleshooting Next Steps","breadcrumbs":"From Scratch » Table of Contents","id":"4953","title":"Table of Contents"},"4954":{"body":"Before starting, ensure you have: ✅ Operating System : macOS, Linux, or Windows (WSL2 recommended) ✅ Administrator Access : Ability to install software and configure system ✅ Internet Connection : For downloading dependencies and accessing cloud providers ✅ Cloud Provider Credentials : UpCloud, Hetzner, AWS, or local development environment ✅ Basic Terminal Knowledge : Comfortable running shell commands ✅ Text Editor : vim, nano, Zed, VSCode, or your preferred editor","breadcrumbs":"From Scratch » Prerequisites","id":"4954","title":"Prerequisites"},"4955":{"body":"CPU : 2+ cores RAM : 8 GB minimum, 16 GB recommended Disk : 20 GB free space minimum","breadcrumbs":"From Scratch » Recommended Hardware","id":"4955","title":"Recommended Hardware"},"4956":{"body":"Nushell 0.109.1+ is the primary shell and scripting language for the provisioning platform.","breadcrumbs":"From Scratch » Step 1: Install Nushell","id":"4956","title":"Step 1: Install Nushell"},"4957":{"body":"# Install Nushell\\nbrew install nushell # Verify installation\\nnu --version\\n# Expected: 0.109.1 or higher","breadcrumbs":"From Scratch » macOS (via Homebrew)","id":"4957","title":"macOS (via Homebrew)"},"4958":{"body":"Ubuntu/Debian: # Add Nushell repository\\ncurl -fsSL https://starship.rs/install.sh | bash # Install Nushell\\nsudo apt update\\nsudo apt install nushell # Verify installation\\nnu --version Fedora: sudo dnf install nushell\\nnu --version Arch Linux: sudo pacman -S nushell\\nnu --version","breadcrumbs":"From Scratch » Linux (via Package Manager)","id":"4958","title":"Linux (via Package Manager)"},"4959":{"body":"# Install Rust (if not already installed)\\ncurl --proto \'=https\' --tlsv1.2 -sSf https://sh.rustup.rs | sh\\nsource $HOME/.cargo/env # Install Nushell\\ncargo install nu --locked # Verify installation\\nnu --version","breadcrumbs":"From Scratch » Linux/macOS (via Cargo)","id":"4959","title":"Linux/macOS (via Cargo)"},"496":{"body":"| | Operation | Latency | Notes | | | | ----------- | --------- | ------- | | | | generate_config | 2-5s | Depends on LLM and config complexity | | | | validate_config | 500-1000ms | Parallel schema validation | | | | search_docs | 300-800ms | RAG hybrid search | | | | troubleshoot | 3-8s | Depends on log size and analysis depth | | | | get_schema | 100-300ms | Cached schema retrieval | | | | check_compliance | 500-2000ms | Policy evaluation | |","breadcrumbs":"MCP Integration » Performance","id":"496","title":"Performance"},"4960":{"body":"# Install Nushell\\nwinget install nushell # Verify installation\\nnu --version","breadcrumbs":"From Scratch » Windows (via Winget)","id":"4960","title":"Windows (via Winget)"},"4961":{"body":"# Start Nushell\\nnu # Configure (creates default config if not exists)\\nconfig nu","breadcrumbs":"From Scratch » Configure Nushell","id":"4961","title":"Configure Nushell"},"4962":{"body":"Native plugins provide 10-50x performance improvement for authentication, KMS, and orchestrator operations.","breadcrumbs":"From Scratch » Step 2: Install Nushell Plugins (Recommended)","id":"4962","title":"Step 2: Install Nushell Plugins (Recommended)"},"4963":{"body":"Performance Gains: 🚀 KMS operations : ~5 ms vs ~50 ms (10x faster) 🚀 Orchestrator queries : ~1 ms vs ~30 ms (30x faster) 🚀 Batch encryption : 100 files in 0.5s vs 5s (10x faster) Benefits: ✅ Native Nushell integration (pipelines, data structures) ✅ OS keyring for secure token storage ✅ Offline capability (Age encryption, local orchestrator) ✅ Graceful fallback to HTTP if not installed","breadcrumbs":"From Scratch » Why Install Plugins","id":"4963","title":"Why Install Plugins"},"4964":{"body":"# Install Rust toolchain (if not already installed)\\ncurl --proto \'=https\' --tlsv1.2 -sSf https://sh.rustup.rs | sh\\nsource $HOME/.cargo/env\\nrustc --version\\n# Expected: rustc 1.75+ or higher # Linux only: Install development packages\\nsudo apt install libssl-dev pkg-config # Ubuntu/Debian\\nsudo dnf install openssl-devel # Fedora # Linux only: Install keyring service (required for auth plugin)\\nsudo apt install gnome-keyring # Ubuntu/Debian (GNOME)\\nsudo apt install kwalletmanager # Ubuntu/Debian (KDE)","breadcrumbs":"From Scratch » Prerequisites for Building Plugins","id":"4964","title":"Prerequisites for Building Plugins"},"4965":{"body":"# Navigate to plugins directory\\ncd provisioning/core/plugins/nushell-plugins # Build all three plugins in release mode (optimized)\\ncargo build --release --all # Expected output:\\n# Compiling nu_plugin_auth v0.1.0\\n# Compiling nu_plugin_kms v0.1.0\\n# Compiling nu_plugin_orchestrator v0.1.0\\n# Finished release [optimized] target(s) in 2m 15s Build time : ~2-5 minutes depending on hardware","breadcrumbs":"From Scratch » Build Plugins","id":"4965","title":"Build Plugins"},"4966":{"body":"# Register all three plugins (full paths recommended)\\nplugin add $PWD/target/release/nu_plugin_auth\\nplugin add $PWD/target/release/nu_plugin_kms\\nplugin add $PWD/target/release/nu_plugin_orchestrator # Alternative (from plugins directory)\\nplugin add target/release/nu_plugin_auth\\nplugin add target/release/nu_plugin_kms\\nplugin add target/release/nu_plugin_orchestrator","breadcrumbs":"From Scratch » Register Plugins with Nushell","id":"4966","title":"Register Plugins with Nushell"},"4967":{"body":"# List registered plugins\\nplugin list | where name =~ \\"auth|kms|orch\\" # Expected output:\\n# ╭───┬─────────────────────────┬─────────┬───────────────────────────────────╮\\n# │ # │ name │ version │ filename │\\n# ├───┼─────────────────────────┼─────────┼───────────────────────────────────┤\\n# │ 0 │ nu_plugin_auth │ 0.1.0 │ .../nu_plugin_auth │\\n# │ 1 │ nu_plugin_kms │ 0.1.0 │ .../nu_plugin_kms │\\n# │ 2 │ nu_plugin_orchestrator │ 0.1.0 │ .../nu_plugin_orchestrator │\\n# ╰───┴─────────────────────────┴─────────┴───────────────────────────────────╯ # Test each plugin\\nauth --help # Should show auth commands\\nkms --help # Should show kms commands\\norch --help # Should show orch commands","breadcrumbs":"From Scratch » Verify Plugin Installation","id":"4967","title":"Verify Plugin Installation"},"4968":{"body":"# Add to ~/.config/nushell/env.nu\\n$env.CONTROL_CENTER_URL = \\"http://localhost:3000\\"\\n$env.RUSTYVAULT_ADDR = \\"http://localhost:8200\\"\\n$env.RUSTYVAULT_TOKEN = \\"your-vault-token-here\\"\\n$env.ORCHESTRATOR_DATA_DIR = \\"provisioning/platform/orchestrator/data\\" # For Age encryption (local development)\\n$env.AGE_IDENTITY = $\\"($env.HOME)/.age/key.txt\\"\\n$env.AGE_RECIPIENT = \\"age1xxxxxxxxx\\" # Replace with your public key","breadcrumbs":"From Scratch » Configure Plugin Environments","id":"4968","title":"Configure Plugin Environments"},"4969":{"body":"# Test KMS plugin (requires backend configured)\\nkms status\\n# Expected: { backend: \\"rustyvault\\", status: \\"healthy\\", ... }\\n# Or: Error if backend not configured (OK for now) # Test orchestrator plugin (reads local files)\\norch status\\n# Expected: { active_tasks: 0, completed_tasks: 0, health: \\"healthy\\" }\\n# Or: Error if orchestrator not started yet (OK for now) # Test auth plugin (requires control center)\\nauth verify\\n# Expected: { active: false }\\n# Or: Error if control center not running (OK for now) Note : It\'s OK if plugins show errors at this stage. We\'ll configure backends and services later.","breadcrumbs":"From Scratch » Test Plugins (Quick Smoke Test)","id":"4969","title":"Test Plugins (Quick Smoke Test)"},"497":{"body":"See Configuration Guide for MCP-specific settings: MCP server port and binding Tool registry customization Rate limiting for tool calls Access control (Cedar policies)","breadcrumbs":"MCP Integration » Configuration","id":"497","title":"Configuration"},"4970":{"body":"If you want to skip plugin installation for now: ✅ All features work via HTTP API (slower but functional) ⚠️ You\'ll miss 10-50x performance improvements ⚠️ No offline capability for KMS/orchestrator ℹ️ You can install plugins later anytime To use HTTP fallback: # System automatically uses HTTP if plugins not available\\n# No configuration changes needed","breadcrumbs":"From Scratch » Skip Plugins (Not Recommended)","id":"4970","title":"Skip Plugins (Not Recommended)"},"4971":{"body":"","breadcrumbs":"From Scratch » Step 3: Install Required Tools","id":"4971","title":"Step 3: Install Required Tools"},"4972":{"body":"SOPS (Secrets Management) # macOS\\nbrew install sops # Linux\\nwget https://github.com/mozilla/sops/releases/download/v3.10.2/sops-v3.10.2.linux.amd64\\nsudo mv sops-v3.10.2.linux.amd64 /usr/local/bin/sops\\nsudo chmod +x /usr/local/bin/sops # Verify\\nsops --version\\n# Expected: 3.10.2 or higher Age (Encryption Tool) # macOS\\nbrew install age # Linux\\nsudo apt install age # Ubuntu/Debian\\nsudo dnf install age # Fedora # Or from source\\ngo install filippo.io/age/cmd/...@latest # Verify\\nage --version\\n# Expected: 1.2.1 or higher # Generate Age key (for local encryption)\\nage-keygen -o ~/.age/key.txt\\ncat ~/.age/key.txt\\n# Save the public key (age1...) for later","breadcrumbs":"From Scratch » Essential Tools","id":"4972","title":"Essential Tools"},"4973":{"body":"K9s (Kubernetes Management) # macOS\\nbrew install k9s # Linux\\ncurl -sS https://webinstall.dev/k9s | bash # Verify\\nk9s version\\n# Expected: 0.50.6 or higher glow (Markdown Renderer) # macOS\\nbrew install glow # Linux\\nsudo apt install glow # Ubuntu/Debian\\nsudo dnf install glow # Fedora # Verify\\nglow --version","breadcrumbs":"From Scratch » Optional but Recommended Tools","id":"4973","title":"Optional but Recommended Tools"},"4974":{"body":"","breadcrumbs":"From Scratch » Step 4: Clone and Setup Project","id":"4974","title":"Step 4: Clone and Setup Project"},"4975":{"body":"# Clone project\\ngit clone https://github.com/your-org/project-provisioning.git\\ncd project-provisioning # Or if already cloned, update to latest\\ngit pull origin main","breadcrumbs":"From Scratch » Clone Repository","id":"4975","title":"Clone Repository"},"4976":{"body":"# Add to ~/.bashrc or ~/.zshrc\\nexport PATH=\\"$PATH:/Users/Akasha/project-provisioning/provisioning/core/cli\\" # Or create symlink\\nsudo ln -s /Users/Akasha/project-provisioning/provisioning/core/cli/provisioning /usr/local/bin/provisioning # Verify\\nprovisioning version\\n# Expected: 3.5.0","breadcrumbs":"From Scratch » Add CLI to PATH (Optional)","id":"4976","title":"Add CLI to PATH (Optional)"},"4977":{"body":"A workspace is a self-contained environment for managing infrastructure.","breadcrumbs":"From Scratch » Step 5: Initialize Workspace","id":"4977","title":"Step 5: Initialize Workspace"},"4978":{"body":"# Initialize new workspace\\nprovisioning workspace init --name production # Or use interactive mode\\nprovisioning workspace init\\n# Name: production\\n# Description: Production infrastructure\\n# Provider: upcloud What this creates: The new workspace initialization now generates Nickel configuration files for type-safe, schema-validated infrastructure definitions: workspace/\\n├── config/\\n│ ├── config.ncl # Master Nickel configuration (type-safe)\\n│ ├── providers/\\n│ │ └── upcloud.toml # Provider-specific settings\\n│ ├── platform/ # Platform service configs\\n│ └── kms.toml # Key management settings\\n├── infra/\\n│ └── default/\\n│ ├── main.ncl # Infrastructure entry point\\n│ └── servers.ncl # Server definitions\\n├── docs/ # Auto-generated guides\\n└── workspace.nu # Workspace utility scripts","breadcrumbs":"From Scratch » Create New Workspace","id":"4978","title":"Create New Workspace"},"4979":{"body":"The workspace configuration uses Nickel (type-safe, validated) . This provides: ✅ Type Safety : Schema validation catches errors at load time ✅ Lazy Evaluation : Only computes what\'s needed ✅ Validation : Record merging, required fields, constraints ✅ Documentation : Self-documenting with records Example Nickel config (config.ncl): { workspace = { name = \\"production\\", version = \\"1.0.0\\", created = \\"2025-12-03T14:30:00Z\\", }, paths = { base = \\"/opt/workspaces/production\\", infra = \\"/opt/workspaces/production/infra\\", cache = \\"/opt/workspaces/production/.cache\\", }, providers = { active = [\\"upcloud\\"], default = \\"upcloud\\", },\\n}","breadcrumbs":"From Scratch » Workspace Configuration Format","id":"4979","title":"Workspace Configuration Format"},"498":{"body":"","breadcrumbs":"MCP Integration » Security","id":"498","title":"Security"},"4980":{"body":"# Show workspace info\\nprovisioning workspace info # List all workspaces\\nprovisioning workspace list # Show active workspace\\nprovisioning workspace active\\n# Expected: production","breadcrumbs":"From Scratch » Verify Workspace","id":"4980","title":"Verify Workspace"},"4981":{"body":"Now you can inspect and validate your Nickel workspace configuration: # View complete workspace configuration\\nprovisioning workspace config show # Show specific workspace\\nprovisioning workspace config show production # View configuration in different formats\\nprovisioning workspace config show --format=json\\nprovisioning workspace config show --format=yaml\\nprovisioning workspace config show --format=nickel # Raw Nickel file # Validate workspace configuration\\nprovisioning workspace config validate\\n# Output: ✅ Validation complete - all configs are valid # Show configuration hierarchy (priority order)\\nprovisioning workspace config hierarchy Configuration Validation : The Nickel schema automatically validates: ✅ Semantic versioning format (for example, \\"1.0.0\\") ✅ Required sections present (workspace, paths, provisioning, etc.) ✅ Valid file paths and types ✅ Provider configuration exists for active providers ✅ KMS and SOPS settings properly configured","breadcrumbs":"From Scratch » View and Validate Workspace Configuration","id":"4981","title":"View and Validate Workspace Configuration"},"4982":{"body":"","breadcrumbs":"From Scratch » Step 6: Configure Environment","id":"4982","title":"Step 6: Configure Environment"},"4983":{"body":"UpCloud Provider: # Create provider config\\nvim workspace/config/providers/upcloud.toml [upcloud]\\nusername = \\"your-upcloud-username\\"\\npassword = \\"your-upcloud-password\\" # Will be encrypted # Default settings\\ndefault_zone = \\"de-fra1\\"\\ndefault_plan = \\"2xCPU-4 GB\\" AWS Provider: # Create AWS config\\nvim workspace/config/providers/aws.toml [aws]\\nregion = \\"us-east-1\\"\\naccess_key_id = \\"AKIAXXXXX\\"\\nsecret_access_key = \\"xxxxx\\" # Will be encrypted # Default settings\\ndefault_instance_type = \\"t3.medium\\"\\ndefault_region = \\"us-east-1\\"","breadcrumbs":"From Scratch » Set Provider Credentials","id":"4983","title":"Set Provider Credentials"},"4984":{"body":"# Generate Age key if not done already\\nage-keygen -o ~/.age/key.txt # Encrypt provider configs\\nkms encrypt (open workspace/config/providers/upcloud.toml) --backend age \\\\ | save workspace/config/providers/upcloud.toml.enc # Or use SOPS\\nsops --encrypt --age $(cat ~/.age/key.txt | grep \\"public key:\\" | cut -d: -f2) \\\\ workspace/config/providers/upcloud.toml > workspace/config/providers/upcloud.toml.enc # Remove plaintext\\nrm workspace/config/providers/upcloud.toml","breadcrumbs":"From Scratch » Encrypt Sensitive Data","id":"4984","title":"Encrypt Sensitive Data"},"4985":{"body":"# Edit user-specific settings\\nvim workspace/config/local-overrides.toml [user]\\nname = \\"admin\\"\\nemail = \\"admin@example.com\\" [preferences]\\neditor = \\"vim\\"\\noutput_format = \\"yaml\\"\\nconfirm_delete = true\\nconfirm_deploy = true [http]\\nuse_curl = true # Use curl instead of ureq [paths]\\nssh_key = \\"~/.ssh/id_ed25519\\"","breadcrumbs":"From Scratch » Configure Local Overrides","id":"4985","title":"Configure Local Overrides"},"4986":{"body":"","breadcrumbs":"From Scratch » Step 7: Discover and Load Modules","id":"4986","title":"Step 7: Discover and Load Modules"},"4987":{"body":"# Discover task services\\nprovisioning module discover taskserv\\n# Shows: kubernetes, containerd, etcd, cilium, helm, etc. # Discover providers\\nprovisioning module discover provider\\n# Shows: upcloud, aws, local # Discover clusters\\nprovisioning module discover cluster\\n# Shows: buildkit, registry, monitoring, etc.","breadcrumbs":"From Scratch » Discover Available Modules","id":"4987","title":"Discover Available Modules"},"4988":{"body":"# Load Kubernetes taskserv\\nprovisioning module load taskserv production kubernetes # Load multiple modules\\nprovisioning module load taskserv production kubernetes containerd cilium # Load cluster configuration\\nprovisioning module load cluster production buildkit # Verify loaded modules\\nprovisioning module list taskserv production\\nprovisioning module list cluster production","breadcrumbs":"From Scratch » Load Modules into Workspace","id":"4988","title":"Load Modules into Workspace"},"4989":{"body":"Before deploying, validate all configuration: # Validate workspace configuration\\nprovisioning workspace validate # Validate infrastructure configuration\\nprovisioning validate config # Validate specific infrastructure\\nprovisioning infra validate --infra production # Check environment variables\\nprovisioning env # Show all configuration and environment\\nprovisioning allenv Expected output: ✓ Configuration valid\\n✓ Provider credentials configured\\n✓ Workspace initialized\\n✓ Modules loaded: 3 taskservs, 1 cluster\\n✓ SSH key configured\\n✓ Age encryption key available Fix any errors before proceeding to deployment.","breadcrumbs":"From Scratch » Step 8: Validate Configuration","id":"4989","title":"Step 8: Validate Configuration"},"499":{"body":"Tools require valid provisioning API token Token scoped to user\'s workspace All tool calls authenticated and logged","breadcrumbs":"MCP Integration » Authentication","id":"499","title":"Authentication"},"4990":{"body":"","breadcrumbs":"From Scratch » Step 9: Deploy Servers","id":"4990","title":"Step 9: Deploy Servers"},"4991":{"body":"# Check what would be created (no actual changes)\\nprovisioning server create --infra production --check # With debug output for details\\nprovisioning server create --infra production --check --debug Review the output: Server names and configurations Zones and regions CPU, memory, disk specifications Estimated costs Network settings","breadcrumbs":"From Scratch » Preview Server Creation (Dry Run)","id":"4991","title":"Preview Server Creation (Dry Run)"},"4992":{"body":"# Create servers (with confirmation prompt)\\nprovisioning server create --infra production # Or auto-confirm (skip prompt)\\nprovisioning server create --infra production --yes # Wait for completion\\nprovisioning server create --infra production --wait Expected output: Creating servers for infrastructure: production ● Creating server: k8s-master-01 (de-fra1, 4xCPU-8 GB) ● Creating server: k8s-worker-01 (de-fra1, 4xCPU-8 GB) ● Creating server: k8s-worker-02 (de-fra1, 4xCPU-8 GB) ✓ Created 3 servers in 120 seconds Servers: • k8s-master-01: 192.168.1.10 (Running) • k8s-worker-01: 192.168.1.11 (Running) • k8s-worker-02: 192.168.1.12 (Running)","breadcrumbs":"From Scratch » Create Servers","id":"4992","title":"Create Servers"},"4993":{"body":"# List all servers\\nprovisioning server list --infra production # Show detailed server info\\nprovisioning server list --infra production --out yaml # SSH to server (test connectivity)\\nprovisioning server ssh k8s-master-01\\n# Type \'exit\' to return","breadcrumbs":"From Scratch » Verify Server Creation","id":"4993","title":"Verify Server Creation"},"4994":{"body":"Task services are infrastructure components like Kubernetes, databases, monitoring, etc.","breadcrumbs":"From Scratch » Step 10: Install Task Services","id":"4994","title":"Step 10: Install Task Services"},"4995":{"body":"# Preview Kubernetes installation\\nprovisioning taskserv create kubernetes --infra production --check # Shows:\\n# - Dependencies required (containerd, etcd)\\n# - Configuration to be applied\\n# - Resources needed\\n# - Estimated installation time","breadcrumbs":"From Scratch » Install Kubernetes (Check Mode First)","id":"4995","title":"Install Kubernetes (Check Mode First)"},"4996":{"body":"# Install Kubernetes (with dependencies)\\nprovisioning taskserv create kubernetes --infra production # Or install dependencies first\\nprovisioning taskserv create containerd --infra production\\nprovisioning taskserv create etcd --infra production\\nprovisioning taskserv create kubernetes --infra production # Monitor progress\\nprovisioning workflow monitor Expected output: Installing taskserv: kubernetes ● Installing containerd on k8s-master-01 ● Installing containerd on k8s-worker-01 ● Installing containerd on k8s-worker-02 ✓ Containerd installed (30s) ● Installing etcd on k8s-master-01 ✓ etcd installed (20s) ● Installing Kubernetes control plane on k8s-master-01 ✓ Kubernetes control plane ready (45s) ● Joining worker nodes ✓ k8s-worker-01 joined (15s) ✓ k8s-worker-02 joined (15s) ✓ Kubernetes installation complete (125 seconds) Cluster Info: • Version: 1.28.0 • Nodes: 3 (1 control-plane, 2 workers) • API Server: https://192.168.1.10:6443","breadcrumbs":"From Scratch » Install Kubernetes","id":"4996","title":"Install Kubernetes"},"4997":{"body":"# Install Cilium (CNI)\\nprovisioning taskserv create cilium --infra production # Install Helm\\nprovisioning taskserv create helm --infra production # Verify all taskservs\\nprovisioning taskserv list --infra production","breadcrumbs":"From Scratch » Install Additional Services","id":"4997","title":"Install Additional Services"},"4998":{"body":"Clusters are complete application stacks (for example, BuildKit, OCI Registry, Monitoring).","breadcrumbs":"From Scratch » Step 11: Create Clusters","id":"4998","title":"Step 11: Create Clusters"},"4999":{"body":"# Preview cluster creation\\nprovisioning cluster create buildkit --infra production --check # Shows:\\n# - Components to be deployed\\n# - Dependencies required\\n# - Configuration values\\n# - Resource requirements","breadcrumbs":"From Scratch » Create BuildKit Cluster (Check Mode)","id":"4999","title":"Create BuildKit Cluster (Check Mode)"},"5":{"body":"ADR Title Status ADR-001 Project Structure Decision Accepted ADR-002 Distribution Strategy Accepted ADR-003 Workspace Isolation Accepted ADR-004 Hybrid Architecture Accepted ADR-005 Extension Framework Accepted ADR-006 CLI Refactoring Accepted","breadcrumbs":"Home » 📋 Architecture Decision Records (ADRs)","id":"5","title":"📋 Architecture Decision Records (ADRs)"},"50":{"body":"# Check your system\\nuname -a # View system information\\ndf -h # Check available disk space\\ncurl --version # Verify internet connectivity","breadcrumbs":"Installation Guide » Pre-installation Checklist","id":"50","title":"Pre-installation Checklist"},"500":{"body":"Cedar policies control which tools user can call Example: allow(principal, action, resource) when role == \\"admin\\" Detailed audit trail of all tool invocations","breadcrumbs":"MCP Integration » Authorization","id":"500","title":"Authorization"},"5000":{"body":"# Create BuildKit cluster\\nprovisioning cluster create buildkit --infra production # Monitor deployment\\nprovisioning workflow monitor # Or use plugin for faster monitoring\\norch tasks --status running Expected output: Creating cluster: buildkit ● Deploying BuildKit daemon ● Deploying BuildKit worker ● Configuring BuildKit cache ● Setting up BuildKit registry integration ✓ BuildKit cluster ready (60 seconds) Cluster Info: • BuildKit version: 0.12.0 • Workers: 2 • Cache: 50 GB • Registry: registry.production.local","breadcrumbs":"From Scratch » Create BuildKit Cluster","id":"5000","title":"Create BuildKit Cluster"},"5001":{"body":"# List all clusters\\nprovisioning cluster list --infra production # Show cluster details\\nprovisioning cluster list --infra production --out yaml # Check cluster health\\nkubectl get pods -n buildkit","breadcrumbs":"From Scratch » Verify Cluster","id":"5001","title":"Verify Cluster"},"5002":{"body":"","breadcrumbs":"From Scratch » Step 12: Verify Deployment","id":"5002","title":"Step 12: Verify Deployment"},"5003":{"body":"# Check orchestrator status\\norch status\\n# or\\nprovisioning orchestrator status # Check all servers\\nprovisioning server list --infra production # Check all taskservs\\nprovisioning taskserv list --infra production # Check all clusters\\nprovisioning cluster list --infra production # Verify Kubernetes cluster\\nkubectl get nodes\\nkubectl get pods --all-namespaces","breadcrumbs":"From Scratch » Comprehensive Health Check","id":"5003","title":"Comprehensive Health Check"},"5004":{"body":"# Validate infrastructure\\nprovisioning infra validate --infra production # Test connectivity\\nprovisioning server ssh k8s-master-01 \\"kubectl get nodes\\" # Test BuildKit\\nkubectl exec -it -n buildkit buildkit-0 -- buildctl --version","breadcrumbs":"From Scratch » Run Validation Tests","id":"5004","title":"Run Validation Tests"},"5005":{"body":"All checks should show: ✅ Servers: Running ✅ Taskservs: Installed and healthy ✅ Clusters: Deployed and operational ✅ Kubernetes: 3/3 nodes ready ✅ BuildKit: 2/2 workers ready","breadcrumbs":"From Scratch » Expected Results","id":"5005","title":"Expected Results"},"5006":{"body":"","breadcrumbs":"From Scratch » Step 13: Post-Deployment","id":"5006","title":"Step 13: Post-Deployment"},"5007":{"body":"# Get kubeconfig from master node\\nprovisioning server ssh k8s-master-01 \\"cat ~/.kube/config\\" > ~/.kube/config-production # Set KUBECONFIG\\nexport KUBECONFIG=~/.kube/config-production # Verify access\\nkubectl get nodes\\nkubectl get pods --all-namespaces","breadcrumbs":"From Scratch » Configure kubectl Access","id":"5007","title":"Configure kubectl Access"},"5008":{"body":"# Deploy monitoring stack\\nprovisioning cluster create monitoring --infra production # Access Grafana\\nkubectl port-forward -n monitoring svc/grafana 3000:80\\n# Open: http://localhost:3000","breadcrumbs":"From Scratch » Set Up Monitoring (Optional)","id":"5008","title":"Set Up Monitoring (Optional)"},"5009":{"body":"# Generate CI/CD credentials\\nprovisioning secrets generate aws --ttl 12h # Create CI/CD kubeconfig\\nkubectl create serviceaccount ci-cd -n default\\nkubectl create clusterrolebinding ci-cd --clusterrole=admin --serviceaccount=default:ci-cd","breadcrumbs":"From Scratch » Configure CI/CD Integration (Optional)","id":"5009","title":"Configure CI/CD Integration (Optional)"},"501":{"body":"Secrets never passed through MCP Configuration sanitized before analysis PII removed from logs sent to external LLMs","breadcrumbs":"MCP Integration » Data Protection","id":"501","title":"Data Protection"},"5010":{"body":"# Backup workspace configuration\\ntar -czf workspace-production-backup.tar.gz workspace/ # Encrypt backup\\nkms encrypt (open workspace-production-backup.tar.gz | encode base64) --backend age \\\\ | save workspace-production-backup.tar.gz.enc # Store securely (S3, Vault, etc.)","breadcrumbs":"From Scratch » Backup Configuration","id":"5010","title":"Backup Configuration"},"5011":{"body":"","breadcrumbs":"From Scratch » Troubleshooting","id":"5011","title":"Troubleshooting"},"5012":{"body":"Problem : Server creation times out or fails # Check provider credentials\\nprovisioning validate config # Check provider API status\\ncurl -u username:password https://api.upcloud.com/1.3/account # Try with debug mode\\nprovisioning server create --infra production --check --debug","breadcrumbs":"From Scratch » Server Creation Fails","id":"5012","title":"Server Creation Fails"},"5013":{"body":"Problem : Kubernetes installation fails # Check server connectivity\\nprovisioning server ssh k8s-master-01 # Check logs\\nprovisioning orchestrator logs | grep kubernetes # Check dependencies\\nprovisioning taskserv list --infra production | where status == \\"failed\\" # Retry installation\\nprovisioning taskserv delete kubernetes --infra production\\nprovisioning taskserv create kubernetes --infra production","breadcrumbs":"From Scratch » Taskserv Installation Fails","id":"5013","title":"Taskserv Installation Fails"},"5014":{"body":"Problem : auth, kms, or orch commands not found # Check plugin registration\\nplugin list | where name =~ \\"auth|kms|orch\\" # Re-register if missing\\ncd provisioning/core/plugins/nushell-plugins\\nplugin add target/release/nu_plugin_auth\\nplugin add target/release/nu_plugin_kms\\nplugin add target/release/nu_plugin_orchestrator # Restart Nushell\\nexit\\nnu","breadcrumbs":"From Scratch » Plugin Commands Don\'t Work","id":"5014","title":"Plugin Commands Don\'t Work"},"5015":{"body":"Problem : kms encrypt returns error # Check backend status\\nkms status # Check RustyVault running\\ncurl http://localhost:8200/v1/sys/health # Use Age backend instead (local)\\nkms encrypt \\"data\\" --backend age --key age1xxxxxxxxx # Check Age key\\ncat ~/.age/key.txt","breadcrumbs":"From Scratch » KMS Encryption Fails","id":"5015","title":"KMS Encryption Fails"},"5016":{"body":"Problem : orch status returns error # Check orchestrator status\\nps aux | grep orchestrator # Start orchestrator\\ncd provisioning/platform/orchestrator\\n./scripts/start-orchestrator.nu --background # Check logs\\ntail -f provisioning/platform/orchestrator/data/orchestrator.log","breadcrumbs":"From Scratch » Orchestrator Not Running","id":"5016","title":"Orchestrator Not Running"},"5017":{"body":"Problem : provisioning validate config shows errors # Show detailed errors\\nprovisioning validate config --debug # Check configuration files\\nprovisioning allenv # Fix missing settings\\nvim workspace/config/local-overrides.toml","breadcrumbs":"From Scratch » Configuration Validation Errors","id":"5017","title":"Configuration Validation Errors"},"5018":{"body":"","breadcrumbs":"From Scratch » Next Steps","id":"5018","title":"Next Steps"},"5019":{"body":"Multi-Environment Deployment # Create dev and staging workspaces\\nprovisioning workspace create dev\\nprovisioning workspace create staging\\nprovisioning workspace switch dev Batch Operations # Deploy to multiple clouds\\nprovisioning batch submit workflows/multi-cloud-deploy.ncl Security Features # Enable MFA\\nauth mfa enroll totp # Set up break-glass\\nprovisioning break-glass request \\"Emergency access\\" Compliance and Audit # Generate compliance report\\nprovisioning compliance report --standard soc2","breadcrumbs":"From Scratch » Explore Advanced Features","id":"5019","title":"Explore Advanced Features"},"502":{"body":"# Monitor MCP server\\nprovisioning admin mcp status # View MCP tool calls\\nprovisioning admin logs --filter \\"mcp_tools\\" --tail 100 # Debug tool response\\nRUST_LOG=provisioning::mcp=debug provisioning-mcp-server","breadcrumbs":"MCP Integration » Monitoring and Debugging","id":"502","title":"Monitoring and Debugging"},"5020":{"body":"Quick Reference : provisioning sc or docs/guides/quickstart-cheatsheet.md Update Guide : docs/guides/update-infrastructure.md Customize Guide : docs/guides/customize-infrastructure.md Plugin Guide : docs/user/PLUGIN_INTEGRATION_GUIDE.md Security System : docs/architecture/adr-009-security-system-complete.md","breadcrumbs":"From Scratch » Learn More","id":"5020","title":"Learn More"},"5021":{"body":"# Show help for any command\\nprovisioning help\\nprovisioning help server\\nprovisioning help taskserv # Check version\\nprovisioning version # Start Nushell session with provisioning library\\nprovisioning nu","breadcrumbs":"From Scratch » Get Help","id":"5021","title":"Get Help"},"5022":{"body":"You\'ve successfully: ✅ Installed Nushell and essential tools ✅ Built and registered native plugins (10-50x faster operations) ✅ Cloned and configured the project ✅ Initialized a production workspace ✅ Configured provider credentials ✅ Deployed servers ✅ Installed Kubernetes and task services ✅ Created application clusters ✅ Verified complete deployment Your infrastructure is now ready for production use! Estimated Total Time : 30-60 minutes Next Guide : Update Infrastructure Questions? : Open an issue or contact platform-team@example.com Last Updated : 2025-10-09 Version : 3.5.0","breadcrumbs":"From Scratch » Summary","id":"5022","title":"Summary"},"5023":{"body":"Goal : Safely update running infrastructure with minimal downtime Time : 15-30 minutes Difficulty : Intermediate","breadcrumbs":"Update Infrastructure » Update Existing Infrastructure","id":"5023","title":"Update Existing Infrastructure"},"5024":{"body":"This guide covers: Checking for updates Planning update strategies Updating task services Rolling updates Rollback procedures Verification","breadcrumbs":"Update Infrastructure » Overview","id":"5024","title":"Overview"},"5025":{"body":"","breadcrumbs":"Update Infrastructure » Update Strategies","id":"5025","title":"Update Strategies"},"5026":{"body":"Best for : Non-critical environments, development, staging # Direct update without downtime consideration\\nprovisioning t create --infra ","breadcrumbs":"Update Infrastructure » Strategy 1: In-Place Updates (Fastest)","id":"5026","title":"Strategy 1: In-Place Updates (Fastest)"},"5027":{"body":"Best for : Production environments, high availability # Update servers one by one\\nprovisioning s update --infra --rolling","breadcrumbs":"Update Infrastructure » Strategy 2: Rolling Updates (Recommended)","id":"5027","title":"Strategy 2: Rolling Updates (Recommended)"},"5028":{"body":"Best for : Critical production, zero-downtime requirements # Create new infrastructure, switch traffic, remove old\\nprovisioning ws init -green\\n# ... configure and deploy\\n# ... switch traffic\\nprovisioning ws delete -blue","breadcrumbs":"Update Infrastructure » Strategy 3: Blue-Green Deployment (Safest)","id":"5028","title":"Strategy 3: Blue-Green Deployment (Safest)"},"5029":{"body":"","breadcrumbs":"Update Infrastructure » Step 1: Check for Updates","id":"5029","title":"Step 1: Check for Updates"},"503":{"body":"Architecture - AI system overview RAG System - Documentation search Configuration - MCP setup API Reference - Detailed API endpoints ADR-015 - Design decisions Last Updated : 2025-01-13 Status : ✅ Production-Ready MCP Version : 0.6.0+ Supported LLMs : Claude, GPT-4, Llama, Mistral, all MCP-compatible models","breadcrumbs":"MCP Integration » Related Documentation","id":"503","title":"Related Documentation"},"5030":{"body":"# Check all taskservs for updates\\nprovisioning t check-updates Expected Output: 📦 Task Service Update Check: NAME CURRENT LATEST STATUS\\nkubernetes 1.29.0 1.30.0 ⬆️ update available\\ncontainerd 1.7.13 1.7.13 ✅ up-to-date\\ncilium 1.14.5 1.15.0 ⬆️ update available\\npostgres 15.5 16.1 ⬆️ update available\\nredis 7.2.3 7.2.3 ✅ up-to-date Updates available: 3","breadcrumbs":"Update Infrastructure » 1.1 Check All Task Services","id":"5030","title":"1.1 Check All Task Services"},"5031":{"body":"# Check specific taskserv\\nprovisioning t check-updates kubernetes Expected Output: 📦 Kubernetes Update Check: Current: 1.29.0\\nLatest: 1.30.0\\nStatus: ⬆️ Update available Changelog: • Enhanced security features • Performance improvements • Bug fixes in kube-apiserver • New workload resource types Breaking Changes: • None Recommended: ✅ Safe to update","breadcrumbs":"Update Infrastructure » 1.2 Check Specific Task Service","id":"5031","title":"1.2 Check Specific Task Service"},"5032":{"body":"# Show detailed version information\\nprovisioning version show Expected Output: 📋 Component Versions: COMPONENT CURRENT LATEST DAYS OLD STATUS\\nkubernetes 1.29.0 1.30.0 45 ⬆️ update\\ncontainerd 1.7.13 1.7.13 0 ✅ current\\ncilium 1.14.5 1.15.0 30 ⬆️ update\\npostgres 15.5 16.1 60 ⬆️ update (major)\\nredis 7.2.3 7.2.3 0 ✅ current","breadcrumbs":"Update Infrastructure » 1.3 Check Version Status","id":"5032","title":"1.3 Check Version Status"},"5033":{"body":"# Check for security-related updates\\nprovisioning version updates --security-only","breadcrumbs":"Update Infrastructure » 1.4 Check for Security Updates","id":"5033","title":"1.4 Check for Security Updates"},"5034":{"body":"","breadcrumbs":"Update Infrastructure » Step 2: Plan Your Update","id":"5034","title":"Step 2: Plan Your Update"},"5035":{"body":"# Show current infrastructure\\nprovisioning show settings --infra my-production","breadcrumbs":"Update Infrastructure » 2.1 Review Current Configuration","id":"5035","title":"2.1 Review Current Configuration"},"5036":{"body":"# Create configuration backup\\ncp -r workspace/infra/my-production workspace/infra/my-production.backup-$(date +%Y%m%d) # Or use built-in backup\\nprovisioning ws backup my-production Expected Output: ✅ Backup created: workspace/backups/my-production-20250930.tar.gz","breadcrumbs":"Update Infrastructure » 2.2 Backup Configuration","id":"5036","title":"2.2 Backup Configuration"},"5037":{"body":"# Generate update plan\\nprovisioning plan update --infra my-production Expected Output: 📝 Update Plan for my-production: Phase 1: Minor Updates (Low Risk) • containerd: No update needed • redis: No update needed Phase 2: Patch Updates (Medium Risk) • cilium: 1.14.5 → 1.15.0 (estimated 5 minutes) Phase 3: Major Updates (High Risk - Requires Testing) • kubernetes: 1.29.0 → 1.30.0 (estimated 15 minutes) • postgres: 15.5 → 16.1 (estimated 10 minutes, may require data migration) Recommended Order: 1. Update cilium (low risk) 2. Update kubernetes (test in staging first) 3. Update postgres (requires maintenance window) Total Estimated Time: 30 minutes\\nRecommended: Test in staging environment first","breadcrumbs":"Update Infrastructure » 2.3 Create Update Plan","id":"5037","title":"2.3 Create Update Plan"},"5038":{"body":"","breadcrumbs":"Update Infrastructure » Step 3: Update Task Services","id":"5038","title":"Step 3: Update Task Services"},"5039":{"body":"Dry-Run Update # Test update without applying\\nprovisioning t create cilium --infra my-production --check Expected Output: 🔍 CHECK MODE: Simulating Cilium update Current: 1.14.5\\nTarget: 1.15.0 Would perform: 1. Download Cilium 1.15.0 2. Update configuration 3. Rolling restart of Cilium pods 4. Verify connectivity Estimated downtime: <1 minute per node\\nNo errors detected. Ready to update. Generate Updated Configuration # Generate new configuration\\nprovisioning t generate cilium --infra my-production Expected Output: ✅ Generated Cilium configuration (version 1.15.0) Saved to: workspace/infra/my-production/taskservs/cilium.ncl Apply Update # Apply update\\nprovisioning t create cilium --infra my-production Expected Output: 🚀 Updating Cilium on my-production... Downloading Cilium 1.15.0... ⏳\\n✅ Downloaded Updating configuration... ⏳\\n✅ Configuration updated Rolling restart: web-01... ⏳\\n✅ web-01 updated (Cilium 1.15.0) Rolling restart: web-02... ⏳\\n✅ web-02 updated (Cilium 1.15.0) Verifying connectivity... ⏳\\n✅ All nodes connected 🎉 Cilium update complete! Version: 1.14.5 → 1.15.0 Downtime: 0 minutes Verify Update # Verify updated version\\nprovisioning version taskserv cilium Expected Output: 📦 Cilium Version Info: Installed: 1.15.0\\nLatest: 1.15.0\\nStatus: ✅ Up-to-date Nodes: ✅ web-01: 1.15.0 (running) ✅ web-02: 1.15.0 (running)","breadcrumbs":"Update Infrastructure » 3.1 Update Non-Critical Service (Cilium Example)","id":"5039","title":"3.1 Update Non-Critical Service (Cilium Example)"},"504":{"body":"Status : ✅ Production-Ready (Configuration system) Complete setup guide for AI features in the provisioning platform. This guide covers LLM provider configuration, feature enablement, cache setup, cost controls, and security settings.","breadcrumbs":"Configuration Guide » AI System Configuration Guide","id":"504","title":"AI System Configuration Guide"},"5040":{"body":"Test in Staging First # If you have staging environment\\nprovisioning t create kubernetes --infra my-staging --check\\nprovisioning t create kubernetes --infra my-staging # Run integration tests\\nprovisioning test kubernetes --infra my-staging Backup Current State # Backup Kubernetes state\\nkubectl get all -A -o yaml > k8s-backup-$(date +%Y%m%d).yaml # Backup etcd (if using external etcd)\\nprovisioning t backup kubernetes --infra my-production Schedule Maintenance Window # Set maintenance mode (optional, if supported)\\nprovisioning maintenance enable --infra my-production --duration 30m Update Kubernetes # Update control plane first\\nprovisioning t create kubernetes --infra my-production --control-plane-only Expected Output: 🚀 Updating Kubernetes control plane on my-production... Draining control plane: web-01... ⏳\\n✅ web-01 drained Updating control plane: web-01... ⏳\\n✅ web-01 updated (Kubernetes 1.30.0) Uncordoning: web-01... ⏳\\n✅ web-01 ready Verifying control plane... ⏳\\n✅ Control plane healthy 🎉 Control plane update complete! # Update worker nodes one by one\\nprovisioning t create kubernetes --infra my-production --workers-only --rolling Expected Output: 🚀 Updating Kubernetes workers on my-production... Rolling update: web-02... Draining... ⏳ ✅ Drained (pods rescheduled) Updating... ⏳ ✅ Updated (Kubernetes 1.30.0) Uncordoning... ⏳ ✅ Ready Waiting for pods to stabilize... ⏳ ✅ All pods running 🎉 Worker update complete! Updated: web-02 Version: 1.30.0 Verify Update # Verify Kubernetes cluster\\nkubectl get nodes\\nprovisioning version taskserv kubernetes Expected Output: NAME STATUS ROLES AGE VERSION\\nweb-01 Ready control-plane 30d v1.30.0\\nweb-02 Ready 30d v1.30.0 # Run smoke tests\\nprovisioning test kubernetes --infra my-production","breadcrumbs":"Update Infrastructure » 3.2 Update Critical Service (Kubernetes Example)","id":"5040","title":"3.2 Update Critical Service (Kubernetes Example)"},"5041":{"body":"⚠️ WARNING : Database updates may require data migration. Always backup first! Backup Database # Backup PostgreSQL database\\nprovisioning t backup postgres --infra my-production Expected Output: 🗄️ Backing up PostgreSQL... Creating dump: my-production-postgres-20250930.sql... ⏳\\n✅ Dump created (2.3 GB) Compressing... ⏳\\n✅ Compressed (450 MB) Saved to: workspace/backups/postgres/my-production-20250930.sql.gz Check Compatibility # Check if data migration is needed\\nprovisioning t check-migration postgres --from 15.5 --to 16.1 Expected Output: 🔍 PostgreSQL Migration Check: From: 15.5\\nTo: 16.1 Migration Required: ✅ Yes (major version change) Steps Required: 1. Dump database with pg_dump 2. Stop PostgreSQL 15.5 3. Install PostgreSQL 16.1 4. Initialize new data directory 5. Restore from dump Estimated Time: 15-30 minutes (depending on data size)\\nEstimated Downtime: 15-30 minutes Recommended: Use streaming replication for zero-downtime upgrade Perform Update # Update PostgreSQL (with automatic migration)\\nprovisioning t create postgres --infra my-production --migrate Expected Output: 🚀 Updating PostgreSQL on my-production... ⚠️ Major version upgrade detected (15.5 → 16.1) Automatic migration will be performed Dumping database... ⏳\\n✅ Database dumped (2.3 GB) Stopping PostgreSQL 15.5... ⏳\\n✅ Stopped Installing PostgreSQL 16.1... ⏳\\n✅ Installed Initializing new data directory... ⏳\\n✅ Initialized Restoring database... ⏳\\n✅ Restored (2.3 GB) Starting PostgreSQL 16.1... ⏳\\n✅ Started Verifying data integrity... ⏳\\n✅ All tables verified 🎉 PostgreSQL update complete! Version: 15.5 → 16.1 Downtime: 18 minutes Verify Update # Verify PostgreSQL\\nprovisioning version taskserv postgres\\nssh db-01 \\"psql --version\\"","breadcrumbs":"Update Infrastructure » 3.3 Update Database (PostgreSQL Example)","id":"5041","title":"3.3 Update Database (PostgreSQL Example)"},"5042":{"body":"","breadcrumbs":"Update Infrastructure » Step 4: Update Multiple Services","id":"5042","title":"Step 4: Update Multiple Services"},"5043":{"body":"# Update multiple taskservs one by one\\nprovisioning t update --infra my-production --taskservs cilium,containerd,redis Expected Output: 🚀 Updating 3 taskservs on my-production... [1/3] Updating cilium... ⏳\\n✅ cilium updated (1.15.0) [2/3] Updating containerd... ⏳\\n✅ containerd updated (1.7.14) [3/3] Updating redis... ⏳\\n✅ redis updated (7.2.4) 🎉 All updates complete! Updated: 3 taskservs Total time: 8 minutes","breadcrumbs":"Update Infrastructure » 4.1 Batch Update (Sequentially)","id":"5043","title":"4.1 Batch Update (Sequentially)"},"5044":{"body":"# Update taskservs in parallel (if they don\'t depend on each other)\\nprovisioning t update --infra my-production --taskservs redis,postgres --parallel Expected Output: 🚀 Updating 2 taskservs in parallel on my-production... redis: Updating... ⏳\\npostgres: Updating... ⏳ redis: ✅ Updated (7.2.4)\\npostgres: ✅ Updated (16.1) 🎉 All updates complete! Updated: 2 taskservs Total time: 3 minutes (parallel)","breadcrumbs":"Update Infrastructure » 4.2 Parallel Update (Non-Dependent Services)","id":"5044","title":"4.2 Parallel Update (Non-Dependent Services)"},"5045":{"body":"","breadcrumbs":"Update Infrastructure » Step 5: Update Server Configuration","id":"5045","title":"Step 5: Update Server Configuration"},"5046":{"body":"# Edit server configuration\\nprovisioning sops workspace/infra/my-production/servers.ncl Example: Upgrade server plan # Before\\n{ name = \\"web-01\\" plan = \\"1xCPU-2 GB\\" # Old plan\\n} # After\\n{ name = \\"web-01\\" plan = \\"2xCPU-4 GB\\" # New plan\\n} # Apply server update\\nprovisioning s update --infra my-production --check\\nprovisioning s update --infra my-production","breadcrumbs":"Update Infrastructure » 5.1 Update Server Resources","id":"5046","title":"5.1 Update Server Resources"},"5047":{"body":"# Update operating system packages\\nprovisioning s update --infra my-production --os-update Expected Output: 🚀 Updating OS packages on my-production servers... web-01: Updating packages... ⏳\\n✅ web-01: 24 packages updated web-02: Updating packages... ⏳\\n✅ web-02: 24 packages updated db-01: Updating packages... ⏳\\n✅ db-01: 24 packages updated 🎉 OS updates complete!","breadcrumbs":"Update Infrastructure » 5.2 Update Server OS","id":"5047","title":"5.2 Update Server OS"},"5048":{"body":"","breadcrumbs":"Update Infrastructure » Step 6: Rollback Procedures","id":"5048","title":"Step 6: Rollback Procedures"},"5049":{"body":"If update fails or causes issues: # Rollback to previous version\\nprovisioning t rollback cilium --infra my-production Expected Output: 🔄 Rolling back Cilium on my-production... Current: 1.15.0\\nTarget: 1.14.5 (previous version) Rolling back: web-01... ⏳\\n✅ web-01 rolled back Rolling back: web-02... ⏳\\n✅ web-02 rolled back Verifying connectivity... ⏳\\n✅ All nodes connected 🎉 Rollback complete! Version: 1.15.0 → 1.14.5","breadcrumbs":"Update Infrastructure » 6.1 Rollback Task Service","id":"5049","title":"6.1 Rollback Task Service"},"505":{"body":"","breadcrumbs":"Configuration Guide » Quick Start","id":"505","title":"Quick Start"},"5050":{"body":"# Restore configuration from backup\\nprovisioning ws restore my-production --from workspace/backups/my-production-20250930.tar.gz","breadcrumbs":"Update Infrastructure » 6.2 Rollback from Backup","id":"5050","title":"6.2 Rollback from Backup"},"5051":{"body":"# Complete infrastructure rollback\\nprovisioning rollback --infra my-production --to-snapshot ","breadcrumbs":"Update Infrastructure » 6.3 Emergency Rollback","id":"5051","title":"6.3 Emergency Rollback"},"5052":{"body":"","breadcrumbs":"Update Infrastructure » Step 7: Post-Update Verification","id":"5052","title":"Step 7: Post-Update Verification"},"5053":{"body":"# Check overall health\\nprovisioning health --infra my-production Expected Output: 🏥 Health Check: my-production Servers: ✅ web-01: Healthy ✅ web-02: Healthy ✅ db-01: Healthy Task Services: ✅ kubernetes: 1.30.0 (healthy) ✅ containerd: 1.7.13 (healthy) ✅ cilium: 1.15.0 (healthy) ✅ postgres: 16.1 (healthy) Clusters: ✅ buildkit: 2/2 replicas (healthy) Overall Status: ✅ All systems healthy","breadcrumbs":"Update Infrastructure » 7.1 Verify All Components","id":"5053","title":"7.1 Verify All Components"},"5054":{"body":"# Verify all versions are updated\\nprovisioning version show","breadcrumbs":"Update Infrastructure » 7.2 Verify Version Updates","id":"5054","title":"7.2 Verify Version Updates"},"5055":{"body":"# Run comprehensive tests\\nprovisioning test all --infra my-production Expected Output: 🧪 Running Integration Tests... [1/5] Server connectivity... ⏳\\n✅ All servers reachable [2/5] Kubernetes health... ⏳\\n✅ All nodes ready, all pods running [3/5] Network connectivity... ⏳\\n✅ All services reachable [4/5] Database connectivity... ⏳\\n✅ PostgreSQL responsive [5/5] Application health... ⏳\\n✅ All applications healthy 🎉 All tests passed!","breadcrumbs":"Update Infrastructure » 7.3 Run Integration Tests","id":"5055","title":"7.3 Run Integration Tests"},"5056":{"body":"# Monitor logs for errors\\nprovisioning logs --infra my-production --follow --level error","breadcrumbs":"Update Infrastructure » 7.4 Monitor for Issues","id":"5056","title":"7.4 Monitor for Issues"},"5057":{"body":"Use this checklist for production updates: Check for available updates Review changelog and breaking changes Create configuration backup Test update in staging environment Schedule maintenance window Notify team/users of maintenance Update non-critical services first Verify each update before proceeding Update critical services with rolling updates Backup database before major updates Verify all components after update Run integration tests Monitor for issues (30 minutes minimum) Document any issues encountered Close maintenance window","breadcrumbs":"Update Infrastructure » Update Checklist","id":"5057","title":"Update Checklist"},"5058":{"body":"","breadcrumbs":"Update Infrastructure » Common Update Scenarios","id":"5058","title":"Common Update Scenarios"},"5059":{"body":"# Quick security update\\nprovisioning t check-updates --security-only\\nprovisioning t update --infra my-production --security-patches --yes","breadcrumbs":"Update Infrastructure » Scenario 1: Minor Security Patch","id":"5059","title":"Scenario 1: Minor Security Patch"},"506":{"body":"# provisioning/config/ai.toml\\n[ai]\\nenabled = true\\nprovider = \\"anthropic\\" # or \\"openai\\" or \\"local\\"\\nmodel = \\"claude-sonnet-4\\"\\napi_key = \\"sk-ant-...\\" # Set via PROVISIONING_AI_API_KEY env var [ai.cache]\\nenabled = true [ai.limits]\\nmax_tokens = 4096\\ntemperature = 0.7","breadcrumbs":"Configuration Guide » Minimal Configuration","id":"506","title":"Minimal Configuration"},"5060":{"body":"# Careful major version update\\nprovisioning ws backup my-production\\nprovisioning t check-migration --from X.Y --to X+1.Y\\nprovisioning t create --infra my-production --migrate\\nprovisioning test all --infra my-production","breadcrumbs":"Update Infrastructure » Scenario 2: Major Version Upgrade","id":"5060","title":"Scenario 2: Major Version Upgrade"},"5061":{"body":"# Apply critical hotfix immediately\\nprovisioning t create --infra my-production --hotfix --yes","breadcrumbs":"Update Infrastructure » Scenario 3: Emergency Hotfix","id":"5061","title":"Scenario 3: Emergency Hotfix"},"5062":{"body":"","breadcrumbs":"Update Infrastructure » Troubleshooting Updates","id":"5062","title":"Troubleshooting Updates"},"5063":{"body":"Solution: # Check update status\\nprovisioning t status --infra my-production # Resume failed update\\nprovisioning t update --infra my-production --resume # Or rollback\\nprovisioning t rollback --infra my-production","breadcrumbs":"Update Infrastructure » Issue: Update fails mid-process","id":"5063","title":"Issue: Update fails mid-process"},"5064":{"body":"Solution: # Check logs\\nprovisioning logs --infra my-production # Verify configuration\\nprovisioning t validate --infra my-production # Rollback if necessary\\nprovisioning t rollback --infra my-production","breadcrumbs":"Update Infrastructure » Issue: Service not starting after update","id":"5064","title":"Issue: Service not starting after update"},"5065":{"body":"Solution: # Check migration logs\\nprovisioning t migration-logs --infra my-production # Restore from backup\\nprovisioning t restore --infra my-production --from ","breadcrumbs":"Update Infrastructure » Issue: Data migration fails","id":"5065","title":"Issue: Data migration fails"},"5066":{"body":"Always Test First : Test updates in staging before production Backup Everything : Create backups before any update Update Gradually : Update one service at a time Monitor Closely : Watch for errors after each update Have Rollback Plan : Always have a rollback strategy Document Changes : Keep update logs for reference Schedule Wisely : Update during low-traffic periods Verify Thoroughly : Run tests after each update","breadcrumbs":"Update Infrastructure » Best Practices","id":"5066","title":"Best Practices"},"5067":{"body":"Customize Guide - Customize your infrastructure From Scratch Guide - Deploy new infrastructure Workflow Guide - Automate with workflows","breadcrumbs":"Update Infrastructure » Next Steps","id":"5067","title":"Next Steps"},"5068":{"body":"# Update workflow\\nprovisioning t check-updates\\nprovisioning ws backup my-production\\nprovisioning t create --infra my-production --check\\nprovisioning t create --infra my-production\\nprovisioning version taskserv \\nprovisioning health --infra my-production\\nprovisioning test all --infra my-production This guide is part of the provisioning project documentation. Last updated: 2025-09-30","breadcrumbs":"Update Infrastructure » Quick Reference","id":"5068","title":"Quick Reference"},"5069":{"body":"Goal : Customize infrastructure using layers, templates, and configuration patterns Time : 20-40 minutes Difficulty : Intermediate to Advanced","breadcrumbs":"Customize Infrastructure » Customize Infrastructure","id":"5069","title":"Customize Infrastructure"},"507":{"body":"# Generate default configuration\\nprovisioning config init ai # Edit configuration\\nprovisioning config edit ai # Validate configuration\\nprovisioning config validate ai # Show current configuration\\nprovisioning config show ai","breadcrumbs":"Configuration Guide » Initialize Configuration","id":"507","title":"Initialize Configuration"},"5070":{"body":"This guide covers: Understanding the layer system Using templates Creating custom modules Configuration inheritance Advanced customization patterns","breadcrumbs":"Customize Infrastructure » Overview","id":"5070","title":"Overview"},"5071":{"body":"","breadcrumbs":"Customize Infrastructure » The Layer System","id":"5071","title":"The Layer System"},"5072":{"body":"The provisioning system uses a 3-layer architecture for configuration inheritance: ┌─────────────────────────────────────┐\\n│ Infrastructure Layer (Priority 300)│ ← Highest priority\\n│ workspace/infra/{name}/ │\\n│ • Project-specific configs │\\n│ • Environment customizations │\\n│ • Local overrides │\\n└─────────────────────────────────────┘ ↓ overrides\\n┌─────────────────────────────────────┐\\n│ Workspace Layer (Priority 200) │\\n│ provisioning/workspace/templates/ │\\n│ • Reusable patterns │\\n│ • Organization standards │\\n│ • Team conventions │\\n└─────────────────────────────────────┘ ↓ overrides\\n┌─────────────────────────────────────┐\\n│ Core Layer (Priority 100) │ ← Lowest priority\\n│ provisioning/extensions/ │\\n│ • System defaults │\\n│ • Provider implementations │\\n│ • Default taskserv configs │\\n└─────────────────────────────────────┘ Resolution Order : Infrastructure (300) → Workspace (200) → Core (100) Higher numbers override lower numbers.","breadcrumbs":"Customize Infrastructure » Understanding Layers","id":"5072","title":"Understanding Layers"},"5073":{"body":"# Explain layer concept\\nprovisioning lyr explain Expected Output: 📚 LAYER SYSTEM EXPLAINED The layer system provides configuration inheritance across 3 levels: 🔵 CORE LAYER (100) - System Defaults Location: provisioning/extensions/ • Base taskserv configurations • Default provider settings • Standard cluster templates • Built-in extensions 🟢 WORKSPACE LAYER (200) - Shared Templates Location: provisioning/workspace/templates/ • Organization-wide patterns • Reusable configurations • Team standards • Custom extensions 🔴 INFRASTRUCTURE LAYER (300) - Project Specific Location: workspace/infra/{project}/ • Project-specific overrides • Environment customizations • Local modifications • Runtime settings Resolution: Infrastructure → Workspace → Core\\nHigher priority layers override lower ones. # Show layer resolution for your project\\nprovisioning lyr show my-production Expected Output: 📊 Layer Resolution for my-production: LAYER PRIORITY SOURCE FILES\\nInfrastructure 300 workspace/infra/my-production/ 4 files • servers.ncl (overrides) • taskservs.ncl (overrides) • clusters.ncl (custom) • providers.ncl (overrides) Workspace 200 provisioning/workspace/templates/ 2 files • production.ncl (used) • kubernetes.ncl (used) Core 100 provisioning/extensions/ 15 files • taskservs/* (base configs) • providers/* (default settings) • clusters/* (templates) Resolution Order: Infrastructure → Workspace → Core\\nStatus: ✅ All layers resolved successfully","breadcrumbs":"Customize Infrastructure » View Layer Resolution","id":"5073","title":"View Layer Resolution"},"5074":{"body":"# Test how a specific module resolves\\nprovisioning lyr test kubernetes my-production Expected Output: 🔍 Layer Resolution Test: kubernetes → my-production Resolving kubernetes configuration... 🔴 Infrastructure Layer (300): ✅ Found: workspace/infra/my-production/taskservs/kubernetes.ncl Provides: • version = \\"1.30.0\\" (overrides) • control_plane_servers = [\\"web-01\\"] (overrides) • worker_servers = [\\"web-02\\"] (overrides) 🟢 Workspace Layer (200): ✅ Found: provisioning/workspace/templates/production-kubernetes.ncl Provides: • security_policies (inherited) • network_policies (inherited) • resource_quotas (inherited) 🔵 Core Layer (100): ✅ Found: provisioning/extensions/taskservs/kubernetes/main.ncl Provides: • default_version = \\"1.29.0\\" (base) • default_features (base) • default_plugins (base) Final Configuration (after merging all layers): version: \\"1.30.0\\" (from Infrastructure) control_plane_servers: [\\"web-01\\"] (from Infrastructure) worker_servers: [\\"web-02\\"] (from Infrastructure) security_policies: {...} (from Workspace) network_policies: {...} (from Workspace) resource_quotas: {...} (from Workspace) default_features: {...} (from Core) default_plugins: {...} (from Core) Resolution: ✅ Success","breadcrumbs":"Customize Infrastructure » Test Layer Resolution","id":"5074","title":"Test Layer Resolution"},"5075":{"body":"","breadcrumbs":"Customize Infrastructure » Using Templates","id":"5075","title":"Using Templates"},"5076":{"body":"# List all templates\\nprovisioning tpl list Expected Output: 📋 Available Templates: TASKSERVS: • production-kubernetes - Production-ready Kubernetes setup • production-postgres - Production PostgreSQL with replication • production-redis - Redis cluster with sentinel • development-kubernetes - Development Kubernetes (minimal) • ci-cd-pipeline - Complete CI/CD pipeline PROVIDERS: • upcloud-production - UpCloud production settings • upcloud-development - UpCloud development settings • aws-production - AWS production VPC setup • aws-development - AWS development environment • local-docker - Local Docker-based setup CLUSTERS: • buildkit-cluster - BuildKit for container builds • monitoring-stack - Prometheus + Grafana + Loki • security-stack - Security monitoring tools Total: 13 templates # List templates by type\\nprovisioning tpl list --type taskservs\\nprovisioning tpl list --type providers\\nprovisioning tpl list --type clusters","breadcrumbs":"Customize Infrastructure » List Available Templates","id":"5076","title":"List Available Templates"},"5077":{"body":"# Show template details\\nprovisioning tpl show production-kubernetes Expected Output: 📄 Template: production-kubernetes Description: Production-ready Kubernetes configuration with security hardening, network policies, and monitoring Category: taskservs\\nVersion: 1.0.0 Configuration Provided: • Kubernetes version: 1.30.0 • Security policies: Pod Security Standards (restricted) • Network policies: Default deny + allow rules • Resource quotas: Per-namespace limits • Monitoring: Prometheus integration • Logging: Loki integration • Backup: Velero configuration Requirements: • Minimum 2 servers • 4 GB RAM per server • Network plugin (Cilium recommended) Location: provisioning/workspace/templates/production-kubernetes.ncl Example Usage: provisioning tpl apply production-kubernetes my-production","breadcrumbs":"Customize Infrastructure » View Template Details","id":"5077","title":"View Template Details"},"5078":{"body":"# Apply template to your infrastructure\\nprovisioning tpl apply production-kubernetes my-production Expected Output: 🚀 Applying template: production-kubernetes → my-production Checking compatibility... ⏳\\n✅ Infrastructure compatible with template Merging configuration... ⏳\\n✅ Configuration merged Files created/updated: • workspace/infra/my-production/taskservs/kubernetes.ncl (updated) • workspace/infra/my-production/policies/security.ncl (created) • workspace/infra/my-production/policies/network.ncl (created) • workspace/infra/my-production/monitoring/prometheus.ncl (created) 🎉 Template applied successfully! Next steps: 1. Review generated configuration 2. Adjust as needed 3. Deploy: provisioning t create kubernetes --infra my-production","breadcrumbs":"Customize Infrastructure » Apply Template","id":"5078","title":"Apply Template"},"5079":{"body":"# Validate template was applied correctly\\nprovisioning tpl validate my-production Expected Output: ✅ Template Validation: my-production Templates Applied: ✅ production-kubernetes (v1.0.0) ✅ production-postgres (v1.0.0) Configuration Status: ✅ All required fields present ✅ No conflicting settings ✅ Dependencies satisfied Compliance: ✅ Security policies configured ✅ Network policies configured ✅ Resource quotas set ✅ Monitoring enabled Status: ✅ Valid","breadcrumbs":"Customize Infrastructure » Validate Template Usage","id":"5079","title":"Validate Template Usage"},"508":{"body":"","breadcrumbs":"Configuration Guide » Provider Configuration","id":"508","title":"Provider Configuration"},"5080":{"body":"","breadcrumbs":"Customize Infrastructure » Creating Custom Templates","id":"5080","title":"Creating Custom Templates"},"5081":{"body":"# Create custom template directory\\nmkdir -p provisioning/workspace/templates/my-custom-template","breadcrumbs":"Customize Infrastructure » Step 1: Create Template Structure","id":"5081","title":"Step 1: Create Template Structure"},"5082":{"body":"File: provisioning/workspace/templates/my-custom-template/main.ncl # Custom Kubernetes template with specific settings\\nlet kubernetes_config = { # Version version = \\"1.30.0\\", # Custom feature gates feature_gates = { \\"GracefulNodeShutdown\\" = true, \\"SeccompDefault\\" = true, \\"StatefulSetAutoDeletePVC\\" = true, }, # Custom kubelet configuration kubelet_config = { max_pods = 110, pod_pids_limit = 4096, container_log_max_size = \\"10Mi\\", container_log_max_files = 5, }, # Custom API server flags apiserver_extra_args = { \\"enable-admission-plugins\\" = \\"NodeRestriction,PodSecurity,LimitRanger\\", \\"audit-log-maxage\\" = \\"30\\", \\"audit-log-maxbackup\\" = \\"10\\", }, # Custom scheduler configuration scheduler_config = { profiles = [ { name = \\"high-availability\\", plugins = { score = { enabled = [ {name = \\"NodeResourcesBalancedAllocation\\", weight = 2}, {name = \\"NodeResourcesLeastAllocated\\", weight = 1}, ], }, }, }, ], }, # Network configuration network = { service_cidr = \\"10.96.0.0/12\\", pod_cidr = \\"10.244.0.0/16\\", dns_domain = \\"cluster.local\\", }, # Security configuration security = { pod_security_standard = \\"restricted\\", encrypt_etcd = true, rotate_certificates = true, },\\n} in\\nkubernetes_config","breadcrumbs":"Customize Infrastructure » Step 2: Write Template Configuration","id":"5082","title":"Step 2: Write Template Configuration"},"5083":{"body":"File: provisioning/workspace/templates/my-custom-template/metadata.toml [template]\\nname = \\"my-custom-template\\"\\nversion = \\"1.0.0\\"\\ndescription = \\"Custom Kubernetes template with enhanced security\\"\\ncategory = \\"taskservs\\"\\nauthor = \\"Your Name\\" [requirements]\\nmin_servers = 2\\nmin_memory_gb = 4\\nrequired_taskservs = [\\"containerd\\", \\"cilium\\"] [tags]\\nenvironment = [\\"production\\", \\"staging\\"]\\nfeatures = [\\"security\\", \\"monitoring\\", \\"high-availability\\"]","breadcrumbs":"Customize Infrastructure » Step 3: Create Template Metadata","id":"5083","title":"Step 3: Create Template Metadata"},"5084":{"body":"# List templates (should include your custom template)\\nprovisioning tpl list # Show your template\\nprovisioning tpl show my-custom-template # Apply to test infrastructure\\nprovisioning tpl apply my-custom-template my-test","breadcrumbs":"Customize Infrastructure » Step 4: Test Custom Template","id":"5084","title":"Step 4: Test Custom Template"},"5085":{"body":"","breadcrumbs":"Customize Infrastructure » Configuration Inheritance Examples","id":"5085","title":"Configuration Inheritance Examples"},"5086":{"body":"Core Layer (provisioning/extensions/taskservs/postgres/main.ncl): let postgres_config = { version = \\"15.5\\", port = 5432, max_connections = 100,\\n} in\\npostgres_config Infrastructure Layer (workspace/infra/my-production/taskservs/postgres.ncl): let postgres_config = { max_connections = 500, # Override only max_connections\\n} in\\npostgres_config Result (after layer resolution): let postgres_config = { version = \\"15.5\\", # From Core port = 5432, # From Core max_connections = 500, # From Infrastructure (overridden)\\n} in\\npostgres_config","breadcrumbs":"Customize Infrastructure » Example 1: Override Single Value","id":"5086","title":"Example 1: Override Single Value"},"5087":{"body":"Workspace Layer (provisioning/workspace/templates/production-postgres.ncl): let postgres_config = { replication = { enabled = true, replicas = 2, sync_mode = \\"async\\", },\\n} in\\npostgres_config Infrastructure Layer (workspace/infra/my-production/taskservs/postgres.ncl): let postgres_config = { replication = { sync_mode = \\"sync\\", # Override sync mode }, custom_extensions = [\\"pgvector\\", \\"timescaledb\\"], # Add custom config\\n} in\\npostgres_config Result : let postgres_config = { version = \\"15.5\\", # From Core port = 5432, # From Core max_connections = 100, # From Core replication = { enabled = true, # From Workspace replicas = 2, # From Workspace sync_mode = \\"sync\\", # From Infrastructure (overridden) }, custom_extensions = [\\"pgvector\\", \\"timescaledb\\"], # From Infrastructure (added)\\n} in\\npostgres_config","breadcrumbs":"Customize Infrastructure » Example 2: Add Custom Configuration","id":"5087","title":"Example 2: Add Custom Configuration"},"5088":{"body":"Workspace Layer (provisioning/workspace/templates/base-kubernetes.ncl): let kubernetes_config = { version = \\"1.30.0\\", control_plane_count = 3, worker_count = 5, resources = { control_plane = {cpu = \\"4\\", memory = \\"8Gi\\"}, worker = {cpu = \\"8\\", memory = \\"16Gi\\"}, },\\n} in\\nkubernetes_config Development Infrastructure (workspace/infra/my-dev/taskservs/kubernetes.ncl): let kubernetes_config = { control_plane_count = 1, # Smaller for dev worker_count = 2, resources = { control_plane = {cpu = \\"2\\", memory = \\"4Gi\\"}, worker = {cpu = \\"2\\", memory = \\"4Gi\\"}, },\\n} in\\nkubernetes_config Production Infrastructure (workspace/infra/my-prod/taskservs/kubernetes.ncl): let kubernetes_config = { control_plane_count = 5, # Larger for prod worker_count = 10, resources = { control_plane = {cpu = \\"8\\", memory = \\"16Gi\\"}, worker = {cpu = \\"16\\", memory = \\"32Gi\\"}, },\\n} in\\nkubernetes_config","breadcrumbs":"Customize Infrastructure » Example 3: Environment-Specific Configuration","id":"5088","title":"Example 3: Environment-Specific Configuration"},"5089":{"body":"","breadcrumbs":"Customize Infrastructure » Advanced Customization Patterns","id":"5089","title":"Advanced Customization Patterns"},"509":{"body":"[ai]\\nenabled = true\\nprovider = \\"anthropic\\"\\nmodel = \\"claude-sonnet-4\\" # or \\"claude-opus-4\\", \\"claude-haiku-4\\"\\napi_key = \\"${PROVISIONING_AI_API_KEY}\\"\\napi_base = \\"[https://api.anthropic.com\\"](https://api.anthropic.com\\") # Request parameters\\n[ai.request]\\nmax_tokens = 4096\\ntemperature = 0.7\\ntop_p = 0.95\\ntop_k = 40 # Supported models\\n# - claude-opus-4: Most capable, for complex reasoning ($15/MTok input, $45/MTok output)\\n# - claude-sonnet-4: Balanced (recommended), ($3/MTok input, $15/MTok output)\\n# - claude-haiku-4: Fast, for simple tasks ($0.80/MTok input, $4/MTok output)","breadcrumbs":"Configuration Guide » Anthropic Claude","id":"509","title":"Anthropic Claude"},"5090":{"body":"Create different configurations for each environment: # Create environments\\nprovisioning ws init my-app-dev\\nprovisioning ws init my-app-staging\\nprovisioning ws init my-app-prod # Apply environment-specific templates\\nprovisioning tpl apply development-kubernetes my-app-dev\\nprovisioning tpl apply staging-kubernetes my-app-staging\\nprovisioning tpl apply production-kubernetes my-app-prod # Customize each environment\\n# Edit: workspace/infra/my-app-dev/...\\n# Edit: workspace/infra/my-app-staging/...\\n# Edit: workspace/infra/my-app-prod/...","breadcrumbs":"Customize Infrastructure » Pattern 1: Multi-Environment Setup","id":"5090","title":"Pattern 1: Multi-Environment Setup"},"5091":{"body":"Create reusable configuration fragments: File: provisioning/workspace/templates/shared/security-policies.ncl let security_policies = { pod_security = { enforce = \\"restricted\\", audit = \\"restricted\\", warn = \\"restricted\\", }, network_policies = [ { name = \\"deny-all\\", pod_selector = {}, policy_types = [\\"Ingress\\", \\"Egress\\"], }, { name = \\"allow-dns\\", pod_selector = {}, egress = [ { to = [{namespace_selector = {name = \\"kube-system\\"}}], ports = [{protocol = \\"UDP\\", port = 53}], }, ], }, ],\\n} in\\nsecurity_policies Import in your infrastructure: let security_policies = (import \\"../../../provisioning/workspace/templates/shared/security-policies.ncl\\") in let kubernetes_config = { version = \\"1.30.0\\", image_repo = \\"k8s.gcr.io\\", security = security_policies, # Import shared policies\\n} in\\nkubernetes_config","breadcrumbs":"Customize Infrastructure » Pattern 2: Shared Configuration Library","id":"5091","title":"Pattern 2: Shared Configuration Library"},"5092":{"body":"Use Nickel features for dynamic configuration: # Calculate resources based on server count\\nlet server_count = 5 in\\nlet replicas_per_server = 2 in\\nlet total_replicas = server_count * replicas_per_server in let postgres_config = { version = \\"16.1\\", max_connections = total_replicas * 50, # Dynamic calculation shared_buffers = \\"1024 MB\\",\\n} in\\npostgres_config","breadcrumbs":"Customize Infrastructure » Pattern 3: Dynamic Configuration","id":"5092","title":"Pattern 3: Dynamic Configuration"},"5093":{"body":"let environment = \\"production\\" in # or \\"development\\" let kubernetes_config = { version = \\"1.30.0\\", control_plane_count = if environment == \\"production\\" then 3 else 1, worker_count = if environment == \\"production\\" then 5 else 2, monitoring = { enabled = environment == \\"production\\", retention = if environment == \\"production\\" then \\"30d\\" else \\"7d\\", },\\n} in\\nkubernetes_config","breadcrumbs":"Customize Infrastructure » Pattern 4: Conditional Configuration","id":"5093","title":"Pattern 4: Conditional Configuration"},"5094":{"body":"# Show layer system statistics\\nprovisioning lyr stats Expected Output: 📊 Layer System Statistics: Infrastructure Layer: • Projects: 3 • Total files: 15 • Average overrides per project: 5 Workspace Layer: • Templates: 13 • Most used: production-kubernetes (5 projects) • Custom templates: 2 Core Layer: • Taskservs: 15 • Providers: 3 • Clusters: 3 Resolution Performance: • Average resolution time: 45 ms • Cache hit rate: 87% • Total resolutions: 1,250","breadcrumbs":"Customize Infrastructure » Layer Statistics","id":"5094","title":"Layer Statistics"},"5095":{"body":"","breadcrumbs":"Customize Infrastructure » Customization Workflow","id":"5095","title":"Customization Workflow"},"5096":{"body":"# 1. Create new infrastructure\\nprovisioning ws init my-custom-app # 2. Understand layer system\\nprovisioning lyr explain # 3. Discover templates\\nprovisioning tpl list --type taskservs # 4. Apply base template\\nprovisioning tpl apply production-kubernetes my-custom-app # 5. View applied configuration\\nprovisioning lyr show my-custom-app # 6. Customize (edit files)\\nprovisioning sops workspace/infra/my-custom-app/taskservs/kubernetes.ncl # 7. Test layer resolution\\nprovisioning lyr test kubernetes my-custom-app # 8. Validate configuration\\nprovisioning tpl validate my-custom-app\\nprovisioning val config --infra my-custom-app # 9. Deploy customized infrastructure\\nprovisioning s create --infra my-custom-app --check\\nprovisioning s create --infra my-custom-app\\nprovisioning t create kubernetes --infra my-custom-app","breadcrumbs":"Customize Infrastructure » Complete Customization Example","id":"5096","title":"Complete Customization Example"},"5097":{"body":"","breadcrumbs":"Customize Infrastructure » Best Practices","id":"5097","title":"Best Practices"},"5098":{"body":"Core Layer : Only modify for system-wide changes Workspace Layer : Use for organization-wide templates Infrastructure Layer : Use for project-specific customizations","breadcrumbs":"Customize Infrastructure » 1. Use Layers Correctly","id":"5098","title":"1. Use Layers Correctly"},"5099":{"body":"provisioning/workspace/templates/\\n├── shared/ # Shared configuration fragments\\n│ ├── security-policies.ncl\\n│ ├── network-policies.ncl\\n│ └── monitoring.ncl\\n├── production/ # Production templates\\n│ ├── kubernetes.ncl\\n│ ├── postgres.ncl\\n│ └── redis.ncl\\n└── development/ # Development templates ├── kubernetes.ncl └── postgres.ncl","breadcrumbs":"Customize Infrastructure » 2. Template Organization","id":"5099","title":"2. Template Organization"},"51":{"body":"","breadcrumbs":"Installation Guide » Installation Methods","id":"51","title":"Installation Methods"},"510":{"body":"[ai]\\nenabled = true\\nprovider = \\"openai\\"\\nmodel = \\"gpt-4-turbo\\" # or \\"gpt-4\\", \\"gpt-4o\\"\\napi_key = \\"${OPENAI_API_KEY}\\"\\napi_base = \\"[https://api.openai.com/v1\\"](https://api.openai.com/v1\\") [ai.request]\\nmax_tokens = 4096\\ntemperature = 0.7\\ntop_p = 0.95 # Supported models\\n# - gpt-4: Most capable ($0.03/1K input, $0.06/1K output)\\n# - gpt-4-turbo: Better at code ($0.01/1K input, $0.03/1K output)\\n# - gpt-4o: Latest, multi-modal ($5/MTok input, $15/MTok output)","breadcrumbs":"Configuration Guide » OpenAI GPT-4","id":"510","title":"OpenAI GPT-4"},"5100":{"body":"Document your customizations: File: workspace/infra/my-production/README.md # My Production Infrastructure ## Customizations - Kubernetes: Using production template with 5 control plane nodes\\n- PostgreSQL: Configured with streaming replication\\n- Cilium: Native routing mode enabled ## Layer Overrides - `taskservs/kubernetes.ncl`: Control plane count (3 → 5)\\n- `taskservs/postgres.ncl`: Replication mode (async → sync)\\n- `network/cilium.ncl`: Routing mode (tunnel → native)","breadcrumbs":"Customize Infrastructure » 3. Documentation","id":"5100","title":"3. Documentation"},"5101":{"body":"Keep templates and configurations in version control: cd provisioning/workspace/templates/\\ngit add .\\ngit commit -m \\"Add production Kubernetes template with enhanced security\\" cd workspace/infra/my-production/\\ngit add .\\ngit commit -m \\"Configure production environment for my-production\\"","breadcrumbs":"Customize Infrastructure » 4. Version Control","id":"5101","title":"4. Version Control"},"5102":{"body":"","breadcrumbs":"Customize Infrastructure » Troubleshooting Customizations","id":"5102","title":"Troubleshooting Customizations"},"5103":{"body":"# Check layer resolution\\nprovisioning lyr show my-production # Verify file exists\\nls -la workspace/infra/my-production/taskservs/ # Test specific resolution\\nprovisioning lyr test kubernetes my-production","breadcrumbs":"Customize Infrastructure » Issue: Configuration not applied","id":"5103","title":"Issue: Configuration not applied"},"5104":{"body":"# Validate configuration\\nprovisioning val config --infra my-production # Show configuration merge result\\nprovisioning show config kubernetes --infra my-production","breadcrumbs":"Customize Infrastructure » Issue: Conflicting configurations","id":"5104","title":"Issue: Conflicting configurations"},"5105":{"body":"# List available templates\\nprovisioning tpl list # Check template path\\nls -la provisioning/workspace/templates/ # Refresh template cache\\nprovisioning tpl refresh","breadcrumbs":"Customize Infrastructure » Issue: Template not found","id":"5105","title":"Issue: Template not found"},"5106":{"body":"From Scratch Guide - Deploy new infrastructure Update Guide - Update existing infrastructure Workflow Guide - Automate with workflows Nickel Guide - Learn Nickel configuration language","breadcrumbs":"Customize Infrastructure » Next Steps","id":"5106","title":"Next Steps"},"5107":{"body":"# Layer system\\nprovisioning lyr explain # Explain layers\\nprovisioning lyr show # Show layer resolution\\nprovisioning lyr test # Test resolution\\nprovisioning lyr stats # Layer statistics # Templates\\nprovisioning tpl list # List all templates\\nprovisioning tpl list --type # Filter by type\\nprovisioning tpl show